id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
25,200 | test_pool_dataset_unlock_restart_vms.py | truenas_middleware/tests/api2/test_pool_dataset_unlock_restart_vms.py | import pytest
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call, mock, ssh
PASSPHRASE = "12345678"
def encryption_props():
    """Return dataset-creation options enabling passphrase-based encryption."""
    passphrase_opts = {"generate_key": False, "passphrase": PASSPHRASE}
    return {
        "encryption_options": passphrase_opts,
        "encryption": True,
        "inherit_encryption": False,
    }
@pytest.mark.parametrize("zvol", [True, False])
def test_restart_vm_on_dataset_unlock(zvol):
    """Unlocking an encrypted dataset should stop and restart a VM whose
    device (zvol-backed DISK or file-backed RAW) lives on that dataset."""
    if zvol:
        data = {"type": "VOLUME", "volsize": 1048576}
    else:
        data = {}
    with dataset("test", {**data, **encryption_props()}) as ds:
        call("pool.dataset.lock", ds, job=True)
        if zvol:
            device = {"dtype": "DISK", "attributes": {"path": f"/dev/zvol/{ds}"}}
        else:
            device = {"dtype": "RAW", "attributes": {"path": f"/mnt/{ds}/child"}}
        # Mock a single running VM using the device; the mocked stop/start
        # implementations only touch sentinel files we can stat afterwards.
        with mock("vm.query", return_value=[{"id": 1, "devices": [device]}]):
            with mock("vm.status", return_value={"state": "RUNNING"}):
                ssh("rm -f /tmp/test-vm-stop")
                with mock("vm.stop", """
                    from middlewared.service import job
                    @job()
                    def mock(self, job, *args):
                        with open("/tmp/test-vm-stop", "w") as f:
                            pass
                """):
                    ssh("rm -f /tmp/test-vm-start")
                    with mock("vm.start", declaration="""
                        def mock(self, job, *args):
                            with open("/tmp/test-vm-start", "w") as f:
                                pass
                    """):
                        call(
                            "pool.dataset.unlock",
                            ds,
                            {"datasets": [{"name": ds, "passphrase": PASSPHRASE}]},
                            job=True,
                        )
                        # filesystem.stat raises if a sentinel file is absent,
                        # i.e. if stop/start were never invoked.
                        call("filesystem.stat", "/tmp/test-vm-stop")
                        call("filesystem.stat", "/tmp/test-vm-start")
| 2,042 | Python | .py | 46 | 29.217391 | 83 | 0.468246 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,201 | test_catalogs.py | truenas_middleware/tests/api2/test_catalogs.py | import os.path
import pytest
from middlewared.test.integration.assets.pool import another_pool
from middlewared.test.integration.utils import call
from middlewared.test.integration.utils.docker import IX_APPS_CATALOG_PATH
@pytest.fixture(scope='module')
def docker_pool(request):
    # Module-scoped pool used as the docker/apps storage target below.
    with another_pool() as pool:
        yield pool['name']
@pytest.mark.dependency(name='unconfigure_apps')
def test_unconfigure_apps():
    """Detach apps from any pool so unconfigured catalog behavior can be tested."""
    config = call('docker.update', {'pool': None}, job=True)
    assert config['pool'] is None, config
@pytest.mark.dependency(depends=['unconfigure_apps'])
def test_catalog_sync():
    """Catalog sync must succeed even with no apps pool configured."""
    call('catalog.sync', job=True)
    assert call('catalog.synced') is True
@pytest.mark.dependency(depends=['unconfigure_apps'])
def test_catalog_cloned_location():
    """Without an apps pool the catalog is cloned to a run-dir location."""
    config = call('catalog.config')
    assert config['location'] == '/var/run/middleware/ix-apps/catalogs', config
@pytest.mark.dependency(depends=['unconfigure_apps'])
def test_apps_are_being_reported():
    """Available apps should be reported even before docker is configured."""
    assert call('app.available', [], {'count': True}) != 0
@pytest.mark.dependency(name='docker_setup')
def test_docker_setup(docker_pool):
    """Configure docker/apps onto the test pool for the remaining tests."""
    config = call('docker.update', {'pool': docker_pool}, job=True)
    assert config['pool'] == docker_pool, config
@pytest.mark.dependency(depends=['docker_setup'])
def test_catalog_synced_properly():
    """Catalog should report synced once docker is configured."""
    assert call('catalog.synced') is True
@pytest.mark.dependency(depends=['docker_setup'])
def test_catalog_sync_location():
    """With an apps pool the catalog moves to the dataset-backed location."""
    assert call('catalog.config')['location'] == IX_APPS_CATALOG_PATH
@pytest.mark.dependency(depends=['docker_setup'])
def test_catalog_location_existence():
    """Catalog path must be backed by the apps pool's 'truenas_catalog' dataset."""
    docker_config = call('docker.config')
    assert docker_config['pool'] is not None
    assert call('filesystem.statfs', IX_APPS_CATALOG_PATH)['source'] == os.path.join(
        docker_config['dataset'], 'truenas_catalog'
    )
@pytest.mark.dependency(depends=['docker_setup'])
def test_apps_are_being_reported_after_docker_setup():
    """Available apps must still be reported after docker is configured."""
    assert call('app.available', [], {'count': True}) != 0
@pytest.mark.dependency(depends=['docker_setup'])
def test_categories_are_being_reported():
    """App categories must be non-empty once the catalog is synced."""
    assert len(call('app.categories')) != 0
@pytest.mark.dependency(depends=['docker_setup'])
def test_app_version_details():
    """Version details for a known app (plex, stable train) must be available."""
    app_details = call('catalog.get_app_details', 'plex', {'train': 'stable'})
    assert app_details['name'] == 'plex', app_details
    assert len(app_details['versions']) != 0, app_details
@pytest.mark.dependency(depends=['docker_setup'])
def test_unconfigure_apps_after_setup():
    """Tear apps back off the pool so the module leaves no configuration behind."""
    config = call('docker.update', {'pool': None}, job=True)
    assert config['pool'] is None, config
| 2,629 | Python | .py | 56 | 43.482143 | 85 | 0.727094 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,202 | test_account_ssh_key.py | truenas_middleware/tests/api2/test_account_ssh_key.py | from middlewared.test.integration.assets.account import user
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call, ssh
def test_account_create_update_ssh_key_in_existing_dir():
    """Re-creating a user with the same home dir must overwrite the old key."""
    with dataset("home") as ds:
        homedir = f"/mnt/{ds}"
        with user({
            "username": "test",
            "full_name": "Test",
            "home": homedir,
            "password": "test1234",
            "group_create": True,
            "sshpubkey": "old",
        }) as u:
            # Delete inside the context so the home dir (and old key file)
            # survives into the second user creation below.
            call("user.delete", u["id"])
        with user({
            "username": "test",
            "full_name": "Test",
            "home": homedir,
            "password": "test1234",
            "group_create": True,
            "sshpubkey": "new",
        }) as u:
            u = call("user.get_instance", u["id"])
            assert u["sshpubkey"] == "new"
def test_account_update_ssh_key_and_set_homedir():
    """Setting home dir and SSH key in a single update must persist the key."""
    with dataset("home") as ds:
        homedir = f"/mnt/{ds}"
        with user({
            "username": "test",
            "full_name": "Test",
            "password": "test1234",
            "group_create": True,
        }) as u:
            call("user.update", u["id"], {
                "home": homedir,
                "sshpubkey": "new",
            })
            u = call("user.get_instance", u["id"])
            assert u["sshpubkey"] == "new"
def test_account_sets_ssh_key_on_user_create():
    """Creating a user with sshpubkey must write authorized_keys in the home dir."""
    with dataset("home") as ds:
        homedir = f"/mnt/{ds}"
        with user({
            "username": "test",
            "full_name": "Test",
            "home": homedir,
            "password": "test1234",
            "group_create": True,
            "sshpubkey": "old",
        }):
            assert ssh(f"cat {homedir}/test/.ssh/authorized_keys") == "old\n"
def test_account_delete_ssh_key_on_user_delete():
    """Deleting the user must remove (or empty) the authorized_keys file."""
    with dataset("home") as ds:
        homedir = f"/mnt/{ds}"
        with user({
            "username": "test",
            "full_name": "Test",
            "home": homedir,
            "password": "test1234",
            "group_create": True,
            "sshpubkey": "old",
        }) as u:
            call("user.delete", u["id"])
        # check=False: the file is expected to be gone, so cat failing is OK.
        assert ssh(f"cat {homedir}/test/.ssh/authorized_keys", check=False) == ""
| 2,352 | Python | .py | 65 | 24.876923 | 85 | 0.489661 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,203 | test_network_configuration.py | truenas_middleware/tests/api2/test_network_configuration.py | from middlewared.test.integration.utils import call, ssh
from auto_config import ha
NEW_HOSTNAME = 'dummy123'
def fetch_hostname():
    """Read the live system hostname over SSH, stripping HA node suffixes."""
    hostname = ssh('hostname').strip()
    if not ha:
        return hostname
    return hostname.removesuffix('-nodea').removesuffix('-nodeb')
def config_read_hostname():
    """Return the configured hostname (the virtual hostname on HA systems)."""
    config = call('network.configuration.config')
    key = 'hostname_virtual' if ha else 'hostname'
    return config[key]
def config_set_hostname(name):
    """Persist *name* as the system hostname via the middleware API.

    On HA systems all three hostname fields (node A, node B, virtual) are
    derived from *name*.
    """
    payload = {'hostname': name}
    if ha:
        payload = {
            'hostname': f'{name}-nodea',
            'hostname_b': f'{name}-nodeb',
            'hostname_virtual': name,
        }
    call('network.configuration.update', payload)
def test_changing_hostname():
    """Set a new hostname, confirm it takes effect, then restore the original."""
    current_hostname = config_read_hostname()
    config_set_hostname(NEW_HOSTNAME)
    try:
        assert fetch_hostname() == NEW_HOSTNAME
    finally:
        config_set_hostname(current_hostname)
    assert fetch_hostname() == current_hostname
| 1,010 | Python | .py | 30 | 27.166667 | 65 | 0.656347 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,204 | test_iscsi_target_crud_roles.py | truenas_middleware/tests/api2/test_iscsi_target_crud_roles.py | import pytest
from middlewared.test.integration.assets.roles import common_checks
@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_TARGET_READ"])
def test_read_role_can_read(unprivileged_user_fixture, role):
    """Read-level sharing roles must permit iscsi.target.query."""
    common_checks(unprivileged_user_fixture, "iscsi.target.query", role, True, valid_role_exception=False)
@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_TARGET_READ"])
def test_read_role_cant_write(unprivileged_user_fixture, role):
    """Read-level iSCSI roles must not permit target create/update/delete."""
    common_checks(unprivileged_user_fixture, "iscsi.target.create", role, False)
    common_checks(unprivileged_user_fixture, "iscsi.target.update", role, False)
    common_checks(unprivileged_user_fixture, "iscsi.target.delete", role, False)
@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_ISCSI_WRITE", "SHARING_ISCSI_TARGET_WRITE"])
def test_write_role_can_write(unprivileged_user_fixture, role):
    """Write-level iSCSI roles must permit target create/update/delete."""
    common_checks(unprivileged_user_fixture, "iscsi.target.create", role, True)
    common_checks(unprivileged_user_fixture, "iscsi.target.update", role, True)
    common_checks(unprivileged_user_fixture, "iscsi.target.delete", role, True)
| 1,178 | Python | .py | 15 | 75.2 | 106 | 0.771626 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,205 | test_user_ssh_password.py | truenas_middleware/tests/api2/test_user_ssh_password.py | import pytest
from middlewared.test.integration.assets.account import user, group
from middlewared.test.integration.utils import call, ssh
@pytest.mark.parametrize("ssh_password_enabled", [True, False])
def test_user_ssh_password_enabled(ssh_password_enabled):
    """Verify per-user `ssh_password_enabled` gates password-based SSH logins."""
    with user({
        "username": "test",
        "full_name": "Test",
        "group_create": True,
        "home": "/nonexistent",  # plain string: was an f-string with no placeholders
        "password": "test1234",
        "ssh_password_enabled": ssh_password_enabled,
    }):
        result = ssh("whoami", check=False, complete_response=True, user="test",
                     password="test1234")
        if ssh_password_enabled:
            assert "test" in result["output"]
        else:
            assert "Permission denied" in result["stderr"]
@pytest.fixture(scope="module")
def group1_with_user():
    """Yield with a 'group1' group containing user 'test' (password 'test1234')."""
    with group({"name": "group1"}) as g1:
        with user({
            "username": "test",
            "full_name": "Test",
            "group_create": True,
            "groups": [g1["id"]],
            "home": "/nonexistent",  # plain string: was an f-string with no placeholders
            "password": "test1234",
        }):
            yield
@pytest.mark.parametrize("ssh_password_enabled", [True, False])
def test_group_ssh_password_enabled(group1_with_user, ssh_password_enabled):
    """Verify sshd `password_login_groups` gates password SSH for group members."""
    call("ssh.update", {"password_login_groups": ["group1"] if ssh_password_enabled else []})
    result = ssh("whoami", check=False, complete_response=True, user="test",
                 password="test1234")
    if ssh_password_enabled:
        assert "test" in result["output"]
    else:
        assert "Permission denied" in result["stderr"]
| 1,614 | Python | .py | 40 | 32.2 | 93 | 0.615581 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,206 | test_ftp_crud_roles.py | truenas_middleware/tests/api2/test_ftp_crud_roles.py | import pytest
from middlewared.test.integration.assets.roles import common_checks
@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_FTP_READ"])
def test_read_role_can_read(unprivileged_user_fixture, role):
    """Read-level FTP roles must allow reading config and connection count."""
    common_checks(unprivileged_user_fixture, "ftp.config", role, True, valid_role_exception=False)
    common_checks(unprivileged_user_fixture, "ftp.connection_count", role, True, valid_role_exception=False)
@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_FTP_READ"])
def test_read_role_cant_write(unprivileged_user_fixture, role):
    """Read-level FTP roles must not permit ftp.update."""
    common_checks(unprivileged_user_fixture, "ftp.update", role, False)
@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_FTP_WRITE"])
def test_write_role_can_write(unprivileged_user_fixture, role):
    """Write-level FTP roles must permit ftp.update and ftp service control."""
    common_checks(unprivileged_user_fixture, "ftp.update", role, True)
    common_checks(
        unprivileged_user_fixture, "service.start", role, True, method_args=["ftp"], valid_role_exception=False
    )
    common_checks(
        unprivileged_user_fixture, "service.restart", role, True, method_args=["ftp"], valid_role_exception=False
    )
    common_checks(
        unprivileged_user_fixture, "service.reload", role, True, method_args=["ftp"], valid_role_exception=False
    )
    common_checks(
        unprivileged_user_fixture, "service.stop", role, True, method_args=["ftp"], valid_role_exception=False
    )
| 1,395 | Python | .py | 24 | 53.5 | 113 | 0.73607 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,207 | test_websocket_interface.py | truenas_middleware/tests/api2/test_websocket_interface.py | from auto_config import interface
from middlewared.test.integration.utils import call
def test_websocket_interface():
    """This tests to ensure we return the interface name
    by which the websocket connection has been established."""
    # `interface` from auto_config is the NIC the test harness connects over.
    assert call("interface.websocket_interface")["id"] == interface
| 308 | Python | .py | 6 | 48 | 67 | 0.783333 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,208 | test_apps_images_roles.py | truenas_middleware/tests/api2/test_apps_images_roles.py | import pytest
from middlewared.test.integration.assets.roles import common_checks
@pytest.mark.parametrize('method, role, valid_role, valid_role_exception', (
    ('app.image.query', 'APPS_READ', True, False),
    ('app.image.query', 'APPS_WRITE', True, False),
    ('app.image.query', 'DOCKER_READ', False, False),
    ('app.image.pull', 'APPS_READ', False, False),
    ('app.image.pull', 'APPS_WRITE', True, False),
    ('app.image.delete', 'APPS_READ', False, False),
    ('app.image.delete', 'APPS_WRITE', True, True),
))
def test_apps_roles(unprivileged_user_fixture, method, role, valid_role, valid_role_exception):
    """RBAC matrix for app.image.*: read role may only query; write role may mutate."""
    common_checks(unprivileged_user_fixture, method, role, valid_role, valid_role_exception=valid_role_exception)
| 739 | Python | .py | 13 | 53.153846 | 113 | 0.70816 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,209 | test_435_smb_registry.py | truenas_middleware/tests/api2/test_435_smb_registry.py | import contextlib
import os
import pytest
from middlewared.service_exception import ValidationError
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.assets.smb import smb_share
from middlewared.test.integration.utils import call, ssh
# Dataset hosting all registry-test shares and the names/presets exercised below.
DATASET_NAME = 'smb-reg'
SHARES = [f'REGISTRYTEST_{i}' for i in range(0, 5)]
PRESETS = [
    "DEFAULT_SHARE",
    "ENHANCED_TIMEMACHINE",
    "MULTI_PROTOCOL_NFS",
    "PRIVATE_DATASETS",
    "WORM_DROPBOX"
]
# NOTE(review): DETECTED_PRESETS does not appear to be referenced anywhere in
# the visible module — looks like leftover state; confirm before removing.
DETECTED_PRESETS = None
"""
Note: following sample auxiliary parameters and comments were
provided by a community member for testing. They do not represent
the opinion or recommendation of iXsystems.
"""
SAMPLE_AUX = [
'follow symlinks = yes ',
'veto files = /.windows/.mac/.zfs/',
'# needed explicitly for each share to prevent default being set',
'admin users = MY_ACCOUNT',
'## NOTES:', '',
"; aio-fork might cause smbd core dump/signal 6 in log in v11.1- see bug report [https://redmine.ixsystems.com/issues/27470]. Looks helpful but disabled until clear if it's responsible.", '', '',
'### VFS OBJECTS (shadow_copy2 not included if no periodic snaps, so do it manually)', '',
'# Include recycle, crossrename, and exclude readonly, as share=RW', '',
'#vfs objects = zfs_space zfsacl winmsa streams_xattr recycle shadow_copy2 crossrename aio_pthread', '',
'vfs objects = aio_pthread streams_xattr shadow_copy_zfs acl_xattr crossrename winmsa recycle', '',
'# testing without shadow_copy2', '',
'valid users = MY_ACCOUNT @ALLOWED_USERS',
'invalid users = root anonymous guest',
'hide dot files = yes',
]
SAMPLE_OPTIONS = [
'mangled names = no',
'dos charset = CP850',
'unix charset = UTF-8',
'strict sync = no',
'',
'min protocol = SMB2',
'vfs objects = fruit streams_xattr ',
'fruit:model = MacSamba', 'fruit:posix_rename = yes ',
'fruit:veto_appledouble = no',
'fruit:wipe_intentionally_left_blank_rfork = yes ',
'fruit:delete_empty_adfiles = yes ',
'',
'fruit:locking=none',
'fruit:metadata=netatalk',
'fruit:resource=file',
'streams_xattr:prefix=user.',
'streams_xattr:store_stream_type=no',
'strict locking=auto',
'# oplocks=no # breaks Time Machine',
' level2 oplocks=no',
'# spotlight=yes # invalid without further config'
]
@contextlib.contextmanager
def create_smb_share(path, share_name, mkdir=False, options=None):
    """Yield a temporary SMB share at *path*.

    Optionally creates the directory first; extra sharing.smb.create fields
    may be supplied via *options*.
    """
    if mkdir:
        call('filesystem.mkdir', path)
    with smb_share(path, share_name, options or {}) as share:
        yield share
@contextlib.contextmanager
def setup_smb_shares(mountpoint):
    """Create one SMB share per name in SHARES under *mountpoint*.

    Yields a dict mapping share name -> share id; every created share is
    deleted on exit.
    """
    # lowercase name: this is a mutable local, not a module constant
    share_dict = {}
    for share in SHARES:
        share_path = os.path.join(mountpoint, share)
        call('filesystem.mkdir', share_path)
        new_share = call('sharing.smb.create', {
            'comment': 'My Test SMB Share',
            'name': share,
            'home': False,
            'path': share_path,
        })
        share_dict[share] = new_share['id']
    try:
        yield share_dict
    finally:
        for share_id in share_dict.values():
            call('sharing.smb.delete', share_id)
@pytest.fixture(scope='module')
def setup_for_tests():
    # SMB-typed dataset, opened up to 777 so shares are freely writable,
    # plus the initial set of registry test shares.
    with dataset(DATASET_NAME, data={'share_type': 'SMB'}) as ds:
        smb_registry_mp = os.path.join('/mnt', ds)
        call('filesystem.setperm', {
            'path': smb_registry_mp,
            'mode': '777',
            'options': {'stripacl': True, 'recursive': True}
        }, job=True)
        with setup_smb_shares(smb_registry_mp) as shares:
            yield (smb_registry_mp, ds, shares)
@pytest.fixture(scope='module')
def share_presets():
    # Server-defined share presets, fetched once per module.
    yield call('sharing.smb.presets')
def test__setup_for_tests(setup_for_tests):
    """Every share created by the fixture must appear in the SMB registry."""
    reg_shares = call('sharing.smb.reg_listshares')
    for share in SHARES:
        assert share in reg_shares
@pytest.mark.parametrize('smb_share', SHARES)
def test__rename_shares(setup_for_tests, smb_share):
    """Rename each test share; registry effects are validated by the next test."""
    mp, ds, SHARE_DICT = setup_for_tests
    call('sharing.smb.update', SHARE_DICT[smb_share], {
        'name': f'NEW_{smb_share}'
    })
def test__renamed_shares_in_registry(setup_for_tests):
    """
    Share renames need to be explicitly tested because
    it will actually result in share being removed from
    registry and re-added with different name.
    """
    reg_shares = call('sharing.smb.reg_listshares')
    for share in SHARES:
        assert f'NEW_{share}' in reg_shares
    # The rename must not leave stale entries behind under the old names.
    assert len(reg_shares) == len(SHARES)
def check_aux_param(param, share, expected, fruit_enable=False):
    # Compare the live smb.conf value of `param` on `share` against `expected`.
    # 'vfs objects' is compared as an unordered set and may implicitly gain
    # 'fruit' when fruit_enable is set.
    val = call('smb.getparm', param, share)
    if param == 'vfs objects':
        expected_vfs_objects = expected.split()
        # We have to override someone's poor life choices and insert
        # vfs_fruit so that they don't have mysteriously broken time
        # machine shares
        if fruit_enable:
            expected_vfs_objects.append('fruit')
        assert set(expected_vfs_objects) == set(val)
    else:
        assert val == expected
@pytest.mark.parametrize('preset', PRESETS)
def test__test_presets(setup_for_tests, share_presets, preset):
    """
    This test iterates through SMB share presets,
    applies them to a single share, and then validates
    that the preset was applied correctly.
    In case of bool in API, simple check that appropriate
    value is set in return from sharing.smb.update will
    be sufficient. In case of auxiliary parameters, we
    need to be a bit more thorough. The preset will not
    be reflected in returned auxsmbconf and so we'll need
    to directly reach out and run smb.getparm.
    """
    mp, ds, SHARE_DICT = setup_for_tests
    if 'TIMEMACHINE' in preset:
        # Timemachine presets require AAPL extensions to be enabled globally.
        call('smb.update', {'aapl_extensions': True})
    to_test = share_presets[preset]['params']
    to_test_aux = to_test['auxsmbconf']
    new_conf = call('sharing.smb.update', SHARE_DICT['REGISTRYTEST_0'], {
        'purpose': preset
    })
    for entry in to_test_aux.splitlines():
        aux, val = entry.split('=', 1)
        check_aux_param(aux.strip(), new_conf['name'], val.strip())
    for k in to_test.keys():
        if k == "auxsmbconf":
            continue
        assert to_test[k] == new_conf[k]
def test__reset_smb(setup_for_tests):
    """
    Remove all parameters that might turn us into
    a MacOS-style SMB server (fruit).
    """
    mp, ds, SHARE_DICT = setup_for_tests
    call('sharing.smb.update', SHARE_DICT['REGISTRYTEST_0'], {
        "purpose": "NO_PRESET",
        "timemachine": False
    })
    # Disable AAPL extensions globally after the per-share cleanup.
    call('smb.update', {'aapl_extensions': False})
def test__test_aux_param_on_update(setup_for_tests):
    """Updating a share with SAMPLE_AUX must apply non-comment parameters while
    preserving previously-applied preset parameters and user comments."""
    SHARE_DICT = setup_for_tests[2]
    share_id = SHARE_DICT['REGISTRYTEST_0']
    share = call('sharing.smb.query', [['id', '=', share_id]], {'get': True})
    old_aux = share['auxsmbconf']
    results = call('sharing.smb.update', share_id, {
        'auxsmbconf': '\n'.join(SAMPLE_AUX)
    })
    new_aux = results['auxsmbconf']
    new_name = results['name']
    ncomments_sent = 0
    ncomments_recv = 0
    for entry in old_aux.splitlines():
        """
        Verify that aux params from last preset applied
        are still in effect. Parameters included in
        SAMPLE_AUX will never be in a preset so risk of
        collision is minimal.
        """
        aux, val = entry.split('=', 1)
        check_aux_param(aux.strip(), new_name, val.strip())
    for entry in new_aux.splitlines():
        """
        Verify that non-comment parameters were successfully
        applied to the running configuration.
        """
        if not entry:
            continue
        if entry.startswith(('#', ';')):
            ncomments_recv += 1
            continue
        aux, val = entry.split('=', 1)
        check_aux_param(aux.strip(), new_name, val.strip())
    """
    Verify comments aren't being stripped on update
    """
    for entry in SAMPLE_AUX:
        if entry.startswith(('#', ';')):
            ncomments_sent += 1
    assert ncomments_sent == ncomments_recv, new_aux
@contextlib.contextmanager
def setup_aapl_extensions(newvalue):
    """Temporarily force the global SMB aapl_extensions setting to *newvalue*."""
    previous = call('smb.config')['aapl_extensions']
    needs_change = previous != newvalue
    try:
        if needs_change:
            call('smb.update', {'aapl_extensions': newvalue})
        yield
    finally:
        if needs_change:
            call('smb.update', {'aapl_extensions': previous})
@pytest.fixture(scope='function')
def setup_tm_share(setup_for_tests):
    # Time-machine share created with AAPL extensions forced on and the
    # SAMPLE_AUX parameters applied at creation time.
    share_name = 'AUX_CREATE'
    path = os.path.join(setup_for_tests[0], share_name)
    with setup_aapl_extensions(True):
        with create_smb_share(path, share_name, True, {
            "home": False,
            "purpose": "ENHANCED_TIMEMACHINE",
            "auxsmbconf": '\n'.join(SAMPLE_AUX)
        }) as s:
            yield s
def test__test_aux_param_on_create(share_presets, setup_tm_share):
    """Aux parameters supplied at creation must be applied alongside the
    preset's parameters, with user comments preserved."""
    share = setup_tm_share
    new_aux = share['auxsmbconf']
    pre_aux = share_presets["ENHANCED_TIMEMACHINE"]["params"]["auxsmbconf"]
    ncomments_sent = 0
    ncomments_recv = 0
    for entry in pre_aux.splitlines():
        """
        Verify that aux params from preset were applied
        successfully to the running configuration.
        """
        aux, val = entry.split('=', 1)
        check_aux_param(aux.strip(), share['name'], val.strip())
    for entry in new_aux.splitlines():
        """
        Verify that non-comment parameters were successfully
        applied to the running configuration.
        """
        if not entry:
            continue
        if entry.startswith(('#', ';')):
            ncomments_recv += 1
            continue
        aux, val = entry.split('=', 1)
        check_aux_param(aux.strip(), share['name'], val.strip(), True)
    """
    Verify comments aren't being stripped on update
    """
    for entry in SAMPLE_AUX:
        if entry.startswith(('#', ';')):
            ncomments_sent += 1
    assert ncomments_sent == ncomments_recv, f'new: {new_aux}, sample: {SAMPLE_AUX}'
def test__delete_shares(setup_for_tests):
    """Deleting every share must leave both registry and config DB empty."""
    SHARE_DICT = setup_for_tests[2]
    # Iterate over a copy of the keys since we mutate the dict while looping.
    for key in list(SHARE_DICT.keys()):
        call('sharing.smb.delete', SHARE_DICT[key])
        SHARE_DICT.pop(key)
    reg_shares = call('sharing.smb.reg_listshares')
    assert len(reg_shares) == 0, str(reg_shares)
    share_count = call('sharing.smb.query', [], {'count': True})
    assert share_count == 0
"""
Following battery of tests validate behavior of registry
with regard to homes shares
"""
def test__create_homes_share(setup_for_tests):
    """Creating a share with home=True must materialize the [homes] registry share."""
    mp, ds, share_dict = setup_for_tests
    home_path = os.path.join(mp, 'HOME_SHARE')
    call('filesystem.mkdir', home_path)
    new_share = call('sharing.smb.create', {
        "comment": "My Test SMB Share",
        "path": home_path,
        "home": True,
        "purpose": "NO_PRESET",
        "name": 'HOME_SHARE',
    })
    share_dict['HOME'] = new_share['id']
    reg_shares = call('sharing.smb.reg_listshares')
    # Generator expression instead of any([...]) (ruff C419); 'homes' is
    # already lowercase so only the registry names need casefolding.
    assert any(s.casefold() == 'homes' for s in reg_shares), str(reg_shares)
def test__toggle_homes_share(setup_for_tests):
    """Toggling `home` off and back on must remove then restore [homes]."""
    mp, ds, share_dict = setup_for_tests
    try:
        call('sharing.smb.update', share_dict['HOME'], {'home': False})
        reg_shares = call('sharing.smb.reg_listshares')
        # Generator expression instead of any([...]) (ruff C419).
        assert not any(s.casefold() == 'homes' for s in reg_shares), str(reg_shares)
    finally:
        call('sharing.smb.update', share_dict['HOME'], {'home': True})
    reg_shares = call('sharing.smb.reg_listshares')
    assert any(s.casefold() == 'homes' for s in reg_shares), str(reg_shares)
def test__registry_rebuild_homes(setup_for_tests):
    """
    Abusive test.
    In this test we run behind middleware's back and
    delete our homes share from the registry, and then
    attempt to rebuild by registry sync method. This
    method is called (among other places) when the CIFS
    service reloads.
    """
    ssh('net conf delshare HOMES')
    call('service.reload', 'cifs')
    reg_shares = call('sharing.smb.reg_listshares')
    # Generator expression instead of any([...]) (ruff C419).
    assert any(s.casefold() == 'homes' for s in reg_shares), str(reg_shares)
def test__test_smb_options():
    """
    Validate that user comments are preserved as-is
    """
    # Round-trip: smb_options must come back line-for-line unchanged.
    new_config = call('smb.update', {'smb_options': '\n'.join(SAMPLE_OPTIONS)})
    assert new_config['smb_options'].splitlines() == SAMPLE_OPTIONS
def test__test_invalid_share_aux_param_create(setup_for_tests):
    """An invalid aux parameter must reject share creation without side effects."""
    init_share_count = call('sharing.smb.query', [], {'count': True})
    with pytest.raises(ValidationError) as ve:
        call('sharing.smb.create', {'name': 'FAIL', 'path': setup_for_tests[0], 'auxsmbconf': 'oplocks = canary'})
    assert ve.value.attribute == 'sharingsmb_create.auxsmbconf'
    # A failed create must not leave a partially-created share behind.
    assert init_share_count == call('sharing.smb.query', [], {'count': True})
def test__test_invalid_share_aux_param_update(setup_for_tests):
    """An invalid aux parameter must reject a share update."""
    this_share = call('sharing.smb.create', {'name': 'FAIL', 'path': setup_for_tests[0]})
    try:
        with pytest.raises(ValidationError) as ve:
            call('sharing.smb.update', this_share['id'], {'auxsmbconf': 'oplocks = canary'})
    finally:
        call('sharing.smb.delete', this_share['id'])
    assert ve.value.attribute == 'sharingsmb_update.auxsmbconf'
| 13,341 | Python | .py | 334 | 33.730539 | 199 | 0.646604 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,210 | test_475_syslog.py | truenas_middleware/tests/api2/test_475_syslog.py | from time import sleep
import pytest
from auto_config import password, user
from middlewared.test.integration.utils import call, ssh
from middlewared.test.integration.utils.client import truenas_server
def do_syslog(ident, message, facility='syslog.LOG_USER', priority='syslog.LOG_INFO'):
    """
    This generates a syslog message on the TrueNAS server we're currently testing.
    We don't need to override IP addr or creds because we are not a syslog target.
    """
    # Build the inline python3 script; escaped quotes survive the remote shell.
    script = (
        f'import syslog;'
        f'syslog.openlog(ident=\\\"{ident}\\\", facility={facility});'
        f'syslog.syslog({priority},\\\"{message}\\\");syslog.closelog()'
    )
    ssh(f'python3 -c "{script}"')
def check_syslog(log_path, message, target_ip=None, target_user=user, target_passwd=password, timeout=30):
    """
    Common function to check whether a particular message exists in a log file.
    This will be used to check local and remote syslog servers.

    Current implementation performs simple grep through the log file, and so
    onus is on test developer to not under-specify `message` in order to avoid
    false positives.

    Returns the matching grep output, or None if `message` did not appear
    within `timeout` seconds.
    """
    target_ip = target_ip or truenas_server.ip
    sleep_time = 1
    while timeout > 0:
        found = ssh(
            f'grep -R "{message}" {log_path}',
            check=False,
            user=target_user,
            password=target_passwd,
            ip=target_ip
        )
        if found:
            return found
        # Message may not have been flushed to disk yet; poll until timeout.
        sleep(sleep_time)
        timeout -= sleep_time
    # Explicit (rather than implicit) signal that the message never appeared.
    return None
@pytest.mark.parametrize('params', [
    {
        'ident': 'iscsi-scstd',
        'msg': 'ZZZZ: random scst test',
        'path': '/var/log/scst.log',
    },
    {
        'ident': 'iscsi-scstd',
        'msg': 'ZZZZ: random scst test',
        'path': '/var/log/scst.log',  # This is just to make sure our exclude filter works as intended
    },
])
def test_local_syslog_filter(request, params):
    """
    This test validates that our syslog-ng filters are correctly placing
    messages into their respective paths in /var/log
    """
    do_syslog(
        params['ident'],
        params['msg'],
        params.get('facility', 'syslog.LOG_USER'),
        params.get('priority', 'syslog.LOG_INFO')
    )
    assert check_syslog(params['path'], params['msg'], timeout=10)
@pytest.mark.parametrize('log_path', [
    '/var/log/messages',
    '/var/log/syslog',
    '/var/log/daemon.log'
])
def test_filter_leak(request, log_path):
    """
    This test validates that our exclude filter works properly and that
    particularly spammy applications aren't polluting useful logs.
    """
    # 'ZZZZ:' markers were emitted into scst.log by the previous test; they
    # must not leak into the general-purpose logs.
    results = ssh(f'grep -R "ZZZZ:" {log_path}', complete_response=True, check=False)
    assert results['result'] is False, str(results['result'])
def test_07_check_can_set_remote_syslog(request):
    """
    Basic test to validate that setting a remote syslog target
    doesn't break syslog-ng config
    """
    try:
        data = call('system.advanced.update', {'syslogserver': '127.0.0.1'})
        assert data['syslogserver'] == '127.0.0.1'
        # silent=False makes a syslog-ng restart failure raise here.
        call('service.restart', 'syslogd', {'silent': False})
    finally:
        call('system.advanced.update', {'syslogserver': ''})
| 3,214 | Python | .py | 84 | 32.154762 | 106 | 0.651267 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,211 | test_040_ad_user_group_cache.py | truenas_middleware/tests/api2/test_040_ad_user_group_cache.py | #!/usr/bin/env python3
import errno
import pytest
import sys
import os
apifolder = os.getcwd()
sys.path.append(apifolder)
from functions import SSH_TEST
from auto_config import password, user
from middlewared.service_exception import CallError
from middlewared.test.integration.assets.directory_service import active_directory
from middlewared.test.integration.utils import call
WINBIND_SEPARATOR = "\\"
@pytest.fixture(scope="module")
def do_ad_connection(request):
    # Join AD, wait for any in-flight cache fill to finish, then capture the
    # full sets of cached AD users and groups for comparison by the tests.
    with active_directory() as ad:
        # make sure we are extra sure cache fill complete
        cache_fill_job = call(
            'core.get_jobs',
            [['method', '=', 'directoryservices.cache.refresh_impl']],
            {'order_by': ['-id'], 'get': True}
        )
        if cache_fill_job['state'] == 'RUNNING':
            call('core.job_wait', cache_fill_job['id'], job=True)
        users = [x['username'] for x in call(
            'user.query', [['local', '=', False]],
        )]
        set_users = set(users)
        # Duplicate names in the cache would indicate a bug.
        assert len(set_users) == len(users)
        groups = [x['name'] for x in call(
            'group.query', [['local', '=', False]],
        )]
        set_groups = set(groups)
        assert len(set_groups) == len(groups)
        yield ad | {'users': set_users, 'groups': set_groups}
def get_ad_user_and_group(ad_connection):
    """Return (user entry, 'domain users' group entry) for the joined AD domain.

    Locals renamed so that they no longer shadow the module-level `user`
    imported from auto_config.
    """
    workgroup = ad_connection['dc_info']['Pre-Win2k Domain']
    domain_prefix = f'{workgroup.upper()}{WINBIND_SEPARATOR}'
    ad_username = ad_connection['user_obj']['pw_name']
    ad_groupname = f'{domain_prefix}domain users'
    ad_user = call(
        'user.query', [['username', '=', ad_username]],
        {'get': True}
    )
    ad_group = call(
        'group.query', [['name', '=', ad_groupname]],
        {'get': True}
    )
    return (ad_user, ad_group)
def test_check_for_ad_users(do_ad_connection):
    """
    This test validates that wbinfo -u output matches entries
    we get through user.query
    """
    cmd = "wbinfo -u"
    results = SSH_TEST(cmd, user, password)
    assert results['result'], str(results['output'])
    # winbind and middleware must agree exactly on the domain user list.
    wbinfo_entries = set(results['stdout'].splitlines())
    assert wbinfo_entries == do_ad_connection['users']
def test_check_for_ad_groups(do_ad_connection):
    """
    This test validates that wbinfo -g output matches entries
    we get through group.query
    """
    cmd = "wbinfo -g"
    results = SSH_TEST(cmd, user, password)
    assert results['result'], str(results['output'])
    # winbind and middleware must agree exactly on the domain group list.
    wbinfo_entries = set(results['stdout'].splitlines())
    assert wbinfo_entries == do_ad_connection['groups']
def test_check_directoryservices_cache_refresh(do_ad_connection):
    """
    This test validates that middleware can successfully rebuild the
    directory services cache from scratch using the public API.

    This currently happens once per 24 hours. Result of failure here will
    be lack of users/groups visible in webui.
    """
    # Cache resides in tdb files. Remove the files to clear cache.
    cmd = 'rm -f /root/tdb/persistent/*'
    results = SSH_TEST(cmd, user, password)
    assert results['result'] is True, results['output']

    # directoryservices.cache_refresh job causes us to rebuild / refresh LDAP / AD users.
    call('directoryservices.cache.refresh_impl', job=True)

    # Set comprehensions instead of set([<list comprehension>]) (ruff C403).
    users = {x['username'] for x in call('user.query', [['local', '=', False]])}
    assert users == do_ad_connection['users']

    groups = {x['name'] for x in call('group.query', [['local', '=', False]])}
    assert groups == do_ad_connection['groups']
def test_check_lazy_initialization_of_users_and_groups_by_name(do_ad_connection):
    """
    When users explicitly search for a directory service or other user
    by name or id we should hit pwd and grp modules and synthesize a
    result if the user / group is not in the cache. This special behavior
    only occurs when single filter of "name =" or "id =". So after the
    initial query that should result in insertion, we add a second filter
    to only hit the cache. Code paths are slightly different for lookups
    by id or by name and so they are tested separately.
    """
    # Empty the on-disk cache so the name lookups below must synthesize entries.
    cmd = 'rm -f /root/tdb/persistent/*'
    results = SSH_TEST(cmd, user, password)
    assert results['result'] is True, results['output']
    # Name-based queries (inside the helper) lazily insert the AD entries.
    ad_user, ad_group = get_ad_user_and_group(do_ad_connection)
    assert ad_user['id_type_both'] is True
    assert ad_user['immutable'] is True
    assert ad_user['local'] is False
    assert ad_group['id_type_both'] is True
    assert ad_group['local'] is False
    # A cache-only query must now contain exactly the lazily-inserted user ...
    cache_names = set([x['username'] for x in call(
        'user.query', [['local', '=', False]],
    )])
    assert cache_names == {ad_user['username']}
    # ... and exactly the lazily-inserted group.
    cache_names = set([x['name'] for x in call(
        'group.query', [['local', '=', False]],
    )])
    assert cache_names == {ad_group['name']}
def test_check_lazy_initialization_of_users_and_groups_by_id(do_ad_connection):
    """
    When users explicitly search for a directory service or other user
    by name or id we should hit pwd and grp modules and synthesize a
    result if the user / group is not in the cache. This special behavior
    only occurs when single filter of "name =" or "id =". So after the
    initial query that should result in insertion, we add a second filter
    to only hit the cache. Code paths are slightly different for lookups
    by id or by name and so they are tested separately.
    """
    # Resolve the accounts first (by name), then clear the cache so the
    # uid/gid queries below have to lazily re-insert them.
    ad_user, ad_group = get_ad_user_and_group(do_ad_connection)
    cmd = 'rm -f /root/tdb/persistent/*'
    results = SSH_TEST(cmd, user, password)
    assert results['result'] is True, results['output']
    # id-based single-filter lookups trigger the lazy insertion path.
    call('user.query', [['uid', '=', ad_user['uid']]], {'get': True})
    call('group.query', [['gid', '=', ad_group['gid']]], {'get': True})
    # Cache-only queries must now contain exactly the re-inserted entries.
    cache_names = set([x['username'] for x in call(
        'user.query', [['local', '=', False]],
    )])
    assert cache_names == {ad_user['username']}
    cache_names = set([x['name'] for x in call(
        'group.query', [['local', '=', False]],
    )])
    assert cache_names == {ad_group['name']}
@pytest.mark.parametrize('op_type', ('UPDATE', 'DELETE'))
def test_update_delete_failures(do_ad_connection, op_type):
    """Directory services accounts are immutable: update/delete must fail with EPERM."""
    ad_user, ad_group = get_ad_user_and_group(do_ad_connection)
    for acct, prefix in ((ad_user, 'user'), (ad_group, 'group')):
        with pytest.raises(CallError) as ce:
            if op_type == 'UPDATE':
                call(f'{prefix}.update', acct['id'], {'smb': False})
            else:
                call(f'{prefix}.delete', acct['id'])
        assert ce.value.errno == errno.EPERM
| 6,632 | Python | .py | 151 | 37.933775 | 89 | 0.645924 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,212 | test_account_privilege_role_private_fields.py | truenas_middleware/tests/api2/test_account_privilege_role_private_fields.py | import contextlib
import pytest
from middlewared.test.integration.assets.account import unprivileged_user_client
from middlewared.test.integration.assets.api_key import api_key
from middlewared.test.integration.assets.cloud_backup import task as cloud_backup_task
from middlewared.test.integration.assets.cloud_sync import local_ftp_credential, local_ftp_task
from middlewared.test.integration.assets.crypto import root_certificate_authority
from middlewared.test.integration.assets.datastore import row
from middlewared.test.integration.assets.keychain import ssh_keypair
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call, client, mock
REDACTED = "********"
@pytest.fixture(scope="module")
def readonly_client():
    # Module-scoped client authenticated as an unprivileged user holding only
    # the READONLY_ADMIN role; used to verify that private fields are redacted.
    with unprivileged_user_client(["READONLY_ADMIN"]) as c:
        yield c
def wrap(id):
    """Wrap a literal id so it can share the ``with`` code path used by factories."""
    return contextlib.nullcontext(id)
@contextlib.contextmanager
def certificateauthority():
    # Temporary root certificate authority; yields its database id.
    with root_certificate_authority("ca_test") as ca:
        yield ca["id"]
@contextlib.contextmanager
def cloudbackup():
    # Temporary cloud backup task backed by a local FTP credential and a
    # scratch dataset; ensure_initialized is mocked out so no repository is
    # actually touched. Yields the task id.
    with local_ftp_credential() as credential:
        with dataset("cloud_backup") as local_dataset:
            with mock("cloud_backup.ensure_initialized", return_value=None):
                with cloud_backup_task({
                    "path": f"/mnt/{local_dataset}",
                    "credentials": credential["id"],
                    "attributes": {
                        "folder": "",
                    },
                    "password": "test",
                }) as task:
                    yield task["id"]
@contextlib.contextmanager
def cloudsync_credential():
    # Temporary local-FTP cloud credential; yields its id.
    with local_ftp_credential() as credential:
        yield credential["id"]
@contextlib.contextmanager
def cloudsync():
    # Temporary local-FTP cloud sync task; yields its id.
    with local_ftp_task() as task:
        yield task["id"]
@contextlib.contextmanager
def disk():
    # Yields the identifier of the first disk on the system (no setup/teardown).
    disks = call("disk.query")
    yield disks[0]["identifier"]
@contextlib.contextmanager
def dns_authenticator():
    # ACME DNS authenticator row inserted directly into the datastore
    # (bypassing API validation); yields the row id.
    with row(
        "system.acmednsauthenticator",
        {
            "authenticator": "cloudflare",
            "name": "test",
            "attributes": {
                "api_key": "key",
                "api_token": "token",
            },
        },
    ) as id:
        yield id
@contextlib.contextmanager
def idmap():
    # LDAP-backed idmap domain row inserted directly into the datastore;
    # the ldap_user_dn_password inside 'options' is the secret that the
    # redaction test checks. Yields the row id.
    with row(
        "directoryservice.idmap_domain",
        {
            "name": "test",
            "dns_domain_name": "test",
            "range_low": 1000,
            "range_high": 1001,
            "idmap_backend": "LDAP",
            "options": {
                "ldap_base_dn": "cn=BASEDN",
                "ldap_user_dn": "cn=USERDN",
                "ldap_url": "ldap://127.0.0.1",
                "ldap_user_dn_password": "password"
            },
        },
        {"prefix": "idmap_domain_"},
    ) as id:
        yield id
@contextlib.contextmanager
def vm_device():
    # VM row (fixed id 5) plus an attached DISPLAY device row (fixed id 7)
    # with a password in its attributes; yields the device row id.
    with row(
        "vm.vm",
        {
            "id": 5,
            "name": "",
            "memory": 225
        }):
        with row(
            "vm.device",
            {
                "id": 7,
                "dtype": "DISPLAY",
                "vm": 5,
                "attributes": {
                    "bind": "127.0.0.1",
                    "port": 1,
                    "web_port": 1,
                    "password": "pass",
                }
            }
        ) as id:
            yield id
@contextlib.contextmanager
def iscsi_auth():
    # iSCSI auth created through the public API (so 'secret'/'peersecret'
    # pass validation); always deleted on exit. Yields the auth id.
    auth = call("iscsi.auth.create", {
        "tag": 1,
        "user": "test",
        "secret": "secretsecret",
        "peeruser": "peeruser",
        "peersecret": "peersecretsecret",
    })
    try:
        yield auth["id"]
    finally:
        call("iscsi.auth.delete", auth["id"])
@contextlib.contextmanager
def keychaincredential():
    # Temporary SSH keypair keychain credential; yields its id.
    with ssh_keypair() as k:
        yield k["id"]
@contextlib.contextmanager
def vmware():
    # VMware plugin row with a password column; yields the row id.
    with row(
        "storage.vmwareplugin",
        {
            "password": "password",
        },
    ) as id:
        yield id
@pytest.mark.parametrize("how", ["multiple", "single", "get_instance"])
@pytest.mark.parametrize("service,id,options,redacted_fields", (
    ("acme.dns.authenticator", dns_authenticator, {}, ["attributes"]),
    ("certificate", 1, {}, ["privatekey", "issuer"]),
    ("certificateauthority", certificateauthority, {}, ["privatekey", "issuer"]),
    ("cloud_backup", cloudbackup, {}, ["credentials.attributes", "password"]),
    ("cloudsync.credentials", cloudsync_credential, {}, ["attributes"]),
    ("cloudsync", cloudsync, {}, ["credentials.attributes", "encryption_password"]),
    ("disk", disk, {"extra": {"passwords": True}}, ["passwd"]),
    ("idmap", idmap, {}, ["options.ldap_user_dn_password"]),
    ("iscsi.auth", iscsi_auth, {}, ["secret", "peersecret"]),
    ("keychaincredential", keychaincredential, {}, ["attributes"]),
    ("user", 1, {}, ["unixhash", "smbhash"]),
    ("vmware", vmware, {}, ["password"]),
    ("vm.device", vm_device, {}, ["attributes.password"]),
))
def test_crud(readonly_client, how, service, id, options, redacted_fields):
    """CRUD services must redact their private fields for read-only users.

    ``id`` is either a literal database id or a context-manager factory that
    creates the record; ``redacted_fields`` are dotted paths that must read
    back as the ``********`` placeholder from every query flavor.
    """
    identifier = "id" if service != "disk" else "identifier"
    with (id() if callable(id) else wrap(id)) as id:
        if how == "multiple":
            result = readonly_client.call(f"{service}.query", [[identifier, "=", id]], options)[0]
        elif how == "single":
            result = readonly_client.call(f"{service}.query", [[identifier, "=", id]], {**options, "get": True})
        elif how == "get_instance":
            result = readonly_client.call(f"{service}.get_instance", id, options)
        else:
            assert False
        for k in redacted_fields:
            # Walk the dotted path into the result before comparing.
            obj = result
            for path in k.split("."):
                obj = obj[path]
            assert obj == REDACTED, (k, obj, REDACTED)
@pytest.mark.parametrize("service,redacted_fields", (
    ("system.general", ["ui_certificate"]),
    ("ldap", ["bindpw"]),
    ("mail", ["pass", "oauth"]),
    ("snmp", ["v3_password", "v3_privpassphrase"]),
    ("truecommand", ["api_key"]),
))
def test_config(readonly_client, service, redacted_fields):
    """Config services must redact their private top-level fields for read-only users."""
    result = readonly_client.call(f"{service}.config")
    for k in redacted_fields:
        assert result[k] == REDACTED
def test_fields_are_visible_if_has_write_access():
    """A user holding the ACCOUNT_WRITE role must see real, unredacted fields."""
    with unprivileged_user_client(["ACCOUNT_WRITE"]) as c:
        root_entry = c.call("user.get_instance", 1)
        assert root_entry["unixhash"] != REDACTED
def test_fields_are_visible_for_api_key():
    # Sessions authenticated with an API key are fully privileged,
    # so private fields must not be redacted.
    with api_key() as key:
        with client(auth=None) as c:
            assert c.call("auth.login_with_api_key", key)
            result = c.call("user.get_instance", 1)
            assert result["unixhash"] != REDACTED
def test_vm_display_device(readonly_client):
    # vm.get_display_devices must redact the display password for
    # read-only users too (VM id 5 is fixed by the vm_device fixture).
    with vm_device():
        result = readonly_client.call("vm.get_display_devices", 5)
        assert result[0]["attributes"]["password"] == REDACTED
| 6,965 | Python | .py | 190 | 28.515789 | 112 | 0.58737 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,213 | test_audit_sudo.py | truenas_middleware/tests/api2/test_audit_sudo.py | import contextlib
import secrets
import string
import time
import pytest
from middlewared.test.integration.assets.account import user
from middlewared.test.integration.utils import call, ssh
from middlewared.test.integration.utils.time_utils import utc_now
from datetime import timezone
EVENT_KEYS = {'timestamp', 'message_timestamp', 'service_data', 'username', 'service', 'audit_id', 'address', 'event_data', 'event', 'session', 'success'}
ACCEPT_KEYS = {'command', 'submituser', 'lines', 'submithost', 'uuid', 'runenv', 'server_time', 'runcwd', 'submitcwd', 'runuid', 'runargv', 'columns', 'runuser', 'submit_time'}
REJECT_KEYS = {'command', 'submituser', 'lines', 'submithost', 'uuid', 'reason', 'runenv', 'server_time', 'runcwd', 'submitcwd', 'runuid', 'runargv', 'columns', 'runuser', 'submit_time'}
LS_COMMAND = '/bin/ls'
ECHO_COMMAND = '/bin/echo'
SUDO_TO_USER = 'sudo-to-user'
SUDO_TO_PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
def get_utc():
    """Return the current time as whole seconds since the epoch, interpreted as UTC."""
    return int(utc_now().replace(tzinfo=timezone.utc).timestamp())
def user_sudo_events(username, count=False):
    """Query SUDO audit events for *username*; return just a count when requested."""
    query = {
        'services': ['SUDO'],
        'query-filters': [['username', '=', username]],
    }
    if count:
        query['query-options'] = {'count': True}
    return call('audit.query', query)
def wait_for_events(username, newcount, retries=20, delay=0.5):
    """Poll until the SUDO audit event count for *username* reaches *newcount*.

    Returns the count actually observed.  BUG FIX: the previous version
    unconditionally returned ``newcount``, so callers' assertions of the
    form ``assert expected == wait_for_events(user, expected)`` passed
    vacuously even when the events never arrived.
    """
    assert retries > 0 and retries <= 20
    assert delay >= 0.1 and delay <= 1
    current = user_sudo_events(username, True)
    while newcount != current and retries:
        time.sleep(delay)
        retries -= 1
        current = user_sudo_events(username, True)
    # Return the real count so callers' equality assertions are meaningful.
    return current
def assert_accept(event):
    """Validate the shape of a sudo 'accept' audit event and return its payload."""
    assert type(event) is dict
    # BUG FIX: this comparison was a bare expression (missing 'assert'),
    # so top-level event shape regressions went undetected.
    assert set(event.keys()) == EVENT_KEYS
    assert set(event['event_data'].keys()) == {'sudo'}
    assert set(event['event_data']['sudo'].keys()) == {'accept'}
    assert set(event['event_data']['sudo']['accept'].keys()) == ACCEPT_KEYS
    return event['event_data']['sudo']['accept']
def assert_reject(event):
    """Validate the shape of a sudo 'reject' audit event and return its payload."""
    assert type(event) is dict
    # BUG FIX: this comparison was a bare expression (missing 'assert'),
    # so top-level event shape regressions went undetected.
    assert set(event.keys()) == EVENT_KEYS
    assert set(event['event_data'].keys()) == {'sudo'}
    assert set(event['event_data']['sudo'].keys()) == {'reject'}
    assert set(event['event_data']['sudo']['reject'].keys()) == REJECT_KEYS
    return event['event_data']['sudo']['reject']
def assert_timestamp(event, event_data):
    """
    NAS-130373: message_timestamp should be UTC

    ``event`` is the full audit event; ``event_data`` is the accept/reject
    payload previously extracted from it.
    """
    assert type(event) is dict
    submit_time = event_data['submit_time']['seconds']
    msg_ts = event['message_timestamp']
    utc_ts = get_utc()
    # Confirm consistency and correctness of timestamps.
    # The message_timestamp and the submit_time should be UTC and
    # are expected to be mostly the same value. We allow for a generous delta between
    # current UTC and the audit message timestamps.
    assert abs(utc_ts - msg_ts) < 5, f"utc_ts={utc_ts}, msg_ts={msg_ts}"
    assert abs(utc_ts - int(submit_time)) < 5, f"utc_ts={utc_ts}, submit_time={submit_time}"
    assert abs(msg_ts - int(submit_time)) < 5, f"msg_ts={msg_ts}, submit_time={submit_time}"
@contextlib.contextmanager
def initialize_for_sudo_tests(username, password, data):
    """Create a throwaway SSH-enabled user account for the sudo audit tests.

    NOTE: ``data`` (extra user.create fields such as sudo_commands) is
    mutated in place by the update below.
    """
    data.update({
        'username': username,
        'full_name': username,
        'group_create': True,
        'password': password,
        'shell': '/usr/bin/bash',
        'ssh_password_enabled': True,
    })
    with user(data) as newuser:
        yield newuser
@pytest.fixture(scope='module')
def sudo_to_user():
    # Secondary account used as the target of 'sudo -u <user>' invocations.
    with initialize_for_sudo_tests(SUDO_TO_USER, SUDO_TO_PASSWORD, {}) as u:
        yield u
class SudoTests:
    """Shared assertion scenarios for the sudo audit test classes below.

    Concrete subclasses supply ``USER`` / ``PASSWORD`` class attributes and a
    ``sudo_command`` implementation (password vs NOPASSWD mixins).
    """
    def generate_command(self, cmd, runuser=None, password=None):
        # Build a "sudo [-S] [-u runuser] cmd" command line string.
        command = ['sudo']
        if password:
            command.append('-S')
        if runuser:
            command.extend(['-u', runuser])
        command.append(cmd)
        return " ".join(command)
    def allowed_all(self):
        """All of the sudo commands are allowed"""
        # First get a baseline # of events
        count = user_sudo_events(self.USER, True)
        # Now create an event and do some basic checking
        self.sudo_command('ls /etc')
        assert count + 1 == wait_for_events(self.USER, count + 1)
        event = user_sudo_events(self.USER)[-1]
        accept = assert_accept(event)
        assert accept['submituser'] == self.USER
        assert accept['command'] == LS_COMMAND
        assert accept['runuser'] == 'root'
        assert accept['runargv'].split(',') == ['ls', '/etc']
        # NAS-130373
        assert_timestamp(event, accept)
        # One more completely unique command
        magic = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(20))
        self.sudo_command(f'echo {magic}')
        assert count + 2 == wait_for_events(self.USER, count + 2)
        accept = assert_accept(user_sudo_events(self.USER)[-1])
        assert accept['submituser'] == self.USER
        assert accept['command'] == ECHO_COMMAND
        assert accept['runuser'] == 'root'
        assert accept['runargv'].split(',') == ['echo', magic]
        # sudo to a non-root user
        self.sudo_command('ls /tmp', SUDO_TO_USER)
        assert count + 3 == wait_for_events(self.USER, count + 3)
        accept = assert_accept(user_sudo_events(self.USER)[-1])
        assert accept['submituser'] == self.USER
        assert accept['command'] == LS_COMMAND
        assert accept['runuser'] == SUDO_TO_USER
        assert accept['runargv'].split(',') == ['ls', '/tmp']
    def allowed_some(self):
        """Some of the sudo commands are allowed"""
        # First get a baseline # of events
        count = user_sudo_events(self.USER, True)
        # Generate a sudo command that we ARE allowed perform
        magic = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(20))
        self.sudo_command(f'echo {magic}')
        assert count + 1 == wait_for_events(self.USER, count + 1)
        accept = assert_accept(user_sudo_events(self.USER)[-1])
        assert accept['submituser'] == self.USER
        assert accept['command'] == ECHO_COMMAND
        assert accept['runuser'] == 'root'
        assert accept['runargv'].split(',') == ['echo', magic]
        # Generate a sudo command that we are NOT allowed perform
        with pytest.raises(AssertionError):
            self.sudo_command('ls /etc')
        # Returned exception depends upon whether passwd or nopasswd
        assert count + 2 == wait_for_events(self.USER, count + 2)
        reject = assert_reject(user_sudo_events(self.USER)[-1])
        assert reject['submituser'] == self.USER
        assert reject['command'] == LS_COMMAND
        assert reject['runuser'] == 'root'
        assert reject['runargv'].split(',') == ['ls', '/etc']
        assert reject['reason'] == 'command not allowed'
    def allowed_none(self):
        """None of the sudo commands are allowed"""
        # First get a baseline # of events
        count = user_sudo_events(self.USER, True)
        # Now create an event and do some basic checking to ensure it failed
        with pytest.raises(AssertionError) as ve:
            self.sudo_command('ls /etc')
        assert 'is not allowed to execute ' in str(ve), str(ve)
        assert count + 1 == wait_for_events(self.USER, count + 1)
        event = user_sudo_events(self.USER)[-1]
        reject = assert_reject(event)
        assert reject['submituser'] == self.USER
        assert reject['command'] == LS_COMMAND
        assert reject['runuser'] == 'root'
        assert reject['runargv'].split(',') == ['ls', '/etc']
        assert reject['reason'] == 'command not allowed'
        # NAS-130373
        assert_timestamp(event, reject)
class SudoNoPasswd:
    # Mixin: run sudo without supplying a password (exercises NOPASSWD rules).
    def sudo_command(self, cmd, runuser=None):
        command = self.generate_command(cmd, runuser)
        ssh(command, user=self.USER, password=self.PASSWORD)
class SudoPasswd:
    # Mixin: pipe the account password to 'sudo -S' on stdin.
    def sudo_command(self, cmd, runuser=None):
        command = f'echo {self.PASSWORD} | {self.generate_command(cmd, runuser, self.PASSWORD)}'
        ssh(command, user=self.USER, password=self.PASSWORD)
class TestSudoAllowedAllNoPasswd(SudoTests, SudoNoPasswd):
    # User may run ALL commands via sudo, no password required.
    USER = 'sudo-allowed-all-nopw-user'
    PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
    @pytest.fixture(scope='class')
    def create_user(self):
        with initialize_for_sudo_tests(self.USER,
                                       self.PASSWORD,
                                       {'sudo_commands_nopasswd': ['ALL']}) as u:
            yield u
    def test_audit_query(self, sudo_to_user, create_user):
        self.allowed_all()
class TestSudoAllowedAllPasswd(SudoTests, SudoPasswd):
    # User may run ALL commands via sudo, password required.
    USER = 'sudo-allowed-all-pw-user'
    PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
    @pytest.fixture(scope='class')
    def create_user(self):
        with initialize_for_sudo_tests(self.USER,
                                       self.PASSWORD,
                                       {'sudo_commands': ['ALL']}) as u:
            yield u
    def test_audit_query(self, sudo_to_user, create_user):
        self.allowed_all()
class TestSudoAllowedNonePasswd(SudoTests, SudoPasswd):
    # User has no sudo privileges at all; every command must be rejected.
    USER = 'sudo-allowed-none-pw-user'
    PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
    @pytest.fixture(scope='class')
    def create_user(self):
        with initialize_for_sudo_tests(self.USER, self.PASSWORD, {}) as u:
            yield u
    def test_audit_query(self, create_user):
        self.allowed_none()
class TestSudoAllowedSomeNoPasswd(SudoTests, SudoNoPasswd):
    # User may only run /bin/echo via sudo, no password required.
    USER = 'sudo-allowed-some-nopw-user'
    PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
    @pytest.fixture(scope='class')
    def create_user(self):
        with initialize_for_sudo_tests(self.USER,
                                       self.PASSWORD,
                                       {'sudo_commands_nopasswd': [ECHO_COMMAND]}) as u:
            yield u
    def test_audit_query(self, create_user):
        self.allowed_some()
class TestSudoAllowedSomePasswd(SudoTests, SudoPasswd):
    # User may only run /bin/echo via sudo, password required.
    USER = 'sudo-allowed-some-pw-user'
    PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
    @pytest.fixture(scope='class')
    def create_user(self):
        with initialize_for_sudo_tests(self.USER,
                                       self.PASSWORD,
                                       {'sudo_commands': [ECHO_COMMAND]}) as u:
            yield u
    def test_audit_query(self, create_user):
        self.allowed_some()
| 10,729 | Python | .py | 223 | 40.053812 | 186 | 0.631942 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,214 | test_snapshot_query.py | truenas_middleware/tests/api2/test_snapshot_query.py | import pytest
from middlewared.test.integration.assets.pool import dataset, pool, another_pool
from middlewared.test.integration.utils import call
@pytest.fixture(scope="module")
def fixture1():
    # Build two parallel dataset trees -- one on the primary test pool and
    # one on a second pool literally named "test" -- then take two recursive
    # snapshots of each tree so the query tests can filter by pool/dataset.
    with another_pool():
        with dataset("test"):
            with dataset("test/test1"):
                with dataset("test/test2"):
                    with dataset("test", pool="test"):
                        with dataset("test/test1", pool="test"):
                            with dataset("test/test2", pool="test"):
                                call(
                                    "zfs.snapshot.create",
                                    {"dataset": f"{pool}/test", "name": "snap-1", "recursive": True},
                                )
                                call(
                                    "zfs.snapshot.create",
                                    {"dataset": f"{pool}/test", "name": "snap-2", "recursive": True},
                                )
                                call(
                                    "zfs.snapshot.create",
                                    {"dataset": "test/test", "name": "snap-1", "recursive": True},
                                )
                                call(
                                    "zfs.snapshot.create",
                                    {"dataset": "test/test", "name": "snap-2", "recursive": True},
                                )
                                yield
def test_query_all_names(fixture1):
    # An unfiltered zfs.snapshot.query must include every snapshot the
    # fixture created on both pools (other snapshots may exist, hence issubset).
    names = {
        snapshot["name"]
        for snapshot in call("zfs.snapshot.query", [], {"select": ["name"]})
    }
    assert {f"{pool}/test@snap-1", f"{pool}/test@snap-2", f"{pool}/test/test1@snap-1", f"{pool}/test/test1@snap-2",
            f"{pool}/test/test2@snap-1", f"{pool}/test/test2@snap-2",
            f"test/test@snap-1", f"test/test@snap-2", f"test/test/test1@snap-1", f"test/test/test1@snap-2",
            f"test/test/test2@snap-1", f"test/test/test2@snap-2"}.issubset(names)
@pytest.mark.parametrize("filters,names", [
    ([["pool", "=", "test"]], {f"test/test@snap-1", f"test/test@snap-2", f"test/test/test1@snap-1",
                               f"test/test/test1@snap-2", f"test/test/test2@snap-1", f"test/test/test2@snap-2"}),
    ([["dataset", "=", f"{pool}/test"]], {f"{pool}/test@snap-1", f"{pool}/test@snap-2"}),
    ([["dataset", "in", [f"{pool}/test/test1", "test/test/test2"]]], {f"{pool}/test/test1@snap-1",
                                                                      f"{pool}/test/test1@snap-2",
                                                                      f"test/test/test2@snap-1",
                                                                      f"test/test/test2@snap-2"}),
])
def test_query_names_by_pool_or_dataset(fixture1, filters, names):
    # Filtering on the "pool"/"dataset" properties must return exactly
    # the matching snapshot names -- no more, no fewer.
    assert {
        snapshot["name"]
        for snapshot in call("zfs.snapshot.query", filters, {"select": ["name"]})
    } == names
| 2,976 | Python | .py | 52 | 36.288462 | 115 | 0.442044 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,215 | test_iscsi_host_crud_roles.py | truenas_middleware/tests/api2/test_iscsi_host_crud_roles.py | import pytest
from middlewared.test.integration.assets.roles import common_checks
@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_HOST_READ"])
def test_read_role_can_read(unprivileged_user_fixture, role):
    # Read-level roles must be able to call the iscsi.host read methods.
    common_checks(unprivileged_user_fixture, "iscsi.host.query", role, True, valid_role_exception=False)
    common_checks(unprivileged_user_fixture, "iscsi.host.get_initiators", role, True)
    common_checks(unprivileged_user_fixture, "iscsi.host.get_targets", role, True)
@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_HOST_READ"])
def test_read_role_cant_write(unprivileged_user_fixture, role):
    """Read-only roles must be denied every mutating iscsi.host method."""
    for method in ("create", "update", "delete", "set_initiators"):
        common_checks(unprivileged_user_fixture, f"iscsi.host.{method}", role, False)
@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_ISCSI_WRITE", "SHARING_ISCSI_HOST_WRITE"])
def test_write_role_can_write(unprivileged_user_fixture, role):
    """Write-level roles must be allowed every mutating iscsi.host method."""
    for method in ("create", "update", "delete", "set_initiators"):
        common_checks(unprivileged_user_fixture, f"iscsi.host.{method}", role, True)
| 1,500 | Python | .py | 19 | 75.263158 | 104 | 0.766621 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,216 | test_serial_consoles.py | truenas_middleware/tests/api2/test_serial_consoles.py | import pytest
from middlewared.test.integration.utils import call, ssh
import sys
import os
apifolder = os.getcwd()
sys.path.append(apifolder)
def test_enabling_serial_port():
    # Enable the serial console on each available port in turn and verify,
    # after every update, that only the selected port's getty is enabled.
    ports = call('system.advanced.serial_port_choices')
    assert 'ttyS0' in ports, ports
    for port in ports:
        test_config = {'serialconsole': True, 'serialport': port}
        config = call('system.advanced.update', test_config)
        for k, v in test_config.items():
            assert config[k] == v, config
        assert_serial_port_configuration({p: p == port for p in ports})
def test_disabling_serial_port():
    # Disable the serial console against each port choice; afterwards no
    # port's getty service may remain enabled (checked once, after the loop).
    ports = call('system.advanced.serial_port_choices')
    assert 'ttyS0' in ports, ports
    for port in ports:
        test_config = {'serialconsole': False, 'serialport': port}
        config = call('system.advanced.update', test_config)
        for k, v in test_config.items():
            assert config[k] == v, config
    assert_serial_port_configuration({p: False for p in ports})
def assert_serial_port_configuration(ports):
    """Assert each port's serial-getty unit enabled/active state matches *ports*.

    ``ports`` maps port name -> expected boolean state; both the systemd
    'is-enabled' and 'is-active' states are checked over SSH.
    """
    for port, enabled in ports.items():
        is_enabled = ssh(f'systemctl is-enabled serial-getty@{port}.service', False).strip() == 'enabled'
        assert is_enabled is enabled, f'{port!r} enabled assertion failed: {is_enabled!r} != {enabled!r}'
        is_enabled = ssh(f'systemctl is-active --quiet serial-getty@{port}.service', False, True)['returncode'] == 0
        assert is_enabled is enabled, f'{port!r} active assertion failed: {is_enabled!r} != {enabled!r}'
| 1,528 | Python | .py | 30 | 44.666667 | 116 | 0.678091 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,217 | test_lock.py | truenas_middleware/tests/api2/test_lock.py | import time
import pytest
from middlewared.test.integration.utils import client, mock
@pytest.mark.flaky(reruns=5, reruns_delay=5)
def test_no_lock():
    # Without a @lock decorator the two background calls run concurrently,
    # so two 5-second sleeps finish in under 6 seconds of wall time.
    with mock("test.test1", """
        from middlewared.service import lock
        async def mock(self, *args):
            import asyncio
            await asyncio.sleep(5)
    """):
        start = time.monotonic()
        with client() as c:
            c1 = c.call("test.test1", background=True, register_call=True)
            c2 = c.call("test.test1", background=True, register_call=True)
            c.wait(c1, timeout=10)
            c.wait(c2)
        assert time.monotonic() - start < 6
@pytest.mark.flaky(reruns=5, reruns_delay=5)
def test_async_lock():
    # With @lock on an async method the second call must wait for the first,
    # so the two 5-second sleeps take at least 10 seconds in total.
    with mock("test.test1", """
        from middlewared.service import lock
        @lock("test")
        async def mock(self, *args):
            import asyncio
            await asyncio.sleep(5)
    """):
        start = time.monotonic()
        with client() as c:
            c1 = c.call("test.test1", background=True, register_call=True)
            c2 = c.call("test.test1", background=True, register_call=True)
            c.wait(c1)
            c.wait(c2)
        assert time.monotonic() - start >= 10
@pytest.mark.flaky(reruns=5, reruns_delay=5)
def test_threading_lock():
    # @lock must serialize threaded (sync) methods as well: the two
    # 5-second sleeps may not overlap, so total time is at least 10 seconds.
    with mock("test.test1", """
        from middlewared.service import lock
        @lock("test")
        def mock(self, *args):
            import time
            time.sleep(5)
    """):
        start = time.monotonic()
        with client() as c:
            c1 = c.call("test.test1", background=True, register_call=True)
            c2 = c.call("test.test1", background=True, register_call=True)
            c.wait(c1)
            c.wait(c2)
        assert time.monotonic() - start >= 10
| 1,811 | Python | .py | 50 | 27.36 | 74 | 0.585436 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,218 | test_cloud_sync_crud.py | truenas_middleware/tests/api2/test_cloud_sync_crud.py | import pytest
from middlewared.service_exception import CallError
from middlewared.test.integration.assets.cloud_sync import credential as _credential, task as _task
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call, ssh
try:
from config import (
AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY,
AWS_BUCKET
)
except ImportError:
Reason = 'AWS credential are missing in config.py'
pytestmark = pytest.mark.skip(reason=Reason)
@pytest.fixture(scope='module')
def credentials():
    # Module-scoped S3 cloud credential built from the config.py AWS keys.
    with _credential({
        "provider": "S3",
        "attributes": {
            "access_key_id": AWS_ACCESS_KEY_ID,
            "secret_access_key": AWS_SECRET_ACCESS_KEY,
        }
    }) as c:
        yield c
@pytest.fixture(scope='module')
def task(credentials):
    # PUSH/COPY cloud sync task from a scratch local dataset into the
    # configured AWS bucket root.
    with dataset("cloudsync_local") as local_dataset:
        with _task({
            "direction": "PUSH",
            "transfer_mode": "COPY",
            "path": f"/mnt/{local_dataset}",
            "credentials": credentials["id"],
            "attributes": {
                "bucket": AWS_BUCKET,
                "folder": "",
            },
        }) as t:
            yield t
def test_update_cloud_credentials(credentials):
    """Credentials can be updated; restore the real key afterwards."""
    call("cloudsync.credentials.update", credentials["id"], {
        "attributes": {
            "access_key_id": "garbage",
            "secret_access_key": AWS_SECRET_ACCESS_KEY,
        }
    })
    assert call("cloudsync.credentials.get_instance", credentials["id"])["attributes"]["access_key_id"] == "garbage"
    # Put the valid access key back so later tests can actually sync.
    call("cloudsync.credentials.update", credentials["id"], {
        "attributes": {
            "access_key_id": AWS_ACCESS_KEY_ID,
            "secret_access_key": AWS_SECRET_ACCESS_KEY,
        },
    })
def test_update_cloud_sync(task):
    # Flipping the task direction must succeed and return the updated task.
    assert call("cloudsync.update", task["id"], {"direction": "PULL"})
def test_run_cloud_sync(task):
    # Run the (now PULL) task and verify the expected file was transferred.
    call("cloudsync.sync", task["id"], job=True)
    # Diagnostic directory listing; shows up in the test log on failure.
    print(ssh(f"ls {task['path']}"))
    assert ssh(f"cat {task['path']}/freenas-test.txt") == "freenas-test\n"
def test_restore_cloud_sync(task):
    # cloudsync.restore creates a reversed task; clean it up immediately.
    restore_task = call("cloudsync.restore", task["id"], {
        "transfer_mode": "COPY",
        "path": task["path"],
    })
    call("cloudsync.delete", restore_task["id"])
def test_delete_cloud_credentials_error(credentials, task):
    # A credential referenced by an existing task must refuse deletion.
    with pytest.raises(CallError) as ve:
        call("cloudsync.credentials.delete", credentials["id"])
    assert "This credential is used by cloud sync task" in ve.value.errmsg
| 2,569 | Python | .py | 68 | 30.602941 | 116 | 0.629585 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,219 | test_account_root_password.py | truenas_middleware/tests/api2/test_account_root_password.py | import pytest
from middlewared.service_exception import CallError
from middlewared.test.integration.utils import call, client
from middlewared.test.integration.assets.account import user
from middlewared.test.integration.assets.pool import dataset
def test_root_password_disabled():
    """Disabling the root password should raise an alert until an admin exists.

    Flow: disable root's password directly in the datastore, confirm the
    WebUiRootLogin alert fires, create a builtin_administrators member and
    confirm the alert clears, root password logins fail, and the
    user.web_ui_login_disabled event names the new admin.  Root's password
    is re-enabled in a finally block regardless of outcome.
    """
    with client() as c:
        root_user_id = c.call(
            "datastore.query",
            "account.bsdusers",
            [["username", "=", "root"]],
            {"get": True, "prefix": "bsdusr_"},
        )["id"]
        c.call("datastore.update", "account.bsdusers", root_user_id, {"bsdusr_password_disabled": True})
        c.call("etc.generate", "user")
        try:
            alerts = c.call("alert.list")
            assert any(alert["klass"] == "WebUiRootLogin" for alert in alerts), alerts
            builtin_administrators_group_id = c.call(
                "datastore.query",
                "account.bsdgroups",
                [["group", "=", "builtin_administrators"]],
                {"get": True, "prefix": "bsdgrp_"},
            )["id"]
            # BUG FIX: was f"admin_homedir" -- an f-string with no placeholders.
            with dataset("admin_homedir") as homedir:
                events = []
                def callback(type, **message):
                    events.append((type, message))
                c.subscribe("user.web_ui_login_disabled", callback, sync=True)
                with user({
                    "username": "admin",
                    "full_name": "Admin",
                    "group_create": True,
                    "groups": [builtin_administrators_group_id],
                    "home": f"/mnt/{homedir}",
                    "password": "test1234",
                }, get_instance=False):
                    alerts = c.call("alert.list")
                    assert not any(alert["klass"] == "WebUiRootLogin" for alert in alerts), alerts
                    # Root should not be able to log in with password anymore
                    with pytest.raises(CallError):
                        call("system.info", client_kwargs=dict(auth_required=False))
                    assert events[0][1]["fields"]["usernames"] == ["admin"]
            c.call("datastore.update", "account.bsdusers", root_user_id, {"bsdusr_password_disabled": False})
            c.call("etc.generate", "user")
        finally:
            # In case of a failure
            c.call("datastore.update", "account.bsdusers", root_user_id, {"bsdusr_password_disabled": False})
            c.call("etc.generate", "user")
| 2,484 | Python | .py | 49 | 36.306122 | 117 | 0.540239 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,220 | test_boot_scrub.py | truenas_middleware/tests/api2/test_boot_scrub.py | from time import time, sleep
from middlewared.test.integration.utils import call
def test_get_boot_scrub(request):
    """Start a boot-pool scrub and wait (up to 10 minutes) for the job to succeed."""
    job_id = call("boot.scrub")
    stop_time = time() + 600
    while True:
        job = call("core.get_jobs", [["id", "=", job_id]], {"get": True})
        if job["state"] in ("RUNNING", "WAITING"):
            # BUG FIX: the original did `assert False, "Job Timeout\n\n" + job`,
            # which raised TypeError (str + dict) instead of the intended
            # assertion message; it also had an unreachable `break` after it.
            assert stop_time > time(), f"Job Timeout\n\n{job}"
            sleep(1)
        else:
            assert job["state"] == "SUCCESS", job
            break
| 535 | Python | .py | 15 | 26.733333 | 73 | 0.537718 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,221 | test_audit_api_key.py | truenas_middleware/tests/api2/test_audit_api_key.py | import datetime
from middlewared.test.integration.utils import call
from middlewared.test.integration.utils.audit import expect_audit_method_calls
API_KEY_NAME = 'AUDIT_API_KEY'
def test_api_key_audit():
    """Create/update/delete of an API key must each emit the expected audit entry."""
    payload = {'username': 'root', 'name': API_KEY_NAME}
    payload2 = {'expires_at': None}
    api_key_id = None
    try:
        with expect_audit_method_calls([{
            'method': 'api_key.create',
            'params': [payload],
            'description': f'Create API key {API_KEY_NAME}',
        }]):
            api_key = call('api_key.create', payload)
            api_key_id = api_key['id']
        # Set expiration 60 minutes in future
        payload2['expires_at'] = api_key['created_at'] + datetime.timedelta(minutes=60)
        with expect_audit_method_calls([{
            'method': 'api_key.update',
            'params': [api_key_id, payload2],
            'description': f'Update API key {API_KEY_NAME}',
        }]):
            call('api_key.update', api_key_id, payload2)
    finally:
        # Delete is also part of the test (audited), but only if create succeeded.
        if api_key_id:
            with expect_audit_method_calls([{
                'method': 'api_key.delete',
                'params': [api_key_id],
                'description': f'Delete API key {API_KEY_NAME}',
            }]):
                call('api_key.delete', api_key_id)
| 1,318 | Python | .py | 32 | 31.1875 | 91 | 0.56651 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,222 | test_draid_record_and_block_size.py | truenas_middleware/tests/api2/test_draid_record_and_block_size.py | import pytest
from middlewared.service_exception import ValidationErrors
from middlewared.test.integration.assets.pool import another_pool
from middlewared.test.integration.utils import call
from auto_config import ha
@pytest.fixture(scope='module')
def check_unused_disks():
    """Skip the module's tests unless at least 4 unused disks are available."""
    if len(call('disk.get_unused')) < 4:
        pytest.skip('Insufficient number of disks to perform these tests')
@pytest.fixture(scope='module')
def draid_pool():
    """Yield a DRAID1 pool (2 disks, 1 data disk) for the duration of the module.

    NOTE(review): `another_pool` appears to yield a pool object with a 'name'
    key (tests index ``draid_pool['name']``) — the local name ``pool_name``
    here is just its binding, not the string name.
    """
    unused_disks = call('disk.get_unused')
    with another_pool({
        'name': 'test_draid_pool',
        'topology': {
            'data': [{
                'disks': [disk['name'] for disk in unused_disks[:2]],
                'type': 'DRAID1',
                'draid_data_disks': 1
            }],
        },
        'allow_duplicate_serials': True,
    }) as pool_name:
        yield pool_name
@pytest.fixture(scope='module')
def mirror_pool():
    """Yield a 2-disk MIRROR pool for the duration of the module (non-dRAID control case)."""
    unused_disks = call('disk.get_unused')
    with another_pool({
        'name': 'test_mirror_pool',
        'topology': {
            'data': [{
                'disks': [disk['name'] for disk in unused_disks[:2]],
                'type': 'MIRROR',
            }],
        },
        'allow_duplicate_serials': True,
    }) as pool_name:
        yield pool_name
@pytest.mark.usefixtures('check_unused_disks')
@pytest.mark.parametrize(
    'record_size', ['1M']
)
def test_draid_pool_default_record_size(draid_pool, record_size):
    """A freshly created dRAID pool defaults its root dataset recordsize to 1M."""
    root_dataset = call('pool.dataset.get_instance', draid_pool['name'])
    assert root_dataset['recordsize']['value'] == record_size
@pytest.mark.usefixtures('check_unused_disks')
@pytest.mark.parametrize(
    'record_size', ['128K']
)
def test_non_draid_pool_default_record_size(mirror_pool, record_size):
    """A non-dRAID (mirror) pool keeps the usual 128K default recordsize."""
    root_dataset = call('pool.dataset.get_instance', mirror_pool['name'])
    assert root_dataset['recordsize']['value'] == record_size
@pytest.mark.usefixtures('check_unused_disks')
@pytest.mark.parametrize(
    'update_recordsize, validation_error', [
        ('512K', False),
        ('256K', False),
        ('128K', False),
        ('2M', False),
        ('512', True),
        ('4K', True),
        ('64K', True),
    ]
)
def test_draid_root_dataset_valid_recordsize(draid_pool, update_recordsize, validation_error):
    """Updating the dRAID root dataset only accepts recordsizes of 128K and above."""
    if validation_error:
        with pytest.raises(ValidationErrors) as ve:
            call('pool.dataset.update', draid_pool['name'], {'recordsize': update_recordsize})
        error = ve.value.errors[0]
        assert error.attribute == 'pool_dataset_update.recordsize'
        assert error.errmsg == f"'{update_recordsize}' is an invalid recordsize."
    else:
        updated = call('pool.dataset.update', draid_pool['name'], {'recordsize': update_recordsize})
        assert updated['recordsize']['value'] == update_recordsize
@pytest.mark.usefixtures('check_unused_disks')
@pytest.mark.parametrize(
    'update_recordsize', ['512K', '256K', '128K', '2M', '512', '4K', '64K']
)
def test_non_draid_root_dataset_valid_recordsize(mirror_pool, update_recordsize):
    """All the standard recordsizes are accepted on a non-dRAID root dataset."""
    updated = call('pool.dataset.update', mirror_pool['name'], {'recordsize': update_recordsize})
    assert updated['recordsize']['value'] == update_recordsize
@pytest.mark.usefixtures('check_unused_disks')
@pytest.mark.parametrize(
    'recordsize, validation_error', [
        ('512K', False),
        ('256K', False),
        ('128K', False),
        ('2M', False),
        ('512', True),
        ('4K', True),
        ('64K', True),
    ]
)
def test_draid_dataset_valid_recordsize(draid_pool, recordsize, validation_error):
    """Child dataset creation on a dRAID pool rejects recordsizes below 128K."""
    ds_name = f'{draid_pool["name"]}/test_dataset_{recordsize}'
    if validation_error:
        with pytest.raises(ValidationErrors) as ve:
            call('pool.dataset.create', {'name': ds_name, 'recordsize': recordsize})
        error = ve.value.errors[0]
        assert error.attribute == 'pool_dataset_create.recordsize'
        assert error.errmsg == f"'{recordsize}' is an invalid recordsize."
    else:
        created = call('pool.dataset.create', {'name': ds_name, 'recordsize': recordsize})
        assert created['recordsize']['value'] == recordsize
@pytest.mark.usefixtures('check_unused_disks')
@pytest.mark.parametrize(
    'recordsize', ['512K', '256K', '128K', '2M', '512', '4K', '64K']
)
def test_non_draid_dataset_valid_recordsize(mirror_pool, recordsize):
    """All the standard recordsizes are accepted for child datasets on a non-dRAID pool."""
    ds_name = f'{mirror_pool["name"]}/test_dataset_{recordsize}'
    created = call('pool.dataset.create', {'name': ds_name, 'recordsize': recordsize})
    assert created['recordsize']['value'] == recordsize
@pytest.mark.usefixtures('check_unused_disks')
@pytest.mark.parametrize(
    'blocksize,validation_error', [
        ('16K', True),
        ('32K', False),
    ]
)
def test_draid_zvol_valid_blocksize(draid_pool, blocksize, validation_error):
    """zvols on dRAID pools must use a volblocksize of at least 32K."""
    payload = {
        'name': f'{draid_pool["name"]}/test_dataset_{blocksize}', 'volsize': 268468224,
        'volblocksize': blocksize, 'type': 'VOLUME',
    }
    if validation_error:
        with pytest.raises(ValidationErrors) as ve:
            call('pool.dataset.create', payload)
        error = ve.value.errors[0]
        assert error.attribute == 'pool_dataset_create.volblocksize'
        assert error.errmsg == 'Volume block size must be greater than or equal to 32K for dRAID pools'
    else:
        created = call('pool.dataset.create', payload)
        assert created['volblocksize']['value'] == blocksize
@pytest.mark.usefixtures('check_unused_disks')
@pytest.mark.parametrize(
    'blocksize', ['16K', '32K']
)
def test_non_draid_zvol_valid_blocksize(mirror_pool, blocksize):
    """Non-dRAID pools accept both 16K and 32K zvol block sizes."""
    payload = {
        'name': f'{mirror_pool["name"]}/test_dataset_{blocksize}', 'volsize': 268468224,
        'volblocksize': blocksize, 'type': 'VOLUME',
    }
    created = call('pool.dataset.create', payload)
    assert created['volblocksize']['value'] == blocksize
@pytest.mark.usefixtures('check_unused_disks')
@pytest.mark.parametrize(
    'update_recordsize, default_record_size', [
        ('512K', '1M'),
    ]
)
def test_draid_dataset_default_recordsize(draid_pool, update_recordsize, default_record_size):
    """New child datasets on dRAID keep the 1M default even after the root recordsize changes."""
    updated_root = call('pool.dataset.update', draid_pool['name'], {'recordsize': update_recordsize})
    assert updated_root['recordsize']['value'] == update_recordsize
    child = call('pool.dataset.create', {'name': f'{draid_pool["name"]}/test_dataset'})
    assert child['recordsize']['value'] == default_record_size
| 6,549 | Python | .py | 160 | 33.8875 | 120 | 0.628086 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,223 | test_260_iscsi.py | truenas_middleware/tests/api2/test_260_iscsi.py | import random
import string
from time import sleep
import pytest
from assets.websocket.iscsi import initiator, portal, target, target_extent_associate
from auto_config import hostname, pool_name
from functions import SSH_TEST
from middlewared.test.integration.assets.iscsi import iscsi_extent
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call, ssh
from middlewared.test.integration.utils.client import truenas_server
# The BSD initiator-host credentials are optional; without them every test in
# this module is skipped (see pytestmark below).
try:
    from config import BSD_HOST, BSD_PASSWORD, BSD_USERNAME
    have_bsd_host_cfg = True
except ImportError:
    have_bsd_host_cfg = False
pytestmark = pytest.mark.skipif(not have_bsd_host_cfg, reason='BSD host configuration is missing in ixautomation.conf')
# Random 2-digit suffix keeps target/zvol names unique across runs
digit = ''.join(random.choices(string.digits, k=2))
file_mountpoint = f'/tmp/iscsi-file-{hostname}'     # mountpoint on the BSD host for the file extent
zvol_mountpoint = f'/tmp/iscsi-zvol-{hostname}'     # mountpoint on the BSD host for the zvol extent
target_name = f"target{digit}"
basename = "iqn.2005-10.org.freenas.ctl"            # IQN prefix for all targets in this module
zvol_name = f"ds{digit}"
zvol = f'{pool_name}/{zvol_name}'
zvol_url = zvol.replace('/', '%2F')                 # URL-encoded form of the zvol path
def has_session_present(target):
    """Return True if at least one active iSCSI session exists for *target*."""
    sessions = call('iscsi.global.sessions', [['target', '=', target]])
    assert isinstance(sessions, list), sessions
    return len(sessions) > 0
def waiting_for_iscsi_to_disconnect(base_target, wait):
    """Return True once *base_target* is fully disconnected, polling for up to *wait* seconds.

    Phase 1 waits for the BSD client to stop listing the target; phase 2 waits
    for SCALE to drop the session.  NOTE: ``timeout`` is deliberately carried
    over between the two loops so both phases share a single *wait* budget.
    """
    timeout = 0
    # First check that the client no longer sees the target logged in
    while timeout < wait:
        cmd = 'iscsictl -L'
        results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
        if base_target not in results['output']:
            break
        timeout += 1
        sleep(1)
    # Next check that the SCALE does not see a session to the target.
    # The while/else returns False only if this loop exhausts the budget.
    while timeout < wait:
        if not has_session_present(base_target):
            return True
        timeout += 1
        sleep(1)
    else:
        return False
def wait_for_iscsi_connection_before_grabbing_device_name(iqn, wait=60):
    """Wait for the BSD initiator to connect to *iqn* and return its device name.

    Polls (up to *wait* seconds total across both phases) first for iscsictl to
    report the session connected with a real device name, then for the device
    node to appear under /dev.  Raises AssertionError on timeout.
    """
    timeout = 0
    device_name = ""
    # Phase 1: wait for a connected session with a resolved device name
    # (a name starting with 'probe' is a transient placeholder — keep waiting).
    while timeout < wait:
        cmd = f'iscsictl -L | grep {iqn}'
        results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
        if results['result'] and "Connected:" in results['output']:
            # NOTE(review): 'stdout' key here vs 'output' elsewhere — assumes
            # SSH_TEST returns both; confirm against functions.SSH_TEST.
            device_name = results['stdout'].strip().split()[3]
            if device_name.startswith('probe'):
                timeout += 1
                sleep(1)
                continue
            break
        timeout += 1
        sleep(1)
    # Phase 2 (shares the same time budget): wait for the device node to exist.
    while timeout < wait:
        cmd = f'test -e /dev/{device_name}'
        results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
        if results['result']:
            break
        timeout += 1
        sleep(1)
    assert timeout < wait, f"Timed out waiting {wait} seconds for {iqn} to surface"
    return device_name
@pytest.fixture(scope='module')
def fix_initiator():
    """Create a default iSCSI initiator for the duration of the module."""
    with initiator() as config:
        yield config
@pytest.fixture(scope='module')
def fix_portal():
    """Create an iSCSI portal; yield it wrapped in a dict for later merging."""
    with portal() as config:
        yield {'portal': config}
@pytest.fixture(scope='module')
def fix_iscsi_enabled():
    """Enable the iscsitarget service at boot; restore it to disabled on teardown."""
    payload = {"enable": True}
    config = call('service.update', 'iscsitarget', payload)
    try:
        yield config
    finally:
        payload = {"enable": False}
        config = call('service.update', 'iscsitarget', payload)
@pytest.fixture(scope='module')
def fix_iscsi_started(fix_iscsi_enabled):
    """Start the iscsitarget service (after enabling it); stop it on teardown."""
    call('service.start', 'iscsitarget')
    sleep(1)
    try:
        yield
    finally:
        call('service.stop', 'iscsitarget')
def test_add_iscsi_initiator(fix_initiator):
    """Exactly one initiator should exist, carrying the default comment."""
    initiators = call('iscsi.initiator.query')
    assert len(initiators) == 1, initiators
    assert initiators[0]['comment'] == 'Default initiator', initiators
def test_add_iscsi_portal(fix_portal):
    """Exactly one portal should exist, listening on the wildcard address."""
    portals = call('iscsi.portal.query')
    assert len(portals) == 1, portals
    assert portals[0]['listen'][0]['ip'] == '0.0.0.0', portals
def test_enable_iscsi_service(fix_iscsi_enabled):
    # All the work happens in the fixture; this test only forces it to run.
    pass
def test_start_iscsi_service(fix_iscsi_started):
    """After the start fixture runs, the iscsitarget service must be RUNNING."""
    svc = call('service.query', [['service', '=', 'iscsitarget']], {'get': True})
    assert svc["state"] == "RUNNING", svc
class FileExtent:
    """Mixin supplying a 512 MiB FILE-backed iSCSI extent fixture."""

    @pytest.fixture(scope='class')
    def fix_extent(self):
        filepath = f'/mnt/{pool_name}/iscsi_file_extent'
        data = {
            'type': 'FILE',
            'name': 'extent',
            'filesize': 536870912,
            'path': filepath
        }
        try:
            with iscsi_extent(data) as config:
                yield config
        finally:
            # The backing file is created on demand; remove it on teardown
            ssh(f'rm -f {filepath}')
class ZvolExtent:
    """Mixin supplying a DISK (zvol-backed) iSCSI extent fixture."""

    @pytest.fixture(scope='class')
    def fix_extent(self):
        zvol_data = {
            'type': 'VOLUME',
            'volsize': 655360,
            'volblocksize': '16K'
        }
        # The zvol only needs to live as long as the extent that uses it
        with dataset(zvol_name, zvol_data, pool_name):
            extent_data = {
                'type': 'DISK',
                'disk': f'zvol/{zvol}',
                'name': 'zvol_extent',
            }
            with iscsi_extent(extent_data) as config:
                yield config
class Target:
    """Mixin: create a target named self.TARGET_NAME, associate it with the extent,
    and verify each object via the query APIs.  Subclasses set TARGET_NAME and
    mix in FileExtent or ZvolExtent to provide fix_extent."""

    @pytest.fixture(scope='class')
    def fix_target(self, fix_portal):
        # Accumulate portal + target + iqn into one dict for downstream fixtures
        result = {}
        result.update(fix_portal)
        with target(self.TARGET_NAME, [{'portal': fix_portal['portal']['id']}]) as config:
            result.update({'target': config})
            result.update({'iqn': f'{basename}:{self.TARGET_NAME}'})
            yield result

    @pytest.fixture(scope='class')
    def fix_targetextent(self, fix_target, fix_extent):
        # Merge target info and extent keys, then associate target<->extent (LUN 1)
        result = {}
        result.update(fix_target)
        result.update(fix_extent)
        with target_extent_associate(fix_target['target']['id'], fix_extent['id'], 1) as config:
            result.update({'targetextent': config})
            yield result

    def test_add_iscsi_target(self, fix_target):
        result = call('iscsi.target.query', [['name', '=', fix_target['target']['name']]])
        assert len(result) == 1, result

    def test_add_iscsi_file_extent(self, fix_extent):
        result = call('iscsi.extent.query')
        assert len(result) == 1, result

    def test_associate_iscsi_target(self, fix_targetextent):
        result = call('iscsi.targetextent.query')
        assert len(result) == 1, result
class LoggedInTarget:
    """Mixin: log the BSD initiator into the target and resolve its device name."""

    @pytest.fixture(scope='class')
    def fix_connect_to_target(self, fix_iscsi_started, fix_targetextent):
        iqn = fix_targetextent['iqn']
        cmd = f'iscsictl -A -p {truenas_server.ip}:3260 -t {iqn}'
        results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
        assert results['result'] is True, f"{results['output']}, {results['stderr']}"
        try:
            yield fix_targetextent
        finally:
            cmd = f'iscsictl -R -t {iqn}'
            results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
            assert results['result'] is True, f"{results['output']}, {results['stderr']}"
            # Currently FreeBSD (13.1-RELEASE-p5) does *not* issue a LOGOUT (verified by
            # network capture), so give the target time to react. SCST will log an error, e.g.
            # iscsi-scst: ***ERROR***: Connection 00000000e749085f with initiator iqn.1994-09.org.freebsd:freebsd13.local unexpectedly closed!
            assert waiting_for_iscsi_to_disconnect(f'{iqn}', 30)

    @pytest.fixture(scope='class')
    def fix_target_surfaced(self, fix_connect_to_target):
        # Extend the accumulated dict with the initiator-side device name
        result = {}
        result.update(fix_connect_to_target)
        iqn = fix_connect_to_target['iqn']
        device_name = wait_for_iscsi_connection_before_grabbing_device_name(iqn)
        assert device_name != ""
        result.update({'device': device_name})
        yield result

    def test_connect_to_iscsi_target(self, fix_connect_to_target):
        # Fixture does the work; the test just forces it to run
        pass

    def test_target_surfaced(self, fix_target_surfaced):
        pass
class Formatted:
    """Mixin: create a UFS filesystem on the surfaced device (BSD `newfs`)."""

    @pytest.fixture(scope='class')
    def fix_format_target_volume(self, fix_target_surfaced):
        device_name = fix_target_surfaced['device']
        # Best-effort unmount in case the device auto-mounted; result ignored
        cmd = f'umount "/media/{device_name}"'
        SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
        cmd2 = f'newfs "/dev/{device_name}"'
        results = SSH_TEST(cmd2, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
        assert results['result'] is True, f"{results['output']}, {results['stderr']}"
        yield fix_target_surfaced

    def test_format_target_volume(self, fix_format_target_volume):
        pass
class Mounted:
    """Mixin: create self.MOUNTPOINT on the BSD host and mount the surfaced device there."""

    @pytest.fixture(scope='class')
    def fix_create_iscsi_mountpoint(self):
        # Create (and on teardown remove) the mountpoint directory on the BSD host
        cmd = f'mkdir -p {self.MOUNTPOINT}'
        results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
        assert results['result'] is True, f"{results['output']}, {results['stderr']}"
        try:
            yield
        finally:
            cmd = f'rm -rf "{self.MOUNTPOINT}"'
            results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
            assert results['result'] is True, f"{results['output']}, {results['stderr']}"

    @pytest.fixture(scope='class')
    def fix_mount_target_volume(self, fix_target_surfaced, fix_create_iscsi_mountpoint):
        device_name = fix_target_surfaced['device']
        cmd = f'mount "/dev/{device_name}" "{self.MOUNTPOINT}"'
        # Allow some settle time (if we've just logged in a previously formatted target)
        sleep(5)
        results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
        assert results['result'] is True, f"{results['output']}, {results['stderr']}"
        try:
            result = {}
            result.update(fix_target_surfaced)
            result.update({'mountpoint': self.MOUNTPOINT})
            # FIX: previously a bare `yield` discarded the dict that was just
            # assembled; yield it so dependents can consume device + mountpoint.
            yield result
        finally:
            cmd = f'umount "{self.MOUNTPOINT}"'
            results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
            assert results['result'] is True, f"{results['output']}, {results['stderr']}"

    def test_create_iscsi_mountpoint(self, fix_create_iscsi_mountpoint):
        pass

    def test_mount_target_volume(self, fix_mount_target_volume):
        pass
class TestFileTarget(FileExtent, Target):
    """End-to-end flow for a FILE-extent target: create, log in, format, mount,
    then exercise basic file operations on the mounted filesystem."""
    TARGET_NAME = target_name

    class TestLoggedIn(LoggedInTarget):
        pass

    class TestFormatted(Formatted):
        pass

    class TestMounted(Mounted):
        MOUNTPOINT = file_mountpoint

        def test_create_file(self, fix_mount_target_volume):
            cmd = 'touch "%s/testfile"' % self.MOUNTPOINT
            results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
            assert results['result'] is True, f"{results['output']}, {results['stderr']}"

        def test_move_file(self, fix_mount_target_volume):
            cmd = 'mv "%s/testfile" "%s/testfile2"' % (self.MOUNTPOINT, self.MOUNTPOINT)
            results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
            assert results['result'] is True, f"{results['output']}, {results['stderr']}"

        def test_copy_file(self, fix_mount_target_volume):
            cmd = 'cp "%s/testfile2" "%s/testfile"' % (self.MOUNTPOINT, self.MOUNTPOINT)
            results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
            assert results['result'] is True, f"{results['output']}, {results['stderr']}"

        def test_delete_file(self, fix_mount_target_volume):
            results = SSH_TEST('rm "%s/testfile2"' % self.MOUNTPOINT,
                               BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
            assert results['result'] is True, f"{results['output']}, {results['stderr']}"
class TestZvolTarget(ZvolExtent, Target):
    """End-to-end flow for a zvol-extent target: create files while mounted,
    unmount/logout, verify nothing leaked onto the local mountpoint, then log
    back in and verify the data survived on the zvol."""
    TARGET_NAME = zvol_name

    class TestLoggedIn(LoggedInTarget):
        pass

    class TestFormatted(Formatted):
        pass

    class TestMounted(Mounted):
        MOUNTPOINT = zvol_mountpoint

        def test_create_file(self, fix_mount_target_volume):
            cmd = 'touch "%s/myfile.txt"' % self.MOUNTPOINT
            results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
            assert results['result'] is True, f"{results['output']}, {results['stderr']}"

        def test_move_file(self, fix_mount_target_volume):
            cmd = 'mv "%s/myfile.txt" "%s/newfile.txt"' % (self.MOUNTPOINT, self.MOUNTPOINT)
            results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
            assert results['result'] is True, f"{results['output']}, {results['stderr']}"

        def test_create_directory_in_zvol_iscsi_share(self, fix_mount_target_volume):
            cmd = f'mkdir "{self.MOUNTPOINT}/mydir"'
            results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
            assert results['result'], f"{results['output']}, {results['stderr']}"

        def test_copy_file_to_new_dir_in_zvol_iscsi_share(self, fix_mount_target_volume):
            cmd = f'cp "{self.MOUNTPOINT}/newfile.txt" "{self.MOUNTPOINT}/mydir/myfile.txt"'
            results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
            assert results['result'], f"{results['output']}, {results['stderr']}"

    def test_verify_the_zvol_mountpoint_is_empty(self):
        # After TestMounted's class scope ends the share is unmounted, so the
        # local mountpoint directory must no longer contain the file.
        cmd = f'test -f {zvol_mountpoint}/newfile.txt'
        results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
        assert not results['result'], f"{results['output']}, {results['stderr']}"

    class TestLoggedInAgain(LoggedInTarget):
        pass

    # FIX: this class was previously also named `TestMounted`, which rebound the
    # attribute and shadowed the first TestMounted above, so its file-creation
    # tests were never collected by pytest. Renamed to restore collection order.
    class TestMountedAgain(Mounted):
        MOUNTPOINT = zvol_mountpoint

        def test_verify_files_and_directory_kept_on_the_zvol_iscsi_share(self):
            cmd1 = f'test -f {zvol_mountpoint}/newfile.txt'
            results1 = SSH_TEST(cmd1, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
            assert results1['result'], results1['output']
            cmd2 = f'test -f "{zvol_mountpoint}/mydir/myfile.txt"'
            results2 = SSH_TEST(cmd2, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
            assert results2['result'], results2['output']
| 14,228 | Python | .py | 306 | 36.986928 | 142 | 0.614879 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,224 | test_006_pool_and_sysds.py | truenas_middleware/tests/api2/test_006_pool_and_sysds.py | import time
import pytest
from pytest_dependency import depends
from auto_config import ha, pool_name
from truenas_api_client import ValidationErrors
from middlewared.test.integration.assets.directory_service import active_directory
from middlewared.test.integration.utils import call, fail
from middlewared.test.integration.utils.client import client
def wait_for_standby(ws_client):
    """Block until the standby controller reconnects and reports BACKUP status.

    Called directly after the system dataset is migrated to another zpool; on
    HA systems the standby node reboots in response, so we first give the
    other controller time to actually begin rebooting, then poll (up to 300s
    per phase) for connectivity and then for BACKUP status.
    """
    time.sleep(5)
    sleep_time, max_wait_time = 1, 300

    # Local helper: poll `check` once per second, asserting on timeout.
    # (Replaces two copy-pasted polling loops; also drops a stray trailing
    # `pass` the original function ended with.)
    def _poll(check, failure):
        waited_time = 0
        while waited_time < max_wait_time:
            if check():
                return
            waited_time += sleep_time
            time.sleep(sleep_time)
        assert False, f'{failure} after {max_wait_time} seconds'

    _poll(lambda: ws_client.call('failover.remote_connected'),
          'Standby did not connect')

    def _is_backup():
        # best-effort: the remote middleware may not be up yet
        try:
            return ws_client.call('failover.call_remote', 'failover.status') == 'BACKUP'
        except Exception:
            return False

    _poll(_is_backup, 'Standby node did not become BACKUP')
@pytest.fixture(scope='module')
def ws_client():
    """Module-scoped websocket client.

    By the time this test module runs in the pipeline, the HA VM should have
    networking configured, so we can connect via the VIP.
    """
    with client() as c:
        yield c
@pytest.fixture(scope='module')
def pool_data():
    """Module-scoped scratch mapping shared between tests for created pools."""
    return {}
@pytest.mark.dependency(name='SYSDS')
def test_001_check_sysdataset_exists_on_boot_pool(ws_client):
    """
    When a system is first installed or all zpools are deleted
    then we place the system dataset on the boot pool. Since our
    CI pipelines always start with a fresh VM, we can safely assume
    that there are no zpools (created or imported) by the time this
    test runs and so we can assert this accordingly.
    """
    bp_name = ws_client.call('boot.pool_name')
    bp_basename = f'{bp_name}/.system'
    sysds = ws_client.call('systemdataset.config')
    assert bp_name == sysds['pool']
    assert bp_basename == sysds['basename']
def test_activedirectory_requires_pool(request):
    """Enabling Active Directory before any data pool exists must be rejected."""
    depends(request, ['SYSDS'])
    with pytest.raises(ValidationErrors) as ve:
        with active_directory():
            pass
    assert ve.value.errors[0].errmsg.startswith(
        'Active Directory service may not be enabled before data pool is created'
    )
def test_002_create_permanent_zpool(request, ws_client):
    """
    This creates the "permanent" zpool which is used by every other
    test module in the pipeline.
    More specifically we do the following:
        1. get unused disks
        2. create a 1 disk striped zpool
        3. verify system dataset automagically migrated to this pool
    """
    depends(request, ['SYSDS'])
    unused_disks = ws_client.call('disk.details')['unused']
    assert len(unused_disks) >= 2
    try:
        ws_client.call(
            'pool.create', {
                'name': pool_name,
                'topology': {'data': [{'type': 'STRIPE', 'disks': [unused_disks[0]['name']]}]}
            },
            job=True
        )
    except Exception as e:
        # Without the permanent pool the rest of the pipeline is pointless
        fail(f"Unable to create test pool: {e!r}. Aborting tests.")
    else:
        results = ws_client.call('systemdataset.config')
        assert results['pool'] == pool_name
        assert results['basename'] == f'{pool_name}/.system'
        # The sysds migration runs as a background job; verify it succeeded
        try:
            sysdataset_update = ws_client.call('core.get_jobs', [
                ['method', '=', 'systemdataset.update']
            ], {'order_by': ['-id'], 'get': True})
        except Exception:
            fail('Failed to get status of systemdataset update')
        if sysdataset_update['state'] != 'SUCCESS':
            fail(f'System dataset move failed: {sysdataset_update["error"]}')
@pytest.mark.dependency(name='POOL_FUNCTIONALITY1')
def test_003_verify_unused_disk_and_sysds_functionality_on_2nd_pool(ws_client, pool_data):
    """
    This tests a few items related to zpool creation logic:
        1. disk.details()['unused'] should NOT show disks that are a part of a zpool that is
            currently imported (inversely, disk.details()['used'] should show disks that are
            currently in use by a zpool that is imported)
        2. make sure the system dataset doesn't migrate to the 2nd zpool that we create
            since it should only be migrating to the 1st zpool that is created
        3. after verifying system dataset doesn't migrate to the 2nd zpool, explicitly
            migrate it to the 2nd zpool. Migrating system datasets between zpools is a
            common operation and can be very finicky so explicitly testing this operation
            is of paramount importance.
        4. after the system dataset was migrated to the 2nd pool, migrate it back to the
            1st pool. The 2nd pool is a temporary pool used to test other functionality
            and isn't used through the CI test suite so best to clean it up.
    """
    unused_disks = ws_client.call('disk.get_unused')
    assert len(unused_disks) >= 1
    temp_pool_name = 'temp'
    try:
        pool = ws_client.call(
            'pool.create', {
                'name': temp_pool_name,
                'topology': {'data': [{'type': 'STRIPE', 'disks': [unused_disks[0]['name']]}]}
            },
            job=True
        )
    except Exception as e:
        assert False, e
    else:
        # Stash the pool for test_004's export/import checks
        pool_data[pool['name']] = pool
        disk_deets = ws_client.call('disk.details')
        # disk should not show up in `exported_zpool` keys since it's still imported
        assert not any((i['exported_zpool'] == pool['name'] for i in disk_deets['unused']))
        # disk should show up in `imported_zpool` key
        assert any((i['imported_zpool'] == pool['name'] for i in disk_deets['used']))
        # sysds must NOT have migrated to this (second) pool automatically
        sysds = ws_client.call('systemdataset.config')
        assert pool['name'] != sysds['pool']
        assert f'{pool["name"]}/.system' != sysds['basename']
        # explicitly migrate sysdataset to temp pool
        try:
            ws_client.call('systemdataset.update', {'pool': temp_pool_name}, job=True)
        except Exception as e:
            fail(f'Failed to move system dataset to temporary pool: {e}')
        else:
            if ha:
                # the standby controller reboots after a sysds migration
                wait_for_standby(ws_client)
        # ...and migrate it back to the permanent pool
        try:
            ws_client.call('systemdataset.update', {'pool': pool_name}, job=True)
        except Exception as e:
            fail(f'Failed to return system dataset from temporary pool: {e}')
        else:
            if ha:
                wait_for_standby(ws_client)
def test_004_verify_pool_property_unused_disk_functionality(request, ws_client, pool_data):
    """
    This does a few things:
        1. export the zpool without wiping the disk and verify that disk.get_used
            still shows the relevant disk as being part of an exported zpool
        2. clean up the pool by exporting and wiping the disks
        3. finally, if this is HA enable failover since all tests after this one
            expect it to be turned on
    """
    depends(request, ['POOL_FUNCTIONALITY1'])
    zp_name = list(pool_data.keys())[0]
    with pytest.raises(Exception):
        # should prevent setting this property at root dataset
        ws_client.call('zfs.dataset.update', zp_name, {'properties': {'sharenfs': {'value': 'on'}}})
    # export zpool
    try:
        ws_client.call('pool.export', pool_data[zp_name]['id'], job=True)
    except Exception as e:
        assert False, e
    imported = False
    try:
        # disk should show up in `exported_zpool` keys since zpool was exported
        # without wiping the disk
        used_disks = ws_client.call('disk.get_used')
        assert any((i['exported_zpool'] == zp_name for i in used_disks))
        # pool should be available to be imported again
        available_pools = ws_client.call('pool.import_find', job=True)
        assert len(available_pools) == 1 and available_pools[0]['name'] == zp_name
        # import it
        imported = ws_client.call('pool.import_pool', {'guid': available_pools[0]['guid']}, job=True)
        assert imported
    finally:
        # Final export destroys the pool so the disks return to the unused set
        if imported:
            temp_id = ws_client.call('pool.query', [['name', '=', zp_name]], {'get': True})['id']
            options = {'cascade': True, 'restart_services': True, 'destroy': True}
            ws_client.call('pool.export', temp_id, options, job=True)
        if ha:
            # every test after this one expects this to be enabled
            ws_client.call('failover.update', {'disabled': False, 'master': True})
            assert ws_client.call('failover.config')['disabled'] is False
def test__check_root_level_dataset_properties():
    """ validate that our root-level dataset has expected properties """
    ds = call('pool.dataset.get_instance', pool_name)
    expected = {
        'acltype': 'POSIX',
        'aclmode': 'DISCARD',
        'xattr': 'ON',
        'deduplication': 'OFF',
        'casesensitivity': 'SENSITIVE',
        'compression': 'LZ4',
        'snapdev': 'HIDDEN',
        'sync': 'STANDARD',
        'checksum': 'ON',
        'snapdir': 'HIDDEN',
    }
    for prop, value in expected.items():
        assert ds[prop]['value'] == value, (prop, ds[prop])
| 9,608 | Python | .py | 211 | 38.421801 | 101 | 0.654023 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,225 | test_440_snmp.py | truenas_middleware/tests/api2/test_440_snmp.py | #!/usr/bin/env python3
# License: BSD
import os
import pytest
from time import sleep
from contextlib import ExitStack
from middlewared.service_exception import ValidationErrors
from middlewared.test.integration.assets.pool import dataset, snapshot
from middlewared.test.integration.assets.filesystem import directory, mkfile
from middlewared.test.integration.utils import call, ssh
from middlewared.test.integration.utils.client import truenas_server
from middlewared.test.integration.utils.system import reset_systemd_svcs
from pysnmp.hlapi import (CommunityData, ContextData, ObjectIdentity,
ObjectType, SnmpEngine, UdpTransportTarget, getCmd)
from auto_config import ha, interface, password, user, pool_name
from functions import async_SSH_done, async_SSH_start
# Marker for tests that only make sense on an HA pair with a configured VIP
skip_ha_tests = pytest.mark.skipif(not (ha and "virtual_ip" in os.environ), reason="Skip HA tests")
# Values applied in test_configure_SNMP and re-checked by later tests
COMMUNITY = 'public'
TRAPS = False
CONTACT = 'root@localhost.com'
LOCATION = 'Maryville, TN'
PASSWORD = 'testing1234'
# SNMPv3 test-user credentials
SNMP_USER_NAME = 'snmpJoe'
SNMP_USER_AUTH = 'MD5'
SNMP_USER_PWD = "abcd1234"
SNMP_USER_PRIV = 'AES'
SNMP_USER_PHRS = "A priv pass phrase"
SNMP_USER_CONFIG = {
    "v3": True,
    "v3_username": SNMP_USER_NAME,
    "v3_authtype": SNMP_USER_AUTH,
    "v3_password": SNMP_USER_PWD,
    "v3_privproto": SNMP_USER_PRIV,
    "v3_privpassphrase": SNMP_USER_PHRS
}
# The factory-default snmp.config payload; also used to restore state on teardown
EXPECTED_DEFAULT_CONFIG = {
    "location": "",
    "contact": "",
    "traps": False,
    "v3": False,
    "community": "public",
    "v3_username": "",
    "v3_authtype": "SHA",
    "v3_password": "",
    "v3_privproto": None,
    "v3_privpassphrase": None,
    "options": "",
    "loglevel": 3,
    "zilstat": False
}
EXPECTED_DEFAULT_STATE = {
    "enable": False,
    "state": "STOPPED",
}
# Maps a desired service state to the service.* verb that achieves it
CMD_STATE = {
    "RUNNING": "start",
    "STOPPED": "stop"
}
# =====================================================================
# Fixtures and utilities
# =====================================================================
@pytest.fixture(scope='module')
def initialize_and_start_snmp():
    """Start SNMP for the module and yield the pre-test config; restore defaults on teardown."""
    try:
        # Get initial config and start SNMP
        orig_config = call('snmp.config')
        call('service.start', 'snmp')
        yield orig_config
    finally:
        # Restore default config (which will also delete any created user),
        # stop SNMP and restore default enable state
        call('snmp.update', EXPECTED_DEFAULT_CONFIG)
        call(f'service.{CMD_STATE[EXPECTED_DEFAULT_STATE["state"]]}', 'snmp')
        call('service.update', 'snmp', {"enable": EXPECTED_DEFAULT_STATE['enable']})
@pytest.fixture(scope='class')
def add_SNMPv3_user():
    """Create the SNMPv3 test user and confirm snmp-agent picked it up.

    No explicit teardown: restoring EXPECTED_DEFAULT_CONFIG in the module
    fixture removes the user.
    """
    # Reset the systemd restart counter
    reset_systemd_svcs("snmpd snmp-agent")
    call('snmp.update', SNMP_USER_CONFIG)
    assert get_systemctl_status('snmp-agent') == "RUNNING"
    res = call('snmp.get_snmp_users')
    assert SNMP_USER_NAME in res
    yield
@pytest.fixture(scope='function')
def create_nested_structure():
    """
    Create the following structure:
        tank -+-> dataset_1 -+-> dataset_2 -+-> dataset_3
              |-> zvol_1a    |-> zvol-L_2a  |-> zvol L_3a
              |-> zvol_1b    |-> zvol-L_2b  |-> zvol L_3b
              |-> file_1     |-> file_2     |-> file_3
              |-> dir_1      |-> dir_2      |-> dir_3

    Yields a dict of the created object lists keyed 'zv', 'ds', 'dir', 'file'.
    All contexts are registered on one ExitStack so teardown runs in reverse
    creation order (children before parents).
    TODO: Make this generic and move to assets
    """
    ds_path = ""
    ds_list = []
    zv_list = []
    dir_list = []
    file_list = []
    # Test '-' and ' ' in the name (we skip index 0)
    zvol_name = ["bogus", "zvol", "zvol-L", "zvol L"]
    with ExitStack() as es:
        for i in range(1, 4):
            # preamble is the parent dataset path ('' at the top level)
            preamble = f"{ds_path + '/' if i > 1 else ''}"
            vol_path = f"{preamble}{zvol_name[i]}_{i}"
            # Create zvols
            for c in crange('a', 'b'):
                zv = es.enter_context(dataset(vol_path + c, {"type": "VOLUME", "volsize": 1048576}))
                zv_list.append(zv)
            # Create directories
            d = es.enter_context(directory(f"/mnt/{pool_name}/{preamble}dir_{i}"))
            dir_list.append(d)
            # Create files
            f = es.enter_context(mkfile(f"/mnt/{pool_name}/{preamble}file_{i}", 1048576))
            file_list.append(f)
            # Create datasets
            ds_path += f"{'/' if i > 1 else ''}dataset_{i}"
            ds = es.enter_context(dataset(ds_path))
            ds_list.append(ds)
        yield {'zv': zv_list, 'ds': ds_list, 'dir': dir_list, 'file': file_list}
def crange(c1, c2):
    """
    Generate the characters from `c1` to `c2`, inclusive.
    Simple lowercase ascii only.
    NOTE: Not safe for runtime code (validates with asserts)
    """
    start = ord(c1)
    stop = ord(c2)
    assert start < stop, f"'{c1}' must be 'less than' '{c2}'"
    # both endpoints must be lowercase ascii letters ('a'..'z' == 97..122)
    assert 97 <= start <= 122
    assert 97 <= stop <= 122
    for code in range(start, stop + 1):
        yield chr(code)
def get_systemctl_status(service):
    """ Return 'RUNNING' or 'STOPPED' for *service*, based on `systemctl status`. """
    try:
        res = ssh(f'systemctl status {service}')
    except AssertionError:
        # Return code is non-zero if service is not running
        return "STOPPED"
    # NOTE(review): assumes the status output always contains an 'Active' line;
    # action[0] would raise IndexError otherwise — confirm for all unit types.
    action = [line for line in res.splitlines() if line.lstrip().startswith('Active')]
    return "RUNNING" if action[0].split()[2] == "(running)" else "STOPPED"
def get_sysname(hostip, community):
    """Perform an SNMPv2c GET of SNMPv2-MIB::sysName.0 against *hostip* and
    return the bare sysName value (prefix stripped). Asserts on any SNMP error."""
    iterator = getCmd(SnmpEngine(),
                      CommunityData(community),
                      UdpTransportTarget((hostip, 161)),
                      ContextData(),
                      ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysName', 0)))
    errorIndication, errorStatus, errorIndex, varBinds = next(iterator)
    assert errorIndication is None, errorIndication
    assert errorStatus == 0, errorStatus
    # varBinds[0] stringifies as 'SNMPv2-MIB::sysName.0 = <value>'
    value = str(varBinds[0])
    _prefix = "SNMPv2-MIB::sysName.0 = "
    assert value.startswith(_prefix), value
    return value[len(_prefix):]
def validate_snmp_get_sysname_uses_same_ip(hostip):
    """Test that when we query a particular interface by SNMP the response comes from the same IP."""
    # Write the test in a manner that is portable between Linux and FreeBSD ... which means
    # *not* using 'any' as the interface name. We will use the interface supplied by the
    # test runner instead.
    print(f"Testing {hostip} ", end='')
    # Capture exactly two UDP/161 packets: the GET request and its response
    p = async_SSH_start(f"tcpdump -t -i {interface} -n udp port 161 -c2", user, password, hostip)
    # Give some time so that the tcpdump has started before we proceed
    sleep(5)
    get_sysname(hostip, COMMUNITY)
    # Now collect and process the tcpdump output
    outs, errs = async_SSH_done(p, 20)
    output = outs.strip()
    assert len(output), f"No output from tcpdump:{outs}"
    lines = output.split("\n")
    assert len(lines) == 2, f"Unexpected number of lines output by tcpdump: {outs}"
    for line in lines:
        assert line.split()[0] == 'IP'
    # print(errs)
    # Request line: 'IP <src> > <dst>: ...'; response line: 'IP <src> > ...'
    get_dst = lines[0].split()[3].rstrip(':')
    reply_src = lines[1].split()[1]
    assert get_dst == reply_src
    assert get_dst.endswith(".161")
def user_list_users(snmp_config):
    """Run an snmpwalk as a SNMP v3 user and return the listed value fields."""
    if snmp_config['v3_privproto']:
        security_level = 'authPriv'
        priv_part = f"-x {snmp_config['v3_privproto']} -X \"{snmp_config['v3_privpassphrase']}\" "
    else:
        security_level = 'authNoPriv'
        priv_part = ""

    cmd = (
        f"snmpwalk -v3 -u {snmp_config['v3_username']} -l {security_level} "
        f"-a {snmp_config['v3_authtype']} -A {snmp_config['v3_password']} "
        + priv_part
        + "localhost iso.3.6.1.6.3.15.1.2.2.1.3"
    )
    # This call will timeout if SNMP is not running
    output = ssh(cmd)
    return [line.split(':')[-1].strip(' \"') for line in output.splitlines()]
def v2c_snmpwalk(mib):
    """
    Run snmpwalk with v2c protocol and return the trailing value fields.

    mib is the item to be gathered. mib format examples:
        iso.3.6.1.6.3.15.1.2.2.1.3
        1.3.6.1.4.1.50536.1.2
    """
    # This call will timeout if SNMP is not running
    output = ssh(f"snmpwalk -v2c -cpublic localhost {mib}")
    values = []
    for line in output.splitlines():
        values.append(line.split(':')[-1].strip(' \"'))
    return values
# =====================================================================
# Tests
# =====================================================================
class TestSNMP:
    """End-to-end checks of the SNMP service: configuration, enable at boot,
    persistence of settings, source-IP behavior and SNMPv3 validators."""

    def test_configure_SNMP(self, initialize_and_start_snmp):
        """Confirm the starting config is the default, then apply test settings."""
        config = initialize_and_start_snmp
        # We should be starting with the default config
        # Check the hard way so that we can identify the culprit
        for k, v in EXPECTED_DEFAULT_CONFIG.items():
            assert config.get(k) == v, f'Expected {k}:"{v}", but found {k}:"{config.get(k)}"'
        # Make some changes that will be checked in a later test
        call('snmp.update', {
            'community': COMMUNITY,
            'traps': TRAPS,
            'contact': CONTACT,
            'location': LOCATION
        })

    def test_enable_SNMP_service_at_boot(self):
        """Enable the snmp service at boot and verify the flag sticks."""
        id = call('service.update', 'snmp', {'enable': True})
        assert isinstance(id, int)
        res = call('service.query', [['service', '=', 'snmp']])
        assert res[0]['enable'] is True

    def test_SNMP_service_is_running(self):
        """The snmp service must report state RUNNING."""
        res = call('service.query', [['service', '=', 'snmp']])
        assert res[0]['state'] == 'RUNNING'

    def test_SNMP_settings_are_preserved(self):
        """Settings applied in test_configure_SNMP must still be in effect."""
        data = call('snmp.config')
        assert data['community'] == COMMUNITY
        assert data['traps'] == TRAPS
        assert data['contact'] == CONTACT
        assert data['location'] == LOCATION

    def test_sysname_reply_uses_same_ip(self):
        """The SNMP reply must originate from the queried IP."""
        validate_snmp_get_sysname_uses_same_ip(truenas_server.ip)

    @skip_ha_tests
    def test_ha_sysname_reply_uses_same_ip(self):
        """On HA, the VIP and both node IPs must each answer from themselves."""
        validate_snmp_get_sysname_uses_same_ip(truenas_server.ip)
        validate_snmp_get_sysname_uses_same_ip(truenas_server.nodea_ip)
        validate_snmp_get_sysname_uses_same_ip(truenas_server.nodeb_ip)

    def test_SNMPv3_private_user(self):
        """
        The SNMP system user should always be available
        """
        # Reset the systemd restart counter
        reset_systemd_svcs("snmpd snmp-agent")
        # Make sure the createUser command is not present
        res = ssh("tail -2 /var/lib/snmp/snmpd.conf")
        assert 'createUser' not in res
        # Make sure the SNMP system user is a rwuser
        res = ssh("cat /etc/snmp/snmpd.conf")
        assert "rwuser snmpSystemUser" in res
        # List the SNMP users and confirm the system user
        # This also confirms the functionality of the system user
        res = call('snmp.get_snmp_users')
        assert "snmpSystemUser" in res

    @pytest.mark.parametrize('payload,attrib,errmsg', [
        ({'v3': False, 'community': ''},
         'snmp_update.community', 'This field is required when SNMPv3 is disabled'),
        ({'v3': True},
         'snmp_update.v3_username', 'This field is required when SNMPv3 is enabled'),
        ({'v3_authtype': 'AES'},
         'snmp_update.v3_authtype', 'Invalid choice: AES'),
        ({'v3': True, 'v3_authtype': 'MD5'},
         'snmp_update.v3_username', 'This field is required when SNMPv3 is enabled'),
        ({'v3_password': 'short'},
         'snmp_update.v3_password', 'Password must contain at least 8 characters'),
        ({'v3_privproto': 'SHA'},
         'snmp_update.v3_privproto', 'Invalid choice: SHA'),
        ({'v3_privproto': 'AES'},
         'snmp_update.v3_privpassphrase', 'This field is required when SNMPv3 private protocol is specified'),
    ])
    def test_v3_validators(self, payload, attrib, errmsg):
        """
        All these configuration updates should fail.
        """
        with pytest.raises(ValidationErrors) as ve:
            call('snmp.update', payload)
        if attrib:
            assert f"{attrib}" in ve.value.errors[0].attribute
        if errmsg:
            assert f"{errmsg}" in ve.value.errors[0].errmsg
@pytest.mark.usefixtures("add_SNMPv3_user")
class TestSNMPv3User:
    """SNMPv3 user lifecycle: function, persistence across restarts and v3
    toggles, attribute changes, deletion, and zvol reporting."""

    def test_SNMPv3_user_function(self):
        """The added v3 user must be able to run an snmpwalk."""
        res = user_list_users(SNMP_USER_CONFIG)
        assert SNMP_USER_NAME in res, f"Expected to find {SNMP_USER_NAME} in {res}"

    def test_SNMPv3_user_retained_across_service_restart(self):
        """The v3 user must survive a stop/start of the snmp service."""
        # Reset the systemd restart counter
        reset_systemd_svcs("snmpd snmp-agent")
        res = call('service.stop', 'snmp')
        assert res is True
        res = call('service.start', 'snmp')
        assert res is True
        res = call('snmp.get_snmp_users')
        assert "snmpSystemUser" in res
        assert SNMP_USER_NAME in res

    def test_SNMPv3_user_retained_across_v3_disable(self):
        """Disabling and re-enabling v3 must not drop the configured user."""
        # Disable and check
        res = call('snmp.update', {'v3': False})
        assert SNMP_USER_NAME in res['v3_username']
        res = call('snmp.get_snmp_users')
        assert SNMP_USER_NAME in res
        # Enable and check
        res = call('snmp.update', {'v3': True})
        assert SNMP_USER_NAME in res['v3_username']
        res = call('snmp.get_snmp_users')
        assert SNMP_USER_NAME in res

    @pytest.mark.parametrize('key,value', [
        ('reset', ''),   # Reset systemd counters
        ('v3_username', 'ixUser'),
        ('v3_authtype', 'SHA'),
        ('v3_password', 'SimplePassword'),
        ('reset', ''),   # Reset systemd counters
        ('v3_privproto', 'DES'),
        ('v3_privpassphrase', 'Pass phrase with spaces'),
        # Restore original user name
        ('v3_username', SNMP_USER_NAME)
    ])
    def test_SNMPv3_user_changes(self, key, value):
        """
        Make changes to the SNMPv3 user name, password, etc. and confirm user function.
        This also tests a pass phrase that includes spaces.
        NOTE: We include systemd counter resets because these calls require the most restarts.
        """
        if key == 'reset':
            # Reset the systemd restart counter
            reset_systemd_svcs("snmpd snmp-agent")
        else:
            res = call('snmp.update', {key: value})
            assert value in res[key]
            assert get_systemctl_status('snmp-agent') == "RUNNING"
            # Confirm user function after change
            user_config = call('snmp.config')
            res = user_list_users(user_config)
            assert user_config['v3_username'] in res

    def test_SNMPv3_user_delete(self):
        """Deleting the v3 user must clear its settings and revoke SNMP access."""
        # Make sure the user is currently present
        res = call('snmp.get_snmp_users')
        assert SNMP_USER_NAME in res
        res = call('snmp.update', {'v3': False, 'v3_username': ''})
        # v3_authtype is defaulted to 'SHA' in the DB
        assert not any([res['v3'], res['v3_username'], res['v3_password'],
                        res['v3_privproto'], res['v3_privpassphrase']]) and 'SHA' in res['v3_authtype']
        assert get_systemctl_status('snmp-agent') == "RUNNING"
        res = call('snmp.get_snmp_users')
        assert SNMP_USER_NAME not in res
        # Make sure the user cannot perform SNMP requests
        with pytest.raises(Exception) as ve:
            res = user_list_users(SNMP_USER_CONFIG)
        assert "Unknown user name" in str(ve.value)

    def test_zvol_reporting(self, create_nested_structure):
        """
        The TrueNAS snmp agent should list all zvols.
        TrueNAS zvols can be created on any ZFS pool or dataset.
        The snmp agent should list them all.
        snmpwalk -v2c -cpublic localhost 1.3.6.1.4.1.50536.1.2.1.1.2
        """
        # The expectation is that the snmp agent should list exactly the six zvols.
        created_items = create_nested_structure
        # Include a snapshot of one of the zvols
        with snapshot(created_items['zv'][0], "snmpsnap01"):
            snmp_res = v2c_snmpwalk('1.3.6.1.4.1.50536.1.2.1.1.2')
            assert all(v in created_items['zv'] for v in snmp_res), f"expected {created_items['zv']}, but found {snmp_res}"
| 16,335 | Python | .py | 370 | 36 | 123 | 0.59786 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,226 | test_zpool_status.py | truenas_middleware/tests/api2/test_zpool_status.py | import os
import pytest
from middlewared.test.integration.assets.pool import another_pool
from middlewared.test.integration.utils import call, ssh
POOL_NAME = 'test_format_pool'
ZFS_PART_UUID = '6a898cc3-1dd2-11b2-99a6-080020736631'
def get_disk_uuid_mapping(unused_disks):
    """Map each disk name in *unused_disks* to the /dev/disk/by-partuuid path
    of its first ZFS data partition (disks without one are omitted)."""
    mapping = {}
    for disk in call('device.get_disks', True, False).values():
        if disk['name'] not in unused_disks or not disk['parts']:
            continue
        for part in disk['parts']:
            if part['partition_type'] == ZFS_PART_UUID:
                mapping[disk['name']] = os.path.join('/dev/disk/by-partuuid', part['partition_uuid'])
                break
    return mapping
def get_pool_status(unused_disks, real_paths=False, replaced=False):
    """
    Build the expected `zpool.status` payload for the test pool.

    `unused_disks` is the list of disk names the pool was built from
    (cache=0, data=1, dedup=2, log=3, spare=4, special=5).  When
    `real_paths` is true the raw disk names are expected, otherwise their
    /dev/disk/by-partuuid paths.  When `replaced` is true the expectation
    reflects the state after the data disk (index 1) was replaced by the
    spare (index 4), forming a "spare-0" vdev.

    This replaces ~200 lines of copy-pasted dict literals (each wrapped in a
    redundant f-string) with two small helpers; the returned structure is
    value-identical.
    """
    disk_uuid_mapping = get_disk_uuid_mapping(unused_disks)

    def ident(idx):
        # Disk identifier as zpool.status reports it for the requested path mode.
        name = unused_disks[idx]
        return name if real_paths else disk_uuid_mapping[name]

    def entry(status, vdev_name, vdev_type, disk_idxs):
        # One per-disk status record; the same shape appears in both sections.
        return {
            'pool_name': POOL_NAME,
            'disk_status': status,
            'disk_read_errors': 0,
            'disk_write_errors': 0,
            'disk_checksum_errors': 0,
            'vdev_name': vdev_name,
            'vdev_type': vdev_type,
            'vdev_disks': [ident(i) for i in disk_idxs],
        }

    # Single-disk stripe vdevs unaffected by the spare replacement.
    stripe_vdevs = {'logs': 3, 'dedup': 2, 'special': 5, 'l2cache': 0}

    disks = {ident(i): entry('ONLINE', 'stripe', vtype, [i]) for vtype, i in stripe_vdevs.items()}
    if replaced:
        # Data disk and spare now form a "spare-0" data vdev.
        disks[ident(4)] = entry('ONLINE', 'spare-0', 'data', [1, 4])
        disks[ident(1)] = entry('ONLINE', 'spare-0', 'data', [1, 4])
        spares = {ident(4): entry('INUSE', 'stripe', 'spares', [4])}
        data = {
            ident(1): entry('ONLINE', 'spare-0', 'data', [1, 4]),
            ident(4): entry('ONLINE', 'spare-0', 'data', [1, 4]),
        }
    else:
        disks[ident(4)] = entry('AVAIL', 'stripe', 'spares', [4])
        disks[ident(1)] = entry('ONLINE', 'stripe', 'data', [1])
        spares = {ident(4): entry('AVAIL', 'stripe', 'spares', [4])}
        data = {ident(1): entry('ONLINE', 'stripe', 'data', [1])}

    pool = {vtype: {ident(i): entry('ONLINE', 'stripe', vtype, [i])} for vtype, i in stripe_vdevs.items()}
    pool['spares'] = spares
    pool['data'] = data
    return {'disks': disks, POOL_NAME: pool}
@pytest.fixture(scope='module')
def test_pool():
    """Create a pool exercising every vdev class (cache/data/dedup/log/spare/
    special), one disk each; yields (pool_info, unused_disks) and tears the
    pool down afterwards.  Skips if fewer than 7 unused disks are available."""
    unused_disks = call('disk.get_unused')
    if len(unused_disks) < 7:
        pytest.skip('Insufficient number of disks to perform these tests')
    with another_pool({
        'name': POOL_NAME,
        'topology': {
            'cache': [{'type': 'STRIPE', 'disks': [unused_disks[0]['name']]}],
            'data': [{'type': 'STRIPE', 'disks': [unused_disks[1]['name']]}],
            'dedup': [{'type': 'STRIPE', 'disks': [unused_disks[2]['name']]}],
            'log': [{'type': 'STRIPE', 'disks': [unused_disks[3]['name']]}],
            'spares': [unused_disks[4]['name']],
            'special': [{'type': 'STRIPE', 'disks': [unused_disks[5]['name']]}]
        },
        'allow_duplicate_serials': True,
    }) as pool_info:
        yield pool_info, unused_disks
@pytest.mark.parametrize('real_path', [True, False])
def test_zpool_status_format(test_pool, real_path):
    """zpool.status output must match the expected structure for both path modes."""
    assert call('zpool.status', {'name': POOL_NAME, 'real_paths': real_path}) == get_pool_status(
        [disk['name'] for disk in test_pool[1]], real_path
    )
def test_replaced_disk_zpool_status_format(test_pool):
    """After `zpool replace` of the data disk with the spare, zpool.status
    must report the "spare-0" vdev in both path modes."""
    disk_mapping = get_disk_uuid_mapping([disk['name'] for disk in test_pool[1]])
    data_disk = test_pool[1][1]['name']
    spare_disk = test_pool[1][4]['name']
    # Address the disks by partuuid basename, matching how the pool was built.
    ssh(
        f'zpool replace '
        f'{test_pool[0]["name"]} '
        f'{os.path.basename(disk_mapping[data_disk])} '
        f'{os.path.basename(disk_mapping[spare_disk])}',
    )
    for real_path in (True, False):
        assert call(
            'zpool.status', {"name": POOL_NAME, "real_paths": real_path}
        ) == get_pool_status(
            [disk['name'] for disk in test_pool[1]], real_path, True
        )
| 11,650 | Python | .py | 250 | 30.832 | 112 | 0.471803 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,227 | test_config_upload.py | truenas_middleware/tests/api2/test_config_upload.py | import contextlib
import io
import json
import sqlite3
import tarfile
import os
import pytest
from truenas_api_client import ClientException
from middlewared.test.integration.utils import call, session, url
@contextlib.contextmanager
def db_ops(db_name):
    """Create a throwaway SQLite database at *db_name*, seeded with a single
    invalid alembic_version row, and delete the file on exit."""
    try:
        conn = sqlite3.connect(db_name)
        try:
            # `with conn:` commits the two statements as one transaction.
            with conn:
                conn.execute("CREATE TABLE alembic_version (version_num VARCHAR(32) NOT NULL);")
                conn.execute("INSERT INTO alembic_version VALUES ('invalid')")
        finally:
            conn.close()
        yield
    finally:
        os.unlink(db_name)
@contextlib.contextmanager
def tar_ops(file_to_add):
    """Build a tar archive "config.tar" containing *file_to_add*, yield the
    archive's name, and remove the archive on exit (even if adding failed)."""
    tar_name = "config.tar"
    handle = None
    try:
        handle = tarfile.open(tar_name, "w")
        try:
            handle.add(file_to_add)
        finally:
            handle.close()
        # TarFile.name is what the original yielded; keep that contract.
        yield handle.name
    finally:
        if handle is not None:
            os.unlink(handle.name)
def test_invalid_database_file():
    """Uploading a config tar whose freenas-v1.db carries an unknown alembic
    revision must fail the config.upload job with a validation error."""
    db_name = "freenas-v1.db"
    with db_ops(db_name):
        with tar_ops(db_name) as tar_name:
            with session() as s:
                # Multipart upload: JSON job descriptor plus the tar payload.
                r = s.post(
                    f"{url()}/_upload",
                    files={
                        "data": (None, io.StringIO(json.dumps({
                            "method": "config.upload",
                            "params": [],
                        }))),
                        "file": (None, open(tar_name, "rb")),
                    },
                )
                r.raise_for_status()
                job_id = r.json()["job_id"]
                with pytest.raises(ClientException) as ve:
                    call("core.job_wait", job_id, job=True)
                assert 'Uploaded TrueNAS database file is not valid' in ve.value.error
                assert "Can't locate revision identified by 'invalid'" in ve.value.error
| 1,824 | Python | .py | 51 | 24.941176 | 96 | 0.552721 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,228 | test_snapshots.py | truenas_middleware/tests/api2/test_snapshots.py | import errno
import pytest
from middlewared.service_exception import CallError
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call
def common_min_max_txg_snapshot_test(test_min_txg=False, test_max_txg=False):
    """Shared body for the min_txg/max_txg snapshot-query tests: create 20
    snapshots, then verify the query count matches the expected window for a
    range of min/max createtxg filters.  At least one flag must be enabled."""
    assert not (test_min_txg is False and test_max_txg is False)
    total_snaps = 20
    with dataset('test') as test_dataset:
        # Record the createtxg of every snapshot as we make it.
        txgs = []
        for idx in range(total_snaps):
            snap = call('zfs.snapshot.create', {'dataset': test_dataset, 'name': f'snap_{idx}'})
            txgs.append(int(snap['properties']['createtxg']['value']))

        assert call('zfs.snapshot.query', [['dataset', '=', test_dataset]], {'count': True}) == len(txgs)

        for offset in range(total_snaps // 2 - 1):
            expected = txgs
            extra = {}
            if test_min_txg:
                expected = txgs[offset:]
                extra['min_txg'] = expected[0]
            if test_max_txg:
                expected = expected[:len(expected) // 2]
                extra['max_txg'] = expected[-1]
            count = call(
                'zfs.snapshot.query', [['dataset', '=', test_dataset]], {'count': True, 'extra': extra}
            )
            assert count == len(expected)
def test_min_txg_snapshot_query():
    """Snapshot query filtered by min_txg only."""
    common_min_max_txg_snapshot_test(True, False)
def test_max_txg_snapshot_query():
    """Snapshot query filtered by max_txg only."""
    common_min_max_txg_snapshot_test(False, True)
def test_min_max_txg_snapshot_query():
    """Snapshot query filtered by both min_txg and max_txg."""
    common_min_max_txg_snapshot_test(True, True)
def test_already_exists():
    """Creating a snapshot with a duplicate name must fail with EEXIST."""
    with dataset('test') as test_dataset:
        call('zfs.snapshot.create', {'dataset': test_dataset, 'name': 'snap'})
        with pytest.raises(CallError) as ve:
            call('zfs.snapshot.create', {'dataset': test_dataset, 'name': 'snap'})
        assert ve.value.errno == errno.EEXIST
| 1,909 | Python | .py | 39 | 40.025641 | 114 | 0.616838 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,229 | test_auth_otp.py | truenas_middleware/tests/api2/test_auth_otp.py | import io
import json
import pytest
from middlewared.test.integration.utils import call, session, ssh, url
@pytest.fixture(scope="module")
def otp_enabled():
    """Enable two-factor auth for this module; disable it again on teardown.
    Teardown is done via `midclt` over SSH rather than the API client —
    presumably because the API session itself is restricted once OTP is on;
    confirm."""
    call("auth.twofactor.update", {"enabled": True})
    try:
        yield
    finally:
        ssh("midclt call auth.twofactor.update '{\"enabled\": false}'")
def test_otp_http_basic_auth(otp_enabled):
    """REST calls over HTTP Basic Auth must be rejected (401) while OTP is enabled."""
    with session() as s:
        r = s.put(f"{url()}/api/v2.0/auth/twofactor/", data=json.dumps({"enabled": False}))
        assert r.status_code == 401
        assert r.text == "HTTP Basic Auth is unavailable when OTP is enabled"
def test_otp_http_basic_auth_upload(otp_enabled):
    """The upload endpoint must likewise reject HTTP Basic Auth (401) while OTP is enabled."""
    with session() as s:
        r = s.post(
            f"{url()}/_upload/",
            data={
                "data": json.dumps({
                    "method": "filesystem.put",
                    "params": ["/tmp/upload"],
                })
            },
            files={
                "file": io.BytesIO(b"test"),
            },
        )
        assert r.status_code == 401
        assert r.text == "HTTP Basic Auth is unavailable when OTP is enabled"
| 1,121 | Python | .py | 32 | 26.1875 | 91 | 0.56308 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,230 | test_system_lifetime.py | truenas_middleware/tests/api2/test_system_lifetime.py | import time
import pytest
from middlewared.test.integration.utils import call
from auto_config import ha
@pytest.mark.skipif(
    ha,
    reason="Cannot be tested on a HA system since rebooting this node will just fail over to another node",
)
def test_system_reboot():
    """Reboot via the API, wait for a new boot_id, then verify the reboot was
    audited with the supplied reason."""
    boot_id = call("system.boot_id")
    call("system.reboot", "Integration test")
    # Poll for up to ~180s until the middleware answers with a different boot id.
    for i in range(180):
        try:
            new_boot_id = call("system.boot_id")
        except Exception:
            # Middleware unreachable while rebooting; keep polling.
            pass
        else:
            if new_boot_id != boot_id:
                break
        time.sleep(1)
    else:
        assert False, "System did not reboot"
    audit = call("audit.query", {
        "services": ["MIDDLEWARE"],
        "query-filters": [
            ["event", "=", "REBOOT"],
        ],
    })
    assert audit[-1]["event_data"] == {"reason": "Integration test"}
| 867 | Python | .py | 29 | 22.931034 | 107 | 0.594692 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,231 | test_tunables.py | truenas_middleware/tests/api2/test_tunables.py | import pytest
from truenas_api_client import ValidationErrors
from middlewared.test.integration.utils import call, ssh
from middlewared.test.integration.utils.mock_binary import mock_binary
SYSCTL = "kernel.watchdog"
SYSCTL_DEFAULT_VALUE = "1"
SYSCTL_NEW_VALUE = "0"
ZFS = "zil_nocacheflush"
ZFS_DEFAULT_VALUE = "0"
ZFS_NEW_VALUE = "1"
def test_create_invalid_sysctl():
    """Creating a SYSCTL tunable for a nonexistent sysctl must fail validation on `var`."""
    with pytest.raises(ValidationErrors) as ve:
        call("tunable.create", {
            "type": "SYSCTL",
            "var": "kernel.truenas",
            "value": "1",
        }, job=True)
    assert ve.value.errors[0].attribute == "tunable_create.var"
def test_create_invalid_udev():
    """Creating a UDEV tunable that shadows a built-in TrueNAS rule must fail validation on `var`."""
    with pytest.raises(ValidationErrors) as ve:
        call("tunable.create", {
            "type": "UDEV",
            "var": "61-truenas-pmem",
            "value": "# disable built-in truenas rule to enable memory loss",
        }, job=True)
    assert ve.value.errors[0].attribute == "tunable_create.var"
def test_create_invalid_zfs():
    """Creating a ZFS tunable for a nonexistent module parameter must fail validation on `var`."""
    with pytest.raises(ValidationErrors) as ve:
        call("tunable.create", {
            "type": "ZFS",
            "var": "zfs_truenas",
            "value": "1",
        }, job=True)
    assert ve.value.errors[0].attribute == "tunable_create.var"
def test_sysctl_lifecycle():
    """Create/disable/enable/delete a SYSCTL tunable, checking the generated
    config file and the live sysctl value at every step.

    Fix: dropped the pointless f-prefix on placeholder-free literals
    (`f""` is just `""`); real f-strings are kept.
    """
    def assert_default_value():
        # No tunables configured: file absent/empty and sysctl at its default.
        assert ssh("cat /etc/sysctl.d/tunables.conf", check=False) == ""
        assert ssh(f"sysctl -n {SYSCTL}") == f"{SYSCTL_DEFAULT_VALUE}\n"

    def assert_new_value():
        assert ssh("cat /etc/sysctl.d/tunables.conf") == f"{SYSCTL}={SYSCTL_NEW_VALUE}\n"
        assert ssh(f"sysctl -n {SYSCTL}") == f"{SYSCTL_NEW_VALUE}\n"

    assert_default_value()

    tunable = call("tunable.create", {
        "type": "SYSCTL",
        "var": SYSCTL,
        "value": SYSCTL_NEW_VALUE,
    }, job=True)
    assert_new_value()

    call("tunable.update", tunable["id"], {
        "enabled": False,
    }, job=True)
    assert_default_value()

    call("tunable.update", tunable["id"], {
        "enabled": True,
    }, job=True)
    assert_new_value()

    call("tunable.delete", tunable["id"], job=True)
    assert_default_value()
def test_udev_lifecycle():
    """Create/disable/enable/delete a UDEV tunable, checking the rules file at
    every step.

    Fix: dropped the pointless f-prefix on placeholder-free string literals.
    """
    def assert_exists():
        assert ssh("cat /etc/udev/rules.d/10-disable-usb.rules") == "BUS==\"usb\", OPTIONS+=\"ignore_device\"\n"

    def assert_does_not_exist():
        assert ssh("cat /etc/udev/rules.d/10-disable-usb.rules", check=False) == ""

    tunable = call("tunable.create", {
        "type": "UDEV",
        "var": "10-disable-usb",
        "value": "BUS==\"usb\", OPTIONS+=\"ignore_device\""
    }, job=True)
    assert_exists()

    call("tunable.update", tunable["id"], {
        "enabled": False,
    }, job=True)
    assert_does_not_exist()

    call("tunable.update", tunable["id"], {
        "enabled": True,
    }, job=True)
    assert_exists()

    call("tunable.delete", tunable["id"], job=True)
    assert_does_not_exist()
def test_zfs_lifecycle():
    """Create/disable/enable/delete a ZFS tunable, checking the modprobe file
    and the live module parameter at every step.  update-initramfs is mocked
    out so the (slow) initramfs rebuild does not actually run.

    Fix: dropped the pointless f-prefix on placeholder-free literals
    (`f""` is just `""`); real f-strings are kept.
    """
    with mock_binary("/usr/sbin/update-initramfs", exitcode=0):
        def assert_default_value():
            assert ssh("cat /etc/modprobe.d/zfs.conf", check=False) == ""
            assert ssh(f"cat /sys/module/zfs/parameters/{ZFS}") == f"{ZFS_DEFAULT_VALUE}\n"

        def assert_new_value():
            assert ssh("cat /etc/modprobe.d/zfs.conf", check=False) == f"options zfs {ZFS}={ZFS_NEW_VALUE}\n"
            assert ssh(f"cat /sys/module/zfs/parameters/{ZFS}") == f"{ZFS_NEW_VALUE}\n"

        assert_default_value()

        tunable = call("tunable.create", {
            "type": "ZFS",
            "var": ZFS,
            "value": ZFS_NEW_VALUE,
        }, job=True)
        assert_new_value()

        call("tunable.update", tunable["id"], {
            "enabled": False,
        }, job=True)
        assert_default_value()

        call("tunable.update", tunable["id"], {
            "enabled": True,
        }, job=True)
        assert_new_value()

        call("tunable.delete", tunable["id"], job=True)
        assert_default_value()
def test_arc_max_set():
    """Setting zfs_arc_max through a ZFS tunable must be applied live; the
    tunable is always removed again, even if the readback fails."""
    tunable = call("tunable.create", {"type": "ZFS", "var": "zfs_arc_max", "value": 8675309}, job=True)
    try:
        val = ssh("cat /sys/module/zfs/parameters/zfs_arc_max")
    finally:
        call("tunable.delete", tunable["id"], job=True)
    assert int(val.strip()) == 8675309
    # NOTE(review): the root-readonly check below looks unrelated to ARC —
    # presumably it guards against the tunable plumbing remounting '/' rw; confirm.
    mount_info = call("filesystem.mount_info", [["mountpoint", "=", "/"]], {"get": True})
    assert "RO" in mount_info["super_opts"]
| 4,488 | Python | .py | 113 | 32.247788 | 113 | 0.597595 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,232 | test_replication_role.py | truenas_middleware/tests/api2/test_replication_role.py | import pytest
from middlewared.service_exception import ValidationErrors
from middlewared.test.integration.assets.account import unprivileged_user_client
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.assets.replication import replication_task
@pytest.mark.parametrize("has_pull", [False, True])
def test_create_pull_replication(has_pull):
    """Creating a PULL replication task requires REPLICATION_TASK_WRITE_PULL;
    plain REPLICATION_TASK_WRITE must be rejected on the `direction` field."""
    with dataset("src") as src:
        with dataset("dst") as dst:
            payload = {
                "name": "Test",
                "direction": "PULL",
                "transport": "LOCAL",
                "source_datasets": [src],
                "target_dataset": dst,
                "recursive": True,
                "naming_schema": ["%Y-%m-%d-%H-%M-%S"],
                "retention_policy": "NONE",
                "auto": False,
            }
            if has_pull:
                role = "REPLICATION_TASK_WRITE_PULL"
            else:
                role = "REPLICATION_TASK_WRITE"
            with unprivileged_user_client([role]) as c:
                if has_pull:
                    task = c.call("replication.create", payload)
                    c.call("replication.delete", task["id"])
                else:
                    with pytest.raises(ValidationErrors) as ve:
                        c.call("replication.create", payload)
                    assert ve.value.errors[0].attribute == "replication_create.direction"
@pytest.mark.parametrize("has_pull", [False, True])
def test_update_pull_replication(has_pull):
    """Flipping an existing task's direction to PULL requires
    REPLICATION_TASK_WRITE_PULL; plain write access must be rejected."""
    with dataset("src") as src:
        with dataset("dst") as dst:
            with replication_task({
                "name": "Test",
                "direction": "PUSH",
                "transport": "LOCAL",
                "source_datasets": [src],
                "target_dataset": dst,
                "recursive": True,
                "also_include_naming_schema": ["%Y-%m-%d-%H-%M-%S"],
                "retention_policy": "NONE",
                "auto": False,
            }) as t:
                payload = {
                    "direction": "PULL",
                    "naming_schema": ["%Y-%m-%d-%H-%M-%S"],
                    "also_include_naming_schema": [],
                }
                if has_pull:
                    role = "REPLICATION_TASK_WRITE_PULL"
                else:
                    role = "REPLICATION_TASK_WRITE"
                with unprivileged_user_client([role]) as c:
                    if has_pull:
                        c.call("replication.update", t["id"], payload)
                    else:
                        with pytest.raises(ValidationErrors) as ve:
                            c.call("replication.update", t["id"], payload)
                        assert ve.value.errors[0].attribute == "replication_update.direction"
@pytest.mark.parametrize("has_pull", [False, True])
def test_restore_push_replication(has_pull):
    """Restoring a PUSH task (which creates a reversed, i.e. PULL, task)
    requires REPLICATION_TASK_WRITE_PULL; plain write access must be rejected."""
    with dataset("src") as src:
        with dataset("dst") as dst:
            with replication_task({
                "name": "Test",
                "direction": "PUSH",
                "transport": "LOCAL",
                "source_datasets": [src],
                "target_dataset": dst,
                "recursive": True,
                "also_include_naming_schema": ["%Y-%m-%d-%H-%M-%S"],
                "retention_policy": "NONE",
                "auto": False,
            }) as t:
                with dataset("dst2") as dst2:
                    payload = {
                        "name": "Test restore",
                        "target_dataset": dst2,
                    }
                    if has_pull:
                        role = "REPLICATION_TASK_WRITE_PULL"
                    else:
                        role = "REPLICATION_TASK_WRITE"
                    with unprivileged_user_client([role]) as c:
                        if has_pull:
                            rt = c.call("replication.restore", t["id"], payload)
                            c.call("replication.delete", rt["id"])
                        else:
                            with pytest.raises(ValidationErrors) as ve:
                                c.call("replication.restore", t["id"], payload)
                            assert ve.value.errors[0].attribute == "replication_create.direction"
| 4,320 | Python | .py | 95 | 28.884211 | 97 | 0.47887 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,233 | test_replication.py | truenas_middleware/tests/api2/test_replication.py | import contextlib
import random
import string
import pytest
from middlewared.service_exception import ValidationErrors
from middlewared.test.integration.assets.keychain import localhost_ssh_credentials
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.assets.replication import replication_task
from middlewared.test.integration.assets.snapshot_task import snapshot_task
from middlewared.test.integration.utils import call, pool, ssh
# Minimal valid replication payload used as a template — presumably merged
# (field-overridden) by the parametrized test cases below; confirm against
# the test body, which is outside this chunk.
BASE_REPLICATION = {
    "direction": "PUSH",
    "transport": "LOCAL",
    "source_datasets": ["data"],
    "target_dataset": "data",
    "recursive": False,
    "auto": False,
    "retention_policy": "NONE",
}
@pytest.fixture(scope="module")
def ssh_credentials():
    """Module-scoped keychain SSH credential pointing back at localhost as root."""
    with localhost_ssh_credentials(username="root") as c:
        yield c
@pytest.fixture(scope="module")
def periodic_snapshot_tasks():
    """Create the module's periodic snapshot tasks (plus their datasets) and
    yield them keyed by a short alias; ExitStack tears everything down."""
    result = {}
    with contextlib.ExitStack() as stack:
        for k, v in {
            "data-recursive": {
                "dataset": "tank/data",
                "recursive": True,
                "lifetime_value": 1,
                "lifetime_unit": "WEEK",
                "naming_schema": "auto-%Y%m%d.%H%M%S-1w",
                "schedule": {},
            },
            "data-work-nonrecursive": {
                "dataset": "tank/data/work",
                "recursive": False,
                "lifetime_value": 1,
                "lifetime_unit": "WEEK",
                "naming_schema": "auto-%Y%m%d.%H%M%S-1w",
                "schedule": {},
            },
            "exclude": {
                "dataset": "tank/exclude",
                "recursive": True,
                "exclude": ["tank/exclude/work/garbage"],
                "lifetime_value": 1,
                "lifetime_unit": "WEEK",
                "naming_schema": "snap-%Y%m%d-%H%M-1w",
                "schedule": {},
            },
        }.items():
            # dataset() takes the path without the pool prefix — assumes the
            # test pool is named "tank"; TODO confirm against the pool fixture.
            stack.enter_context(dataset(v["dataset"].removeprefix("tank/")))
            result[k] = stack.enter_context(snapshot_task(v))
        yield result
@pytest.mark.parametrize("req,error", [
    # Push + naming-schema
    (dict(naming_schema=["snap-%Y%m%d-%H%M-1m"]), "naming_schema"),
    # Auto with both periodic snapshot task and schedule
    (dict(periodic_snapshot_tasks=["data-recursive"], schedule={"minute": "*/2"}, auto=True), None),
    # Auto with periodic snapshot task
    (dict(periodic_snapshot_tasks=["data-recursive"], auto=True), None),
    # Auto with schedule
    (dict(also_include_naming_schema=["snap-%Y%m%d-%H%M-2m"], schedule={"minute": "*/2"}, auto=True), None),
    # Auto without periodic snapshot task or schedule
    (dict(also_include_naming_schema=["snap-%Y%m%d-%H%M-2m"], auto=True), "auto"),
    # Pull + periodic snapshot tasks
    (dict(direction="PULL", periodic_snapshot_tasks=["data-recursive"]), "periodic_snapshot_tasks"),
    # Pull with naming schema
    (dict(direction="PULL", naming_schema=["snap-%Y%m%d-%H%M-1w"]), None),
    # Pull + also_include_naming_schema
    (dict(direction="PULL", naming_schema=["snap-%Y%m%d-%H%M-1w"], also_include_naming_schema=["snap-%Y%m%d-%H%M-2m"]),
     "also_include_naming_schema"),
    # Pull + hold_pending_snapshots
    (dict(direction="PULL", naming_schema=["snap-%Y%m%d-%H%M-1w"], hold_pending_snapshots=True),
     "hold_pending_snapshots"),
    # SSH+Netcat
    (dict(periodic_snapshot_tasks=["data-recursive"],
          transport="SSH+NETCAT", ssh_credentials=True, netcat_active_side="LOCAL", netcat_active_side_port_min=1024,
          netcat_active_side_port_max=50000),
     None),
    # Bad netcat_active_side_port_max
    (dict(transport="SSH+NETCAT", ssh_credentials=True, netcat_active_side="LOCAL", netcat_active_side_port_min=60000,
          netcat_active_side_port_max=50000),
     "netcat_active_side_port_max"),
    # SSH+Netcat + compression
    (dict(transport="SSH+NETCAT", compression="LZ4"), "compression"),
    # SSH+Netcat + speed limit
    (dict(transport="SSH+NETCAT", speed_limit=1024), "speed_limit"),
    # Does not exclude garbage
    (dict(source_datasets=["tank/exclude/work"], periodic_snapshot_tasks=["exclude"], recursive=True), "exclude"),
    # Does not exclude garbage
    (dict(source_datasets=["tank/exclude/work"], periodic_snapshot_tasks=["exclude"], recursive=True,
          exclude=["tank/exclude/work/garbage"]),
     None),
    # May not exclude if not recursive
    (dict(source_datasets=["tank/exclude/work"], periodic_snapshot_tasks=["exclude"], recursive=False), None),
    # Can't replicate excluded dataset
    (dict(source_datasets=["tank/exclude/work/garbage"], periodic_snapshot_tasks=["exclude"]),
     "source_datasets.0"),
    # Non-recursive exclude
    (dict(source_datasets=["tank/exclude/work"], periodic_snapshot_tasks=["exclude"], recursive=False,
          exclude=["tank/exclude/work/garbage"]),
     "exclude"),
    # Unrelated exclude
    (dict(source_datasets=["tank/exclude/work"], periodic_snapshot_tasks=["exclude"], recursive=True,
          exclude=["tank/data"]),
     "exclude.0"),
    # Does not require unrelated exclude
    (dict(source_datasets=["tank/exclude/work/important"], periodic_snapshot_tasks=["exclude"], recursive=True),
     None),
    # Custom retention policy
    (dict(periodic_snapshot_tasks=["data-recursive"],
          retention_policy="CUSTOM", lifetime_value=2, lifetime_unit="WEEK"), None),
    # Complex custom retention policy
    (dict(periodic_snapshot_tasks=["data-recursive"],
          retention_policy="CUSTOM", lifetime_value=2, lifetime_unit="WEEK", lifetimes=[
              dict(schedule={"hour": "0"}, lifetime_value=30, lifetime_unit="DAY"),
              dict(schedule={"hour": "0", "dow": "1"}, lifetime_value=1, lifetime_unit="YEAR"),
          ]), None),
    # name_regex
    (dict(name_regex="manual-.+"), None),
    (dict(direction="PULL", name_regex="manual-.+"), None),
    (dict(name_regex="manual-.+",
          retention_policy="CUSTOM", lifetime_value=2, lifetime_unit="WEEK"), "retention_policy"),
    # replicate
    (dict(source_datasets=["tank/data", "tank/data/work"], periodic_snapshot_tasks=["data-recursive"], replicate=True,
          recursive=True, properties=True),
     "source_datasets.1"),
    (dict(source_datasets=["tank/data"], periodic_snapshot_tasks=["data-recursive", "data-work-nonrecursive"],
          replicate=True, recursive=True, properties=True),
     "periodic_snapshot_tasks.1"),
])
def test_create_replication(ssh_credentials, periodic_snapshot_tasks, req, error):
    """Validate replication.create for each `req` payload merged over BASE_REPLICATION.
    When `error` is set, creation must fail with a ValidationError on that
    attribute; otherwise creation succeeds and a `replication.restore` of the
    task is exercised and cleaned up.
    """
    # Resolve placeholder values in the parametrized payload to real ids.
    if "ssh_credentials" in req:
        req["ssh_credentials"] = ssh_credentials["credentials"]["id"]
    if "periodic_snapshot_tasks" in req:
        req["periodic_snapshot_tasks"] = [periodic_snapshot_tasks[k]["id"] for k in req["periodic_snapshot_tasks"]]
    # Random name keeps repeated runs from colliding on the task name.
    name = "".join(random.choice(string.ascii_letters) for _ in range(64))
    data = dict(BASE_REPLICATION, name=name, **req)
    if error:
        with pytest.raises(ValidationErrors) as ve:
            with replication_task(data):
                pass
        assert any(e.attribute == f"replication_create.{error}" for e in ve.value.errors)
    else:
        with replication_task(data) as replication:
            restored = call("replication.restore", replication["id"], {
                "name": f"restore {name}",
                "target_dataset": "data/restore",
            })
            call("replication.delete", restored["id"])
@pytest.mark.parametrize("data,path,include", [
    ({"direction": "PUSH", "source_datasets": ["data/child"]}, "/mnt/data/", True),
    ({"direction": "PUSH", "source_datasets": ["data/child"]}, "/mnt/data/child", True),
    ({"direction": "PUSH", "source_datasets": ["data/child"]}, "/mnt/data/child/work", False),
    ({"direction": "PULL", "target_dataset": "data/child"}, "/mnt/data", True),
    ({"direction": "PULL", "target_dataset": "data/child"}, "/mnt/data/child", True),
    ({"direction": "PULL", "target_dataset": "data/child"}, "/mnt/data/child/work", False),
])
def test_query_attachment_delegate(ssh_credentials, data, path, include):
    """The replication attachment delegate reports tasks attached to `path`."""
    payload = {
        "name": "Test",
        "transport": "SSH",
        "source_datasets": ["source"],
        "target_dataset": "target",
        "recursive": False,
        "name_regex": ".+",
        "auto": False,
        "retention_policy": "NONE",
    }
    payload.update(data)
    if payload["transport"] == "SSH":
        payload["ssh_credentials"] = ssh_credentials["credentials"]["id"]
    with replication_task(payload) as task:
        matches = call("pool.dataset.query_attachment_delegate", "replication", path, True)
        if include:
            # Exactly one attachment, and it is our task.
            assert [m["id"] for m in matches] == [task["id"]]
        else:
            assert matches == []
@pytest.mark.parametrize("exclude_mountpoint_property", [True, False])
def test_run_onetime__exclude_mountpoint_property(exclude_mountpoint_property):
    """One-shot replication honors `exclude_mountpoint_property`.
    A source child gets `mountpoint=legacy`; after a full `replicate` run the
    destination either drops that property (True -> default inherited path)
    or keeps it (False -> "legacy").
    """
    with dataset("src") as src:
        with dataset("src/legacy") as src_legacy:
            ssh(f"zfs set mountpoint=legacy {src_legacy}")
            ssh(f"zfs snapshot -r {src}@2022-01-01-00-00-00")
            try:
                call("replication.run_onetime", {
                    "direction": "PUSH",
                    "transport": "LOCAL",
                    "source_datasets": [src],
                    "target_dataset": f"{pool}/dst",
                    "recursive": True,
                    "also_include_naming_schema": ["%Y-%m-%d-%H-%M-%S"],
                    "retention_policy": "NONE",
                    "replicate": True,
                    "readonly": "IGNORE",
                    "exclude_mountpoint_property": exclude_mountpoint_property
                }, job=True)
                mountpoint = ssh(f"zfs get -H -o value mountpoint {pool}/dst/legacy").strip()
                if exclude_mountpoint_property:
                    # Property excluded: destination falls back to its own default path.
                    assert mountpoint == f"/mnt/{pool}/dst/legacy"
                else:
                    assert mountpoint == "legacy"
            finally:
                # The destination is created outside the dataset() helpers; clean up manually.
                ssh(f"zfs destroy -r {pool}/dst", check=False)
| 10,110 | Python | .py | 209 | 39.483254 | 119 | 0.612243 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,234 | test_catalog_roles.py | truenas_middleware/tests/api2/test_catalog_roles.py | import pytest
from middlewared.test.integration.assets.roles import common_checks
@pytest.mark.parametrize('method, role, valid_role, valid_role_exception', (
    ('catalog.get_app_details', 'CATALOG_READ', True, True),
    ('catalog.get_app_details', 'CATALOG_WRITE', True, True),
    ('catalog.get_app_details', 'DOCKER_READ', False, False),
    ('app.latest', 'CATALOG_READ', True, False),
    ('app.latest', 'CATALOG_WRITE', True, False),
    ('app.latest', 'APPS_WRITE', True, False),
    ('app.available', 'CATALOG_READ', True, False),
    ('app.available', 'CATALOG_WRITE', True, False),
    ('app.available', 'APPS_WRITE', True, False),
    ('app.categories', 'CATALOG_READ', True, False),
    ('app.categories', 'CATALOG_WRITE', True, False),
    ('app.categories', 'APPS_WRITE', True, False),
    ('app.similar', 'CATALOG_READ', True, True),
    ('app.similar', 'CATALOG_WRITE', True, True),
    ('app.similar', 'APPS_WRITE', True, True),
    ('catalog.apps', 'CATALOG_READ', True, False),
    ('catalog.apps', 'CATALOG_WRITE', True, False),
    ('catalog.apps', 'DOCKER_READ', False, False),
    ('catalog.sync', 'CATALOG_READ', False, False),
    ('catalog.sync', 'CATALOG_WRITE', True, False),
    ('catalog.update', 'CATALOG_READ', False, True),
    ('catalog.update', 'CATALOG_WRITE', True, True),
))
def test_apps_roles(unprivileged_user_fixture, method, role, valid_role, valid_role_exception):
    """Each catalog/app endpoint is callable only with the expected RBAC role.
    `valid_role` says whether `role` should be accepted for `method`;
    `valid_role_exception` tells common_checks whether the call is expected to
    raise even when authorized (e.g. it needs real arguments).
    """
    common_checks(unprivileged_user_fixture, method, role, valid_role, valid_role_exception=valid_role_exception)
| 1,530 | Python | .py | 28 | 50.214286 | 113 | 0.669559 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,235 | test_draid.py | truenas_middleware/tests/api2/test_draid.py | import pytest
from truenas_api_client import ValidationErrors
from middlewared.test.integration.assets.pool import another_pool
from middlewared.test.integration.utils import call
POOL_NAME = 'test_draid_pool'
@pytest.mark.parametrize(
    'n_data,n_spare,n_parity', [
        (1, 0, 1),
        (1, 1, 1),
        (1, 0, 2),
        (1, 1, 2),
        (2, 2, 2),
        (1, 1, 3),
    ]
)
def test_valid_draid_pool_creation(n_data, n_spare, n_parity):
    """Create a dRAID pool for each valid data/spare/parity combination and
    verify the resulting vdev name encodes data/children/spares counts.
    If enough spare disks remain, also extend the pool with a second
    identically-shaped dRAID vdev via pool.update.
    """
    unused_disks = call('disk.get_unused')
    if len(unused_disks) < 5:
        pytest.skip('Insufficient number of disk to perform these test')
    # Total disks in the vdev = data + parity + distributed spares.
    children = n_data + n_parity + n_spare
    with another_pool({
        'name': POOL_NAME,
        'topology': {
            'data': [{
                'disks': [disk['name'] for disk in unused_disks[:children]],
                'type': f'DRAID{n_parity}',
                'draid_data_disks': n_data,
                'draid_spare_disks': n_spare
            }],
        },
        'allow_duplicate_serials': True,
    }) as draid:
        # ZFS names dRAID vdevs as draid<parity>:<data>d:<children>c:<spares>s-<index>
        assert draid['topology']['data'][0]['name'] == f'draid{n_parity}:{n_data}d:{children}c:{n_spare}s-0'
        unused_disk_for_update = call('disk.get_unused')
        if len(unused_disk_for_update) >= children:
            draid_pool_updated = call(
                'pool.update', draid['id'], {
                    'topology': {
                        'data': [{
                            'type': f'DRAID{n_parity}',
                            'disks': [disk['name'] for disk in unused_disk_for_update[:children]],
                            'draid_data_disks': n_data,
                            'draid_spare_disks': n_spare
                        }]
                    },
                    'allow_duplicate_serials': True,
                }, job=True)
            assert len(draid_pool_updated['topology']['data']) == 2
            assert draid_pool_updated['topology']['data'][1]['name'] == f'draid{n_parity}:{n_data}d:{children}c' \
                                                                        f':{n_spare}s-1'
@pytest.mark.parametrize(
    'n_data,n_spare,n_parity,minimum_disk', [
        (0, 0, 1, 2),
        (0, 2, 1, 2),
        (0, 0, 2, 3),
        (0, 0, 3, 4),
        (0, 2, 1, 2),
        (0, 2, 2, 3),
    ]
)
def test_invalid_draid_pool_creation(n_data, n_spare, n_parity, minimum_disk):
    """Invalid dRAID layouts (zero data disks / too few children) must be
    rejected by pool.create with the appropriate validation error.
    """
    unused_disks = call('disk.get_unused')
    if len(unused_disks) < 3:
        pytest.skip('Insufficient number of disk to perform these test')
    children = n_data + n_parity + n_spare
    with pytest.raises(ValidationErrors) as ve:
        call('pool.create', {
            'name': POOL_NAME,
            'topology': {
                'data': [{
                    'disks': [disk['name'] for disk in unused_disks[:children]],
                    'type': f'DRAID{n_parity}',
                    'draid_data_disks': n_data,
                    'draid_spare_disks': n_spare,
                }],
            },
            'allow_duplicate_serials': True,
        }, job=True)
    # With spares requested, the failure is attributed to the vdev type (data
    # disk math); otherwise it is a plain "not enough disks" error.
    if n_spare:
        assert ve.value.errors[0].attribute == 'pool_create.topology.data.0.type'
        assert ve.value.errors[0].errmsg == f'Requested number of dRAID data disks per group {n_data}' \
                                            f' is too high, at most {children - n_spare - n_parity}' \
                                            f' disks are available for data'
    else:
        assert ve.value.errors[0].attribute == 'pool_create.topology.data.0.disks'
        assert ve.value.errors[0].errmsg == f'You need at least {minimum_disk} disk(s) for this vdev type.'
| 3,643 | Python | .py | 86 | 29.930233 | 114 | 0.506768 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,236 | test_alert.py | truenas_middleware/tests/api2/test_alert.py | from time import sleep
import pytest
from auto_config import pool_name
from middlewared.test.integration.utils import call, ssh
# Prefix of the by-partuuid device symlinks used in zpool.status output.
ID_PATH = "/dev/disk/by-partuuid/"
def get_alert_by_id(alert_id):
    """Return the alert with the given id, or None if it is not listed."""
    for alert in call("alert.list"):
        if alert["id"] == alert_id:
            return alert
    return None
def wait_for_alert(timeout=120):
    """Poll alert.list (once per second, up to `timeout` tries) for a DEGRADED
    VolumeStatus alert on the test pool; return its id, or None on timeout."""
    for _ in range(timeout):
        match = next(
            (
                alert["id"]
                for alert in call("alert.list")
                if alert["source"] == "VolumeStatus"
                and alert["args"]["volume"] == pool_name
                and alert["args"]["state"] == "DEGRADED"
            ),
            None,
        )
        if match is not None:
            return match
        sleep(1)
    return None
@pytest.fixture(scope="module")
def degraded_pool_gptid():
    """Fault the first data vdev of the test pool via zinject and return its partuuid."""
    pool_info = call("pool.query", [["name", "=", pool_name]], {"get": True})
    gptid = pool_info["topology"]["data"][0]["path"].replace(ID_PATH, "")
    ssh(f"zinject -d {gptid} -A fault {pool_name}")
    return gptid
@pytest.fixture(scope="module")
def alert_id(degraded_pool_gptid):
    """Trigger alert processing and return the DEGRADED alert's id."""
    call("alert.process_alerts")
    found = wait_for_alert()
    if found is None:
        pytest.fail("Timed out while waiting for alert.")
    return found
def test_verify_the_pool_is_degraded(degraded_pool_gptid):
    """After zinject, the faulted vdev must report DEGRADED in zpool.status."""
    pool_status = call("zpool.status", {"name": pool_name})
    device = pool_status[pool_name]["data"][ID_PATH + degraded_pool_gptid]
    assert device["disk_status"] == "DEGRADED"
def test_dismiss_alert(alert_id):
    """Dismissing an alert flips its `dismissed` flag on."""
    call("alert.dismiss", alert_id)
    dismissed = get_alert_by_id(alert_id)
    assert dismissed["dismissed"] is True, dismissed
def test_restore_alert(alert_id):
    """Restoring a dismissed alert clears its `dismissed` flag."""
    call("alert.restore", alert_id)
    restored = get_alert_by_id(alert_id)
    assert restored["dismissed"] is False, restored
def test_clear_the_pool_degradation(degraded_pool_gptid):
    """`zpool clear` should take the injected vdev out of the DEGRADED state."""
    ssh(f"zpool clear {pool_name}")
    pool_status = call("zpool.status", {"name": pool_name})
    device = pool_status[pool_name]["data"][ID_PATH + degraded_pool_gptid]
    assert device["disk_status"] != "DEGRADED"
@pytest.mark.timeout(120)
def test_wait_for_the_alert_to_disappear(alert_id):
    """Once the pool is healthy again the alert should eventually be retired."""
    call("alert.process_alerts")
    still_present = True
    while still_present:
        still_present = get_alert_by_id(alert_id) is not None
        if still_present:
            sleep(1)
| 2,165 | Python | .py | 52 | 35.961538 | 89 | 0.657102 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,237 | test_pool_dataset_encrypted.py | truenas_middleware/tests/api2/test_pool_dataset_encrypted.py | import errno
import pytest
from middlewared.service_exception import CallError, ValidationErrors
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call
# Passphrase used for all encrypted test datasets in this module.
PASSPHRASE = "12345678"
def encryption_props():
    """Dataset-creation properties for passphrase-based encryption."""
    options = {"generate_key": False, "passphrase": PASSPHRASE}
    return {
        "encryption": True,
        "encryption_options": options,
        "inherit_encryption": False,
    }
def test_delete_locked_dataset():
    """stat() on a locked dataset's mountpoint must fail with ENOENT."""
    with dataset("test_delete_locked_dataset", encryption_props()) as ds:
        call("pool.dataset.lock", ds, job=True)
        with pytest.raises(CallError) as exc_info:
            call("filesystem.stat", f"/mnt/{ds}")
        assert exc_info.value.errno == errno.ENOENT
def test_unencrypted_dataset_within_encrypted_dataset():
    """Creating a plaintext child under an encrypted parent is rejected."""
    with dataset("test_pool_dataset_witin_encryted", encryption_props()) as ds:
        with pytest.raises(ValidationErrors) as exc_info:
            call("pool.dataset.create", {
                "name": f"{ds}/child",
                "encryption": False,
                "inherit_encryption": False,
            })
        expected = "Cannot create an unencrypted dataset within an encrypted dataset"
        assert any(expected in error.errmsg for error in exc_info.value.errors), exc_info
| 1,265 | Python | .py | 30 | 34.6 | 94 | 0.673486 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,238 | test_account_query_roles.py | truenas_middleware/tests/api2/test_account_query_roles.py | import pytest
from middlewared.test.integration.assets.account import unprivileged_user_client
@pytest.mark.parametrize("role", ["READONLY_ADMIN", "FULL_ADMIN"])
def test_user_role_in_account(role):
    """A user created with a single role reports exactly that role."""
    with unprivileged_user_client(roles=[role]) as client:
        me = client.call("user.query", [["username", "=", client.username]], {"get": True})
        assert me["roles"] == [role]
def test_user_role_full_admin_map():
    """A wildcard allowlist maps to FULL_ADMIN plus the HAS_ALLOW_LIST marker role."""
    with unprivileged_user_client(allowlist=[{"method": "*", "resource": "*"}]) as client:
        me = client.call("user.query", [["username", "=", client.username]], {"get": True})
        for expected_role in ("FULL_ADMIN", "HAS_ALLOW_LIST"):
            assert expected_role in me["roles"]
| 709 | Python | .py | 12 | 53.5 | 88 | 0.656522 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,239 | test_smb_groupmap.py | truenas_middleware/tests/api2/test_smb_groupmap.py | import pytest
from middlewared.test.integration.utils import call
from middlewared.test.integration.assets.account import group
# Offset added to a local group's DB id to form its algorithmic SMB RID.
BASE_RID_GROUP = 200000
@pytest.mark.parametrize('groupname,expected_memberof,expected_rid', [
    ('builtin_administrators', 'S-1-5-32-544', 512),
    ('builtin_guests', 'S-1-5-32-546', 514)
])
def test__local_builtin_accounts(groupname, expected_memberof, expected_rid):
    """Built-in local groups carry well-known RIDs and groupmap membership."""
    entry = call('group.query', [['group', '=', groupname]], {'get': True})
    # The RID is the last dash-separated component of the SID.
    assert int(entry['sid'].rsplit('-', 1)[-1]) == expected_rid
    gid_key = str(entry['gid'])
    groupmap = call('smb.groupmap_list')
    assert gid_key in groupmap['local_builtins']
    assert groupmap['local_builtins'][gid_key]['sid'] == entry['sid']
    assert entry['sid'] in call('smb.groupmap_listmem', expected_memberof)
def test__local_builtin_users_account():
    """builtin_users gets an algorithmic RID and belongs to Domain Users (545)."""
    entry = call('group.query', [['group', '=', 'builtin_users']], {'get': True})
    assert int(entry['sid'].rsplit('-', 1)[-1]) == entry['id'] + BASE_RID_GROUP
    assert entry['sid'] in call('smb.groupmap_listmem', 'S-1-5-32-545')
def test__new_group():
    """SID assignment for a new SMB group follows the enable/disable lifecycle:
    created -> SID present and mapped; smb=False -> SID hidden and unmapped;
    smb=True again -> original SID restored.
    """
    with group({"name": "group1"}) as g:
        # Validate GID is being assigned as expected
        assert g['sid'] is not None
        rid = int(g['sid'].split('-')[-1])
        assert rid == g['id'] + BASE_RID_GROUP
        groupmap = call('smb.groupmap_list')
        assert groupmap['local'][str(g['gid'])]['sid'] == g['sid']
        # Validate that disabling SMB removes SID value from query results
        call('group.update', g['id'], {'smb': False})
        new = call('group.get_instance', g['id'])
        assert new['sid'] is None
        # Check for presence in group_mapping.tdb
        groupmap = call('smb.groupmap_list')
        assert new['gid'] not in groupmap['local']
        # Validate that re-enabling restores SID value
        call('group.update', g['id'], {'smb': True})
        new = call('group.get_instance', g['id'])
        assert new['sid'] == g['sid']
        groupmap = call('smb.groupmap_list')
        assert str(new['gid']) in groupmap['local']
@pytest.mark.parametrize('name,gid,sid', [
    ('Administrators', 90000001, 'S-1-5-32-544'),
    ('Users', 90000002, 'S-1-5-32-545'),
    ('Guests', 90000003, 'S-1-5-32-546')
])
def test__builtins(name, gid, sid):
    """Well-known NT builtin groups are mapped to fixed reserved GIDs.

    Fix: the parametrized `sid` argument was previously unused, so the
    GID -> SID mapping itself was never verified; assert it explicitly
    (entries in smb.groupmap_list carry a 'sid' key, as checked elsewhere
    in this module).
    """
    builtins = call('smb.groupmap_list')['builtins']
    assert str(gid) in builtins
    # The reserved GID must map back to the expected well-known builtin SID.
    assert builtins[str(gid)]['sid'] == sid
| 2,482 | Python | .py | 52 | 41.788462 | 81 | 0.631797 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,240 | test_usage_reporting.py | truenas_middleware/tests/api2/test_usage_reporting.py | import pytest
from itertools import chain
from middlewared.test.integration.assets.nfs import nfs_server
from middlewared.test.integration.assets.ftp import ftp_server
from middlewared.test.integration.assets.pool import dataset as nfs_dataset
from middlewared.test.integration.utils import call
from middlewared.test.integration.utils.client import truenas_server
from protocols import ftp_connection, SSH_NFS, nfs_share
from auto_config import password, pool_name, user
class GatherTypes:
    # Maps each usage-gather category to the top-level keys it contributes to
    # the `usage.gather` payload. `test_gather_types` flattens these values and
    # compares the result against an actual sample, so this table must be kept
    # in sync with the middleware's usage reporting.
    expected = {
        'total_capacity': ['total_capacity'],
        'backup_data': ['data_backup_stats', 'data_without_backup_size'],
        'applications': ['apps', 'catalog_items', 'docker_images'],
        'filesystem_usage': ['datasets', 'zvols'],
        'ha_stats': ['ha_licensed'],
        'directory_service_stats': ['directory_services'],
        'cloud_services': ['cloud_services'],
        'hardware': ['hardware'],
        'network': ['network'],
        'system_version': ['platform', 'version'],
        'system': ['system_hash', 'usage_version', 'system'],
        'pools': ['pools', 'total_raw_capacity'],
        'services': ['services'],
        'nfs': ['NFS'],
        'ftp': ['FTP'],
        'sharing': ['shares'],
        'vms': ['vms'],
        'nspawn_containers': ['nspawn_containers'],
        # Add new gather type here
    }
@pytest.fixture(scope="module")
def get_usage_sample():
    """One `usage.gather` sample shared by every test in this module."""
    yield call('usage.gather')
def test_gather_types(get_usage_sample):
    """ Confirm we find the expected types. Fail if this test needs updating """
    sample = get_usage_sample
    expected = list(chain.from_iterable(GatherTypes.expected.values()))
    # If there is a mismatch it probably means this test module needs to be updated
    mismatch = set(expected).symmetric_difference(sample)
    assert mismatch == set(), (
        "Expected empty set. "
        f"Missing an entry in the output ({len(sample)} entries) or test needs updating ({len(expected)} entries)"
    )
def test_nfs_reporting(get_usage_sample):
    """ Confirm we are correctly reporting the number of connections """
    # Initial state should have NFSv[3,4] and no connections
    assert set(get_usage_sample['NFS']['enabled_protocols']) == set(["NFSV3", "NFSV4"])
    assert get_usage_sample['NFS']['num_clients'] == 0
    # Establish a client connection and confirm it is counted
    nfs_path = f'/mnt/{pool_name}/test_nfs'
    with nfs_dataset("test_nfs"):
        with nfs_share(nfs_path):
            with nfs_server():
                with SSH_NFS(truenas_server.ip, nfs_path,
                             user=user, password=password, ip=truenas_server.ip):
                    usage_sample = call('usage.gather')
                    assert usage_sample['NFS']['num_clients'] == 1
def test_ftp_reporting(get_usage_sample):
    """ Confirm we are correctly reporting the number of connections """
    # Initial state should have no connections
    assert get_usage_sample['FTP']['num_connections'] == 0
    # Establish two connections and confirm both are counted
    with ftp_server(), ftp_connection(truenas_server.ip), ftp_connection(truenas_server.ip):
        usage_sample = call('usage.gather')
        assert usage_sample['FTP']['num_connections'] == 2
# Possible TODO: Add validation of the entries
| 3,282 | Python | .py | 67 | 41.761194 | 114 | 0.658331 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,241 | test_group_utils.py | truenas_middleware/tests/api2/test_group_utils.py | from middlewared.test.integration.utils import call
from middlewared.test.integration.assets.account import group, user
def test_root_password_disabled():
    """group.get_password_enabled_users reports only password-enabled members.

    NOTE(review): the test name looks copy-pasted from another module; it
    actually exercises group.get_password_enabled_users, not root's password.
    """
    with group({"name": "group1"}) as g1, group({"name": "group2"}) as g2:
        with user({
            "username": "test",
            "full_name": "Test",
            "group_create": True,
            "groups": [g1["id"], g2["id"]],
            "password": "test1234",
        }) as u:
            enabled = call("group.get_password_enabled_users", [g1["gid"], g2["gid"]], [])
            assert [entry["id"] for entry in enabled] == [u["id"]]
| 673 | Python | .py | 15 | 32.8 | 93 | 0.515244 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,242 | test_client_job.py | truenas_middleware/tests/api2/test_client_job.py | import pprint
import time
import pytest
from middlewared.test.integration.utils import client, mock
# FIXME: Sometimes an equal message for `SUCCESS` state is being sent (or received) twice, we were not able
# to understand why and this does not break anything so we are not willing to waste our time investigating
# this.
# Also, `RUNNING` message sometimes is not received, this does not have a logical explanation as well and is not
# repeatable.
@pytest.mark.flaky(reruns=5, reruns_delay=5)
def test_client_job_callback():
    """A job submitted with a callback receives a RUNNING and a SUCCESS update.
    The mocked job sleeps 2s so the RUNNING state is observable; the callback
    runs on another thread, hence the settle sleep before asserting.
    """
    with mock("test.test1", """
        from middlewared.service import job

        @job()
        def mock(self, job, *args):
            import time
            time.sleep(2)
            return 42
    """):
        with client() as c:
            results = []

            c.call("test.test1", job=True, callback=lambda job: results.append(job.copy()))

            # callback is called in a separate thread, allow it to settle
            time.sleep(2)

            assert len(results) == 2, pprint.pformat(results, indent=2)
            assert results[0]['state'] == 'RUNNING'
            assert results[1]['state'] == 'SUCCESS'
            assert results[1]['result'] == 42
| 1,215 | Python | .py | 28 | 35.821429 | 112 | 0.648007 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,243 | test_account_shell_choices.py | truenas_middleware/tests/api2/test_account_shell_choices.py | import pytest
from middlewared.service_exception import ValidationErrors
from middlewared.test.integration.assets.account import group, user
from middlewared.test.integration.utils import call
def test_shell_choices_has_no_privileges():
    """The cli shell is not offered for a group with no privilege grants."""
    with group({
        "name": "test_no_privileges",
    }) as g:
        choices = call("user.shell_choices", [g["id"]])
        assert "/usr/bin/cli" not in choices
def test_shell_choices_has_privileges():
    """The cli shell becomes available once the group is granted a privilege."""
    with group({
        "name": "test_has_privileges",
    }) as g:
        privilege = call("privilege.create", {
            "name": "Test",
            "local_groups": [g["gid"]],
            "ds_groups": [],
            "allowlist": [{"method": "CALL", "resource": "system.info"}],
            "web_shell": False,
        })
        try:
            choices = call("user.shell_choices", [g["id"]])
            assert "/usr/bin/cli" in choices
        finally:
            call("privilege.delete", privilege["id"])
@pytest.mark.parametrize("group_payload", [
    lambda g: {"group": g["id"]},
    lambda g: {"group_create": True, "groups": [g["id"]]},
])
def test_cant_create_user_with_cli_shell_without_privileges(group_payload):
    """user.create must reject /usr/bin/cli when the group grants no privileges.

    Parametrized over both ways of attaching the group (primary group vs.
    auxiliary groups list).
    """
    with group({
        "name": "test_no_privileges",
    }) as g:
        with pytest.raises(ValidationErrors) as ve:
            with user({
                "username": "test",
                "full_name": "Test",
                "home": "/nonexistent",  # fixed: was a pointless f-string literal
                "password": "test1234",
                "shell": "/usr/bin/cli",
                **group_payload(g),
            }):
                pass

        assert ve.value.errors[0].attribute == "user_create.shell"
@pytest.mark.parametrize("group_payload", [
    lambda g: {"group": g["id"]},
    lambda g: {"group_create": True, "groups": [g["id"]]},
])
def test_can_create_user_with_cli_shell_with_privileges(group_payload):
    """user.create accepts /usr/bin/cli when the group carries a privilege.

    Parametrized over both ways of attaching the group; the privilege is
    always cleaned up afterwards.
    """
    with group({
        "name": "test_no_privileges",
    }) as g:
        privilege = call("privilege.create", {
            "name": "Test",
            "local_groups": [g["gid"]],
            "ds_groups": [],
            "allowlist": [{"method": "CALL", "resource": "system.info"}],
            "web_shell": False,
        })
        try:
            with user({
                "username": "test",
                "full_name": "Test",
                "home": "/nonexistent",  # fixed: was a pointless f-string literal
                "password": "test1234",
                "shell": "/usr/bin/cli",
                **group_payload(g),
            }):
                pass
        finally:
            call("privilege.delete", privilege["id"])
@pytest.mark.parametrize("group_payload", [
    lambda g: {"group": g["id"]},
    lambda g: {"groups": [g["id"]]},
])
def test_cant_update_user_with_cli_shell_without_privileges(group_payload):
    """user.update must reject /usr/bin/cli when the new group has no privileges."""
    with group({
        "name": "test_no_privileges",
    }) as g:
        with user({
            "username": "test",
            "full_name": "Test",
            "home": "/nonexistent",  # fixed: was a pointless f-string literal
            "password": "test1234",
            "group_create": True,
        }) as u:
            with pytest.raises(ValidationErrors) as ve:
                call("user.update", u["id"], {
                    "shell": "/usr/bin/cli",
                    **group_payload(g),
                })

            assert ve.value.errors[0].attribute == "user_update.shell"
@pytest.mark.parametrize("group_payload", [
    lambda g: {"group": g["id"]},
    lambda g: {"groups": [g["id"]]},
])
def test_can_update_user_with_cli_shell_with_privileges(group_payload):
    """user.update accepts /usr/bin/cli once the target group has a privilege."""
    with group({
        "name": "test_no_privileges",
    }) as g:
        privilege = call("privilege.create", {
            "name": "Test",
            "local_groups": [g["gid"]],
            "ds_groups": [],
            "allowlist": [{"method": "CALL", "resource": "system.info"}],
            "web_shell": False,
        })
        try:
            with user({
                "username": "test",
                "full_name": "Test",
                "home": "/nonexistent",  # fixed: was a pointless f-string literal
                "password": "test1234",
                "group_create": True,
            }) as u:
                call("user.update", u["id"], {
                    "shell": "/usr/bin/cli",
                    **group_payload(g),
                })
        finally:
            call("privilege.delete", privilege["id"])
25,244 | test_snapshot_task.py | truenas_middleware/tests/api2/test_snapshot_task.py | import pytest
from middlewared.service_exception import InstanceNotFound
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.assets.snapshot_task import snapshot_task
from middlewared.test.integration.utils import call
import sys
import os
# Legacy test-runner boilerplate: make the current working directory importable
# so sibling helper modules resolve when the test is run from the repo root.
apifolder = os.getcwd()
sys.path.append(apifolder)
def test_snapshot_task_is_not_deleted_when_deleting_a_child_dataset():
    """Removing a child dataset must leave the parent's recursive task alone."""
    with dataset("parent") as parent:
        with dataset("parent/child") as child:
            task_config = {
                "dataset": parent,
                "recursive": True,
                "lifetime_value": 1,
                "lifetime_unit": "DAY",
                "naming_schema": "%Y%m%d%H%M",
            }
            with snapshot_task(task_config) as task:
                call("pool.dataset.delete", child)
                assert call("pool.snapshottask.get_instance", task["id"])
def test_snapshot_task_is_deleted_when_deleting_a_parent_dataset():
    """Recursively deleting the task's ancestor dataset removes the task too."""
    with dataset("parent") as parent:
        with dataset("parent/child") as child:
            task_config = {
                "dataset": child,
                "recursive": True,
                "lifetime_value": 1,
                "lifetime_unit": "DAY",
                "naming_schema": "%Y%m%d%H%M",
            }
            with snapshot_task(task_config) as task:
                call("pool.dataset.delete", parent, {"recursive": True})
                with pytest.raises(InstanceNotFound):
                    call("pool.snapshottask.get_instance", task["id"])
| 1,470 | Python | .py | 34 | 32.705882 | 75 | 0.603641 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,245 | test_system_settings_roles.py | truenas_middleware/tests/api2/test_system_settings_roles.py | import pytest
from middlewared.test.integration.assets.roles import common_checks
@pytest.mark.parametrize('role,endpoint,payload,should_work,valid_role_exception,is_return_type_none', [
    ('SYSTEM_GENERAL_READ', 'system.general.config', [], True, False, False),
    ('READONLY_ADMIN', 'system.general.update', [{}], False, False, False),
    ('SYSTEM_GENERAL_WRITE', 'system.general.update', [{}], True, False, False),
    ('SYSTEM_ADVANCED_READ', 'system.advanced.config', [], True, False, False),
    ('READONLY_ADMIN', 'system.advanced.update', [{}], False, False, False),
    ('SYSTEM_ADVANCED_WRITE', 'system.advanced.update', [{}], True, False, False),
    ('SYSTEM_ADVANCED_READ', 'system.advanced.sed_global_password', [], True, False, False),
    ('READONLY_ADMIN', 'system.advanced.update_gpu_pci_ids', [None], False, True, False),
    ('SYSTEM_ADVANCED_WRITE', 'system.advanced.update_gpu_pci_ids', [None], True, True, True),
    ('SYSTEM_GENERAL_READ', 'system.general.local_url', [], True, False, False),
])
def test_system_settings_read_and_write_role(
    unprivileged_user_fixture, role, endpoint, payload, should_work, valid_role_exception, is_return_type_none
):
    """Verify read/write RBAC enforcement on system.general / system.advanced.
    `should_work` is whether `role` may call `endpoint`; the remaining flags
    tell common_checks what call outcome to expect when authorized.
    """
    common_checks(
        unprivileged_user_fixture, endpoint, role, should_work, is_return_type_none=is_return_type_none,
        valid_role_exception=valid_role_exception, method_args=payload
    )
| 1,388 | Python | .py | 21 | 61.714286 | 110 | 0.703812 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,246 | test_audit_basic.py | truenas_middleware/tests/api2/test_audit_basic.py | from middlewared.service_exception import ValidationError, CallError
from middlewared.test.integration.assets.account import user, unprivileged_user_client
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.assets.smb import smb_share
from middlewared.test.integration.utils import call, url
from middlewared.test.integration.utils.audit import get_audit_entry
from auto_config import ha
from protocols import smb_connection
from time import sleep
import os
import pytest
import requests
import secrets
import string
# Throwaway SMB account used to generate auditable share activity.
SMBUSER = 'audit-smb-user'
# Random 10-char password; regenerated on every test run.
PASSWD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
AUDIT_DATASET_CONFIG = {
    # keyname : "audit"=audit only setting, "zfs"=zfs dataset setting, "ro"=read-only (not a setting)
    'retention': 'audit',
    'quota': 'zfs',
    'reservation': 'zfs',
    'quota_fill_warning': 'zfs',
    'quota_fill_critical': 'zfs',
    'remote_logging_enabled': 'other',
    'space': 'ro'
}
MiB = 1024**2
GiB = 1024**3
# =====================================================================
# Fixtures and utilities
# =====================================================================
class AUDIT_CONFIG():
    # Factory-default audit settings; used to restore state after the tests run.
    defaults = {
        'retention': 7,
        'quota': 0,
        'reservation': 0,
        'quota_fill_warning': 75,
        'quota_fill_critical': 95
    }
def get_zfs(data_type, key, zfs_config):
    """ Get the equivalent ZFS value associated with the audit config setting """
    props = zfs_config['properties']
    refquota = props['refquota']['parsed'] or 0
    types = {
        'zfs': {
            'reservation': props['refreservation']['parsed'] or 0,
            'quota': refquota,  # audit quota == ZFS refquota
            'refquota': refquota,
            'quota_fill_warning': zfs_config['org.freenas:quota_warning'],
            'quota_fill_critical': zfs_config['org.freenas:quota_critical'],
        },
        'space': {
            'used': props['used']['parsed'],
            'used_by_snapshots': props['usedbysnapshots']['parsed'],
            'available': props['available']['parsed'],
            'used_by_dataset': props['usedbydataset']['parsed'],
            # We set 'refreservation' and there is no 'usedbyreservation'
            'used_by_reservation': props['usedbyrefreservation']['parsed'],
        },
    }
    return types[data_type][key]
def check_audit_download(report_path, report_type, tag=None):
    """ Download audit DB (root user)
    If requested, assert the tag is present
    INPUT: report_type ['CSV'|'JSON'|'YAML']
    RETURN: length of content (bytes)
    """
    # core.download returns (job id, URL path) for the generated report file
    job_id, url_path = call(
        "core.download", "audit.download_report",
        [{"report_name": os.path.basename(report_path)}],
        f"report.{report_type.lower()}"
    )
    r = requests.get(f"{url()}{url_path}")
    r.raise_for_status()
    if tag is not None:
        # Caller-supplied marker expected somewhere in the report body
        assert f"{tag}" in r.text
    return len(r.content)
@pytest.fixture(scope='class')
def initialize_for_smb_tests():
    """Class-scoped fixture: SMB-preset dataset + audited share + SMB user.

    Yields a dict with the created 'dataset', 'share' and 'user'; all three
    context managers tear everything down when the test class finishes.
    """
    with dataset('audit-test-basic', data={'share_type': 'SMB'}) as ds:
        # Share with SMB auditing enabled; guest access disallowed so every
        # operation is attributable to SMBUSER
        with smb_share(os.path.join('/mnt', ds), 'AUDIT_BASIC_TEST', {
            'purpose': 'NO_PRESET',
            'guestok': False,
            'audit': {'enable': True}
        }) as s:
            with user({
                'username': SMBUSER,
                'full_name': SMBUSER,
                'group_create': True,
                'password': PASSWD,
                'smb': True
            }) as u:
                yield {'dataset': ds, 'share': s, 'user': u}
@pytest.fixture(scope='class')
def init_audit():
    """ Provides the audit and dataset configs and cleans up afterward.

    Yields (audit config, audit dataset info); on teardown the audit
    service settings are restored to AUDIT_CONFIG.defaults.
    """
    try:
        # Renamed from 'dataset' to avoid shadowing the 'dataset' asset
        # context manager used elsewhere in this module.
        audit_dataset = call('audit.get_audit_dataset')
        config = call('audit.config')
        yield (config, audit_dataset)
    finally:
        # Restore defaults regardless of the outcome of the tests
        call('audit.update', AUDIT_CONFIG.defaults)
@pytest.fixture(scope='class')
def standby_audit_event():
    """ HA system: Create an audit event on the standby node
    Attempt to delete a built-in user on the standby node
    """
    event = "user.delete"
    username = "backup"
    # Renamed from 'user' to avoid shadowing the 'user' account asset helper
    builtin_user = call('user.query', [["username", "=", username]], {"select": ["id"], "get": True})
    # Generate an audit entry on the remote node: deleting a built-in user
    # must fail, and that failure is the audited event the tests look for.
    with pytest.raises(CallError):
        call('failover.call_remote', event, [builtin_user['id']])
    yield {"event": event, "username": username}
# =====================================================================
# Tests
# =====================================================================
class TestAuditConfig:
    """Validate the audit service's default configuration and updates."""

    def test_audit_config_defaults(self, init_audit):
        """Confirm the shipped audit config matches AUDIT_CONFIG.defaults."""
        # Renamed second element from 'dataset' to avoid shadowing the
        # 'dataset' asset helper used elsewhere in this module.
        (config, audit_dataset) = init_audit
        # Confirm existence of config entries (redundant [k for k in ...]
        # comprehension removed; dicts iterate their keys directly)
        for key in AUDIT_DATASET_CONFIG:
            assert key in config, str(config)
        # Confirm audit default config settings
        assert config['retention'] == AUDIT_CONFIG.defaults['retention']
        assert config['quota'] == AUDIT_CONFIG.defaults['quota']
        assert config['reservation'] == AUDIT_CONFIG.defaults['reservation']
        assert config['quota_fill_warning'] == AUDIT_CONFIG.defaults['quota_fill_warning']
        assert config['quota_fill_critical'] == AUDIT_CONFIG.defaults['quota_fill_critical']
        assert config['remote_logging_enabled'] is False
        for key in ['used', 'used_by_snapshots', 'used_by_dataset', 'used_by_reservation', 'available']:
            assert key in config['space'], str(config['space'])
        for service in ['MIDDLEWARE', 'SMB', 'SUDO']:
            assert service in config['enabled_services']
        # Confirm audit dataset settings track the audit config
        for key, key_type in AUDIT_DATASET_CONFIG.items():
            if key_type != 'zfs':
                continue
            assert get_zfs('zfs', key, audit_dataset) == config[key], f"config[{key}] = {config[key]}"

    def test_audit_config_dataset_defaults(self, init_audit):
        """ Confirm Audit dataset uses Audit default settings """
        (unused, ds_config) = init_audit
        assert ds_config['org.freenas:refquota_warning'] == AUDIT_CONFIG.defaults['quota_fill_warning']
        assert ds_config['org.freenas:refquota_critical'] == AUDIT_CONFIG.defaults['quota_fill_critical']

    def test_audit_config_updates(self):
        """
        This test validates that setting values has expected results.
        """
        new_config = call('audit.update', {'retention': 10})
        assert new_config['retention'] == 10
        # quota are in units of GiB
        new_config = call('audit.update', {'quota': 1})
        assert new_config['quota'] == 1
        audit_dataset = call('audit.get_audit_dataset')
        # ZFS value is in units of bytes. Convert to GiB for comparison.
        assert get_zfs('zfs', 'refquota', audit_dataset) // GiB == new_config['quota']
        # Confirm ZFS and audit config are in sync
        assert new_config['space']['available'] == get_zfs('space', 'available', audit_dataset)
        assert new_config['space']['used_by_dataset'] == get_zfs('space', 'used', audit_dataset)
        # Check that we're actually setting the quota by evaluating available space.
        # Change the quota to something more interesting.
        new_config = call('audit.update', {'quota': 2})
        assert new_config['quota'] == 2
        audit_dataset = call('audit.get_audit_dataset')
        assert get_zfs('zfs', 'refquota', audit_dataset) == 2*GiB  # noqa (allow 2*GiB)
        used_in_dataset = get_zfs('space', 'used_by_dataset', audit_dataset)
        assert 2*GiB - new_config['space']['available'] == used_in_dataset  # noqa (allow 2*GiB)
        new_config = call('audit.update', {'reservation': 1})
        assert new_config['reservation'] == 1
        assert new_config['space']['used_by_reservation'] != 0
        new_config = call('audit.update', {
            'quota_fill_warning': 70,
            'quota_fill_critical': 80
        })
        assert new_config['quota_fill_warning'] == 70
        assert new_config['quota_fill_critical'] == 80
        # Test disable reservation
        new_config = call('audit.update', {'reservation': 0})
        assert new_config['reservation'] == 0
        # Test disable quota
        new_config = call('audit.update', {'quota': 0})
        assert new_config['quota'] == 0
class TestAuditOps:
    """Exercise audit query/export/download against live SMB traffic."""

    def test_audit_query(self, initialize_for_smb_tests):
        # If this test has been run more than once on this VM, then
        # the audit DB _will_ record the creation.
        # Let's get the starting count.
        initial_ops_count = call('audit.query', {
            'services': ['SMB'],
            'query-filters': [['username', '=', SMBUSER]],
            'query-options': {'count': True}
        })
        share = initialize_for_smb_tests['share']
        with smb_connection(
            share=share['name'],
            username=SMBUSER,
            password=PASSWD,
        ) as c:
            fd = c.create_file('testfile.txt', 'w')
            # range(0, 3) -> range(3); index was unused
            for _ in range(3):
                c.write(fd, b'foo')
            c.read(fd, 0, 3)
            c.close(fd, True)
        # The audit DB is written asynchronously; poll until new entries
        # for our user show up or we run out of retries.
        retries = 2
        ops_count = initial_ops_count
        while retries > 0 and (ops_count - initial_ops_count) <= 0:
            sleep(5)
            ops_count = call('audit.query', {
                'services': ['SMB'],
                'query-filters': [['username', '=', SMBUSER]],
                'query-options': {'count': True}
            })
            retries -= 1
        assert ops_count > initial_ops_count, f"retries remaining = {retries}"

    def test_audit_order_by(self):
        """audit.query must honor ascending and descending order_by."""
        entries_forward = call('audit.query', {'services': ['SMB'], 'query-options': {
            'order_by': ['audit_id']
        }})
        entries_reverse = call('audit.query', {'services': ['SMB'], 'query-options': {
            'order_by': ['-audit_id']
        }})
        head_forward_id = entries_forward[0]['audit_id']
        tail_forward_id = entries_forward[-1]['audit_id']
        head_reverse_id = entries_reverse[0]['audit_id']
        tail_reverse_id = entries_reverse[-1]['audit_id']
        assert head_forward_id == tail_reverse_id
        assert tail_forward_id == head_reverse_id

    def test_audit_export(self):
        """Export the audit DB in each supported format and download it."""
        for backend in ['CSV', 'JSON', 'YAML']:
            report_path = call('audit.export', {'export_format': backend}, job=True)
            assert report_path.startswith('/audit/reports/root/')
            st = call('filesystem.stat', report_path)
            assert st['size'] != 0, str(st)
            content_len = check_audit_download(report_path, backend)
            assert content_len == st['size']

    def test_audit_export_nonroot(self):
        """Users with audit-read roles can export/download their own reports."""
        with unprivileged_user_client(roles=['SYSTEM_AUDIT_READ', 'FILESYSTEM_ATTRS_READ']) as c:
            me = c.call('auth.me')
            username = me['pw_name']
            for backend in ['CSV', 'JSON', 'YAML']:
                report_path = c.call('audit.export', {'export_format': backend}, job=True)
                assert report_path.startswith(f'/audit/reports/{username}/')
                st = c.call('filesystem.stat', report_path)
                assert st['size'] != 0, str(st)
                # Make the call as the client
                job_id, path = c.call(
                    "core.download", "audit.download_report",
                    [{"report_name": os.path.basename(report_path)}],
                    f"report.{backend.lower()}"
                )
                r = requests.get(f"{url()}{path}")
                r.raise_for_status()
                assert len(r.content) == st['size']

    @pytest.mark.parametrize('svc', ["MIDDLEWARE", "SMB"])
    def test_audit_timestamps(self, svc):
        """
        NAS-130373
        Confirm the timestamps are processed as expected
        """
        audit_entry = get_audit_entry(svc)
        ae_ts_ts = int(audit_entry['timestamp'].timestamp())
        ae_msg_ts = int(audit_entry['message_timestamp'])
        # Assertion message fixed: the quote around the first timestamp
        # was left unbalanced in the original f-string.
        assert abs(ae_ts_ts - ae_msg_ts) < 2, f"$date='{ae_ts_ts}', message_timestamp={ae_msg_ts}"
@pytest.mark.skipif(not ha, reason="Skip HA tests")
class TestAuditOpsHA:
    """Audit operations that span both controllers of an HA pair."""
    @pytest.mark.parametrize('remote_available', [True, False])
    def test_audit_ha_query(self, standby_audit_event, remote_available):
        '''
        Confirm:
        1) Ability to get a remote node audit event from a healthy remote node
        2) Generate an exception on remote node audit event get if the remote node is unavailable.
        NOTE: The standby_audit_event fixture generates the remote node audit event.
        '''
        event = standby_audit_event['event']
        username = standby_audit_event['username']
        payload = {
            "query-filters": [["event_data.method", "=", event], ["success", "=", False]],
            "query-options": {"select": ["event_data", "success"]},
            "remote_controller": True
        }
        job_id = None
        if not remote_available:
            # Reboot the standby node so the remote query has no peer to reach
            job_id = call('failover.reboot.other_node')
            # Let the reboot get churning
            sleep(2)
            # NOTE(review): ValidationError is presumably imported above this
            # excerpt — confirm against the full file.
            with pytest.raises(ValidationError) as e:
                call('audit.query', payload)
            assert "failed to communicate" in str(e.value)
            # Wait for the remote to return
            assert call("core.job_wait", job_id, job=True)
        else:
            # Handle delays in the audit database
            remote_audit_entry = []
            tries = 3
            while tries > 0 and remote_audit_entry == []:
                sleep(1)
                remote_audit_entry = call('audit.query', payload)
                if remote_audit_entry != []:
                    # Break BEFORE the decrement so 'tries > 0' below still
                    # holds when the entry arrives on the final attempt
                    break
                tries -= 1
            assert tries > 0, "Failed to get expected audit entry"
            assert remote_audit_entry != []
            description = remote_audit_entry[0]['event_data']['description']
            assert username in description, remote_audit_entry[0]['event_data']
    def test_audit_ha_export(self, standby_audit_event):
        """
        Confirm we can download 'Active' and 'Standby' audit DB.
        With a failed user delete on the 'Standby' controller download the
        audit DB from both controllers and confirm the failure is
        in the 'Standby' audit DB and not in the 'Active' audit DB.
        """
        assert standby_audit_event
        username = standby_audit_event['username']
        report_path_active = call('audit.export', {'export_format': 'CSV'}, job=True)
        report_path_standby = call('audit.export', {'export_format': 'CSV', 'remote_controller': True}, job=True)
        # Confirm entry NOT in active controller audit DB
        with pytest.raises(AssertionError):
            check_audit_download(report_path_active, 'CSV', f"Delete user {username}")
        # Confirm entry IS in standby controller audit DB
        check_audit_download(report_path_standby, 'CSV', f"Delete user {username}")
| 15,178 | Python | .py | 316 | 38.803797 | 113 | 0.590123 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,247 | test_rest_api_authentication.py | truenas_middleware/tests/api2/test_rest_api_authentication.py | # -*- coding=utf-8 -*-
import contextlib
import io
import json
import pytest
import requests
from middlewared.test.integration.assets.account import unprivileged_user as unprivileged_user_template
from middlewared.test.integration.assets.api_key import api_key
from middlewared.test.integration.utils import client
from middlewared.test.integration.utils.client import truenas_server
import os
import sys
sys.path.append(os.getcwd())
from functions import GET
@contextlib.contextmanager
def api_key_auth(allowlist):
    """Yield request kwargs authenticating via an API key tied to a
    throw-away unprivileged user constrained by *allowlist*."""
    account_ctx = unprivileged_user_template(
        username="unprivileged2",
        group_name="unprivileged_users2",
        privilege_name="Unprivileged users",
        allowlist=allowlist,
        web_shell=False,
    )
    with account_ctx as account:
        with api_key(account.username) as key:
            yield {"anonymous": True, "headers": {"Authorization": f"Bearer {key}"}}
@contextlib.contextmanager
def login_password_auth(allowlist):
    """Yield request kwargs using HTTP basic auth for a throw-away unprivileged user."""
    with unprivileged_user_template(
        username="unprivileged",
        group_name="unprivileged_users",
        privilege_name="Unprivileged users",
        allowlist=allowlist,
        web_shell=False,
    ) as t:
        yield dict(auth=(t.username, t.password))
@contextlib.contextmanager
def token_auth(allowlist):
    """Yield request kwargs using a short-lived token for a throw-away unprivileged user."""
    with unprivileged_user_template(
        username="unprivileged",
        group_name="unprivileged_users",
        privilege_name="Unprivileged users",
        allowlist=allowlist,
        web_shell=False,
    ) as t:
        with client(auth=(t.username, t.password)) as c:
            # NOTE(review): args presumably (ttl=300s, attrs={}, third flag) —
            # verify meanings against the auth.generate_token API docs.
            token = c.call("auth.generate_token", 300, {}, True)
            yield dict(anonymous=True, headers={"Authorization": f"Token {token}"})
@pytest.fixture(params=[api_key_auth, login_password_auth, token_auth])
def auth(request):
    # Parametrized over all three auth context managers so every test below
    # runs once per credential type (API key, basic auth, token).
    return request.param
def test_root_api_key_rest(auth):
    """We should be able to call a method with a root credential using REST API."""
    # Wildcard allowlist grants unrestricted access
    with auth([{"method": "*", "resource": "*"}]) as kwargs:
        results = GET('/system/info/', **kwargs)
        assert results.status_code == 200, results.text
def test_allowed_api_key_rest_plain(auth):
    """We should be able to request an endpoint with a credential that allows that request using REST API."""
    with auth([{"method": "GET", "resource": "/system/info/"}]) as kwargs:
        results = GET('/system/info/', **kwargs)
        assert results.status_code == 200, results.text
def test_allowed_api_key_rest_dynamic(auth):
    """We should be able to request a dynamic endpoint with a credential that allows that request using REST API."""
    # '{id_}' is the templated segment of the dynamic resource path
    with auth([{"method": "GET", "resource": "/user/id/{id_}/"}]) as kwargs:
        results = GET('/user/id/1/', **kwargs)
        assert results.status_code == 200, results.text
def test_denied_api_key_rest(auth):
    """
    We should not be able to request an endpoint with a credential that does not allow that request using REST API.
    """
    # Allowlist deliberately names a non-existent resource ('/system/info_/')
    with auth([{"method": "GET", "resource": "/system/info_/"}]) as kwargs:
        results = GET('/system/info/', **kwargs)
        assert results.status_code == 403
def _post_upload(kwargs):
    """POST a minimal filesystem.put upload to the _upload endpoint.

    Shared helper for the three upload tests below (the request body was
    triplicated verbatim). Returns the raw requests.Response so callers
    decide whether to expect success or 403.
    """
    kwargs.pop("anonymous", None)  # This key is only used for our test requests library
    return requests.post(
        f"http://{truenas_server.ip}/_upload",
        **kwargs,
        data={
            "data": json.dumps({
                "method": "filesystem.put",
                "params": ["/tmp/upload"],
            })
        },
        files={
            "file": io.BytesIO(b"test"),
        },
        timeout=10
    )


def test_root_api_key_upload(auth):
    """We should be able to call a method with root a credential using file upload endpoint."""
    with auth([{"method": "*", "resource": "*"}]) as kwargs:
        _post_upload(kwargs).raise_for_status()


def test_allowed_api_key_upload(auth):
    """We should be able to call a method with an API that allows that call using file upload endpoint."""
    with auth([{"method": "CALL", "resource": "filesystem.put"}]) as kwargs:
        _post_upload(kwargs).raise_for_status()


def test_denied_api_key_upload(auth):
    """
    We should not be able to call a method with a credential that does not allow that call using file upload endpoint.
    """
    # Allowlist deliberately names a non-existent method ('filesystem.put_')
    with auth([{"method": "CALL", "resource": "filesystem.put_"}]) as kwargs:
        assert _post_upload(kwargs).status_code == 403
| 5,386 | Python | .py | 134 | 31.783582 | 118 | 0.607423 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,248 | test_audit_pool.py | truenas_middleware/tests/api2/test_audit_pool.py | import pytest
from middlewared.test.integration.assets.pool import another_pool
from middlewared.test.integration.utils import call
from middlewared.test.integration.utils.audit import expect_audit_log
def test_pool_update_audit_success():
    """A successful pool.update must produce a matching, successful audit entry."""
    with another_pool() as pool:
        params = [pool['id'], {'autotrim': 'ON'}]
        with expect_audit_log([{
            'event_data': {
                'authenticated': True,
                'authorized': True,
                'method': 'pool.update',
                'params': params,
                # Spurious f prefix removed (f-string with no placeholders);
                # value is unchanged
                'description': 'Pool update test',
            },
            'success': True,
        }]):
            call('pool.update', *params, job=True)
def test_pool_update_audit_error():
    """A failing pool.update must produce a matching audit entry with success=False."""
    with another_pool() as pool:
        # Nonexistent spare disk guarantees the update fails
        params = [pool['id'], {'topology': {'spares': ['nonexistent']}}]
        with expect_audit_log([{
            'event_data': {
                'authenticated': True,
                'authorized': True,
                'method': 'pool.update',
                'params': params,
                # Spurious f prefix removed (f-string with no placeholders)
                'description': 'Pool update test',
            },
            'success': False,
        }]):
            # Broad Exception kept deliberately: only the audit record
            # matters here, not the exception type raised by the failure
            with pytest.raises(Exception):
                call('pool.update', *params, job=True)
| 1,261 | Python | .py | 33 | 27.090909 | 72 | 0.537643 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,249 | test_pool_export.py | truenas_middleware/tests/api2/test_pool_export.py | import pytest
from truenas_api_client import ClientException
from middlewared.test.integration.assets.pool import another_pool, pool as pool_name
from middlewared.test.integration.utils import call, disable_failover, mock
def test_systemdataset_migrate_error():
    """
    On HA this test will fail with the error below if failover is enabled:
    [ENOTSUP] Disable failover before exporting last pool on system.
    """
    with disable_failover():
        pool = call("pool.query", [["name", "=", pool_name]], {"get": True})
        # The triple-quoted string is executed server-side as the mock's
        # implementation; it must raise so the export error path is hit.
        with mock("systemdataset.update", """\
    from middlewared.service import job, CallError
    @job()
    def mock(self, job, *args):
        raise CallError("Test error")
"""):
            with pytest.raises(ClientException) as e:
                call("pool.export", pool["id"], job=True)
            # Export must surface the system-dataset reconfiguration failure
            assert e.value.error == (
                "[EFAULT] This pool contains system dataset, but its reconfiguration failed: [EFAULT] Test error"
            )
def test_destroy_offline_disks():
    """Exporting a pool with destroy=True must also wipe its offlined disks."""
    with another_pool(topology=(2, lambda disks: {
        "data": [
            {"type": "MIRROR", "disks": disks[0:2]},
        ],
    })) as pool:
        disk = pool["topology"]["data"][0]["children"][0]
        call("pool.offline", pool["id"], {"label": disk["guid"]})
        call("pool.export", pool["id"], {"destroy": True}, job=True)
        # Original comprehension shadowed its own loop variable
        # ('unused for unused in ...'); use next() to grab the entry directly.
        entry = next(u for u in call("disk.get_unused") if u["name"] == disk["disk"])
        # The wiped disk must no longer report membership in an exported pool
        assert entry["exported_zpool"] is None
| 1,570 | Python | .py | 33 | 38.909091 | 113 | 0.614829 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,250 | test_nfs_share_crud_roles.py | truenas_middleware/tests/api2/test_nfs_share_crud_roles.py | import pytest
from middlewared.test.integration.assets.roles import common_checks
@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_NFS_READ"])
def test_read_role_can_read(unprivileged_user_fixture, role):
    # Read-level roles may query shares and read the aggregate client count
    common_checks(unprivileged_user_fixture, "sharing.nfs.query", role, True, valid_role_exception=False)
    common_checks(unprivileged_user_fixture, "nfs.client_count", role, True, valid_role_exception=False)
@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_NFS_READ"])
def test_read_role_cant_write(unprivileged_user_fixture, role):
    # ...but must be denied mutating calls and per-client detail endpoints
    common_checks(unprivileged_user_fixture, "sharing.nfs.create", role, False)
    common_checks(unprivileged_user_fixture, "sharing.nfs.update", role, False)
    common_checks(unprivileged_user_fixture, "sharing.nfs.delete", role, False)
    common_checks(unprivileged_user_fixture, "nfs.get_nfs3_clients", role, False)
    common_checks(unprivileged_user_fixture, "nfs.get_nfs4_clients", role, False)
@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_NFS_WRITE"])
def test_write_role_can_write(unprivileged_user_fixture, role):
    # Write-level roles can mutate shares, read client details, and control
    # the nfs service lifecycle
    common_checks(unprivileged_user_fixture, "sharing.nfs.create", role, True)
    common_checks(unprivileged_user_fixture, "sharing.nfs.update", role, True)
    common_checks(unprivileged_user_fixture, "sharing.nfs.delete", role, True)
    common_checks(unprivileged_user_fixture, "nfs.get_nfs3_clients", role, True, valid_role_exception=False)
    common_checks(unprivileged_user_fixture, "nfs.get_nfs4_clients", role, True, valid_role_exception=False)
    common_checks(
        unprivileged_user_fixture, "service.start", role, True, method_args=["nfs"], valid_role_exception=False
    )
    common_checks(
        unprivileged_user_fixture, "service.restart", role, True, method_args=["nfs"], valid_role_exception=False
    )
    common_checks(
        unprivileged_user_fixture, "service.reload", role, True, method_args=["nfs"], valid_role_exception=False
    )
    common_checks(
        unprivileged_user_fixture, "service.stop", role, True, method_args=["nfs"], valid_role_exception=False
    )
| 2,114 | Python | .py | 32 | 61.34375 | 113 | 0.74506 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,251 | test_002_system_license.py | truenas_middleware/tests/api2/test_002_system_license.py | import os
import time
import pytest
from auto_config import ha, ha_license
from middlewared.test.integration.utils import call
@pytest.mark.skipif(not ha, reason='Test only valid for HA')
def test_apply_and_verify_license():
    """Apply an HA license and verify it propagates to the standby node."""
    if ha_license:
        _license_string = ha_license
    else:
        with open(os.environ.get('license_file', '/root/license.txt')) as f:
            _license_string = f.read()
    # apply license
    call('system.license_update', _license_string)
    # verify license is applied
    assert call('failover.licensed') is True
    retries = 30
    sleep_time = 1
    # Index was unused; '_' makes that explicit
    for _ in range(retries):
        if call('failover.call_remote', 'failover.licensed') is False:
            # we call a hook that runs in a background task
            # so give it a bit to propagate to other controller
            time.sleep(sleep_time)
        else:
            break
    else:
        # pytest.fail instead of `assert False`: asserts are stripped when
        # Python runs with -O, which would silently pass this timeout path
        pytest.fail(f'Timed out after {sleep_time * retries}s waiting on license to sync to standby')
| 1,002 | Python | .py | 27 | 30.666667 | 102 | 0.669421 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,252 | test_openssl.py | truenas_middleware/tests/api2/test_openssl.py | import pytest
from middlewared.test.integration.utils import call, ssh
from auto_config import ha
retry = 5
fips_version = "3.0.9"  # OpenSSL FIPS provider version expected in `openssl list -providers`
# Sometimes this test fails because the testing environment has broken failover (randomly. Fun transient error. Reports a failed heartbeat).
@pytest.mark.flaky(reruns=retry, reruns_delay=5)
@pytest.mark.skipif(not ha, reason='Test only valid for HA')
def test_fips_version():
    """Enable FIPS, capture the provider list, disable FIPS, then verify both states."""
    # The reason we have a set of commands in a payload is because of some annoying FIPS technicalities.
    # Basically, when FIPS is enabled, we can't use SSH because the SSH key used by root isn't using a FIPS provided algorithm. (this might need to be investigated further)
    # To allow testing, we write our FIPS information to a file during this phase, and then go disable FIPS to get SSH back all in one joint command.
    payload = """midclt call --job system.security.update '{"enable_fips": true}' && openssl list -providers > /root/osslproviders && midclt call system.reboot.info >> /root/osslproviders && midclt call --job system.security.update '{"enable_fips": false}'"""
    ssh(payload, complete_response=True, timeout=300)
    # Check that things are what we expect when FIPS was enabled
    enabled_info = ssh("cat /root/osslproviders")
    assert fips_version in enabled_info
    assert "FIPS configuration was changed." in enabled_info
    # Check that we no longer have FIPS enabled
    assert fips_version not in ssh("openssl list -providers")
    assert call("system.reboot.info")["reboot_required_reasons"] == []
| 1,553 | Python | .py | 21 | 70.333333 | 259 | 0.752787 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,253 | test_070_alertservice.py | truenas_middleware/tests/api2/test_070_alertservice.py | from middlewared.test.integration.utils import call
def test_alert_gets():
    """alertservice.query must be callable without error."""
    call("alertservice.query")


def test_alertservice():
    """Full create/update/delete cycle for an alert service entry."""
    # Keys checked against the API response after create and update
    data = ["name", "type", "attributes", "level", "enabled"]
    # create
    payload = {
        "name": "Critical Email Test",
        "type": "Mail",
        "attributes": {
            "email": "eric.spam@ixsystems.com"
        },
        "level": "CRITICAL",
        "enabled": True
    }
    results = call("alertservice.create", payload)
    for key in data:
        assert results[key] == payload[key]
    alertservice_id = results['id']
    # update
    payload = {
        "name": "Warning Email Test",
        "type": "Mail",
        "attributes": {
            # NOTE(review): trailing '@' in this address looks accidental —
            # kept byte-identical; confirm whether it is intentional
            "email": "william.spam@ixsystems.com@"
        },
        "level": "WARNING",
        "enabled": False
    }
    # Spurious f prefix removed from the method-name literal (F541)
    results = call("alertservice.update", alertservice_id, payload)
    for key in data:
        assert results[key] == payload[key]
    # delete
    call("alertservice.delete", alertservice_id)
    assert call("alertservice.query", [["id", "=", alertservice_id]]) == []
| 1,094 | Python | .py | 35 | 24.314286 | 75 | 0.5804 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,254 | test_filesystem__file_tail_follow.py | truenas_middleware/tests/api2/test_filesystem__file_tail_follow.py | import time
import pytest
from middlewared.test.integration.utils import client, ssh
import sys
import os
apifolder = os.getcwd()
sys.path.append(apifolder)
@pytest.mark.flaky(reruns=5, reruns_delay=5)
def test_filesystem__file_tail_follow__grouping():
    """Tail-follow events must arrive grouped into ~0.5s batches, not per line."""
    ssh("echo > /tmp/file_tail_follow.txt")
    with client() as c:
        received = []
        # Event callback; 'type' shadows the builtin but the subscribe API
        # passes it positionally, so it is kept as-is here
        def append(type, **kwargs):
            received.append((time.monotonic(), kwargs["fields"]["data"]))
        c.subscribe("filesystem.file_tail_follow:/tmp/file_tail_follow.txt", append)
        # Append 200 lines over ~2-3 seconds to force batching
        ssh("for i in `seq 1 200`; do echo test >> /tmp/file_tail_follow.txt; sleep 0.01; done")
        # Settle down things
        time.sleep(1)
        received = received[1:]  # Initial file contents
        # We were sending this for 2-3 seconds, so we should have received 4-6 blocks with 0.5 sec interval
        assert 4 <= len(received) <= 6, str(received)
        # All blocks should have been received uniformly in time
        assert all(0.4 <= b2[0] - b1[0] <= 1.0 for b1, b2 in zip(received[:-1], received[1:])), str(received)
        # All blocks should contain more or less same amount of data
        assert all(len(block[1].split("\n")) <= 60 for block in received[:-1]), str(received)
        # One single send
        ssh("echo finish >> /tmp/file_tail_follow.txt")
        time.sleep(1)
        assert received[-1][1] == "finish\n"
| 1,405 | Python | .py | 29 | 41.62069 | 109 | 0.650037 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,255 | test_mail_admins.py | truenas_middleware/tests/api2/test_mail_admins.py | import pytest
from middlewared.test.integration.assets.account import user
from middlewared.test.integration.utils import call
# Account that IS a local administrator (expected in the email list)
MAILUSER = 'wilbur'
MAILADDR = 'wilbur.spam@ixsystems.com'
# Account that is NOT an administrator (must be excluded from the list)
NONMAIL_USER = 'wilburette'
NONMAIL_ADDR = 'wilburette.spam@ixsystems.com'
PASSWD = 'abcd1234'
@pytest.fixture(scope='module')
def full_admin_user():
    """Create one non-admin user and one admin user; yield the admin."""
    # NOTE(review): gid 544 — presumably the builtin_administrators group;
    # confirm against the group table.
    ba_id = call('group.query', [['gid', '=', 544]], {'get': True})['id']
    with user({
        'username': NONMAIL_USER,
        'full_name': NONMAIL_USER,
        'group_create': True,
        'email': NONMAIL_ADDR,
        'password': PASSWD
    }, get_instance=False):
        with user({
            'username': MAILUSER,
            'full_name': MAILUSER,
            'group_create': False,
            'email': MAILADDR,
            'group': ba_id,
            'password': PASSWD
        }, get_instance=True) as u:
            yield u
def test_mail_administrators(full_admin_user):
    """Only addresses of local administrators appear in the admin email list."""
    admin_emails = call('mail.local_administrators_emails')
    # The admin account's address is present; the non-admin's is not
    assert MAILADDR in admin_emails
    assert NONMAIL_ADDR not in admin_emails
| 1,056 | Python | .py | 31 | 27.580645 | 73 | 0.636899 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,256 | test_disk_stats.py | truenas_middleware/tests/api2/test_disk_stats.py | import os
import pytest
from middlewared.test.integration.assets.pool import another_pool
from middlewared.test.integration.utils import call, ssh
def get_test_file_path(pool_name: str) -> str:
    """Return the path of the throw-away test file inside *pool_name*'s mountpoint."""
    mountpoint = os.path.join('/mnt', pool_name)
    return os.path.join(mountpoint, 'test_file')
@pytest.fixture(scope='module')
def disk_pool():
    """Create a single-disk pool with sync=ALWAYS; yield (pool name, disk entry)."""
    with another_pool() as pool:
        # sync=ALWAYS forces writes to hit the disk immediately so the
        # stats assertion below sees the full amount written
        call('pool.dataset.update', pool['name'], {'sync': 'ALWAYS'})
        pool_disks = call('disk.query', [['pool', '=', pool['name']]], {'extra': {'pools': True}})
        assert len(pool_disks) == 1, f'Expected 1 disk in pool {pool["name"]}, got {len(pool_disks)}'
        yield pool['name'], pool_disks[0]
def test_disk_write_stats(disk_pool):
    """Writing a known amount of data must be reflected in netdata's disk write stats."""
    pool_name, pool_disk = disk_pool
    disk_identifier = pool_disk['identifier']
    disk_stats_before_write = call('netdata.get_disk_stats')[disk_identifier]
    test_file_path = get_test_file_path(pool_name)
    # Amount of data to write
    num_of_mb = 100
    data_size = num_of_mb * 1024 * 1024  # 100 MB
    ssh(f'dd if=/dev/urandom of={test_file_path} bs=1M count={num_of_mb} oflag=sync')
    disk_stats_after_write = call('netdata.get_disk_stats')[disk_identifier]
    # 'writes' counter appears to be in KiB; 10% slack covers metadata overhead
    expected_write_in_kb = data_size / 1024
    actual_writes = disk_stats_after_write['writes'] - disk_stats_before_write['writes']
    assert actual_writes == pytest.approx(expected_write_in_kb, rel=0.1)
| 1,376 | Python | .py | 26 | 48.076923 | 101 | 0.680867 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,257 | test_rest_api_download.py | truenas_middleware/tests/api2/test_rest_api_download.py | import errno
import time
import pytest
import requests
from middlewared.service_exception import CallError
from middlewared.test.integration.assets.account import unprivileged_user
from middlewared.test.integration.utils import call, client, session, url
@pytest.mark.parametrize("method", ["test_download_pipe", "test_download_unchecked_pipe"])
def test_download(method):
    """Both pipe test endpoints stream the posted JSON back as octet-stream."""
    with session() as s:
        r = s.post(
            f"{url()}/api/v2.0/resttest/{method}",
            headers={"Content-type": "application/json"},
            data="{\"key\": \"value\"}",
        )
        r.raise_for_status()
        assert r.headers["Content-Type"] == "application/octet-stream"
        assert r.text == '{"key": "value"}'
def test_no_download_from_checked_pipe():
    """A checked pipe endpoint must reject download=0 (no JSON fallback)."""
    with session() as s:
        r = s.post(
            f"{url()}/api/v2.0/resttest/test_download_pipe?download=0",
            headers={"Content-type": "application/json"},
            data="{\"key\": \"value\"}",
        )
        assert r.status_code == 400
        assert r.json()["message"] == "JSON response is not supported for this method."
def test_no_download_from_unchecked_pipe():
    """An unchecked pipe endpoint with download=0 falls back to a JSON response."""
    with session() as s:
        r = s.post(
            f"{url()}/api/v2.0/resttest/test_download_unchecked_pipe?download=0",
            headers={"Content-type": "application/json"},
            data="{\"key\": \"value\"}",
        )
        r.raise_for_status()
        assert r.headers["Content-Type"].startswith("application/json")
        assert r.json() == {"wrapped": {"key": "value"}}
def test_download_from_download_endpoint():
    """core.download exposes pipe output via an attachment URL."""
    with client() as c:
        job_id, path = c.call("core.download", "resttest.test_download_pipe", [{"key": "value"}], "file.bin")
    r = requests.get(f"{url()}{path}")
    r.raise_for_status()
    assert r.headers["Content-Disposition"] == "attachment; filename=\"file.bin\""
    assert r.headers["Content-Type"] == "application/octet-stream"
    assert r.text == '{"key": "value"}'
@pytest.mark.parametrize("buffered,sleep,result", [
    (True, 0, ""),
    (True, 4, '{"key": "value"}'),
    (False, 0, '{"key": "value"}'),
])
def test_buffered_download_from_slow_download_endpoint(buffered, sleep, result):
    """Buffered downloads only contain data produced before the GET;
    unbuffered ones stream the full output."""
    with client() as c:
        job_id, path = c.call("core.download", "resttest.test_download_slow_pipe", [{"key": "value"}], "file.bin",
                              buffered)
    # Waiting lets the slow pipe finish producing before we fetch
    time.sleep(sleep)
    r = requests.get(f"{url()}{path}")
    r.raise_for_status()
    assert r.headers["Content-Disposition"] == "attachment; filename=\"file.bin\""
    assert r.headers["Content-Type"] == "application/octet-stream"
    assert r.text == result
def test_download_duplicate_job():
    """A second download of a locked slow pipe must fail with EBUSY."""
    call("core.download", "resttest.test_download_slow_pipe_with_lock", [{"key": "value"}], "file.bin")
    with pytest.raises(CallError) as ve:
        call("core.download", "resttest.test_download_slow_pipe_with_lock", [{"key": "value"}], "file.bin")
    assert ve.value.errno == errno.EBUSY
def test_download_authorization_ok():
    """A user whose allowlist covers the method may start a download."""
    with unprivileged_user(
        username="unprivileged",
        group_name="unprivileged_users",
        privilege_name="Unprivileged users",
        allowlist=[{"method": "CALL", "resource": "resttest.test_download_slow_pipe"}],
        web_shell=False,
    ) as user:
        with client(auth=(user.username, user.password)) as c:
            c.call("core.download", "resttest.test_download_slow_pipe", [{"key": "value"}], "file.bin")
def test_download_authorization_fails():
    """A user with an empty allowlist must be denied with EACCES."""
    with unprivileged_user(
        username="unprivileged",
        group_name="unprivileged_users",
        privilege_name="Unprivileged users",
        allowlist=[],
        web_shell=False,
    ) as user:
        with client(auth=(user.username, user.password)) as c:
            with pytest.raises(CallError) as ve:
                c.call("core.download", "resttest.test_download_slow_pipe", [{"key": "value"}], "file.bin")
            assert ve.value.errno == errno.EACCES
| 4,009 | Python | .py | 87 | 38.724138 | 114 | 0.626701 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,258 | test_alert_classes.py | truenas_middleware/tests/api2/test_alert_classes.py | from unittest.mock import ANY
import pytest
from pytest_dependency import depends
from middlewared.service_exception import ValidationErrors
from middlewared.test.integration.utils import call
def test__normal_alert_class():
    """Updating an existing alert class round-trips through alertclasses.config."""
    classes = {
        "UPSBatteryLow": {
            "level": "CRITICAL",
            "policy": "IMMEDIATELY",
        },
    }
    call("alertclasses.update", {"classes": classes})
    assert call("alertclasses.config") == {"id": ANY, "classes": classes}
def test__nonexisting_alert_class():
    """Referencing an unknown alert class must fail validation on that key."""
    bad_payload = {"classes": {"Invalid": {"level": "WARNING"}}}
    with pytest.raises(ValidationErrors) as exc_info:
        call("alertclasses.update", bad_payload)
    first_error = exc_info.value.errors[0]
    assert first_error.attribute == "alert_class_update.classes.Invalid"
def test__disable_proactive_support_for_valid_alert_class(request):
    """Proactive support may be disabled for a class that supports it."""
    update_payload = {
        "classes": {
            "ZpoolCapacityNotice": {"proactive_support": False},
        },
    }
    call("alertclasses.update", update_payload)
def test__disable_proactive_support_for_invalid_alert_class(request):
    """Disabling proactive support on a class without it must fail validation."""
    update_payload = {
        "classes": {
            "UPSBatteryLow": {"proactive_support": False},
        },
    }
    with pytest.raises(ValidationErrors) as exc_info:
        call("alertclasses.update", update_payload)
    first_error = exc_info.value.errors[0]
    assert first_error.attribute == "alert_class_update.classes.UPSBatteryLow.proactive_support"
| 1,510 | Python | .py | 44 | 24.977273 | 103 | 0.580979 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,259 | test_035_ad_idmap.py | truenas_middleware/tests/api2/test_035_ad_idmap.py | #!/usr/bin/env python3
# Author: Eric Turgeon
# License: BSD
# Location for tests into REST API of FreeNAS
import pytest
import sys
import os
import json
apifolder = os.getcwd()
sys.path.append(apifolder)
from auto_config import hostname
from base64 import b64decode
from middlewared.service_exception import ValidationErrors
from middlewared.test.integration.assets.directory_service import active_directory
from middlewared.test.integration.utils import call, ssh
from middlewared.test.integration.utils.system import reset_systemd_svcs
from time import sleep
try:
from config import AD_DOMAIN, ADPASSWORD, ADUSERNAME, AD_COMPUTER_OU
from config import (
LDAPBASEDN,
LDAPBINDDN,
LDAPBINDPASSWORD,
LDAPHOSTNAME
)
except ImportError:
Reason = 'ADNameServer AD_DOMAIN, ADPASSWORD, or/and ADUSERNAME are missing in config.py"'
pytestmark = pytest.mark.skip(reason=Reason)
BACKENDS = [
"AD",
"AUTORID",
"LDAP",
"NSS",
"RFC2307",
"TDB",
"RID",
]
@pytest.fixture(scope="function")
def idmap_domain():
    """Create a temporary RID idmap domain named "canary"; delete it on teardown."""
    range_low, range_high = call('idmap.get_next_idmap_range')
    created = call('idmap.create', {
        "name": "canary",
        "range_low": range_low,
        "range_high": range_high,
        "idmap_backend": "RID",
        "options": {},
    })
    try:
        yield created
    finally:
        call('idmap.delete', created['id'])
@pytest.fixture(scope="module")
def do_ad_connection(request):
    """Join Active Directory with SMB enabled for the module; disable SMB afterwards."""
    call('service.update', 'cifs', {'enable': True})
    try:
        ad_context = active_directory(
            AD_DOMAIN,
            ADUSERNAME,
            ADPASSWORD,
            netbiosname=hostname,
            createcomputer=AD_COMPUTER_OU,
        )
        with ad_context as conn:
            yield conn
    finally:
        call('service.update', 'cifs', {'enable': False})
def assert_ad_healthy():
    """Fail fast unless the directory service is Active Directory and healthy."""
    ds_type = call('directoryservices.status')['type']
    assert ds_type == 'ACTIVEDIRECTORY'
    call('directoryservices.health.check')
@pytest.fixture(scope="module")
def backend_data():
    """Expose the available idmap backend options plus the current SMB workgroup."""
    yield {
        'options': call('idmap.backend_options'),
        'workgroup': call('smb.config')['workgroup'],
    }
def test_name_sid_resolution(do_ad_connection):
    """GIDs reported by NSS must convert to SIDs and back without loss."""
    ad_acct = call('user.get_user_obj', {'username': f'{ADUSERNAME}@{AD_DOMAIN}', 'get_groups': True})
    gids = set(ad_acct['grouplist'])
    # gid -> SID: every gid must map to a distinct SID
    converted = call('idmap.convert_unixids', [{'id_type': 'GROUP', 'id': gid} for gid in gids])
    sid_set = {entry['sid'] for entry in converted['mapped'].values()}
    assert len(gids) == len(sid_set)
    # SID -> gid: the round trip must return the original gid set
    round_trip = call('idmap.convert_sids', list(sid_set))
    assert {entry['id'] for entry in round_trip['mapped'].values()} == gids
@pytest.mark.parametrize('backend', BACKENDS)
def test_backend_options(do_ad_connection, backend_data, backend):
    """
    Tests for backend options are performend against
    the backend for the domain we're joined to
    (DS_TYPE_ACTIVEDIRECTORY) so that auto-detection
    works correctly. The three default idmap backends
    DS_TYPE_ACTIVEDIRECTORY, DS_TYPE_LDAP,
    DS_TYPE_DEFAULT_DOMAIN have hard-coded ids and
    so we don't need to look them up.
    """
    reset_systemd_svcs('winbind smbd')
    opts = backend_data['options'][backend]['parameters'].copy()
    WORKGROUP = backend_data['workgroup']
    # Set below for backends whose options carry an LDAP bind password;
    # triggers the secrets.tdb verification at the end of the test.
    set_secret = False
    payload = {
        "name": "DS_TYPE_ACTIVEDIRECTORY",
        "range_low": "1000000001",
        "range_high": "2000000000",
        "idmap_backend": backend,
        "options": {}
    }
    payload3 = {"options": {}}
    for k, v in opts.items():
        """
        Populate garbage data where an opt is required.
        This should get us past the first step of
        switching to the backend before doing more
        comprehensive tests.
        """
        if v['required']:
            payload["options"].update({k: "canary"})
    if backend == 'RFC2307':
        payload['options'].update({"ldap_server": "STANDALONE"})
    if not payload['options']:
        payload.pop('options')
    call('idmap.update', 1, payload)
    # We unfortunately need to sleep here on each iteration to allow time for
    # winbind to settle down before applying more idmap changes otherwise
    # subsequent idmap.update call will time out.
    sleep(5)
    if backend == "AUTORID":
        IDMAP_CFG = "idmap config * "
    else:
        IDMAP_CFG = f"idmap config {WORKGROUP} "
    """
    Validate that backend was correctly set in smb.conf.
    """
    running_backend = call('smb.getparm', f'{IDMAP_CFG}: backend', 'GLOBAL')
    assert running_backend == backend.lower()
    if backend == "RID":
        """
        sssd_compat generates a lower range based
        on murmur3 hash of domain SID. Since we're validating
        basic functionilty, checking that our range_low
        changed is sufficient for now.
        """
        payload2 = {"options": {"sssd_compat": True}}
        out = call('idmap.update', 1, payload2)
        assert out['range_low'] != payload['range_low']
    elif backend == "AUTORID":
        """
        autorid is unique among the idmap backends because
        its configuration replaces the default idmap backend
        "idmap config *".
        """
        payload3["options"] = {
            "rangesize": 200000,
            "readonly": True,
            "ignore_builtin": True,
        }
        call('idmap.update', 1, payload3)
    elif backend == "AD":
        payload3["options"] = {
            "schema_mode": "SFU",
            "unix_primary_group": True,
            "unix_nss_info": True,
        }
        call('idmap.update', 1, payload3)
    elif backend == "LDAP":
        payload3["options"] = {
            "ldap_base_dn": LDAPBASEDN,
            "ldap_user_dn": LDAPBINDDN,
            "ldap_url": LDAPHOSTNAME,
            "ldap_user_dn_password": LDAPBINDPASSWORD,
            "ssl": "ON",
            "readonly": True,
        }
        call('idmap.update', 1, payload3)
        secret = payload3["options"].pop("ldap_user_dn_password")
        set_secret = True
    elif backend == "RFC2307":
        payload3["options"] = {
            "ldap_server": "STANDALONE",
            "bind_path_user": LDAPBASEDN,
            "bind_path_group": LDAPBASEDN,
            "user_cn": True,
            "ldap_domain": "",
            "ldap_url": LDAPHOSTNAME,
            "ldap_user_dn": LDAPBINDDN,
            "ldap_user_dn_password": LDAPBINDPASSWORD,
            "ssl": "ON",
            "ldap_realm": True,
        }
        call('idmap.update', 1, payload3)
        # The API option is `ldap_realm` but smb.conf stores it as `realm`;
        # rename before comparing against the running configuration below.
        r = payload3["options"].pop("ldap_realm")
        payload3["options"]["realm"] = r
        secret = payload3["options"].pop("ldap_user_dn_password")
        set_secret = True
    for k, v in payload3['options'].items():
        """
        At this point we should have added every supported option
        for the current backend. Iterate through each option and verify
        that it was written to samba's running configuration.
        """
        if k in ['realm', 'ssl']:
            continue
        res = call('smb.getparm', f'{IDMAP_CFG}: {k}', 'GLOBAL')
        assert res is not None, f'Failed to retrieve `{IDMAP_CFG}: {k}` from running configuration'
        if k == 'ldap_url':
            v = f'ldaps://{v}'
        elif k == 'ldap_domain':
            v = None
        if v == 'STANDALONE':
            v = 'stand-alone'
        try:
            res = json.loads(res)
            assert res == v, f"{backend} - [{k}]: {res}"
        except json.decoder.JSONDecodeError:
            # Non-JSON values come back as plain strings; normalize both
            # sides before comparing.
            if isinstance(v, bool):
                v = str(v)
            if v is None:
                assert res in (None, ''), f"{backend} - [{k}]: {res}"
            else:
                assert v.casefold() == res.casefold(), f"{backend} - [{k}]: {res}"
    if set_secret:
        """
        API calls that set an idmap secret should result in the
        secret being written to secrets.tdb in Samba's private
        directory. To check this, force a secrets db dump, check
        for keys, then decode secret.
        """
        idmap_secret = call('directoryservices.secrets.get_ldap_idmap_secret', WORKGROUP, LDAPBINDDN)
        db_secrets = call('directoryservices.secrets.get_db_secrets')[f'{hostname.upper()}$']
        # Check that our secret is written and stored in secrets backup correctly
        assert idmap_secret == db_secrets[f"SECRETS/GENERIC/IDMAP_LDAP_{WORKGROUP}/{LDAPBINDDN}"]
        decoded_sec = b64decode(idmap_secret).rstrip(b'\x00').decode()
        assert secret == decoded_sec, idmap_secret
        # Use net command via samba to rewrite secret and make sure it is same
        ssh(f"net idmap set secret {WORKGROUP} '{secret}'")
        new_idmap_secret = call('directoryservices.secrets.get_ldap_idmap_secret', WORKGROUP, LDAPBINDDN)
        assert idmap_secret == new_idmap_secret
        secrets_dump = call('directoryservices.secrets.dump')
        assert secrets_dump == db_secrets
    # reset idmap backend to RID to ensure that winbindd is running
    reset_systemd_svcs('winbind smbd')
    payload = {
        "name": "DS_TYPE_ACTIVEDIRECTORY",
        "range_low": "1000000001",
        "range_high": "2000000000",
        "idmap_backend": 'RID',
        "options": {}
    }
    call('idmap.update', 1, payload)
def test_clear_idmap_cache(do_ad_connection):
    """Flushing the idmap cache must complete successfully while joined to AD."""
    call('idmap.clear_idmap_cache', job=True)
def test_idmap_overlap_fail(do_ad_connection):
    """
    It should not be possible to set an idmap range for a new
    domain that overlaps an existing one.
    """
    assert_ad_healthy()
    overlapping = {
        "name": "canary",
        "range_low": "20000",
        "range_high": "2000000000",
        "idmap_backend": "RID",
        "options": {},
    }
    with pytest.raises(ValidationErrors):
        call('idmap.create', overlapping)
def test_idmap_default_domain_name_change_fail():
    """
    It should not be possible to change the name of a
    default idmap domain.
    """
    assert_ad_healthy()
    rename_attempt = {
        "name": "canary",
        "range_low": "1000000000",
        "range_high": "2000000000",
        "idmap_backend": "RID",
        "options": {},
    }
    with pytest.raises(ValidationErrors):
        call('idmap.create', rename_attempt)
def test_idmap_low_high_range_inversion_fail(request):
    """
    It should not be possible to set an idmap low range
    that is greater than its high range.
    """
    assert_ad_healthy()
    inverted_range = {
        "name": "canary",
        "range_low": "2000000000",
        "range_high": "1900000000",
        "idmap_backend": "RID",
    }
    with pytest.raises(ValidationErrors):
        call('idmap.create', inverted_range)
def test_idmap_new_domain_duplicate_fail(idmap_domain):
    """
    It should not be possible to create a new domain whose
    name conflicts with an existing one.
    """
    range_low, range_high = call('idmap.get_next_idmap_range')
    duplicate = {
        "name": idmap_domain["name"],
        "range_low": range_low,
        "range_high": range_high,
        "idmap_backend": "RID",
    }
    with pytest.raises(ValidationErrors):
        call('idmap.create', duplicate)
def test_idmap_new_domain_autorid_fail(idmap_domain):
    """
    It should only be possible to set the AUTORID backend on
    the default domain, never on a newly created one.
    """
    with pytest.raises(ValidationErrors):
        call('idmap.update', idmap_domain['id'], {"idmap_backend": "AUTORID"})
| 11,613 | Python | .py | 319 | 29.097179 | 105 | 0.616836 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,260 | test_pool_dataset_unlock.py | truenas_middleware/tests/api2/test_pool_dataset_unlock.py | import contextlib
import pytest
from middlewared.test.integration.assets.account import user
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.assets.smb import smb_share
from middlewared.test.integration.utils import call, ssh
from protocols import SMB
from samba import ntstatus, NTSTATUSError
SMB_PASSWORD = 'Abcd1234'
SMB_USER = 'smbuser999'
def passphrase_encryption():
    """Return dataset-creation options for passphrase-based ZFS encryption."""
    options = {
        'generate_key': False,
        'pbkdf2iters': 100000,
        'algorithm': 'AES-128-CCM',
        'passphrase': 'passphrase',
    }
    return {
        'encryption_options': options,
        'encryption': True,
        'inherit_encryption': False,
    }
def lock_dataset(name):
    """Lock dataset *name*, force-unmounting it if necessary."""
    assert call('pool.dataset.lock', name, {'force_umount': True}, job=True)
def unlock_dataset(name, options=None):
    """Unlock dataset *name* with the module passphrase; merge in extra job options."""
    job_args = {
        'recursive': True,
        'datasets': [{'name': name, 'passphrase': 'passphrase'}],
    }
    job_args.update(options or {})
    result = call('pool.dataset.unlock', name, job_args, job=True)
    assert result['unlocked'] == [name], str(result)
@contextlib.contextmanager
def smb_connection(**kwargs):
    """Yield a connected SMB client, always disconnecting on exit."""
    connection = SMB()
    connection.connect(**kwargs)
    try:
        yield connection
    finally:
        connection.disconnect()
@pytest.fixture(scope='module')
def smb_user():
    """Provide a temporary SMB-enabled local user for the test module."""
    spec = {
        'username': SMB_USER,
        'full_name': 'doug',
        'group_create': True,
        'password': SMB_PASSWORD,
        'smb': True,
    }
    with user(spec, get_instance=True) as instance:
        yield instance
@pytest.mark.parametrize('toggle_attachments', [True, False])
def test_pool_dataset_unlock_smb(smb_user, toggle_attachments):
    # Unlocking an encrypted dataset must not interrupt an established SMB
    # connection to an unrelated share; the locked share only becomes
    # mountable afterwards when `toggle_attachments` is set.
    with (
        # Prepare test SMB share
        dataset('normal', mode='777') as normal,
        smb_share(f'/mnt/{normal}', 'normal', {'guestok': True}),
        # Create an encrypted SMB share, unlocking which might lead to SMB service interruption
        dataset('encrypted', passphrase_encryption(), mode='777') as encrypted,
        smb_share(f'/mnt/{encrypted}', 'encrypted', {'guestok': True})
    ):
        # Marker file checked after unlocking to prove the share contents are back.
        ssh(f'touch /mnt/{encrypted}/secret')
        assert call('service.start', 'cifs')
        lock_dataset(encrypted)
        # Mount test SMB share
        with smb_connection(
            share='normal',
            username=SMB_USER,
            password=SMB_PASSWORD
        ) as normal_connection:
            # Locked share should not be mountable
            with pytest.raises(NTSTATUSError) as e:
                with smb_connection(
                    share='encrypted',
                    username=SMB_USER,
                    password=SMB_PASSWORD
                ):
                    pass
            assert e.value.args[0] == ntstatus.NT_STATUS_BAD_NETWORK_NAME
            conn = normal_connection.show_connection()
            assert conn['connected'], conn
            unlock_dataset(encrypted, {'toggle_attachments': toggle_attachments})
            # The unrelated connection must survive the unlock.
            conn = normal_connection.show_connection()
            assert conn['connected'], conn
            if toggle_attachments:
                # We should be able to mount encrypted share
                with smb_connection(
                    share='encrypted',
                    username=SMB_USER,
                    password=SMB_PASSWORD
                ) as encrypted_connection:
                    assert [x['name'] for x in encrypted_connection.ls('')] == ['secret']
            else:
                # We should still not be able to mount encrypted share as we did not reload attachments
                with pytest.raises(NTSTATUSError) as e:
                    with smb_connection(
                        share='encrypted',
                        username=SMB_USER,
                        password=SMB_PASSWORD
                    ):
                        pass
                assert e.value.args[0] == ntstatus.NT_STATUS_BAD_NETWORK_NAME
        assert call('service.stop', 'cifs')
| 3,974 | Python | .py | 109 | 26.853211 | 99 | 0.589383 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,261 | test_audit_smb.py | truenas_middleware/tests/api2/test_audit_smb.py | import os
import sys
import pytest
from middlewared.service_exception import CallError
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call
from middlewared.test.integration.utils.audit import expect_audit_method_calls
sys.path.append(os.getcwd())
REDACTED_SECRET = '********'
@pytest.fixture(scope='module')
def smb_audit_dataset(request):
    """Yield a temporary dataset backing the SMB share used by the audit tests.

    Cleanup is handled entirely by the ``dataset`` context manager; the former
    ``try/finally: pass`` wrapper around the yield was a no-op and was removed.
    """
    with dataset('audit-test-smb') as ds:
        yield ds
def test_smb_update_audit():
    '''
    Test the auditing of SMB configuration changes
    '''
    initial_smb_config = call('smb.config')
    update_args = {'enable_smb1': True}
    expected_audit = [{
        'method': 'smb.update',
        'params': [update_args],
        'description': 'Update SMB configuration',
    }]
    try:
        with expect_audit_method_calls(expected_audit):
            call('smb.update', update_args)
    finally:
        # Always restore the default (SMB1 disabled) state.
        call('smb.update', {'enable_smb1': False})
def test_smb_share_audit(smb_audit_dataset):
    '''
    Test the auditing of SMB share operations (create, update, delete).
    '''
    smb_share_path = os.path.join('/mnt', smb_audit_dataset)
    # Bug fix: initialize before the try block so the finally clause cannot
    # raise NameError when sharing.smb.create fails before first assignment.
    share_config = None
    try:
        # CREATE
        payload = {
            "comment": "My Test Share",
            "path": smb_share_path,
            "name": "audit_share"
        }
        with expect_audit_method_calls([{
            'method': 'sharing.smb.create',
            'params': [payload],
            'description': 'SMB share create audit_share',
        }]):
            share_config = call('sharing.smb.create', payload)

        # UPDATE
        payload = {
            "ro": True
        }
        with expect_audit_method_calls([{
            'method': 'sharing.smb.update',
            'params': [
                share_config['id'],
                payload,
            ],
            'description': 'SMB share update audit_share',
        }]):
            share_config = call('sharing.smb.update', share_config['id'], payload)
    finally:
        if share_config is not None:
            # DELETE
            share_id = share_config['id']
            with expect_audit_method_calls([{
                'method': 'sharing.smb.delete',
                'params': [share_id],
                'description': 'SMB share delete audit_share',
            }]):
                call('sharing.smb.delete', share_id)
| 2,371 | Python | .py | 72 | 24.041667 | 82 | 0.566871 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,262 | test_426_smb_vss.py | truenas_middleware/tests/api2/test_426_smb_vss.py | #!/usr/bin/env python3
import pytest
import sys
import os
from subprocess import run
from time import sleep
apifolder = os.getcwd()
sys.path.append(apifolder)
from functions import PUT, POST, GET, DELETE, SSH_TEST, wait_on_job
from auto_config import (
pool_name,
user,
password,
)
from pytest_dependency import depends
from protocols import SMB
from samba import ntstatus
from middlewared.test.integration.utils import call
from middlewared.test.integration.utils.client import truenas_server
dataset = f"{pool_name}/smb-vss"
dataset_url = dataset.replace('/', '%2F')
dataset_nested = f"{dataset}/sub1"
dataset_nested_url = dataset_nested.replace('/', '%2F')
SMB_NAME = "SMBVSS"
smb_path = "/mnt/" + dataset
SMB_USER = "smbshadowuser"
SMB_PWD = "smb1234"
to_check = [
'testfile1',
f'{SMB_USER}/testfile2',
'sub1/testfile3'
]
snapshots = {
'snapshot1': {'gmt_string': '', 'offset': 18},
'snapshot2': {'gmt_string': '', 'offset': 36},
'snapshot3': {'gmt_string': '', 'offset': 54},
}
def check_previous_version_exists(path, home=False):
    """Open *path* (an @GMT-prefixed previous version) via smbclient.

    Returns a ``(status_code, status_name, detail)`` tuple where
    ``status_code`` is 0 on success.
    """
    share = SMB_USER if home else SMB_NAME
    proc = run(
        [
            'smbclient',
            f'//{truenas_server.ip}/{share}',
            '-U', f'{SMB_USER}%{SMB_PWD}',
            '-c' f'open {path}',
        ],
        capture_output=True,
    )
    if proc.returncode != 0:
        return (
            ntstatus.NT_STATUS_FAIL_CHECK,
            'NT_STATUS_FAIL_CHECK',
            proc.stderr.decode(),
        )
    output = proc.stdout.decode().strip()
    if 'NT_STATUS_' not in output:
        return (0, 'NT_STATUS_OK', output)
    # smbclient prints "<detail> <NT_STATUS_...>"; split from the right.
    detail, status_name = output.rsplit(' ', 1)
    return (
        ntstatus.__getattribute__(status_name),
        status_name,
        detail,
    )
"""
def check_previous_version_contents(path, contents, offset):
cmd = [
'smbclient',
f'//{ip}/{SMB_NAME}',
'-U', f'{SMB_USER}%{SMB_PWD}',
'-c' f'prompt OFF; mget {path}'
]
cli_get = run(cmd, capture_output=True)
if cli_get.returncode != 0:
return (
ntstatus.NT_STATUS_FAIL_CHECK,
'NT_STATUS_FAIL_CHECK',
cli_open.stderr.decode()
)
cli_output = cli_get.stdout.decode().strip()
if 'NT_STATUS_' in cli_output:
cli_output = cli_output.rsplit(' ', 1)
return (
ntstatus.__getattribute__(cli_output[1]),
cli_output[0]
)
with open(path[25:], "rb") as f:
bytes = f.read()
to_check = bytes[offset:]
assert len(to_check) == 9, f'path: {path}, contents: {to_check.decode()}'
os.unlink(path[25:])
assert to_check.decode() == contents, path
return (0, )
"""
@pytest.mark.parametrize('ds', [dataset, dataset_nested])
@pytest.mark.dependency(name="VSS_DATASET_CREATED")
def test_001_creating_smb_dataset(request, ds):
    """Create each SMB dataset and take an initial "init" snapshot of it."""
    payload = {
        "name": ds,
        "share_type": "SMB"
    }
    results = POST("/pool/dataset/", payload)
    assert results.status_code == 200, results.text
    result = POST("/zfs/snapshot/", {
        "dataset": ds,
        "name": "init",
    })
    # Bug fix: the failure message previously showed `results.text` (the
    # dataset-creation response) instead of this snapshot response.
    assert result.status_code == 200, result.text
@pytest.mark.dependency(name="VSS_USER_CREATED")
def test_002_creating_shareuser_to_test_acls(request):
    """Create the SMB test user at the next free UID."""
    depends(request, ['VSS_DATASET_CREATED'])
    global vssuser_id
    global next_uid
    uid_response = GET('/user/get_next_uid/')
    assert uid_response.status_code == 200, uid_response.text
    next_uid = uid_response.json()
    create_response = POST("/user/", {
        "username": SMB_USER,
        "full_name": "SMB User",
        "group_create": True,
        "password": SMB_PWD,
        "uid": next_uid,
    })
    assert create_response.status_code == 200, create_response.text
    vssuser_id = create_response.json()
def test_003_changing_dataset_owner(request):
    """Recursively chown the share path to the newly created SMB user."""
    depends(request, ["VSS_USER_CREATED"])
    chown_response = POST('/filesystem/chown/', {
        'path': smb_path,
        'uid': next_uid,
        'options': {'recursive': True, 'traverse': True},
    })
    assert chown_response.status_code == 200, chown_response.text
    job_status = wait_on_job(chown_response.json(), 180)
    assert job_status['state'] == 'SUCCESS', str(job_status['results'])
@pytest.mark.dependency(name="VSS_SHARE_CREATED")
def test_004_creating_a_smb_share_path(request):
    """Create the VSS test share and a per-user subdirectory beneath it."""
    depends(request, ["VSS_DATASET_CREATED"], scope="session")
    # These globals are consumed by later tests (e.g. the share id in test_050).
    global payload, results, smb_id
    payload = {
        "comment": "SMB VSS Testing Share",
        "path": smb_path,
        "name": SMB_NAME,
        "purpose": "NO_PRESET",
    }
    results = POST("/sharing/smb/", payload)
    assert results.status_code == 200, results.text
    smb_id = results.json()['id']
    # Pre-create the user directory and flush caches so it is visible.
    setup_cmd = f'mkdir {smb_path}/{SMB_USER}; zpool sync; net cache flush'
    ssh_result = SSH_TEST(setup_cmd, user, password)
    assert ssh_result['result'] is True, {"cmd": setup_cmd, "res": ssh_result['output']}
@pytest.mark.dependency(name="VSS_SMB_SERVICE_STARTED")
def test_005_starting_cifs_service(request):
    """Start the SMB service."""
    depends(request, ["VSS_SHARE_CREATED"])
    start_response = POST("/service/start/", {"service": "cifs"})
    assert start_response.status_code == 200, start_response.text
@pytest.mark.dependency(name="VSS_SMB1_ENABLED")
def test_006_enable_smb1(request):
    """Enable SMB1 so shadow copies can also be exercised over the legacy protocol."""
    depends(request, ["VSS_SHARE_CREATED"])
    update_response = PUT("/smb/", {"enable_smb1": True})
    assert update_response.status_code == 200, update_response.text
@pytest.mark.dependency(name="SHARE_HAS_SHADOW_COPIES")
@pytest.mark.parametrize('proto', ["SMB1", "SMB2"])
def test_007_check_shadow_copies(request, proto):
    """
    This is very basic validation of presence of snapshot
    over SMB1 and SMB2/3.
    """
    depends(request, ["VSS_USER_CREATED"])
    shadow_copies = SMB().get_shadow_copies(
        share=SMB_NAME,
        username=SMB_USER,
        password=SMB_PWD,
        smb1=(proto == "SMB1"),
    )
    assert len(shadow_copies) == 1, shadow_copies
@pytest.mark.dependency(name="VSS_TESTFILES_CREATED")
@pytest.mark.parametrize('payload', [
    'snapshot1', 'snapshot2', 'snapshot3'
])
def test_008_set_up_testfiles(request, payload):
    """Write test data (plus an ADS stream per file), then snapshot recursively."""
    depends(request, ["SHARE_HAS_SHADOW_COPIES"])
    i = int(payload[-1])
    # Each snapshot's data is written at a distinct, non-overlapping offset.
    offset = i * 2 * len(payload)
    c = SMB()
    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
    for f in to_check:
        fd = c.create_file(f, "w")
        c.write(fd, payload.encode(), offset)
        c.close(fd)
        # Also write an alternate data stream on each file.
        fd = c.create_file(f'{f}:smb2_stream', 'w')
        c.write(fd, payload.encode(), offset)
        c.close(fd)
    sleep(5)
    result = POST("/zfs/snapshot/", {
        "dataset": dataset,
        "name": payload,
        "recursive": True,
    })
    # Bug fix: the failure message previously referenced the stale
    # module-level `results` (set by test_004) instead of this response.
    assert result.status_code == 200, result.text
@pytest.mark.parametrize('proto', ["SMB1", "SMB2"])
def test_009_check_shadow_copies_count_after_setup(request, proto):
    """
    After the three snapshots from test_008, four shadow copies must be
    visible over SMB1 and SMB2/3; record each snapshot's GMT token.
    """
    depends(request, ["VSS_USER_CREATED"])
    snaps = SMB().get_shadow_copies(
        share=SMB_NAME,
        username=SMB_USER,
        password=SMB_PWD,
        smb1=(proto == "SMB1"),
    )
    assert len(snaps) == 4, snaps
    snaps.sort()
    # Skip the oldest ("init") entry, then map each GMT token to its snapshot.
    for idx, gmt in enumerate(snaps[1:]):
        snapshots[f'snapshot{idx + 1}']['gmt_string'] = gmt
@pytest.mark.dependency(name="VSS_TESTFILES_VALIDATED")
@pytest.mark.parametrize('zfs, gmt_data', snapshots.items())
def test_010_check_previous_versions_of_testfiles(request, zfs, gmt_data):
    """
    This test verifies that previous versions of files can be opened successfully
    in the following situations:
    1) root of share
    2) subdirectory in share
    3) child dataset in share
    in (1) - (3) above, ability to open alternate data streams is also verified.
    """
    depends(request, ["VSS_TESTFILES_CREATED"])
    vers = gmt_data['gmt_string']
    for f in to_check:
        # `vers` is an @GMT-... token; prefixing the path selects the snapshot view.
        the_file = f'{vers}/{f}'
        err, errstr, msg = check_previous_version_exists(the_file)
        assert err == 0, f'{the_file}: {errstr} - {msg}'
        """
        # further development of libsmb / smbclient required for this test
        # best bet is to add a kwarg to py-libsmb create to allow openinging
        # previous version of file.
        err, msg = check_previous_version_contents(the_file, zfs, gmt_data['offset'])
        assert err == 0, f'{the_file}: {msg}'
        """
        err, errstr, msg = check_previous_version_exists(f'{the_file}:smb2_stream')
        assert err == 0, f'{the_file}:smb2_stream: {errstr} - {msg}'
def test_011_convert_to_home_share(request):
    """Flip the VSS share into a "home" share."""
    depends(request, ["VSS_TESTFILES_VALIDATED"])
    update_response = PUT(f"/sharing/smb/id/{smb_id}", {"home": True})
    assert update_response.status_code == 200, update_response.text
@pytest.mark.parametrize('zfs, gmt_data', snapshots.items())
def test_012_check_previous_versions_of_testfiles_home_share(request, zfs, gmt_data):
    """
    Previous versions must still open after the share was converted to a
    "home" share. A home share points at a directory inside a ZFS dataset,
    so samba must internally change cwd to a path outside the share root.
    """
    depends(request, ["VSS_TESTFILES_VALIDATED"])
    the_file = f'{gmt_data["gmt_string"]}/testfile2'
    err, errstr, msg = check_previous_version_exists(the_file, True)
    assert err == 0, f'{the_file}: {errstr} - {msg}'
def test_050_delete_smb_user(request):
    """Remove the SMB test user (including its group) and the share."""
    depends(request, ["VSS_USER_CREATED"])
    delete_user = DELETE(f"/user/id/{vssuser_id}/", {"delete_group": True})
    assert delete_user.status_code == 200, delete_user.text
    delete_share = DELETE(f"/sharing/smb/id/{smb_id}")
    assert delete_share.status_code == 200, delete_share.text
def test_051_disable_smb1(request):
    """Turn SMB1 and AAPL extensions back off."""
    depends(request, ["VSS_SMB1_ENABLED"])
    update_response = PUT("/smb/", {
        "enable_smb1": False,
        "aapl_extensions": False,
    })
    assert update_response.status_code == 200, update_response.text
def test_052_stopping_smb_service(request):
    """Stop the SMB service and give it a moment to wind down."""
    depends(request, ["VSS_SMB_SERVICE_STARTED"])
    assert call("service.stop", "cifs")
    sleep(1)
def test_053_checking_if_smb_is_stoped(request):
    """Verify the SMB service reports the STOPPED state."""
    depends(request, ["VSS_SMB_SERVICE_STARTED"])
    svc_response = GET("/service?service=cifs")
    assert svc_response.json()[0]['state'] == "STOPPED", svc_response.text
def test_054_destroying_smb_dataset(request):
    """Recursively destroy the VSS dataset to clean up."""
    depends(request, ["VSS_DATASET_CREATED"])
    delete_response = DELETE(f"/pool/dataset/id/{dataset_url}/", {'recursive': True})
    assert delete_response.status_code == 200, delete_response.text
| 10,914 | Python | .py | 299 | 31.003344 | 85 | 0.643358 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,263 | test_smart_test_crud.py | truenas_middleware/tests/api2/test_smart_test_crud.py | import contextlib
import re
import pytest
from middlewared.service_exception import ValidationErrors
from middlewared.test.integration.utils import call
@contextlib.contextmanager
def smart_test(data):
    """Create a SMART test schedule, yield it, and always delete it on exit."""
    created = call("smart.test.create", data)
    try:
        yield created
    finally:
        call("smart.test.delete", created["id"])
def smart_test_disks(all_disks=False, disk_index=0):
    """Build the disk-selection portion of a SMART test payload."""
    if all_disks:
        return {"all_disks": True}
    choices = sorted(call("smart.test.disk_choices").keys())
    return {"disks": [choices[disk_index]]}
@pytest.mark.parametrize("existing_all_disks", [False, True])
@pytest.mark.parametrize("new_all_disks", [False, True])
def test_smart_test_already_has_tests_for_this_type(existing_all_disks, new_all_disks):
    """A second SHORT test for the same disks (or all disks) must be rejected."""
    if existing_all_disks:
        error = "There already is an all-disks SHORT test"
    else:
        error = "The following disks already have SHORT test: sd[a-z]"
    existing = {
        "schedule": {"hour": "0", "dom": "*", "month": "*", "dow": "*"},
        **smart_test_disks(existing_all_disks),
        "type": "SHORT",
    }
    with smart_test(existing):
        with pytest.raises(ValidationErrors) as exc_info:
            conflicting = {
                "schedule": {"hour": "1", "dom": "*", "month": "*", "dow": "*"},
                **smart_test_disks(new_all_disks),
                "type": "SHORT",
            }
            with smart_test(conflicting):
                pass
        assert re.fullmatch(error, exc_info.value.errors[0].errmsg)
@pytest.mark.parametrize("existing_all_disks", [False, True])
@pytest.mark.parametrize("new_all_disks", [False, True])
def test_smart_test_intersect(existing_all_disks, new_all_disks):
    """Two schedules that can fire at the same moment must be rejected."""
    existing = {
        "schedule": {"hour": "3", "dom": "1", "month": "*", "dow": "*"},
        **smart_test_disks(existing_all_disks),
        "type": "LONG",
    }
    with smart_test(existing):
        with pytest.raises(ValidationErrors) as exc_info:
            # NOTE(review): `new_all_disks` is parametrized but unused here —
            # the second payload reuses existing_all_disks. Possibly a
            # copy-paste slip; confirm intent before changing.
            conflicting = {
                "schedule": {"hour": "3", "dom": "*", "month": "*", "dow": "1"},
                **smart_test_disks(existing_all_disks),
                "type": "SHORT",
            }
            with smart_test(conflicting):
                pass
        assert exc_info.value.errors[0].errmsg == "A LONG test already runs at Day 1st of every month, Mon, 03:00"
def test_smart_test_update():
    """A no-op update of an existing SMART test schedule must succeed."""
    spec = {
        "schedule": {"hour": "0", "dom": "*", "month": "*", "dow": "*"},
        **smart_test_disks(True),
        "type": "SHORT",
    }
    with smart_test(spec) as created:
        call("smart.test.update", created["id"], {})
| 2,829 | Python | .py | 85 | 23.305882 | 108 | 0.507145 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,264 | test_system_dataset.py | truenas_middleware/tests/api2/test_system_dataset.py | import errno
import os
import pytest
from middlewared.service_exception import CallError
from middlewared.test.integration.assets.pool import another_pool
from middlewared.test.integration.utils import call, pool
PASSPHRASE = 'passphrase'
@pytest.fixture(scope="module")
def passphrase_encrypted_pool_session():
    """Create a passphrase-encrypted pool for the module; yield its name."""
    pool_spec = {"encryption": True, "encryption_options": {"passphrase": PASSPHRASE}}
    with another_pool(pool_spec) as created:
        yield created["name"]
@pytest.fixture(scope="function")
def passphrase_encrypted_pool(passphrase_encrypted_pool_session):
    """Hand each test a clean, unlocked passphrase-encrypted pool name."""
    config = call("systemdataset.config")
    assert config["pool"] == pool
    pool_name = passphrase_encrypted_pool_session
    # Remove leftovers from earlier tests; a missing dataset is not an error.
    try:
        call("pool.dataset.delete", pool_name, {"recursive": True})
    except CallError as e:
        if e.errno != errno.ENOENT:
            raise
    # A previous test may have locked the root dataset without unlocking it;
    # unlock here so every test starts from the same clean state.
    if call("pool.dataset.get_instance", pool_name)["locked"]:
        call("pool.dataset.unlock", pool_name, {
            "datasets": [{"name": pool_name, "passphrase": PASSPHRASE}],
        })
    yield pool_name
@pytest.mark.parametrize("lock", [False, True])
def test_migrate_to_a_pool_with_passphrase_encrypted_root_dataset(passphrase_encrypted_pool, lock):
    """System dataset migration must work even onto a locked encrypted pool."""
    if lock:
        call("pool.dataset.lock", passphrase_encrypted_pool, job=True)
    assert passphrase_encrypted_pool in call("systemdataset.pool_choices")
    call("systemdataset.update", {"pool": passphrase_encrypted_pool}, job=True)
    # The .system dataset itself must never be created encrypted.
    sysds = call("zfs.dataset.get_instance", f"{passphrase_encrypted_pool}/.system")
    assert sysds["properties"]["encryption"]["value"] == "off"
    call("systemdataset.update", {"pool": pool}, job=True)
def test_lock_passphrase_encrypted_pool_with_system_dataset(passphrase_encrypted_pool):
    """Locking the pool must leave the (unencrypted) system dataset mounted."""
    call("systemdataset.update", {"pool": passphrase_encrypted_pool}, job=True)
    call("pool.dataset.lock", passphrase_encrypted_pool, job=True)
    sysds = call("zfs.dataset.get_instance", f"{passphrase_encrypted_pool}/.system")
    assert sysds["properties"]["mounted"]["value"] == "yes"
    call("systemdataset.update", {"pool": pool}, job=True)
def test_system_dataset_mountpoints():
    """Every system-dataset mountpoint must match its specified owner and mode."""
    system_config = call("systemdataset.config")
    spec_list = call(
        "systemdataset.get_system_dataset_spec", system_config["pool"], system_config["uuid"]
    )
    for spec in spec_list:
        # Fall back to <system path>/<basename> when no explicit mountpoint given.
        mount_point = spec.get("mountpoint") or os.path.join(
            system_config["path"], os.path.basename(spec["name"])
        )
        stats = call("filesystem.stat", mount_point)
        chown = spec["chown_config"]
        assert stats["uid"] == chown["uid"]
        assert stats["gid"] == chown["gid"]
        assert stats["mode"] & 0o777 == chown["mode"]
def test_netdata_post_mount_action():
    """Netdata's ix_state directory proves system-dataset post-mount actions ran."""
    stats = call("filesystem.stat", "/var/db/system/netdata/ix_state")
    assert stats["uid"] == 999, stats
    assert stats["gid"] == 997, stats
    assert stats["mode"] & 0o777 == 0o755, stats
| 3,422 | Python | .py | 61 | 50.47541 | 99 | 0.708546 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,265 | test_device_get_disks_size.py | truenas_middleware/tests/api2/test_device_get_disks_size.py | from middlewared.test.integration.utils import call, ssh
def test_device_get_disks_size():
    """device.get_disks must report the same size in bytes that fdisk does."""
    disk = call('boot.get_disks')[0]
    # fdisk -s prints the size in 1 KiB blocks; convert to bytes.
    expected_bytes = int(ssh(f'fdisk -s /dev/{disk}').strip()) * 1024
    assert call('device.get_disks')[disk]['size'] == expected_bytes
| 275 | Python | .py | 5 | 51.2 | 70 | 0.690299 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,266 | test_zfs_dataset_list.py | truenas_middleware/tests/api2/test_zfs_dataset_list.py | from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call, ssh
def test__unlocked_zvols_fast__volmode():
    """unlocked_zvols_fast must not choke on a zvol whose parent uses
    volmode=full and whose device node carries a partition table."""
    with dataset("container") as container:
        # volmode=full exposes zvols beneath this dataset as full block devices.
        ssh(f"zfs set volmode=full {container}")
        with dataset("container/zvol", {"type": "VOLUME", "volsize": 100 * 1024 * 1024}) as zvol:
            # Add a 1 MiB partition so the zvol device has child partitions.
            ssh(f"sgdisk -n 1:1MiB:2MiB /dev/zvol/{zvol}")
            call("zfs.dataset.unlocked_zvols_fast", [["name", "=", zvol]], {}, ["SIZE", "RO", "DEVID", "ATTACHMENT"])
| 532 | Python | .py | 8 | 59.5 | 117 | 0.657692 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,267 | test_005_interface.py | truenas_middleware/tests/api2/test_005_interface.py | import time
import os
import errno
import pytest
from middlewared.service_exception import ValidationError, ValidationErrors
from auto_config import interface, ha, netmask
from middlewared.test.integration.utils.client import client, truenas_server
from middlewared.test.integration.utils import call
@pytest.fixture(scope="module")
def ws_client():
    # Module-scoped websocket client connected to the server under test.
    with client(host_ip=truenas_server.ip) as c:
        yield c
@pytest.fixture(scope="module")
def get_payload(ws_client):
    """Build the interface.update payload and the list of IPs expected to be
    live afterwards.  Returns (payload, to_validate)."""
    if ha:
        # HA: static addresses for both controllers plus the shared VIP.
        payload = {
            "ipv4_dhcp": False,
            "ipv6_auto": False,
            "failover_critical": True,
            "failover_group": 1,
            "aliases": [
                {
                    "type": "INET",
                    "address": os.environ["controller1_ip"],
                    "netmask": int(netmask),
                }
            ],
            "failover_aliases": [
                {
                    "type": "INET",
                    "address": os.environ["controller2_ip"],
                }
            ],
            "failover_virtual_aliases": [
                {
                    "type": "INET",
                    "address": os.environ["virtual_ip"],
                }
            ],
        }
        to_validate = [os.environ["controller1_ip"], os.environ["virtual_ip"]]
    else:
        # NOTE: on a non-HA system, this method is assuming
        # that the machine has been handed an IPv4 address
        # from a DHCP server. That's why we're getting this information.
        ans = ws_client.call(
            "interface.query", [["name", "=", interface]], {"get": True}
        )
        payload = {"ipv4_dhcp": False, "ipv6_auto": False, "aliases": []}
        to_validate = []
        ip = truenas_server.ip
        # Pin the currently-leased DHCP address as a static alias.
        for info in filter(lambda x: x["address"] == ip, ans["state"]["aliases"]):
            payload["aliases"].append({"address": ip, "netmask": info["netmask"]})
            to_validate.append(ip)
        assert all((payload["aliases"], to_validate))
    return payload, to_validate
# Make sure that our initial conditions are met
def test_001_check_ipvx(request):
    """Precondition: DHCP client is active and IPv6 autoconf is at its default."""
    # Verify that dhclient is running
    running, _ = call("interface.dhclient_status", interface)
    assert running is True
    # Check that our proc entry is set to its default 1.
    assert int(call("tunable.get_sysctl", f"net.ipv6.conf.{interface}.autoconf")) == 1
def test_002_configure_interface(request, ws_client, get_payload):
    """Apply the static network configuration, commit/checkin the change,
    and verify the expected addresses are live (including the VIP on HA)."""
    if ha:
        # can not make network changes on an HA system unless failover has
        # been explicitly disabled
        ws_client.call("failover.update", {"disabled": True, "master": True})
        assert ws_client.call("failover.config")["disabled"] is True
    # send the request to configure the interface
    ws_client.call("interface.update", interface, get_payload[0])
    # 1. verify there are pending changes
    # 2. commit the changes specifying the rollback timer
    # 3. verify that the changes that were committed, need to be "checked" in (finalized)
    # 4. finalize the changes (before the temporary changes are rolled back) (i.e. checkin)
    # 5. verify that there are no more pending interface changes
    assert ws_client.call("interface.has_pending_changes")
    ws_client.call("interface.commit", {"rollback": True, "checkin_timeout": 10})
    assert ws_client.call("interface.checkin_waiting")
    ws_client.call("interface.checkin")
    assert ws_client.call("interface.checkin_waiting") is None
    assert ws_client.call("interface.has_pending_changes") is False
    if ha:
        # on HA, keepalived is responsible for configuring the VIP so let's give it
        # some time to settle
        time.sleep(3)
    # We've configured the interface so let's make sure the ip addresses on the interface
    # match reality
    reality = set(
        [i["address"] for i in ws_client.call("interface.ip_in_use", {"ipv4": True})]
    )
    assert reality == set(get_payload[1])
    if ha:
        # let's go 1-step further and validate that the VIP accepts connections
        with client(host_ip=os.environ["virtual_ip"]) as c:
            assert c.call("core.ping") == "pong"
            assert c.call("failover.call_remote", "core.ping") == "pong"
        # it's very important to set this because the `tests/conftest.py` config
        # (that pytest uses globally for the entirety of CI runs) uses this IP
        # address and so we need to make sure it uses the VIP on HA systems
        truenas_server.ip = os.environ["virtual_ip"]
        truenas_server.nodea_ip = os.environ["controller1_ip"]
        truenas_server.nodeb_ip = os.environ["controller2_ip"]
        truenas_server.server_type = os.environ["SERVER_TYPE"]
def test_003_recheck_ipvx(request):
    # After the static configuration above, IPv6 autoconf must be disabled.
    assert int(call("tunable.get_sysctl", f"net.ipv6.conf.{interface}.autoconf")) == 0
@pytest.mark.skipif(not ha, reason="Test valid on HA systems only")
def test_004_remove_critical_failover_group(request):
    """Clearing failover_group while failover_critical is set must be rejected
    with the expected validation error."""
    with pytest.raises(ValidationErrors) as ve:
        call(
            "interface.update",
            interface,
            {"failover_group": None, "failover_critical": True},
        )
    assert ve.value.errors == [
        ValidationError(
            "interface_update.failover_group",
            "A failover group is required when configuring a critical failover interface.",
            errno.EINVAL,
        )
    ]
| 5,426 | Python | .py | 121 | 36.181818 | 91 | 0.628171 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,268 | test_001_ssh.py | truenas_middleware/tests/api2/test_001_ssh.py | import collections
import json
import os
import pytest
from functions import if_key_listed, SSH_TEST
from auto_config import sshKey, user, password
from middlewared.test.integration.utils import fail, ssh
from middlewared.test.integration.utils.client import client, truenas_server
@pytest.fixture(scope='module')
def ws_client():
    # Module-scoped websocket client connected to the server under test.
    with client(host_ip=truenas_server.ip) as c:
        yield c
def test_001_is_system_ready(ws_client):
    """Abort the whole run early if middleware does not report system.ready."""
    # other parts of the CI/CD pipeline should have waited
    # for middlewared to report as system.ready so this is
    # a smoke test to see if that's true. If it's not, then
    # the end-user can know that the entire integration run
    # will be non-deterministic because middleware plugins
    # internally expect that the system is ready before
    # propertly responding to REST/WS requests.
    if not ws_client.call('system.ready'):
        fail(f'System is not ready. Currently: {ws_client.call("system.state")}. Aborting tests.')
def test_002_firstboot_checks(ws_client):
    """Sanity-check a freshly installed system: boot-pool datasets, mount
    options, no stray services, and home-directory permissions."""
    expected_ds = [
        'boot-pool/.system',
        'boot-pool/.system/cores',
        'boot-pool/.system/nfs',
        'boot-pool/.system/samba4',
        'boot-pool/grub'
    ]
    # first make sure our expected datasets actually exist
    datasets = [i['name'] for i in ws_client.call('zfs.dataset.query', [], {'select': ['name']})]
    assert all(ds in datasets for ds in expected_ds)
    # now verify that they are mounted with the expected options
    mounts = {i['mount_source']: i for i in ws_client.call('filesystem.mount_info', [['fs_type', '=', 'zfs']])}
    assert all(mounts[ds]['super_opts'] == ['RW', 'XATTR', 'NOACL', 'CASESENSITIVE'] for ds in expected_ds)
    # Verify we don't have any unexpected services running
    # NOTE: smartd is started with "-q never" which means it should
    # always start in all circumstances (even if there is an invalid (or empty) config)
    ignore = ('smartd',)
    for srv in filter(lambda x: x['service'] not in ignore, ws_client.call('service.query')):
        assert srv['enable'] is False, f"{srv['service']} service is unexpectedly enabled"
        assert srv['state'] == 'STOPPED', f"{srv['service']} service expected STOPPED, but found {srv['state']}"
    # verify posix mode, uid and gid for standard users
    stat_info = {
        '/home/truenas_admin': {'mode': 0o40700, 'uid': 950, 'gid': 950},
        '/root': {'mode': 0o40700, 'uid': 0, 'gid': 0},
    }
    for path, expected_stat in stat_info.items():
        assert all(ws_client.call('filesystem.stat', path)[key] == expected_stat[key] for key in expected_stat)
def test_003_enable_ssh_for_root_user(ws_client):
    # enable ssh password login for root user (used by all tests that come after this one)
    filters, options = [['username', '=', 'root']], {'get': True}
    root_user_db_id = ws_client.call('user.query', filters, options)['id']
    ws_client.call('user.update', root_user_db_id, {'sshpubkey': sshKey, 'ssh_password_enabled': True})
    assert ws_client.call('user.query', filters, options)['ssh_password_enabled'] is True
def test_004_enable_and_start_ssh(ws_client):
    """Enable the SSH service at boot, start it, and confirm it is RUNNING."""
    # enable ssh to start at boot
    ws_client.call('service.update', 'ssh', {'enable': True})
    filters, options = [['srv_service', '=', 'ssh']], {'get': True}
    assert ws_client.call('datastore.query', 'services.services', filters, options)['srv_enable'] is True
    # start ssh
    ws_client.call('service.start', 'ssh')
    assert ws_client.call('service.query', [['service', '=', 'ssh']], options)['state'] == 'RUNNING'
def test_005_ssh_using_root_password():
    """Smoke-test password-based SSH login; abort the run if it fails."""
    results = SSH_TEST('ls -la', user, password)
    if not results['result']:
        fail(f"SSH is not usable: {results['output']}. Aborting tests.")
def test_006_setup_and_login_using_root_ssh_key():
    """Key-based SSH login must work via the agent (SSH_AUTH_SOCK)."""
    assert os.environ.get('SSH_AUTH_SOCK') is not None
    assert if_key_listed() is True # horrible function name
    results = SSH_TEST('ls -la', user, None)
    assert results['result'] is True, results['output']
@pytest.mark.parametrize('account', [
    {'type': 'GROUP', 'gid': 544, 'name': 'builtin_administrators'},
    {'type': 'GROUP', 'gid': 545, 'name': 'builtin_users'},
    {'type': 'GROUP', 'gid': 951, 'name': 'truenas_readonly_administrators'},
    {'type': 'GROUP', 'gid': 952, 'name': 'truenas_sharing_administrators'},
])
def test_007_check_local_accounts(ws_client, account):
    """Each well-known builtin group must exist and carry its expected name."""
    matches = ws_client.call('group.query', [['gid', '=', account['gid']]])
    if not matches:
        fail(f'{account["gid"]}: entry does not exist in db')
    found = matches[0]
    if found['group'] != account['name']:
        fail(f'Group has unexpected name: {account["name"]} -> {found["group"]}')
def test_008_check_root_dataset_settings(ws_client):
    """Verify every root-filesystem dataset matches the schema shipped in
    /conf/truenas_root_ds.json: mount source, forced mode, mount options."""
    data = SSH_TEST('cat /conf/truenas_root_ds.json', user, password)
    if not data['result']:
        fail(f'Unable to get dataset schema: {data["output"]}')
    try:
        ds_schema = json.loads(data['stdout'])
    except Exception as e:
        fail(f'Unable to load dataset schema: {e}')
    data = SSH_TEST('zfs get -o value -H truenas:developer /', user, password)
    if not data['result']:
        fail('Failed to determine whether developer mode enabled')
    # NOTE(review): assumes SSH_TEST strips the trailing newline from stdout,
    # otherwise this comparison can never be True — confirm.
    is_dev = data['stdout'] == 'on'
    for entry in ds_schema:
        fhs_entry = entry['fhs_entry']
        # Default mountpoint is /<name> when the schema has no explicit one.
        mp = fhs_entry.get('mountpoint') or os.path.join('/', fhs_entry['name'])
        if (force_mode := fhs_entry.get('mode')):
            st = ws_client.call('filesystem.stat', mp)
            assert st['mode'] & 0o777 == force_mode, f'{entry["ds"]}: unexpected permissions on dataset'
        fs = ws_client.call('filesystem.mount_info', [['mountpoint', '=', mp]])
        if not fs:
            fail(f'{mp}: mountpoint not found')
        fs = fs[0]
        if fs['mount_source'] != entry['ds']:
            fail(f'{fs["mount_source"]}: unexpected filesystem, expected {entry["ds"]}')
        if is_dev:
            # This is a run where root filesystem is unlocked. Don't bother checking remaining
            continue
        for opt in fhs_entry['options']:
            if opt not in fs['mount_opts'] and opt not in fs['super_opts']:
                assert opt in fs['mount_opts'], f'{opt}: mount option not present for {mp}: {fs["mount_opts"]}'
def test_009_check_listening_ports():
    """No unexpected TCP port may listen on the wildcard address; only
    22 (ssh), 80/443 (web UI) and 111 (rpcbind) are allowed."""
    listen = collections.defaultdict(set)
    for line in ssh("netstat -tuvpan | grep LISTEN").splitlines():
        # netstat columns: proto recv-q send-q local foreign state pid/program
        proto, _, _, local, _, _, process = line.split(maxsplit=6)
        if proto == "tcp":
            host, port = local.split(":", 1)
            if host != "0.0.0.0":
                continue
        elif proto == "tcp6":
            # IPv6 addresses contain ':'; split from the right to get the port.
            host, port = local.rsplit(":", 1)
            if host != "::":
                continue
        else:
            assert False, f"Unknown protocol {proto}"
        port = int(port)
        if port in [22, 80, 111, 443]:
            continue
        listen[int(port)].add(process.strip())
    assert not listen, f"Invalid ports listening on 0.0.0.0: {dict(listen)}"
| 7,074 | Python | .py | 135 | 45.659259 | 112 | 0.638406 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,269 | test_347_posix_mode.py | truenas_middleware/tests/api2/test_347_posix_mode.py | #!/usr/bin/env python3
# License: BSD
import os
import pytest
import stat
from functions import SSH_TEST
from middlewared.test.integration.assets.account import user, group
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call, ssh
from time import sleep
# Names of the dataset (and nested child dataset) used throughout this module.
MODE_DATASET_NAME = 'modetest'
MODE_SUBDATASET_NAME = 'modetest/sub1'
# POSIX permission bits grouped by class; keys are used as pytest parameters.
OWNER_BITS = {
    "OWNER_READ": stat.S_IRUSR,
    "OWNER_WRITE": stat.S_IWUSR,
    "OWNER_EXECUTE": stat.S_IXUSR,
}
GROUP_BITS = {
    "GROUP_READ": stat.S_IRGRP,
    "GROUP_WRITE": stat.S_IWGRP,
    "GROUP_EXECUTE": stat.S_IXGRP,
}
OTHER_BITS = {
    "OTHER_READ": stat.S_IROTH,
    "OTHER_WRITE": stat.S_IWOTH,
    "OTHER_EXECUTE": stat.S_IXOTH,
}
# All nine bits in one mapping.
MODE = {**OWNER_BITS, **GROUP_BITS, **OTHER_BITS}
# Throwaway account used for SSH-based permission probing.
MODE_USER = "modetesting"
MODE_GROUP = "modetestgrp"
MODE_PWD = "modetesting"
@pytest.fixture(scope='module')
def get_dataset():
    """Yield a test dataset containing dir1/dir2/testfile plus a nested
    child dataset at sub1 (used to verify traverse behavior)."""
    with dataset(MODE_DATASET_NAME) as ds:
        path = os.path.join('/mnt', ds)
        ssh(f'mkdir -p {path}/dir1/dir2')
        ssh(f'touch {path}/dir1/dir2/testfile')
        with dataset(MODE_SUBDATASET_NAME):
            yield ds
@pytest.fixture(scope='module')
def get_user():
    """Yield a throwaway SSH-enabled user, augmented with the gid of an
    auxiliary group ('group_gid') it belongs to."""
    with group({"name": MODE_GROUP}) as g:
        with user({
            'username': MODE_USER,
            'full_name': MODE_USER,
            'password': MODE_PWD,
            'group_create': True,
            'shell': '/usr/bin/bash',
            'ssh_password_enabled': True,
            'groups': [g['id']]
        }) as u:
            yield u | {'group_gid': g['gid']}
@pytest.fixture(scope='function')
def setup_file(get_dataset):
    """Create a 'canary' shell script in the dataset root for permission
    probes; always attempt cleanup afterwards."""
    ds_path = os.path.join('/mnt', get_dataset)
    try:
        ssh(f'echo "echo CANARY" > {ds_path}/canary')
        yield
    finally:
        # check=False: the test itself may already have removed the file.
        ssh(f'rm {ds_path}/canary', check=False)
def get_mode_octal(path):
    """Return the permission bits of *path* as a zero-padded octal string."""
    st_mode = call('filesystem.stat', path)['mode']
    return format(stat.S_IMODE(st_mode), '03o')
@pytest.mark.dependency(name="IS_TRIVIAL")
def test_verify_acl_is_trivial(get_dataset):
    """The fresh test dataset must not carry a non-trivial ACL."""
    stat_out = call('filesystem.stat', os.path.join('/mnt', get_dataset))
    assert stat_out['acl'] is False
@pytest.mark.parametrize('mode_bit', MODE.keys())
def test_verify_setting_mode_bits_nonrecursive(get_dataset, mode_bit):
    """
    This test iterates through possible POSIX permissions bits and
    verifies that they are properly set on the remote server.
    """
    new_mode = f"{MODE[mode_bit]:03o}"
    path = os.path.join('/mnt', get_dataset)
    # 65534 is the conventional nobody/nogroup id.
    call('filesystem.setperm', {
        'path': path,
        'mode': new_mode,
        'uid': 65534,
        'gid': 65534
    }, job=True)
    server_mode = get_mode_octal(path)
    assert new_mode == server_mode
@pytest.mark.parametrize('mode_bit', MODE.keys())
def test_verify_setting_mode_bits_recursive_no_traverse(get_dataset, mode_bit):
    """
    Perform recursive permissions change and verify new mode written
    to files and subdirectories.
    """
    ds_path = os.path.join('/mnt', get_dataset)
    sub_ds_path = os.path.join(ds_path, 'sub1')
    new_mode = f"{MODE[mode_bit]:03o}"
    # recursive but not traverse: must stop at the child-dataset boundary.
    call('filesystem.setperm', {
        'path': ds_path,
        'mode': new_mode,
        'uid': 65534,
        'gid': 65534,
        'options': {'recursive': True}
    }, job=True)
    server_mode = get_mode_octal(ds_path)
    assert new_mode == server_mode
    server_mode = get_mode_octal(os.path.join(ds_path, 'dir1', 'dir2'))
    assert new_mode == server_mode
    server_mode = get_mode_octal(os.path.join(ds_path, 'dir1', 'dir2', 'testfile'))
    assert new_mode == server_mode
    # child dataset shouldn't be touched
    server_mode = get_mode_octal(sub_ds_path)
    assert server_mode == "755"
def test_verify_traverse_to_child_dataset(get_dataset):
    """With traverse=True a recursive setperm must cross into the child
    dataset and change its mode too."""
    ds_path = os.path.join('/mnt', get_dataset)
    sub_ds_path = os.path.join(ds_path, 'sub1')
    call('filesystem.setperm', {
        'path': ds_path,
        'mode': '777',
        'uid': 65534,
        'gid': 65534,
        'options': {'recursive': True, 'traverse': True}
    }, job=True)
    server_mode = get_mode_octal(sub_ds_path)
    assert server_mode == "777"
def dir_mode_check(mode_bit, MODE_DATASET):
    """Verify that exactly the access implied by *mode_bit* (a MODE key such
    as "OWNER_READ") is granted on the dataset directory, by running ls /
    touch / cd over SSH as the unprivileged test user.

    NOTE: for WRITE the caller is expected to have set execute as well, since
    creating/removing entries requires directory search permission.
    """
    if mode_bit.endswith("READ"):
        # Read-only: listing works; creating entries and entering the dir fail.
        cmd = f'ls /mnt/{MODE_DATASET}'
        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
        assert results['result'] is True, results['output']
        cmd = f'touch /mnt/{MODE_DATASET}/canary'
        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
        assert results['result'] is False, results['output']
        cmd = f'cd /mnt/{MODE_DATASET}'
        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
        assert results['result'] is False, results['output']
    elif mode_bit.endswith("WRITE"):
        # Write (+execute): listing fails but creating and removing entries work.
        cmd = f'ls /mnt/{MODE_DATASET}'
        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
        assert results['result'] is False, results['output']
        # Ensure that file is deleted before trying to create
        ssh(f'rm /mnt/{MODE_DATASET}/canary', check=False)
        cmd = f'touch /mnt/{MODE_DATASET}/canary'
        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
        assert results['result'] is True, results['output']
        cmd = f'rm /mnt/{MODE_DATASET}/canary'
        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
        assert results['result'] is True, results['output']
    elif mode_bit.endswith("EXECUTE"):
        # Execute only: neither listing nor creating entries is permitted.
        cmd = f'ls /mnt/{MODE_DATASET}'
        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
        assert results['result'] is False, results['output']
        # Ensure that file is deleted before trying to create
        ssh(f'rm /mnt/{MODE_DATASET}/canary', check=False)
        cmd = f'touch /mnt/{MODE_DATASET}/canary'
        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
        assert results['result'] is False, results['output']
def file_mode_check(mode_bit, MODE_DATASET):
    """Verify that exactly the access implied by *mode_bit* is granted on the
    canary file (which contains the script `echo CANARY`), by running cat /
    redirect / execute over SSH as the unprivileged test user.

    Fix: the WRITE branch previously contained a bare triple-quoted string
    used as a comment (a no-op expression statement); it is now a comment.
    """
    if mode_bit.endswith("READ"):
        # Read-only: cat succeeds, append and execute fail.
        cmd = f'cat /mnt/{MODE_DATASET}/canary'
        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
        assert results['result'] is True, results['output']
        assert results['stdout'].strip() == "echo CANARY", results['output']
        cmd = f'echo "FAIL" >> /mnt/{MODE_DATASET}/canary'
        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
        assert results['result'] is False, results['output']
        cmd = f'/mnt/{MODE_DATASET}/canary'
        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
        assert results['result'] is False, results['output']
    elif mode_bit.endswith("WRITE"):
        # Write-only: cat and execute fail, overwrite succeeds.
        cmd = f'cat /mnt/{MODE_DATASET}/canary'
        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
        assert results['result'] is False, results['output']
        cmd = f'echo "SUCCESS" > /mnt/{MODE_DATASET}/canary'
        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
        assert results['result'] is True, results['output']
        cmd = f'/mnt/{MODE_DATASET}/canary'
        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
        assert results['result'] is False, results['output']
        # Parent directory does not have write bit set. This
        # means rm should fail even though WRITE is set for user.
        cmd = f'rm /mnt/{MODE_DATASET}/canary'
        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
        assert results['result'] is False, results['output']
        # Restore the canary contents clobbered by the successful write above.
        ssh(f'echo "echo CANARY" > /mnt/{MODE_DATASET}/canary')
    elif mode_bit.endswith("EXECUTE"):
        # Execute-only: read and write both fail.
        cmd = f'cat /mnt/{MODE_DATASET}'
        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
        assert results['result'] is False, results['output']
        cmd = f'echo "FAIL" > /mnt/{MODE_DATASET}/canary'
        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
        assert results['result'] is False, results['output']
def file_mode_check_xor(mode_bit, MODE_DATASET):
    """
    when this method is called, all permissions bits are set except for
    the one being tested.
    """
    # With only the tested bit cleared, the corresponding operation must fail.
    if mode_bit.endswith("READ"):
        cmd = f'cat /mnt/{MODE_DATASET}/canary'
        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
        assert results['result'] is False, results['output']
    elif mode_bit.endswith("WRITE"):
        cmd = f'echo "SUCCESS" > /mnt/{MODE_DATASET}/canary'
        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
        assert results['result'] is False, results['output']
    elif mode_bit.endswith("EXECUTE"):
        cmd = f'/mnt/{MODE_DATASET}/canary'
        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
        assert results['result'] is False, results['output']
@pytest.mark.parametrize('mode_bit', OWNER_BITS.keys())
def test_directory_owner_bits_function_allow(mode_bit, get_dataset, get_user):
    """
    Verify mode behavior correct when it's the only bit set.
    In case of directory, Execute must be set concurrently with write
    in order to verify correct write behavior.
    """
    ds_path = os.path.join('/mnt', get_dataset)
    new_mode = MODE[mode_bit]
    if new_mode == stat.S_IWUSR:
        new_mode |= stat.S_IXUSR
    # Owner is the test user; group stays nobody so only owner bits apply.
    call('filesystem.setperm', {
        'path': ds_path,
        'mode': f'{new_mode:03o}',
        'uid': get_user['uid'],
        'gid': 65534,
    }, job=True)
    dir_mode_check(mode_bit, get_dataset)
@pytest.mark.parametrize('mode_bit', GROUP_BITS.keys())
def test_directory_group_bits_function_allow(mode_bit, get_dataset, get_user):
    """
    Verify mode behavior correct when it's the only bit set.
    In case of directory, Execute must be set concurrently with write
    in order to verify correct write behavior.
    """
    ds_path = os.path.join('/mnt', get_dataset)
    new_mode = MODE[mode_bit]
    if new_mode == stat.S_IWGRP:
        new_mode |= stat.S_IXGRP
    # Owner is root; group is the test user's auxiliary group.
    call('filesystem.setperm', {
        'path': ds_path,
        'mode': f'{new_mode:03o}',
        'uid': 0,
        'gid': get_user['group_gid'],
    }, job=True)
    dir_mode_check(mode_bit, get_dataset)
@pytest.mark.parametrize('mode_bit', OTHER_BITS.keys())
def test_directory_other_bits_function_allow(mode_bit, get_dataset, setup_file):
    """
    Verify mode behavior correct when it's the only bit set.
    In case of directory, Execute must be set concurrently with write
    in order to verify correct write behavior.
    """
    ds_path = os.path.join('/mnt', get_dataset)
    new_mode = MODE[mode_bit]
    if new_mode == stat.S_IWOTH:
        new_mode |= stat.S_IXOTH
    # root:root ownership means only the 'other' bits govern the test user.
    call('filesystem.setperm', {
        'path': ds_path,
        'mode': f'{new_mode:03o}',
        'uid': 0,
        'gid': 0,
    }, job=True)
    # NOTE(review): purpose of this settle delay is not evident — confirm
    # whether it is still needed.
    sleep(5)
    dir_mode_check(mode_bit, get_dataset)
def test_setup_dataset_perm(get_dataset):
    """ Allow execute permission on dataset mountpoint to facilitate file testing """
    ds_path = os.path.join('/mnt', get_dataset)
    # mode 001: others get search-only access so the following file tests can
    # reach the canary file regardless of its ownership.
    call('filesystem.setperm', {
        'path': ds_path,
        'mode': '001',
        'uid': 0,
        'gid': 0,
    }, job=True)
@pytest.mark.parametrize('mode_bit', OWNER_BITS.keys())
def test_file_owner_bits_function_allow(mode_bit, get_dataset, get_user, setup_file):
    """
    Verify mode behavior correct when it's the only bit set.
    """
    ds_path = os.path.join('/mnt', get_dataset)
    new_mode = MODE[mode_bit]
    # Owner is the test user; only the tested owner bit is set on the canary.
    call('filesystem.setperm', {
        'path': os.path.join(ds_path, 'canary'),
        'mode': f'{new_mode:03o}',
        'uid': get_user['uid'],
        'gid': 0,
    }, job=True)
    file_mode_check(mode_bit, get_dataset)
@pytest.mark.parametrize('mode_bit', GROUP_BITS.keys())
def test_file_group_bits_function_allow(mode_bit, get_dataset, get_user, setup_file):
    """
    Verify mode behavior correct when it's the only bit set.
    """
    ds_path = os.path.join('/mnt', get_dataset)
    new_mode = MODE[mode_bit]
    # Group is the test user's auxiliary group; owner stays root.
    call('filesystem.setperm', {
        'path': os.path.join(ds_path, 'canary'),
        'mode': f'{new_mode:03o}',
        'gid': get_user['group_gid'],
        'uid': 0,
    }, job=True)
    file_mode_check(mode_bit, get_dataset)
@pytest.mark.parametrize('mode_bit', OTHER_BITS.keys())
def test_file_other_bits_function_allow(mode_bit, get_dataset, get_user, setup_file):
    """
    Verify mode behavior correct when it's the only bit set.
    """
    ds_path = os.path.join('/mnt', get_dataset)
    new_mode = MODE[mode_bit]
    # root:root ownership means only the 'other' bits govern the test user.
    call('filesystem.setperm', {
        'path': os.path.join(ds_path, 'canary'),
        'mode': f'{new_mode:03o}',
        'gid': 0,
        'uid': 0,
    }, job=True)
    file_mode_check(mode_bit, get_dataset)
@pytest.mark.parametrize('mode_bit', OWNER_BITS.keys())
def test_file_owner_bits_xor(mode_bit, get_dataset, get_user, setup_file):
    """
    Verify mode behavior correct when it's the only bit cleared
    (all other permission bits remain set).
    """
    ds_path = os.path.join('/mnt', get_dataset)
    # Start from full 777 and clear just the tested bit.
    new_mode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
    new_mode = new_mode ^ MODE[mode_bit]
    call('filesystem.setperm', {
        'path': os.path.join(ds_path, 'canary'),
        'mode': f'{new_mode:03o}',
        'gid': 0,
        'uid': get_user['uid'],
    }, job=True)
    file_mode_check_xor(mode_bit, get_dataset)
@pytest.mark.parametrize('mode_bit', GROUP_BITS.keys())
def test_file_group_bits_xor(mode_bit, get_dataset, get_user, setup_file):
    """
    Verify mode behavior correct when it's the only bit cleared
    (all other permission bits remain set).
    """
    ds_path = os.path.join('/mnt', get_dataset)
    # Start from full 777 and clear just the tested bit.
    new_mode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
    new_mode = new_mode ^ MODE[mode_bit]
    call('filesystem.setperm', {
        'path': os.path.join(ds_path, 'canary'),
        'mode': f'{new_mode:03o}',
        'gid': get_user['group_gid'],
        'uid': 0,
    }, job=True)
    file_mode_check_xor(mode_bit, get_dataset)
@pytest.mark.parametrize('mode_bit', OTHER_BITS.keys())
def test_file_other_bits_xor(mode_bit, get_dataset, get_user, setup_file):
    """
    Verify mode behavior correct when it's the only bit cleared
    (all other permission bits remain set).
    """
    ds_path = os.path.join('/mnt', get_dataset)
    # Start from full 777 and clear just the tested bit.
    new_mode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
    new_mode = new_mode ^ MODE[mode_bit]
    call('filesystem.setperm', {
        'path': os.path.join(ds_path, 'canary'),
        'mode': f'{new_mode:03o}',
        'gid': 0,
        'uid': 0,
    }, job=True)
    file_mode_check_xor(mode_bit, get_dataset)
| 14,219 | Python | .py | 357 | 33.467787 | 85 | 0.628634 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,270 | test_filesystem__put.py | truenas_middleware/tests/api2/test_filesystem__put.py | import json
import os
import sys
import tempfile
apifolder = os.getcwd()
sys.path.append(apifolder)
from functions import wait_on_job, POST
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call
def upload_file(file_path, file_path_on_tn):
    """Upload local *file_path* to *file_path_on_tn* on the server via the
    filesystem.put upload endpoint; block (up to 300s) and return the job."""
    data = {'method': 'filesystem.put', 'params': [file_path_on_tn]}
    with open(file_path, 'rb') as f:
        response = POST(
            '/_upload/',
            files={'data': json.dumps(data), 'file': f},
            use_ip_only=True,
            force_new_headers=True,
        )
    job_id = json.loads(response.text)['job_id']
    return wait_on_job(job_id, 300)
def file_exists(file_path):
    """Return True if *file_path* exists as a regular FILE per filesystem.listdir."""
    parent = os.path.dirname(file_path)
    target = os.path.basename(file_path)
    return any(
        e['name'] == target and e['type'] == 'FILE'
        for e in call('filesystem.listdir', parent)
    )
def test_put_file():
    # Upload into an unlocked dataset; the job must succeed.
    upload_file_impl(False)
def test_put_file_in_locked_dataset():
    # Upload into a locked dataset; the job must fail.
    upload_file_impl(True)
def upload_file_impl(lock):
    """Upload a small file via filesystem.put into a passphrase-encrypted
    dataset.  With lock=True the dataset is locked first, so the upload job
    must fail and the file must not exist afterwards."""
    with tempfile.NamedTemporaryFile(mode='w') as f:
        f.write('filesystem.put test')
        f.flush()
        with dataset(
            'test_filesystem_put', data={
                'encryption': True,
                'inherit_encryption': False,
                'encryption_options': {'passphrase': '12345678'}
            },
        ) as test_dataset:
            if lock:
                call('pool.dataset.lock', test_dataset, job=True)
            destination = f'/mnt/{test_dataset}/testfile'
            job_detail = upload_file(f.name, destination)
            expected_state = 'FAILED' if lock else 'SUCCESS'
            assert job_detail['results']['state'] == expected_state
            assert file_exists(destination) is not lock
| 1,763 | Python | .py | 46 | 30.608696 | 86 | 0.620305 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,271 | test_300_nfs.py | truenas_middleware/tests/api2/test_300_nfs.py | import contextlib
import ipaddress
import os
import re
from copy import copy
from time import sleep
import pytest
from middlewared.service_exception import (
ValidationError, ValidationErrors, CallError, InstanceNotFound
)
from middlewared.test.integration.assets.account import group as create_group
from middlewared.test.integration.assets.account import user as create_user
from middlewared.test.integration.assets.filesystem import directory
from middlewared.test.integration.assets.pool import another_pool
from middlewared.test.integration.utils import call, mock, ssh
from middlewared.test.integration.utils.string import random_string
from middlewared.test.integration.utils.client import truenas_server
from middlewared.test.integration.utils.failover import wait_for_standby
from middlewared.test.integration.utils.system import reset_systemd_svcs as reset_svcs
from auto_config import hostname, password, pool_name, user, ha
from protocols import SSH_NFS, nfs_share
# Client-side mount point and the dataset exported over NFS in these tests.
MOUNTPOINT = f"/tmp/nfs-{hostname}"
dataset = f"{pool_name}/nfs"
dataset_url = dataset.replace('/', '%2F')
NFS_PATH = "/mnt/" + dataset
# Alias
pp = pytest.param
# Supported configuration files
# Maps a config type to its pathname on the server and the ini sections
# parse_server_config() expects to find in it.
conf_file = {
    "nfs": {
        "pname": "/etc/nfs.conf.d/local.conf",
        "sections": {
            'nfsd': {},
            'exportd': {},
            'nfsdcld': {},
            'nfsdcltrack': {},
            'mountd': {},
            'statd': {},
            'lockd': {}}
    },
    "idmapd": {
        "pname": "/etc/idmapd.conf",
        "sections": {"General": {}, "Mapping": {}, "Translation": {}}
    }
}
# =====================================================================
# Fixtures and utilities
# =====================================================================
class NFS_CONFIG:
    '''This is used to restore the NFS config to it's original state'''
    # Snapshot of nfs.config taken before the tests mutate anything.
    initial_nfs_config = {}
    # These are the expected default config values
    default_config = {
        "allow_nonroot": False,
        "protocols": ["NFSV3", "NFSV4"],
        "v4_krb": False,
        "v4_domain": "",
        "bindip": [],
        "mountd_port": None,
        "rpcstatd_port": None,
        "rpclockd_port": None,
        "mountd_log": False,  # nfs.py indicates this should be True, but db says False
        "statd_lockd_log": False,
        "v4_krb_enabled": False,
        "userd_manage_gids": False,
        "keytab_has_nfs_spn": False,
        "managed_nfsd": True,
        "rdma": False,
    }
    # Snapshot of the service entry taken before the tests mutate anything.
    initial_service_state = {}
    # These are the expected default run state values
    default_service_state = {
        "service": "nfs",
        "enable": False,
        "state": "STOPPED",
        "pids": []
    }
def parse_exports():
    """Parse /etc/exports into a list of {'path': ..., 'opts': [...]} dicts.

    Share lines start at column 0; each following tab-indented line is a
    host(parameters) clause belonging to that share.
    """
    exp = ssh("cat /etc/exports").splitlines()
    rv = []
    for idx, line in enumerate(exp):
        # Skip blanks and the tab-indented option lines (consumed below).
        if not line or line.startswith('\t'):
            continue
        # [1:-2] drops the leading quote and the trailing quote+backslash
        # of the quoted share-path line. TODO confirm against exports format.
        entry = {"path": line.strip()[1:-2], "opts": []}
        i = idx + 1
        # Consume the tab-indented 'host(param,param,...)' lines that follow.
        while i < len(exp):
            if not exp[i].startswith('\t'):
                break
            e = exp[i].strip()
            host, params = e.split('(', 1)
            entry['opts'].append({
                "host": host,
                "parameters": params[:-1].split(",")
            })
            i += 1
        rv.append(entry)
    return rv
def parse_server_config(conf_type="nfs"):
    '''
    Parse known 'ini' style conf files.  See definition of conf_file above.

    Debian will read to /etc/default/nfs-common and then /etc/nfs.conf
    All TrueNAS NFS settings are in /etc/nfs.conf.d/local.conf as overrides

    Returns {section: {key: value, ...}, ...} for the file's sections.
    '''
    assert conf_type in conf_file.keys(), f"{conf_type} is not a supported conf type"
    pathname = conf_file[conf_type]['pname']
    # Build a fresh result instead of aliasing the nested dicts inside the
    # module-level conf_file template: the previous code mutated those dicts
    # in place, so values parsed by one call leaked into every later call.
    rv = {section: {} for section in conf_file[conf_type]['sections']}
    expected_sections = rv.keys()
    # Read the file and parse it
    res = ssh(f"cat {pathname}")
    conf = res.splitlines()
    section = ''
    for line in conf:
        if not line or line.startswith("#"):
            continue
        if line.startswith("["):
            section = line.split('[')[1].split(']')[0]
            assert section in expected_sections, f"Unexpected section found: {section}"
            continue
        # Entries look like 'key = value'; split only on the first ' = '.
        k, v = line.split(" = ", 1)
        rv[section].update({k: v})
    return rv
def parse_rpcbind_config():
    '''
    In Debian 12 (Bookworm) rpcbind uses /etc/default/rpcbind.
    Look for /etc/rpcbind.conf in future releases.

    Returns a dict of the parsed OPTIONS flags, e.g. {'-w': '', '-h': ip}.
    '''
    conf = ssh("cat /etc/default/rpcbind").splitlines()
    rv = {}
    # With bindip the line of interest looks like: OPTIONS=-w -h 192.168.40.156
    for line in conf:
        if not line or line.startswith("#"):
            continue
        if line.startswith("OPTIONS"):
            opts = line.split('=')[1].split()
            # '-w' is hard-wired, lets confirm that
            assert len(opts) > 0
            assert '-w' == opts[0]
            rv['-w'] = ''
            # If there are more opts they must be the bindip settings
            if len(opts) == 3:
                rv[opts[1]] = opts[2]
    return rv
def get_nfs_service_state():
    """Return the NFS service run state (e.g. 'RUNNING' or 'STOPPED')."""
    svc = call('service.query', [['service', '=', 'nfs']], {'get': True})
    return svc['state']
def set_nfs_service_state(do_what=None, expect_to_pass=True, fail_check=False):
    """
    Start or Stop NFS service

    :param do_what: 'start' or 'stop' (required)
    :param expect_to_pass: when False, the service call itself must raise CallError
    :param fail_check: optional substring expected in the CallError message
    :return: result of 'service.started' when expect_to_pass is True, else None

    Fix: an early 'return res' after the start/stop call previously made the
    "Confirm requested state" block below unreachable dead code; the state is
    now actually verified before returning.
    """
    assert do_what in ['start', 'stop'], f"Requested invalid service state: {do_what}"
    test_res = {'start': True, 'stop': False}
    if expect_to_pass:
        call(f'service.{do_what}', 'nfs', {'silent': False})
        # Brief settle time before verifying the resulting state.
        sleep(1)
    else:
        with pytest.raises(CallError) as e:
            call(f'service.{do_what}', 'nfs', {'silent': False})
        if fail_check:
            assert fail_check in str(e.value)
    # Confirm requested state
    if expect_to_pass:
        res = call('service.started', 'nfs')
        assert res == test_res[do_what], f"Expected {test_res[do_what]} for NFS started result, but found {res}"
        return res
def get_client_nfs_port():
    '''
    Output from netstat -nt looks like:
        tcp        0      0 127.0.0.1:50664        127.0.0.1:6000         ESTABLISHED
    The client port is the number after the ':' in the 5th column

    Returns (matching netstat line, client port) or (None, None) if no
    established connection to the server's NFS port is found.
    '''
    rv = (None, None)
    res = ssh("netstat -nt")
    for line in str(res).splitlines():
        # The server will listen on port 2049
        if f"{truenas_server.ip}:2049" == line.split()[3]:
            rv = (line, line.split()[4].split(':')[1])
    return rv
def set_immutable_state(path: str, want_immutable=True):
    '''
    Set or clear the immutable flag on `path` and confirm the change took.

    Used by exportsd test.

    Fix: the confirmation previously queried hard-coded '/etc/exports.d'
    regardless of `path`, and the failure message described the opposite
    expectation. We now verify the same path we just modified.
    '''
    call('filesystem.set_immutable', want_immutable, path)
    res = call('filesystem.is_immutable', path)
    assert res is want_immutable, f"Expected is_immutable to be {want_immutable} on {path}, but found {res}"
def confirm_nfsd_processes(expected):
    ''' Confirm the kernel reports the expected number of nfsd threads. '''
    threads = ssh("cat /proc/fs/nfsd/threads")
    assert int(threads) == expected, threads
def confirm_mountd_processes(expected):
    ''' Confirm the expected number of mountd processes are running. '''
    rx_mountd = r"rpc\.mountd"
    num_detected = int(ssh(f"ps -ef | grep '{rx_mountd}' | wc -l"))
    # If there is more than one, we subtract one to account for the rpc.mountd thread manager
    if num_detected > 1:
        num_detected -= 1
    assert num_detected == expected
def confirm_rpc_processes(expected=('idmapd', 'bind', 'statd')):
    '''
    Confirm the expected rpc processes are running.

    NB: This only supports the listed names.
    The default is a tuple rather than a list to avoid the shared
    mutable-default-argument pitfall.
    '''
    prepend = {'idmapd': 'rpc.', 'bind': 'rpc', 'statd': 'rpc.'}
    for n in expected:
        procname = prepend[n] + n
        assert len(ssh(f"pgrep {procname}").splitlines()) > 0
def confirm_nfs_version(expected=()):
    '''
    Confirm the expected NFS versions are 'enabled and supported'.

    Possible values for expected:
        ["3"] means NFSv3 only
        ["4"] means NFSv4 only
        ["3","4"] means both NFSv3 and NFSv4
    The default is an empty tuple rather than a list to avoid the shared
    mutable-default-argument pitfall.
    '''
    result = ssh("rpcinfo -s | grep ' nfs '").strip().split()[1]
    for v in expected:
        assert v in result, result
def confirm_rpc_port(rpc_name, port_num):
    '''
    Confirm the expected port for the requested rpc process.
    rpc_name = ('mountd', 'status', 'nlockmgr')
    '''
    # example rpcinfo line: '100005 3 tcp 618 mountd'
    entry = ssh(f"rpcinfo -p | grep {rpc_name} | grep tcp")
    assert int(entry.split()[3]) == port_num, str(entry)
def run_missing_usrgrp_mapping_test(data: list[str], usrgrp, tmp_path, share, usrgrpInst):
    '''
    Used by test_invalid_user_group_mapping.

    Deletes the mapped user/group, confirms the export is dropped and an
    alert is raised, then remaps to a built-in account and confirms the
    export is restored and the alert is cleared.
    '''
    parsed = parse_exports()
    assert len(parsed) == 2, str(parsed)
    this_share = [entry for entry in parsed if entry['path'] == f'{tmp_path}']
    assert len(this_share) == 1, f"Did not find share {tmp_path}.\nexports = {parsed}"
    # Remove the user/group and restart nfs
    call(f'{usrgrp}.delete', usrgrpInst['id'])
    call('service.restart', 'nfs')
    # An alert should be generated
    alerts = call('alert.list')
    this_alert = [entry for entry in alerts if entry['klass'] == "NFSexportMappingInvalidNames"]
    assert len(this_alert) == 1, f"Did not find alert for 'NFSexportMappingInvalidNames'.\n{alerts}"
    # The NFS export should have been removed
    parsed = parse_exports()
    assert len(parsed) == 1, str(parsed)
    this_share = [entry for entry in parsed if entry['path'] == f'{tmp_path}']
    assert len(this_share) == 0, f"Unexpectedly found share {tmp_path}.\nexports = {parsed}"
    # Modify share to map with a built-in user or group and restart NFS
    call('sharing.nfs.update', share, {data[0]: "ftp"})
    call('service.restart', 'nfs')
    # The alert should be cleared. Filter on 'klass' to mirror the check
    # above; the original filtered on 'key', which never equals the class
    # name and made this assertion vacuously pass.
    alerts = call('alert.list')
    this_alert = [entry for entry in alerts if entry['klass'] == "NFSexportMappingInvalidNames"]
    assert len(this_alert) == 0, f"Unexpectedly found alert 'NFSexportMappingInvalidNames'.\n{alerts}"
    # Share should have been restored
    parsed = parse_exports()
    assert len(parsed) == 2, str(parsed)
    this_share = [entry for entry in parsed if entry['path'] == f'{tmp_path}']
    assert len(this_share) == 1, f"Did not find share {tmp_path}.\nexports = {parsed}"
@contextlib.contextmanager
def manage_start_nfs():
    """ Start NFS for the duration of the context and yield the start result.

    The exit state is managed by init_nfs """
    try:
        # Start inside the try so a failed start still triggers the stop
        yield set_nfs_service_state('start')
    finally:
        set_nfs_service_state('stop')
def move_systemdataset(new_pool_name):
    '''
    Move the system dataset to the requested pool and return the new config.

    On HA systems, wait for the standby node to settle first.
    (The original wrapped the update in `try/except Exception as e: raise e`,
    a no-op that only obscured the traceback; failures now propagate directly.)
    '''
    call('systemdataset.update', {'pool': new_pool_name}, job=True)
    if ha:
        wait_for_standby()
    return call('systemdataset.config')
@contextlib.contextmanager
def system_dataset(new_pool_name):
    '''
    Temporarily relocate the system dataset to new_pool_name,
    restoring the original location on exit.
    '''
    original = call('systemdataset.config')
    try:
        yield move_systemdataset(new_pool_name)
    finally:
        move_systemdataset(original['pool'])
@contextlib.contextmanager
def nfs_dataset(name, options=None, acl=None, mode=None, pool=None):
    """
    Create a dataset for NFS testing and destroy it on exit, retrying the
    delete while the dataset is busy.

    NOTE: This is _nearly_ the same as the 'dataset' test asset. The difference
    is the retry loop.
    TODO: Enhance the 'dataset' test asset to include a retry loop
    """
    assert "/" not in name
    ds_name = f"{pool if pool else pool_name}/{name}"
    try:
        call("pool.dataset.create", {"name": ds_name, **(options or {})})
        if acl is None:
            call("filesystem.setperm", {'path': f"/mnt/{ds_name}", "mode": mode or "777"}, job=True)
        else:
            call("filesystem.setacl", {'path': f"/mnt/{ds_name}", "dacl": acl}, job=True)
        yield ds_name
    finally:
        # dataset may be busy
        sleep(2)
        for _ in range(6):
            try:
                call("pool.dataset.delete", ds_name)
            except InstanceNotFound:
                # Already gone: also success
                break
            except Exception:
                # Cannot yet delete; wait and retry
                sleep(10)
            else:
                # Deleted successfully
                break
@contextlib.contextmanager
def nfs_config():
    ''' Snapshot the NFS configuration and restore it on exit.

    Yields a copy so the caller cannot mutate the dict used for the restore.
    '''
    try:
        nfs_db_conf = call("nfs.config")
        # Strip read-only keys that nfs.update will not accept.
        # (Plain loop replaces a list comprehension used only for side effects.)
        for key in ['id', 'v4_krb_enabled', 'v4_owner_major', 'keytab_has_nfs_spn', 'managed_nfsd']:
            nfs_db_conf.pop(key)
        yield copy(nfs_db_conf)
    finally:
        call("nfs.update", nfs_db_conf)
@contextlib.contextmanager
def nfs_share_config(nfsid: int):
    ''' Snapshot an NFS share's settings and restore them on exit.

    Yields a copy so the caller cannot mutate the dict used for the restore.
    '''
    try:
        configs = call("sharing.nfs.query", [["id", "=", nfsid]])
        assert configs != []
        share_config = configs[0]
        yield copy(share_config)
    finally:
        # Strip keys that sharing.nfs.update will not accept.
        # (Plain loop replaces a list comprehension used only for side effects.)
        for key in ['id', 'path', 'locked']:
            share_config.pop(key)
        call("sharing.nfs.update", nfsid, share_config)
@pytest.fixture(scope="module")
def init_nfs():
    """ Will restore to _default_ config and state at module exit """
    try:
        # Snapshot the current config and service state; also stash copies on
        # NFS_CONFIG so other helpers/tests can reference them.
        initial_config = call("nfs.config")
        NFS_CONFIG.initial_nfs_config = copy(initial_config)
        initial_service_state = call('service.query', [['service', '=', 'nfs']], {'get': True})
        NFS_CONFIG.initial_service_state = copy(initial_service_state)
        yield {"config": initial_config, "service_state": initial_service_state}
    finally:
        # Restore to -default- state (some might be redundant, but ensures clean state at exit)
        call('service.update', 'nfs', {'enable': NFS_CONFIG.default_service_state['enable']})
        state_cmd = {'RUNNING': 'start', 'STOPPED': 'stop'}
        set_nfs_service_state(state_cmd[NFS_CONFIG.default_service_state['state']])
        # Restore to -default- config
        exclude = ['servers', 'v4_krb_enabled', 'v4_owner_major', 'keytab_has_nfs_spn', 'managed_nfsd']
        default_config_payload = {k: v for k, v in NFS_CONFIG.default_config.items() if k not in exclude}
        # servers=None re-enables the dynamically managed nfsd count
        if NFS_CONFIG.default_config['managed_nfsd']:
            default_config_payload['servers'] = None
        call('nfs.update', default_config_payload)
@pytest.fixture(scope="module")
def nfs_dataset_and_share():
    """ Will delete the 'nfs' share and dataset at the module exit """
    share_opts = {
        "comment": "My Test Share",
        "security": ["SYS"]
    }
    with nfs_dataset('nfs') as ds, nfs_share(NFS_PATH, share_opts) as nfsid:
        yield {"nfsid": nfsid, "ds": ds}
@pytest.fixture(scope="class")
def start_nfs():
    """ Start NFS for the test class and yield the start result.

    The exit state is managed by init_nfs """
    try:
        # Start inside the try so a failed start still triggers the stop
        yield set_nfs_service_state('start')
    finally:
        set_nfs_service_state('stop')
# =====================================================================
# Tests
# =====================================================================
def test_config(init_nfs):
    """ Confirm the module starts from the default NFS config with the service stopped. """
    initial_config = init_nfs['config']
    initial_service_state = init_nfs['service_state']
    # We should be starting with the default config.
    # Compare key by key so a mismatch identifies the culprit.
    for key, value in NFS_CONFIG.default_config.items():
        found = initial_config.get(key)
        assert found == value, f'Expected {key}:"{value}", but found {key}:"{found}"'
    # Confirm NFS is not running
    assert initial_service_state['state'] == 'STOPPED', \
        f"Before update, expected STOPPED, but found {initial_service_state['state']}"
def test_service_enable_at_boot(init_nfs):
    """ Confirm enabling NFS at boot is reflected in the service entry. """
    assert init_nfs['service_state']['enable'] is False
    svc_id = call('service.update', 'nfs', {"enable": True})
    updated = call('service.query', [["id", "=", svc_id]])
    assert updated[0]['service'] == "nfs"
    assert updated[0]['enable'] is True
def test_dataset_permissions(nfs_dataset_and_share):
    """ Open permissions on the test dataset so NFS clients can write to it. """
    perm_payload = {
        'path': os.path.join('/mnt', nfs_dataset_and_share["ds"]),
        'mode': '777',
        'uid': 0,
        'gid': 0,
    }
    call('filesystem.setperm', perm_payload, job=True)
class TestNFSops:
"""
Test NFS operations: server running
"""
def test_state_directory(self, start_nfs):
"""
By default, the NFS state directory is at /var/lib/nfs.
To support HA systems, we moved this to the system dataset
at /var/db/system/nfs. In support of this we updated the
NFS conf file settings
"""
assert start_nfs is True
# Make sure the conf file has the expected settings
sysds_path = call('systemdataset.sysdataset_path')
assert sysds_path == '/var/db/system'
nfs_state_dir = os.path.join(sysds_path, 'nfs')
s = parse_server_config()
assert s['exportd']['state-directory-path'] == nfs_state_dir, str(s)
assert s['nfsdcld']['storagedir'] == os.path.join(nfs_state_dir, 'nfsdcld'), str(s)
assert s['nfsdcltrack']['storagedir'] == os.path.join(nfs_state_dir, 'nfsdcltrack'), str(s)
assert s['nfsdcld']['storagedir'] == os.path.join(nfs_state_dir, 'nfsdcld'), str(s)
assert s['mountd']['state-directory-path'] == nfs_state_dir, str(s)
assert s['statd']['state-directory-path'] == nfs_state_dir, str(s)
# Confirm we have the mount point in the system dataset
sysds = call('systemdataset.config')
bootds = call('systemdataset.get_system_dataset_spec', sysds['pool'], sysds['uuid'])
bootds_nfs = list([d for d in bootds if 'nfs' in d.get('name')])[0]
assert bootds_nfs['name'] == sysds['pool'] + "/.system/nfs"
# Confirm the required entries are present
required_nfs_entries = set(["nfsdcld", "nfsdcltrack", "sm", "sm.bak", "state", "v4recovery"])
current_nfs_entries = set(list(ssh(f'ls {nfs_state_dir}').splitlines()))
assert required_nfs_entries.issubset(current_nfs_entries)
# Confirm proc entry reports expected value after nfs restart
call('service.restart', 'nfs')
sleep(1)
recovery_dir = ssh('cat /proc/fs/nfsd/nfsv4recoverydir').strip()
assert recovery_dir == os.path.join(nfs_state_dir, 'v4recovery'), \
f"Expected {nfs_state_dir + '/v4recovery'} but found {recovery_dir}"
# ----------------------------------------------------------------------
# NOTE: Test fresh-install and upgrade.
# ----------------------------------------------------------------------
@pytest.mark.parametrize('vers', [3, 4])
def test_basic_nfs_ops(self, start_nfs, nfs_dataset_and_share, vers):
assert start_nfs is True
assert nfs_dataset_and_share['nfsid'] is not None
with SSH_NFS(truenas_server.ip, NFS_PATH, vers=vers, user=user,
password=password, ip=truenas_server.ip) as n:
n.create('testfile')
n.mkdir('testdir')
contents = n.ls('.')
assert 'testdir' in contents
assert 'testfile' in contents
n.unlink('testfile')
n.rmdir('testdir')
contents = n.ls('.')
assert 'testdir' not in contents
assert 'testfile' not in contents
def test_server_side_copy(self, start_nfs, nfs_dataset_and_share):
assert start_nfs is True
assert nfs_dataset_and_share['nfsid'] is not None
with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4, user=user,
password=password, ip=truenas_server.ip) as n:
n.server_side_copy('ssc1', 'ssc2')
    @pytest.mark.parametrize('nfsd,cores,expected', [
        pp(50, 1, {'nfsd': 50, 'mountd': 12, 'managed': False}, id="User set 50: expect 12 mountd"),
        pp(None, 12, {'nfsd': 12, 'mountd': 3, 'managed': True}, id="12 cores: expect 12 nfsd, 3 mountd"),
        pp(None, 4, {'nfsd': 4, 'mountd': 1, 'managed': True}, id="4 cores: expect 4 nfsd, 1 mountd"),
        pp(None, 2, {'nfsd': 2, 'mountd': 1, 'managed': True}, id="2 cores: expect 2 nfsd, 1 mountd"),
        pp(None, 1, {'nfsd': 1, 'mountd': 1, 'managed': True}, id="1 core: expect 1 nfsd, 1 mountd"),
        pp(0, 4, {'nfsd': 4, 'mountd': 1, 'managed': True}, id="User set 0: invalid"),
        pp(257, 4, {'nfsd': 4, 'mountd': 1, 'managed': True}, id="User set 257: invalid"),
        pp(None, 48, {'nfsd': 32, 'mountd': 8, 'managed': True}, id="48 cores: expect 32 nfsd (max), 8 mountd"),
        pp(-1, 48, {'nfsd': 32, 'mountd': 8, 'managed': True}, id="Reset to 'managed_nfsd'"),
    ])
    def test_service_update(self, start_nfs, nfsd, cores, expected):
        """
        This test verifies that service can be updated in general,
        and also that the 'servers' key can be altered.
        Latter goal is achieved by reading the nfs config file
        and verifying that the value here was set correctly.
        Update:
        The default setting for 'servers' is None. This specifies that we dynamically
        determine the number of nfsd to start based on the capabilities of the system.
        In this state, we choose one nfsd for each CPU core.
        The user can override the dynamic calculation by specifying a
        number greater than zero.
        The number of mountd will be 1/4 the number of nfsd.
        """
        assert start_nfs is True
        # Fake the core count so the dynamic nfsd calculation is deterministic
        with mock("system.cpu_info", return_value={"core_count": cores}):
            # Use 0 as 'null' flag
            if nfsd is None or nfsd in range(1, 257):
                # Valid input: apply it and confirm config file, processes and DB agree
                call("nfs.update", {"servers": nfsd})
                s = parse_server_config()
                assert int(s['nfsd']['threads']) == expected['nfsd'], str(s)
                assert int(s['mountd']['threads']) == expected['mountd'], str(s)
                confirm_nfsd_processes(expected['nfsd'])
                confirm_mountd_processes(expected['mountd'])
                confirm_rpc_processes()
                # In all passing cases, the 'servers' field represents the number of expected nfsd
                nfs_conf = call("nfs.config")
                assert nfs_conf['servers'] == expected['nfsd']
                assert nfs_conf['managed_nfsd'] == expected['managed']
            else:
                if nfsd == -1:
                    # We know apriori that the current state is managed_nfsd == True
                    with nfs_config():
                        # Test making change to non-'server' setting does not change managed_nfsd
                        assert call("nfs.config")['managed_nfsd'] == expected['managed']
                else:
                    # Out-of-range values (0, 257) should raise a validation error
                    with pytest.raises(ValidationErrors) as ve:
                        assert call("nfs.config")['managed_nfsd'] == expected['managed']
                        call("nfs.update", {"servers": nfsd})
                    assert ve.value.errors == [ValidationError('nfs_update.servers', 'Should be between 1 and 256', 22)]
def test_share_update(self, start_nfs, nfs_dataset_and_share):
"""
Test changing the security and enabled fields
We want nfs running to allow confirmation of changes in exportfs
"""
assert start_nfs is True
assert nfs_dataset_and_share['nfsid'] is not None
nfsid = nfs_dataset_and_share['nfsid']
with nfs_share_config(nfsid) as share_data:
assert share_data['security'] != []
nfs_share = call('sharing.nfs.update', nfsid, {"security": [], "comment": "no comment"})
# The default is 'SYS', so changing from ['SYS'] to [] does not change /etc/exports
assert nfs_share['security'] == [], f"Expected [], but found {nfs_share[0]['security']}"
assert nfs_share['comment'] == "no comment"
# Confirm changes are reflected in /etc/exports
parsed = parse_exports()
assert len(parsed) == 1, str(parsed)
export_opts = parsed[0]['opts'][0]['parameters']
assert "sec=sys" in export_opts
# Test share disable
assert share_data['enabled'] is True
nfs_share = call('sharing.nfs.update', nfsid, {"enabled": False})
assert parse_exports() == []
    @pytest.mark.parametrize(
        "networklist,ExpectedToPass,FailureMsg", [
            # IPv4
            pp(["192.168.0.0/24", "192.168.1.0/24"], True, "", id="IPv4 - non-overlap"),
            pp(["192.168.0.0/16", "192.168.1.0/24"], False, "Overlapped", id="IPv4 - overlap wide"),
            pp(["192.168.0.0/24", "192.168.0.211/32"], False, "Overlapped", id="IPv4 - overlap narrow"),
            pp(["192.168.0.0/64"], False, "does not appear to be an IPv4 or IPv6 network", id="IPv4 - invalid range"),
            pp(["bogus_network"], False, "does not appear to be an IPv4 or IPv6 network", id="IPv4 - invalid format"),
            pp(["192.168.27.211"], True, "", id="IPv4 - auto-convert to CIDR"),
            # IPv6
            pp(["2001:0db8:85a3:0000:0000:8a2e::/96", "2001:0db8:85a3:0000:0000:8a2f::/96"],
               True, "", id="IPv6 - non-overlap"),
            pp(["2001:0db8:85a3:0000:0000:8a2e::/96", "2001:0db8:85a3:0000:0000:8a2f::/88"],
               False, "Overlapped", id="IPv6 - overlap wide"),
            pp(["2001:0db8:85a3:0000:0000:8a2e::/96", "2001:0db8:85a3:0000:0000:8a2e:0370:7334/128"],
               False, "Overlapped", id="IPv6 - overlap narrow"),
            pp(["2001:0db8:85a3:0000:0000:8a2e:0370:7334/256"],
               False, "does not appear to be an IPv4 or IPv6 network", id="IPv6 - invalid range"),
            pp(["2001:0db8:85a3:0000:0000:8a2e:0370:7334"],
               True, "", id="IPv6 - auto-convert to CIDR"),
        ],
    )
    def test_share_networks(
            self, start_nfs, nfs_dataset_and_share, networklist, ExpectedToPass, FailureMsg):
        """
        Verify that adding a network generates an appropriate line in exports
        file for same path. Sample:
            "/mnt/dozer/nfs"\
                192.168.0.0/24(sec=sys,rw,subtree_check)\
                192.168.1.0/24(sec=sys,rw,subtree_check)
        """
        assert start_nfs is True
        assert nfs_dataset_and_share['nfsid'] is not None
        nfsid = nfs_dataset_and_share['nfsid']
        with nfs_share_config(nfsid):
            if ExpectedToPass:
                call('sharing.nfs.update', nfsid, {'networks': networklist})
            else:
                # Invalid input should raise; confirm the message identifies the cause
                with pytest.raises(ValidationErrors) as re:
                    call('sharing.nfs.update', nfsid, {'networks': networklist})
                assert FailureMsg in str(re.value.errors[0])
            parsed = parse_exports()
            assert len(parsed) == 1, str(parsed)
            exports_networks = [x['host'] for x in parsed[0]['opts']]
            if ExpectedToPass:
                # The input is converted to CIDR format which often will
                # look different from the input. e.g. 1.2.3.4/16 -> 1.2.0.0/16
                cidr_list = [str(ipaddress.ip_network(x, strict=False)) for x in networklist]
                # The entry should be present
                diff = set(cidr_list) ^ set(exports_networks)
                assert len(diff) == 0, f'diff: {diff}, exports: {parsed}'
            else:
                # The entry should NOT be present
                assert len(exports_networks) == 1, str(parsed)
@pytest.mark.parametrize(
"hostlist,ExpectedToPass,FailureMsg", [
pp(["192.168.0.69", "192.168.0.70", "@fakenetgroup"],
True, "", id="Valid - IPv4 address, netgroup"),
pp(["asdfnm-*", "?-asdfnm-*", "asdfnm[0-9]", "nmix?-*dev[0-9]"],
True, "", id="Valid - wildcard names,ranges"),
pp(["asdfdm-*.example.com", "?-asdfdm-*.ixsystems.com",
"asdfdm[0-9].example.com", "dmix?-*dev[0-9].ixsystems.com"],
True, "", id="Valid - wildcard domains,ranges"),
pp(["-asdffail", "*.asdffail.com", "*.*.com", "bozofail.?.*"],
False, "Unable to resolve", id="Invalid - names,domains (not resolvable)"),
pp(["bogus/name"], False, "Unable to resolve", id="Invalid - name (path)"),
pp(["192.168.1.0/24"], False, "Unable to resolve", id="Invalid - name (network format)"),
pp(["asdfdm[0-9].example.com", "-asdffail", "devteam-*.ixsystems.com", "*.asdffail.com"],
False, "Unable to resolve", id="Mix - valid and invalid names"),
pp(["192.168.1.0", "192.168.1.0"], False, "not unique", id="Invalid - duplicate address"),
pp(["ixsystems.com", "ixsystems.com"], False, "not unique", id="Invalid - duplicate address"),
pp(["ixsystems.com", "*"], True, "", id="Valid - mix name and everybody"),
pp(["*", "*.ixsystems.com"], True, "", id="Valid - mix everybody and wildcard name"),
pp(["192.168.1.o"], False, "Unable to resolve", id="Invalid - character in address"),
pp(["bad host"], False, "cannot contain spaces", id="Invalid - name with spaces"),
pp(["2001:0db8:85a3:0000:0000:8a2e:0370:7334"], True, "", id="Valid - IPv6 address")
],
)
def test_share_hosts(
self, start_nfs, nfs_dataset_and_share, hostlist, ExpectedToPass, FailureMsg):
"""
Verify that adding a network generates an appropriate line in exports
file for same path. Sample:
"/mnt/dozer/nfs"\
192.168.0.69(sec=sys,rw,subtree_check)\
192.168.0.70(sec=sys,rw,subtree_check)\
@fakenetgroup(sec=sys,rw,subtree_check)
host name handling in middleware:
If the host name contains no wildcard or special chars,
then we test it with a look up
else we apply the host name rules and skip the look up
The rules for the host field are:
- Dashes are allowed, but a level cannot start or end with a dash, '-'
- Only the left most level may contain special characters: '*','?' and '[]'
"""
assert start_nfs is True
assert nfs_dataset_and_share['nfsid'] is not None
nfsid = nfs_dataset_and_share['nfsid']
with nfs_share_config(nfsid):
if ExpectedToPass:
call('sharing.nfs.update', nfsid, {'hosts': hostlist})
else:
with pytest.raises(ValidationErrors) as re:
call('sharing.nfs.update', nfsid, {'hosts': hostlist})
assert FailureMsg in str(re.value.errors[0])
parsed = parse_exports()
assert len(parsed) == 1, str(parsed)
# Check the exports file
parsed = parse_exports()
assert len(parsed) == 1, str(parsed)
exports_hosts = [x['host'] for x in parsed[0]['opts']]
if ExpectedToPass:
# The entry should be present
diff = set(hostlist) ^ set(exports_hosts)
assert len(diff) == 0, f'diff: {diff}, exports: {parsed}'
else:
# The entry should not be present
assert len(exports_hosts) == 1, str(parsed)
    def test_share_ro(self, start_nfs, nfs_dataset_and_share):
        """
        Verify that toggling `ro` will cause appropriate change in
        exports file. We also verify with write tests on a local mount.
        """
        assert start_nfs is True
        assert nfs_dataset_and_share['nfsid'] is not None
        nfsid = nfs_dataset_and_share['nfsid']
        with nfs_share_config(nfsid) as share_data:
            # Confirm 'rw' initial state and create a file and dir
            assert share_data['ro'] is False
            parsed = parse_exports()
            assert len(parsed) == 1, str(parsed)
            assert "rw" in parsed[0]['opts'][0]['parameters'], str(parsed)
            # Mount the share locally and create a file and dir
            with SSH_NFS(truenas_server.ip, NFS_PATH,
                         user=user, password=password, ip=truenas_server.ip) as n:
                n.create("testfile_should_pass")
                n.mkdir("testdir_should_pass")
            # Change to 'ro'
            call('sharing.nfs.update', nfsid, {'ro': True})
            # Confirm 'ro' state and behavior
            parsed = parse_exports()
            assert len(parsed) == 1, str(parsed)
            assert "rw" not in parsed[0]['opts'][0]['parameters'], str(parsed)
            # Attempt create and delete: both should now fail with a client-side error
            with SSH_NFS(truenas_server.ip, NFS_PATH,
                         user=user, password=password, ip=truenas_server.ip) as n:
                with pytest.raises(RuntimeError) as re:
                    n.create("testfile_should_fail")
                    # Unreachable if create correctly raised
                    assert False, "Should not have been able to create a new file"
                assert 'cannot touch' in str(re), re
                with pytest.raises(RuntimeError) as re:
                    n.mkdir("testdir_should_fail")
                    # Unreachable if mkdir correctly raised
                    assert False, "Should not have been able to create a new directory"
                assert 'cannot create directory' in str(re), re
    def test_share_maproot(self, start_nfs, nfs_dataset_and_share):
        """
        root squash is always enabled, and so maproot accomplished through
        anonuid and anongid
        Sample:
            "/mnt/dozer/NFSV4"\
                *(sec=sys,rw,anonuid=65534,anongid=65534,subtree_check)
        """
        assert start_nfs is True
        assert nfs_dataset_and_share['nfsid'] is not None
        nfsid = nfs_dataset_and_share['nfsid']
        with nfs_share_config(nfsid) as share_data:
            # Confirm we won't compete against mapall
            assert share_data['mapall_user'] is None
            assert share_data['mapall_group'] is None
            # Map root to everybody
            call('sharing.nfs.update', nfsid, {
                'maproot_user': 'nobody',
                'maproot_group': 'nogroup'
            })
            # nobody/nogroup should surface as the 65534 anon ids in exports
            parsed = parse_exports()
            assert len(parsed) == 1, str(parsed)
            params = parsed[0]['opts'][0]['parameters']
            assert 'anonuid=65534' in params, str(parsed)
            assert 'anongid=65534' in params, str(parsed)
            # TODO: Run test as nobody, expect success
            # Setting maproot_user and maproot_group to root should
            # cause us to append "no_root_squash" to options.
            call('sharing.nfs.update', nfsid, {
                'maproot_user': 'root',
                'maproot_group': 'root'
            })
            parsed = parse_exports()
            assert len(parsed) == 1, str(parsed)
            params = parsed[0]['opts'][0]['parameters']
            assert 'no_root_squash' in params, str(parsed)
            # root mapping should drop the anon* parameters entirely
            assert not any(filter(lambda x: x.startswith('anon'), params)), str(parsed)
            # TODO: Run test as nobody, expect failure
            # Second share should have normal (no maproot) params.
            second_share = f'/mnt/{pool_name}/second_share'
            with nfs_dataset('second_share'):
                with nfs_share(second_share):
                    parsed = parse_exports()
                    assert len(parsed) == 2, str(parsed)
                    # First entry: the maproot'd share keeps no_root_squash
                    params = parsed[0]['opts'][0]['parameters']
                    assert 'no_root_squash' in params, str(parsed)
                    # Second entry: the plain share must not inherit it
                    params = parsed[1]['opts'][0]['parameters']
                    assert 'no_root_squash' not in params, str(parsed)
                    assert not any(filter(lambda x: x.startswith('anon'), params)), str(parsed)
        # After share config restore, confirm expected settings
        parsed = parse_exports()
        assert len(parsed) == 1, str(parsed)
        params = parsed[0]['opts'][0]['parameters']
        assert not any(filter(lambda x: x.startswith('anon'), params)), str(parsed)
    def test_share_mapall(self, start_nfs, nfs_dataset_and_share):
        """
        mapall is accomplished through anonuid and anongid and
        setting 'all_squash'.
        Sample:
            "/mnt/dozer/NFSV4"\
                *(sec=sys,rw,all_squash,anonuid=65534,anongid=65534,subtree_check)
        """
        assert start_nfs is True
        assert nfs_dataset_and_share['nfsid'] is not None
        nfsid = nfs_dataset_and_share['nfsid']
        with nfs_share_config(nfsid) as share_data:
            # Confirm we won't compete against maproot
            assert share_data['maproot_user'] is None
            assert share_data['maproot_group'] is None
            call('sharing.nfs.update', nfsid, {
                'mapall_user': 'nobody',
                'mapall_group': 'nogroup'
            })
            # nobody/nogroup surface as the 65534 anon ids plus all_squash
            parsed = parse_exports()
            assert len(parsed) == 1, str(parsed)
            params = parsed[0]['opts'][0]['parameters']
            assert 'anonuid=65534' in params, str(parsed)
            assert 'anongid=65534' in params, str(parsed)
            assert 'all_squash' in params, str(parsed)
        # After share config restore, confirm settings
        parsed = parse_exports()
        assert len(parsed) == 1, str(parsed)
        params = parsed[0]['opts'][0]['parameters']
        assert not any(filter(lambda x: x.startswith('anon'), params)), str(parsed)
        assert 'all_squash' not in params, str(parsed)
def test_subtree_behavior(self, start_nfs, nfs_dataset_and_share):
"""
If dataset mountpoint is exported rather than simple dir,
we disable subtree checking as an optimization. This check
makes sure we're doing this as expected:
Sample:
"/mnt/dozer/NFSV4"\
*(sec=sys,rw,no_subtree_check)
"/mnt/dozer/NFSV4/foobar"\
*(sec=sys,rw,subtree_check)
"""
assert start_nfs is True
assert nfs_dataset_and_share['nfsid'] is not None
with directory(f'{NFS_PATH}/sub1') as tmp_path:
with nfs_share(tmp_path, {'hosts': ['127.0.0.1']}):
parsed = parse_exports()
assert len(parsed) == 2, str(parsed)
assert parsed[0]['path'] == NFS_PATH, str(parsed)
assert 'no_subtree_check' in parsed[0]['opts'][0]['parameters'], str(parsed)
assert parsed[1]['path'] == tmp_path, str(parsed)
assert 'subtree_check' in parsed[1]['opts'][0]['parameters'], str(parsed)
    def test_nonroot_behavior(self, start_nfs, nfs_dataset_and_share):
        """
        If global configuration option "allow_nonroot" is set, then
        we append "insecure" to each exports line.
        Since this is a global option, it triggers an nfsd restart
        even though it's not technically required.
        Linux will, by default, mount using a priviledged port (1..1023)
        MacOS NFS mounts do not follow this 'standard' behavior.
        Four conditions to test:
            server: secure (e.g. allow_nonroot is False)
                client: resvport -> expect to pass.
                client: noresvport -> expect to fail.
            server: insecure (e.g. allow_nonroot is True)
                client: resvport -> expect to pass.
                client: noresvport -> expect to pass
        Sample:
            "/mnt/dozer/NFSV4"\
                *(sec=sys,rw,insecure,no_subtree_check)
        """
        assert start_nfs is True
        assert nfs_dataset_and_share['nfsid'] is not None
        # Verify that NFS server configuration is as expected
        with nfs_config() as nfs_conf_orig:
            # --- Test: allow_nonroot is False ---
            assert nfs_conf_orig['allow_nonroot'] is False, nfs_conf_orig
            # Confirm setting in /etc/exports
            parsed = parse_exports()
            assert len(parsed) == 1, str(parsed)
            assert 'insecure' not in parsed[0]['opts'][0]['parameters'], str(parsed)
            # Confirm we allow mounts from 'root' ports
            with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4,
                         user=user, password=password, ip=truenas_server.ip):
                client_port = get_client_nfs_port()
                assert client_port[1] is not None, f"Failed to get client port: f{client_port[0]}"
                assert int(client_port[1]) < 1024, \
                    f"client_port is not in 'root' range: {client_port[1]}\n{client_port[0]}"
            # Confirm we block mounts from 'non-root' ports
            with pytest.raises(RuntimeError) as re:
                with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4, options=['noresvport'],
                             user=user, password=password, ip=truenas_server.ip):
                    pass
                # We should not get to this assert
                assert False, "Unexpected success with mount"
            assert 'Operation not permitted' in str(re), re
            # --- Test: allow_nonroot is True ---
            new_nfs_conf = call('nfs.update', {"allow_nonroot": True})
            assert new_nfs_conf['allow_nonroot'] is True, new_nfs_conf
            # 'insecure' should now appear on the export line
            parsed = parse_exports()
            assert len(parsed) == 1, str(parsed)
            assert 'insecure' in parsed[0]['opts'][0]['parameters'], str(parsed)
            # Confirm we allow mounts from 'root' ports
            with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4,
                         user=user, password=password, ip=truenas_server.ip):
                client_port = get_client_nfs_port()
                assert client_port[1] is not None, "Failed to get client port"
                assert int(client_port[1]) < 1024, \
                    f"client_port is not in 'root' range: {client_port[1]}\n{client_port[0]}"
            # Confirm we allow mounts from 'non-root' ports
            with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4, options=['noresvport'],
                         user=user, password=password, ip=truenas_server.ip):
                client_port = get_client_nfs_port()
                assert client_port[1] is not None, "Failed to get client port"
                assert int(client_port[1]) >= 1024, \
                    f"client_port is not in 'non-root' range: {client_port[1]}\n{client_port[0]}"
        # Confirm setting was returned to original state
        parsed = parse_exports()
        assert len(parsed) == 1, str(parsed)
        assert 'insecure' not in parsed[0]['opts'][0]['parameters'], str(parsed)
def test_syslog_filters(self, start_nfs, nfs_dataset_and_share):
"""
This test checks the function of the mountd_log setting to filter
rpc.mountd messages that have priority DEBUG to NOTICE.
We performing loopback mounts on the remote TrueNAS server and
then check the syslog for rpc.mountd messages. Outside of SSH_NFS
we test the umount case.
"""
assert start_nfs is True
assert nfs_dataset_and_share['nfsid'] is not None
test_marker = random_string()
with nfs_config():
# The effect is much more clear if there are many mountd.
# We can force this by configuring many nfsd
call("nfs.update", {"servers": 24})
# Confirm default setting: mountd logging enabled
call("nfs.update", {"mountd_log": True})
# Add a marker to indicate the expectation of messages
ssh(f'logger "====== {test_marker} START_NFS_SYSLOG_FILTER_TEST ======"')
# Mount twice to generate plenty messages
ssh('logger "mount once"')
with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4,
user=user, password=password, ip=truenas_server.ip) as n:
n.ls('/')
ssh('logger "mount twice"')
with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4,
user=user, password=password, ip=truenas_server.ip) as n:
n.ls('/')
# Disable mountd logging
call("nfs.update", {"mountd_log": False})
# Add a marker to indicate the expectation of no messages
ssh(f'logger "====== {test_marker} END_NFS_SYSLOG_FILTER_TEST ======"')
# Mount twice to generate plenty of opportunity for messages
ssh('logger "mount once"')
with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4,
user=user, password=password, ip=truenas_server.ip) as n:
n.ls('/')
ssh('logger "mount twice"')
with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4,
user=user, password=password, ip=truenas_server.ip) as n:
n.ls('/')
# Add a marker to indicate the end of
ssh(f'logger "====== {test_marker} STOP_NFS_SYSLOG_FILTER_TEST ======"')
# Wait a few seconds for messages to flush
sleep(5)
# Process syslog
log_data = ssh("tail -200 /var/log/syslog").replace('\n', '')
data_with_msg = re.findall(f"{test_marker} START.*{test_marker} END", log_data)[0]
assert 'rpc.mountd in data_with_msg', data_with_msg
data_without_msg = re.findall(f"{test_marker} END.*{test_marker} STOP", log_data)[0]
assert 'rpc.mountd' not in data_without_msg
    def test_client_status(self, start_nfs, nfs_dataset_and_share):
        """
        This test checks the function of API endpoints to list NFSv3 and
        NFSv4 clients by performing loopback mounts on the remote TrueNAS
        server and then checking client counts. Due to inherent imprecision
        of counts over NFSv3 protocol (specifically with regard to decrementing
        sessions) we only verify that count is non-zero for NFSv3.
        """
        assert start_nfs is True
        assert nfs_dataset_and_share['nfsid'] is not None

        # NFSv3 client counting is imprecise: only require a non-zero count
        with SSH_NFS(truenas_server.ip, NFS_PATH, vers=3,
                     user=user, password=password, ip=truenas_server.ip):
            res = call('nfs.get_nfs3_clients', [], {'count': True})
            assert int(res) != 0

        # NFSv4 sessions are tracked precisely: expect exactly one client
        with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4,
                     user=user, password=password, ip=truenas_server.ip):
            res = call('nfs.get_nfs4_clients', [], {'count': True})
            assert int(res) == 1, f"Expected to find 1, but found {int(res)}"

        # # Enable this when CI environment supports IPv6
        # # NAS-130437: Confirm IPv6 support
        # try:
        #     # Get the IPv6 equivalent of truenas_server.ip
        #     ip_info = call(
        #         'interface.query',
        #         [["aliases.*.address", "=", truenas_server.ip]], {"get": True}
        #     )
        #     devname = ip_info['name']
        #     aliases = ip_info['state']['aliases']
        #     ipv6_addr = list(filter(lambda x: x['type'] == 'INET6', aliases))[0]['address']
        #     ipv6_mp = '/mnt/nfs_ipv6'
        #     ssh(f"mkdir -p {ipv6_mp}")
        #     # zsh requires the 'server' part to be encapsulated in quotes due to square brackets
        #     ssh(f'mount "[{ipv6_addr}%{devname}]":{NFS_PATH} {ipv6_mp}')
        #     # Confirm we can process get_nfs4_clients that use IPv6 addresses
        #     nfs4_client_list = call("nfs.get_nfs4_clients")
        #     assert len(nfs4_client_list) == 1
        #     assert ipv6_addr in nfs4_client_list[0]['info']['callback address']
        # finally:
        #     ssh(f"umount -f {ipv6_mp}")
        #     ssh(f"rmdir {ipv6_mp}")
    @pytest.mark.parametrize('type,data', [
        pp('InvalidAssignment', [
            {'maproot_user': 'baduser'}, 'maproot_user', 'User not found: baduser'
        ], id="invalid maproot user"),
        pp('InvalidAssignment', [
            {'maproot_group': 'badgroup'}, 'maproot_user', 'This field is required when map group is specified'
        ], id="invalid maproot group"),
        pp('InvalidAssignment', [
            {'mapall_user': 'baduser'}, 'mapall_user', 'User not found: baduser'
        ], id="invalid mapall user"),
        pp('InvalidAssignment', [
            {'mapall_group': 'badgroup'}, 'mapall_user', 'This field is required when map group is specified'
        ], id="invalid mapall group"),
        pp('MissingUser', ['maproot_user', 'missinguser'], id="missing maproot user"),
        pp('MissingUser', ['mapall_user', 'missinguser'], id="missing mapall user"),
        pp('MissingGroup', ['maproot_group', 'missingroup'], id="missing maproot group"),
        pp('MissingGroup', ['mapall_group', 'missingroup'], id="missing mapall group"),
    ])
    def test_invalid_user_group_mapping(self, start_nfs, nfs_dataset_and_share, type, data):
        '''
        Verify we properly trap and handle invalid user and group mapping
        Two conditions:
            1) Catch invalid assignments
            2) Catch invalid settings at NFS start

        For 'InvalidAssignment' cases, data is [payload-fragment, error-attribute, error-message].
        For 'Missing*' cases, data is [mapping-key, name-to-create-then-remove].
        '''
        assert start_nfs is True
        assert nfs_dataset_and_share['nfsid'] is not None

        ''' Test Processing '''
        with directory(f'{NFS_PATH}/sub1') as tmp_path:

            if type == 'InvalidAssignment':
                # Share creation should be rejected outright with a validation error
                payload = {'path': tmp_path} | data[0]
                with pytest.raises(ValidationErrors) as ve:
                    call("sharing.nfs.create", payload)
                assert ve.value.errors == [ValidationError('sharingnfs_create.' + f'{data[1]}', data[2], 22)]

            elif type == 'MissingUser':
                # Create a user, map it in a share, then delegate the
                # missing-user checks to run_missing_usrgrp_mapping_test
                usrname = data[1]
                testkey, testval = data[0].split('_')

                usr_payload = {'username': usrname, 'full_name': usrname,
                               'group_create': True, 'password': 'abadpassword'}
                mapping = {data[0]: usrname}
                with create_user(usr_payload) as usrInst:
                    with nfs_share(tmp_path, mapping) as share:
                        run_missing_usrgrp_mapping_test(data, testval, tmp_path, share, usrInst)

            elif type == 'MissingGroup':
                # Use a built-in user for the group test
                grpname = data[1]
                testkey, testval = data[0].split('_')

                mapping = {f"{testkey}_user": 'ftp', data[0]: grpname}
                with create_group({'name': grpname}) as grpInst:
                    with nfs_share(tmp_path, mapping) as share:
                        run_missing_usrgrp_mapping_test(data, testval, tmp_path, share, grpInst)
    def test_service_protocols(self, start_nfs):
        """
        This test verifies that changing the `protocols` option generates expected
        changes in nfs kernel server config. In most cases we will also confirm
        the settings have taken effect.

        For the time being this test will also exercise the deprecated `v4` option
        to the same effect, but this will later be removed.

        NFS must be enabled for this test to succeed as while the config (i.e.
        database) will be updated regardless, the server config file will not
        be updated.
        TODO: Add client side tests
        """
        assert start_nfs is True

        # Multiple restarts cause systemd failures. Reset the systemd counters.
        reset_svcs("nfs-idmapd nfs-mountd nfs-server rpcbind rpc-statd")

        with nfs_config() as nfs_conf_orig:
            # Check existing config (both NFSv3 & NFSv4 configured)
            assert "NFSV3" in nfs_conf_orig['protocols'], nfs_conf_orig
            assert "NFSV4" in nfs_conf_orig['protocols'], nfs_conf_orig
            s = parse_server_config()
            assert s['nfsd']["vers3"] == 'y', str(s)
            assert s['nfsd']["vers4"] == 'y', str(s)
            confirm_nfs_version(['3', '4'])

            # Turn off NFSv4 (v3 on)
            new_config = call('nfs.update', {"protocols": ["NFSV3"]})
            assert "NFSV3" in new_config['protocols'], new_config
            assert "NFSV4" not in new_config['protocols'], new_config
            s = parse_server_config()
            assert s['nfsd']["vers3"] == 'y', str(s)
            assert s['nfsd']["vers4"] == 'n', str(s)

            # Confirm setting has taken effect: v4->off, v3->on
            confirm_nfs_version(['3'])

            # Confirm we trap invalid setting: at least one protocol is required
            with pytest.raises(ValidationError) as ve:
                call("nfs.update", {"protocols": []})
            assert "nfs_update.protocols" == ve.value.attribute
            assert "at least one" in str(ve.value)

            # Turn off NFSv3 (v4 on)
            new_config = call('nfs.update', {"protocols": ["NFSV4"]})
            assert "NFSV3" not in new_config['protocols'], new_config
            assert "NFSV4" in new_config['protocols'], new_config
            s = parse_server_config()
            assert s['nfsd']["vers3"] == 'n', str(s)
            assert s['nfsd']["vers4"] == 'y', str(s)

            # Confirm setting has taken effect: v4->on, v3->off
            confirm_nfs_version(['4'])

        # Finally, confirm both are re-enabled
        # NOTE(review): presumably nfs_config() restores the original protocols
        # on context exit — verify against the nfs_config helper.
        nfs_conf = call('nfs.config')
        assert "NFSV3" in nfs_conf['protocols'], nfs_conf
        assert "NFSV4" in nfs_conf['protocols'], nfs_conf
        s = parse_server_config()
        assert s['nfsd']["vers3"] == 'y', str(s)
        assert s['nfsd']["vers4"] == 'y', str(s)

        # Confirm setting has taken effect: v4->on, v3->on
        confirm_nfs_version(['3', '4'])
def test_service_udp(self, start_nfs):
"""
This test verifies the udp config is NOT in the DB and
that it is NOT in the etc file.
"""
assert start_nfs is True
# The 'udp' setting should have been removed
nfs_conf = call('nfs.config')
assert nfs_conf.get('udp') is None, nfs_conf
s = parse_server_config()
assert s.get('nfsd', {}).get('udp') is None, s
    @pytest.mark.parametrize('test_port', [
        pp([["mountd", 618, None], ["rpcstatd", 871, None], ["rpclockd", 32803, None]], id="valid ports"),
        pp([["rpcstatd", -21, 0], ["rpclockd", 328031, 0]], id="invalid ports"),
        pp([["mountd", 20049, 1]], id="excluded ports"),
    ])
    def test_service_ports(self, start_nfs, test_port):
        """
        This test verifies that we can set custom port and the
        settings are reflected in the relevant files and are active.
        This also tests the port range and exclude.

        Each parametrized entry is a list of [service, port, error_index]
        triples; error_index of None means the update should succeed,
        otherwise it indexes into the errmsg snippet table below.
        """
        assert start_nfs is True

        # Multiple restarts cause systemd failures. Reset the systemd counters.
        reset_svcs("nfs-idmapd nfs-mountd nfs-server rpcbind rpc-statd")

        # Friendly index names
        name = 0
        value = 1
        err = 2

        # Error message snippets
        errmsg = ["Should be between", "reserved for internal use"]

        # Test ports
        for port in test_port:
            port_name = port[name] + "_port"
            if port[err] is None:
                # Valid port: the update succeeds and is reflected in the returned config
                nfs_conf = call("nfs.update", {port_name: port[value]})
                assert nfs_conf[port_name] == port[value]
            else:
                # Out-of-range or excluded port: expect a validation error
                # carrying the matching snippet
                with pytest.raises(ValidationErrors) as ve:
                    nfs_conf = call("nfs.update", {port_name: port[value]})
                errStr = str(ve.value.errors[0])
                assert errmsg[port[err]] in errStr

        # Compare DB with setting in /etc/nfs.conf.d/local.conf
        with nfs_config() as config_db:
            s = parse_server_config()
            assert int(s['mountd']['port']) == config_db["mountd_port"], str(s)
            assert int(s['statd']['port']) == config_db["rpcstatd_port"], str(s)
            assert int(s['lockd']['port']) == config_db["rpclockd_port"], str(s)

            # Confirm port settings are active
            confirm_rpc_port('mountd', config_db["mountd_port"])
            confirm_rpc_port('status', config_db["rpcstatd_port"])
            confirm_rpc_port('nlockmgr', config_db["rpclockd_port"])
    def test_runtime_debug(self, start_nfs):
        """
        This validates that the private NFS debugging API works correctly.
        """
        assert start_nfs is True
        disabled = {"NFS": ["NONE"], "NFSD": ["NONE"], "NLM": ["NONE"], "RPC": ["NONE"]}
        enabled = {"NFS": ["PROC", "XDR", "CLIENT", "MOUNT", "XATTR_CACHE"],
                   "NFSD": ["ALL"],
                   "NLM": ["CLIENT", "CLNTLOCK", "SVC"],
                   "RPC": ["CALL", "NFS", "TRANS"]}
        # 'NONE' mixed with other values is contradictory and must be rejected
        failure = {"RPC": ["CALL", "NFS", "TRANS", "NONE"]}
        try:
            res = call('nfs.get_debug')
            assert res == disabled

            call('nfs.set_debug', enabled)
            res = call('nfs.get_debug')
            # Compare as sets: ordering of the returned flags is not significant
            assert set(res['NFS']) == set(enabled['NFS']), f"Mismatch on NFS: {res}"
            assert set(res['NFSD']) == set(enabled['NFSD']), f"Mismatch on NFSD: {res}"
            assert set(res['NLM']) == set(enabled['NLM']), f"Mismatch on NLM: {res}"
            assert set(res['RPC']) == set(enabled['RPC']), f"Mismatch on RPC: {res}"

            # Test failure case. This should generate an ValueError exception on the system
            with pytest.raises(ValueError) as ve:
                call('nfs.set_debug', failure)
            assert 'Cannot specify another value' in str(ve), ve
        finally:
            # Always restore the default (disabled) debug state
            call('nfs.set_debug', disabled)
            res = call('nfs.get_debug')
            assert res == disabled
    def test_bind_ip(self, start_nfs):
        '''
        This test requires a static IP address
        * Test the private nfs.bindip call
        * Test the actual bindip config setting
          - Confirm setting in conf files
          - Confirm service on IP address
        '''
        assert start_nfs is True

        # Multiple restarts cause systemd failures. Reset the systemd counters.
        reset_svcs("nfs-idmapd nfs-mountd nfs-server rpcbind rpc-statd")

        choices = call("nfs.bindip_choices")
        assert truenas_server.ip in choices

        # Exercise the private bindip call directly: set, then clear
        call("nfs.bindip", {"bindip": [truenas_server.ip]})
        call("nfs.bindip", {"bindip": []})

        # Test config with bindip. Use choices from above
        # TODO: check with 'nmap -sT <IP>' from the runner.
        with nfs_config() as db_conf:
            # Should have no bindip setting
            nfs_conf = parse_server_config()
            rpc_conf = parse_rpcbind_config()
            assert db_conf['bindip'] == []
            assert nfs_conf['nfsd'].get('host') is None
            assert rpc_conf.get('-h') is None

            # Set bindip
            call("nfs.update", {"bindip": [truenas_server.ip]})

            # Confirm we see it in the nfs and rpc conf files
            nfs_conf = parse_server_config()
            rpc_conf = parse_rpcbind_config()
            assert truenas_server.ip in nfs_conf['nfsd'].get('host'), f"nfs_conf = {nfs_conf}"
            assert truenas_server.ip in rpc_conf.get('-h'), f"rpc_conf = {rpc_conf}"
    def test_v4_domain(self, start_nfs):
        '''
        The v4_domain configuration item maps to the 'Domain' setting in
        the [General] section of /etc/idmapd.conf.
        It is described as:
            The local NFSv4 domain name. An NFSv4 domain is a namespace
            with a unique username<->UID and groupname<->GID mapping.
            (Default: Host's fully-qualified DNS domain name)
        '''
        assert start_nfs is True

        # NOTE(review): relies on nfs_config() restoring the original
        # v4_domain on context exit — confirm against the helper.
        with nfs_config() as nfs_db:
            # By default, v4_domain is not set
            assert nfs_db['v4_domain'] == "", f"Expected zero-len string, but found {nfs_db['v4_domain']}"
            s = parse_server_config("idmapd")
            assert s['General'].get('Domain') is None, f"'Domain' was not expected to be set: {s}"

            # Make a setting change and confirm
            db = call('nfs.update', {"v4_domain": "ixsystems.com"})
            assert db['v4_domain'] == 'ixsystems.com', f"v4_domain failed to be updated in nfs DB: {db}"
            s = parse_server_config("idmapd")
            assert s['General'].get('Domain') == 'ixsystems.com', f"'Domain' failed to be updated in idmapd.conf: {s}"
def test_xattr_support(self, start_nfs):
"""
Perform basic validation of NFSv4.2 xattr support.
Mount path via NFS 4.2, create a file and dir,
and write + read xattr on each.
"""
assert start_nfs is True
xattr_nfs_path = f'/mnt/{pool_name}/test_nfs4_xattr'
with nfs_dataset("test_nfs4_xattr"):
with nfs_share(xattr_nfs_path):
with SSH_NFS(truenas_server.ip, xattr_nfs_path, vers=4.2,
user=user, password=password, ip=truenas_server.ip) as n:
n.create("testfile")
n.setxattr("testfile", "user.testxattr", "the_contents")
xattr_val = n.getxattr("testfile", "user.testxattr")
assert xattr_val == "the_contents"
n.create("testdir", True)
n.setxattr("testdir", "user.testxattr2", "the_contents2")
xattr_val = n.getxattr("testdir", "user.testxattr2")
assert xattr_val == "the_contents2"
class TestSubtreeShares:
    """
    Wrap a class around test_37 to allow calling the fixture only once
    in the parametrized test
    """

    # TODO: Work up a valid IPv6 test (when CI environment supports it)
    # res = SSH_TEST(f"ip address show {interface} | grep inet6", user, password, ip)
    # ipv6_network = str(res['output'].split()[1])
    # ipv6_host = ipv6_network.split('/')[0]

    @pytest.fixture(scope='class')
    def dataset_and_dirs(self):
        """
        Create a dataset and an NFS share for it for host 127.0.0.1 only
        In the dataset, create directories: dir1, dir2, dir3
        In each directory, create subdirs: subdir1, subdir2, subdir3

        Yields (dataset mountpoint, expected-error-snippet table).
        """
        # Characteristics of expected error messages
        # (indexed by the ErrFormat parameter of test_subtree_share)
        err_strs = [
            ["Another share", "same path"],
            ["This or another", "overlaps"],
            ["Another NFS share already exports"],
            ["Symbolic links"]
        ]
        vol0 = f'/mnt/{pool_name}/VOL0'
        with nfs_dataset('VOL0'):
            # Top level shared to narrow host
            with nfs_share(vol0, {'hosts': ['127.0.0.1']}):
                # Get the initial list of entries for the cleanup test
                contents = call('sharing.nfs.query')
                startIdList = [item.get('id') for item in contents]

                # Create the dirs
                dirs = ["everybody_1", "everybody_2", "limited_1", "dir_1", "dir_2"]
                subdirs = ["subdir1", "subdir2", "subdir3"]
                try:
                    for dir in dirs:
                        ssh(f"mkdir -p {vol0}/{dir}")
                        for subdir in subdirs:
                            ssh(f"mkdir -p {vol0}/{dir}/{subdir}")
                            # And symlinks
                            ssh(f"ln -sf {vol0}/{dir}/{subdir} {vol0}/{dir}/symlink2{subdir}")

                    yield vol0, err_strs
                finally:
                    # Remove the created dirs
                    for dir in dirs:
                        ssh(f"rm -rf {vol0}/{dir}")

                    # Remove any shares created during the tests
                    # (anything whose id was not present at fixture entry)
                    contents = call('sharing.nfs.query')
                    endIdList = [item.get('id') for item in contents]
                    [call('sharing.nfs.delete', id) for id in endIdList if id not in startIdList]

    @pytest.mark.parametrize(
        "dirname,isHost,HostOrNet,ExpectedToPass, ErrFormat", [
            pp("everybody_1", True, ["*"], True, None, id="NAS-120957: host - everybody"),
            pp("everybody_2", True, ["*"], True, None, id="NAS-120957: host - non-related paths"),
            pp("everybody_2", False, ["192.168.1.0/22"], True, None, id="NAS-129577: network, everybody, same path"),
            pp("limited_1", True, ["127.0.0.1"], True, None, id="NAS-123042: host - export subdirs"),
            pp("limited_1", False, ["192.168.1.0/22"], True, None, id="NAS-123042: network - export subdirs"),
            pp("limited_1", True, ["127.0.0.1"], False, 0, id="NAS-127220: host - already exported"),
            pp("limited_1", False, ["192.168.1.0/22"], False, 2, id="NAS-127220: network - already exported"),
            pp("dir_1", True, ["*.example.com"], True, None, id="NAS-120616: host - wildcards"),
            pp("dir_1", True, ["*.example.com"], False, 0, id="NAS-127220: host - wildcard already exported"),
            pp("dir_1/subdir2", False, ["2001:0db8:85a3:0000:0000:8a2e::/96"],
               True, None, id="NAS-123042: network - IPv6 network range"),
            pp("dir_1/subdir2", True, ["2001:0db8:85a3:0000:0000:8a2e:0370:7334"],
               True, None, id="NAS-129577: host - IPv6 allow host overlap with network"),
            pp("dir_1/subdir2", False, ["2001:0db8:85a3:0000:0000:8a2e:0370:7334/112"],
               False, 1, id="NAS-123042: network - IPv6 overlap with network"),
            pp("dir_1/subdir3", True, ["192.168.27.211"], True, None, id="NAS-123042: host - export sub-subdirs"),
            pp("dir_1/subdir3", False, ["192.168.24.0/22"],
               True, None, id="NAS-129522: network - allow overlap with host"),
            pp("limited_1/subdir2", True, ["*"], True, None, id="NAS-123042: host - setup everybody on sub-subdir"),
            pp("limited_1/subdir2", True, ["*"], False, 2, id="NAS-127220: host - already exported sub-subdir"),
            pp("dir_2/subdir2", False, ["192.168.1.0/24"],
               True, None, id="NAS-123042: network - export sub-subdirs"),
            pp("dir_2/subdir2", False, ["192.168.1.0/32"], False, 1, id="NAS-123042: network - overlap sub-subdir"),
            pp("limited_1/subdir3", True, ["192.168.1.0", "*.ixsystems.com"],
               True, None, id="NAS-123042: host - two hosts, same sub-subdir"),
            pp("dir_1/symlink2subdir3", True, ["192.168.0.0"], False, 3, id="Block exporting symlinks"),
        ],
    )
    def test_subtree_share(self, start_nfs, dataset_and_dirs, dirname, isHost, HostOrNet, ExpectedToPass, ErrFormat):
        """
        Sharing subtrees to the same host can cause problems for
        NFSv3.  This check makes sure a share creation follows
        the rules.
            * First match is applied
            * A new path that is _the same_ as existing path cannot be shared to same 'host'

        For example, the following is not allowed:
        "/mnt/dozer/NFS"\
            fred(rw)
        "/mnt/dozer/NFS"\
            fred(ro)

        Also not allowed are collisions that may result in unexpected share permissions.
        For example, the following is not allowed:
        "/mnt/dozer/NFS"\
            *(rw)
        "/mnt/dozer/NFS"\
            marketing(ro)
        """
        assert start_nfs is True
        vol, err_strs = dataset_and_dirs
        dirpath = f'{vol}/{dirname}'
        # 'hosts' and 'networks' are distinct share fields; the parametrize
        # isHost flag selects which one receives HostOrNet
        if isHost:
            payload = {"path": dirpath, "hosts": HostOrNet}
        else:
            payload = {"path": dirpath, "networks": HostOrNet}

        if ExpectedToPass:
            call("sharing.nfs.create", payload)
        else:
            with pytest.raises(ValidationErrors) as ve:
                call("sharing.nfs.create", payload)
            errStr = str(ve.value.errors[0])
            # Confirm we have the expected error message format
            for this_substr in err_strs[ErrFormat]:
                assert this_substr in errStr

    @pytest.mark.timeout(600)
    def test_nfsv4_acl_support(self, start_nfs):
        """
        This test validates reading and setting NFSv4 ACLs through an NFSv4
        mount in the following manner for NFSv4.2, NFSv4.1 & NFSv4.0:
        1) Create and locally mount an NFSv4 share on the TrueNAS server
        2) Iterate through all possible permissions options and set them
           via an NFS client, read back through NFS client, and read resulting
           ACL through the filesystem API.
        3) Repeat same process for each of the supported ACE flags.
        4) For NFSv4.1 or NFSv4.2, repeat same process for each of the
           supported acl_flags.
        """
        assert start_nfs is True
        acl_nfs_path = f'/mnt/{pool_name}/test_nfs4_acl'
        test_perms = {
            "READ_DATA": True,
            "WRITE_DATA": True,
            "EXECUTE": True,
            "APPEND_DATA": True,
            "DELETE_CHILD": True,
            "DELETE": True,
            "READ_ATTRIBUTES": True,
            "WRITE_ATTRIBUTES": True,
            "READ_NAMED_ATTRS": True,
            "WRITE_NAMED_ATTRS": True,
            "READ_ACL": True,
            "WRITE_ACL": True,
            "WRITE_OWNER": True,
            "SYNCHRONIZE": True
        }
        test_flags = {
            "FILE_INHERIT": True,
            "DIRECTORY_INHERIT": True,
            "INHERIT_ONLY": False,
            "NO_PROPAGATE_INHERIT": False,
            "INHERITED": False
        }
        # getacl setting
        simplified = True
        for (version, test_acl_flag) in [(4, True), (4.1, True), (4.0, False)]:
            # Only the GROUP entry (theacl[4]) gets .copy()'d dicts because it
            # is the entry mutated by the perm/flag loops below
            theacl = [
                {"tag": "owner@", "id": -1, "perms": test_perms, "flags": test_flags, "type": "ALLOW"},
                {"tag": "group@", "id": -1, "perms": test_perms, "flags": test_flags, "type": "ALLOW"},
                {"tag": "everyone@", "id": -1, "perms": test_perms, "flags": test_flags, "type": "ALLOW"},
                {"tag": "USER", "id": 65534, "perms": test_perms, "flags": test_flags, "type": "ALLOW"},
                {"tag": "GROUP", "id": 666, "perms": test_perms.copy(), "flags": test_flags.copy(), "type": "ALLOW"},
            ]
            with nfs_dataset("test_nfs4_acl", {"acltype": "NFSV4", "aclmode": "PASSTHROUGH"}, theacl):
                with nfs_share(acl_nfs_path):
                    with SSH_NFS(truenas_server.ip, acl_nfs_path, vers=version, user=user, password=password, ip=truenas_server.ip) as n:
                        # The ACL read over NFS must match what the dataset was created with
                        nfsacl = n.getacl(".")
                        for idx, ace in enumerate(nfsacl):
                            assert ace == theacl[idx], str(ace)

                        # Toggle each permission off on the GROUP entry and verify
                        # via the NFS client and the filesystem API
                        for perm in test_perms.keys():
                            if perm == 'SYNCHRONIZE':
                                # break in SYNCHRONIZE because Linux tool limitation
                                break
                            theacl[4]['perms'][perm] = False
                            n.setacl(".", theacl)
                            nfsacl = n.getacl(".")
                            for idx, ace in enumerate(nfsacl):
                                assert ace == theacl[idx], str(ace)
                            result = call('filesystem.getacl', acl_nfs_path, not simplified)
                            for idx, ace in enumerate(result['acl']):
                                assert ace == nfsacl[idx], str(ace)

                        # Same process for the inheritance-related ACE flags
                        for flag in ("INHERIT_ONLY", "NO_PROPAGATE_INHERIT"):
                            theacl[4]['flags'][flag] = True
                            n.setacl(".", theacl)
                            nfsacl = n.getacl(".")
                            for idx, ace in enumerate(nfsacl):
                                assert ace == theacl[idx], str(ace)
                            result = call('filesystem.getacl', acl_nfs_path, not simplified)
                            for idx, ace in enumerate(result['acl']):
                                assert ace == nfsacl[idx], str(ace)

                        # acl_flags are only supported on NFSv4.1 and later
                        if test_acl_flag:
                            assert 'none' == n.getaclflag(".")
                            for acl_flag in ['auto-inherit', 'protected', 'defaulted']:
                                n.setaclflag(".", acl_flag)
                                assert acl_flag == n.getaclflag(".")
                                result = call('filesystem.getacl', acl_nfs_path, not simplified)
                                # Normalize the flag_is_set name for comparision to plugin equivalent
                                # (just remove the '-' from auto-inherit)
                                if acl_flag == 'auto-inherit':
                                    flag_is_set = 'autoinherit'
                                else:
                                    flag_is_set = acl_flag
                                # Now ensure that only the expected flag is set
                                nfs41_flags = result['nfs41_flags']
                                for flag in ['autoinherit', 'protected', 'defaulted']:
                                    if flag == flag_is_set:
                                        assert nfs41_flags[flag], nfs41_flags
                                    else:
                                        assert not nfs41_flags[flag], nfs41_flags

    @pytest.mark.parametrize('state,expected', [
        pp(None, 'n', id="default state"),
        pp(True, 'y', id="enable"),
        pp(False, 'n', id="disable")
    ])
    def test_manage_gids(self, start_nfs, state, expected):
        '''
        The nfsd_manage_gids setting is called "Support > 16 groups" in the webui.
        It is that and, to a greater extent, defines the GIDs that are used for permissions.

        If NOT enabled, then the expectation is that the groups to which the user belongs
        are defined on the _client_ and NOT the server. It also means groups to which the user
        belongs are passed in on the NFS commands from the client. The file object GID is
        checked against the passed in list of GIDs. This is also where the 16 group
        limitation is enforced. The NFS protocol allows passing up to 16 groups per user.

        If nfsd_manage_gids is enabled, the groups to which the user belong are defined
        on the server. In this condition, the server confirms the user is a member of
        the file object GID.

        NAS-126067: Debian changed the 'default' setting to manage_gids in /etc/nfs.conf
        from undefined to "manage_gids = y".

        TEST:   Confirm manage_gids is set in /etc/nfs.conf.d/local/conf for
                both the enable and disable states

        TODO: Add client-side and server-side test from client when available
        '''
        assert start_nfs is True
        with nfs_config():

            if state is not None:
                sleep(3)  # In Cobia: Prevent restarting NFS too quickly.
                call("nfs.update", {"userd_manage_gids": state})

            s = parse_server_config()
            assert s['mountd']['manage-gids'] == expected, str(s)
def test_pool_delete_with_attached_share():
    """
    Confirm we can delete a pool with the system dataset and a dataset with active NFS shares
    """
    with another_pool() as new_pool:
        # Host the system dataset on the doomed pool to make the export interesting
        with system_dataset(new_pool['name']):
            # Layer an exported, actively-served NFS dataset on top
            with nfs_dataset("deleteme", pool=new_pool['name']) as ds:
                with nfs_share(f"/mnt/{ds}"), manage_start_nfs():
                    # Destroy the pool out from under all of the above
                    call("pool.export", new_pool["id"], {"destroy": True}, job=True)
                    # The pool should no longer be present
                    assert call("pool.query", [["name", "=", f"{new_pool['name']}"]]) == []
def test_threadpool_mode():
    """
    Verify that NFS thread pool configuration can be adjusted through private API endpoints.

    NOTE: This request will fail if NFS server (or NFS client) is still running.
    """
    assert get_nfs_service_state() == "STOPPED", "NFS cannot be running during this test."

    default_mode = call('nfs.get_threadpool_mode')
    try:
        # Each supported mode should round-trip through set/get
        for candidate in ("AUTO", "PERCPU", "PERNODE", "GLOBAL"):
            call('nfs.set_threadpool_mode', candidate)
            current = call('nfs.get_threadpool_mode')
            assert current == candidate, current
    finally:
        # Restore to default
        call('nfs.set_threadpool_mode', default_mode)
@pytest.mark.parametrize('exports', ['missing', 'empty'])
def test_missing_or_empty_exports(exports):
    '''
    NAS-123498: Eliminate conditions on exports for service start
    The goal is to make the NFS server behavior similar to the other protocols:
    the service must start even when /etc/exports is empty or absent.
    '''
    # Setup /etc/exports for the parametrized condition
    if exports == 'empty':
        ssh("echo '' > /etc/exports")
    else:  # 'missing'
        ssh("rm -f /etc/exports")

    with nfs_config() as nfs_conf:
        try:
            # Start NFS: must succeed despite the degraded exports file
            call('service.start', 'nfs')
            sleep(1)
            confirm_nfsd_processes(nfs_conf['servers'])
        finally:
            # Return NFS to stopped condition
            call('service.stop', 'nfs')
            sleep(1)

            # Confirm stopped
            assert get_nfs_service_state() == "STOPPED"
@pytest.mark.parametrize('expect_NFS_start', [False, True])
def test_files_in_exportsd(expect_NFS_start):
    '''
    Any files in /etc/exports.d are potentially dangerous, especially zfs.exports.
    We implemented protections against rogue exports files.
    - We block starting NFS if there are any files in /etc/exports.d
    - We generate an alert when we detect this condition
    - We clear the alert when /etc/exports.d is empty
    '''
    # Expected systemd failure detail when start is blocked; None when allowed
    fail_check = {False: 'ConditionDirectoryNotEmpty=!/etc/exports.d', True: None}

    try:
        # Setup the test
        set_immutable_state('/etc/exports.d', want_immutable=False)  # Disable immutable

        # Do the 'failing' case first to end with a clean condition
        if not expect_NFS_start:
            ssh("echo 'bogus data' > /etc/exports.d/persistent.file")
            ssh("chattr +i /etc/exports.d/persistent.file")
        else:
            # Restore /etc/exports.d directory to a clean state
            ssh("chattr -i /etc/exports.d/persistent.file")
            ssh("rm -rf /etc/exports.d/*")

        set_immutable_state('/etc/exports.d', want_immutable=True)  # Enable immutable

        set_nfs_service_state('start', expect_NFS_start, fail_check[expect_NFS_start])
    finally:
        # In all cases we want to end with NFS stopped
        set_nfs_service_state('stop')

        # If NFS start is blocked, then an alert should have been raised
        alerts = call('alert.list')
        if not expect_NFS_start:
            # Find alert
            assert any(alert["klass"] == "NFSblockedByExportsDir" for alert in alerts), alerts
        else:  # Alert should have been cleared
            assert not any(alert["klass"] == "NFSblockedByExportsDir" for alert in alerts), alerts
| 79,478 | Python | .py | 1,542 | 40.068742 | 322 | 0.570345 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,272 | test_snapshot_task_retention.py | truenas_middleware/tests/api2/test_snapshot_task_retention.py | from datetime import datetime
from unittest.mock import ANY
import pytz
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.assets.snapshot_task import snapshot_task
from middlewared.test.integration.utils import assert_creates_job, call
def test_change_retention():
    """
    Changing a periodic snapshot task's naming schema with
    fixate_removal_date=True should stamp an 'org.truenas:destroy_at_*'
    property onto snapshots that stop matching the task, and retention
    should then be derived from that property instead of the task.
    """
    tz = pytz.timezone(call("system.info")["timezone"])

    with dataset("snapshottask-retention-test") as ds:
        call("zettarepl.load_removal_dates")
        with snapshot_task({
            "dataset": ds,
            "recursive": True,
            "exclude": [],
            "lifetime_value": 10,
            "lifetime_unit": "YEAR",
            "naming_schema": "auto-%Y-%m-%d-%H-%M-1y",
            "schedule": {
                "minute": "*",
            },
        }) as task:
            call("zfs.snapshot.create", {
                "dataset": ds,
                "name": "auto-2021-04-12-06-30-1y",
            })
            # While the snapshot matches the task's naming schema, retention
            # is attributed to the periodic snapshot task
            result = call("zfs.snapshot.query", [["id", "=", f"{ds}@auto-2021-04-12-06-30-1y"]],
                          {"get": True, "extra": {"retention": True}})
            assert result["retention"] == {
                "datetime": ANY,
                "source": "periodic_snapshot_task",
                "periodic_snapshot_task_id": task["id"],
            }
            # 10-YEAR lifetime lands on 2031-04-10 (2021-04-12 + 3650 days,
            # two leap days in between) — see expected value below
            assert result["retention"]["datetime"].astimezone(tz) == tz.localize(datetime(2031, 4, 10, 6, 30))

            # Dry-run: the schema change would orphan exactly this snapshot
            result = call("pool.snapshottask.update_will_change_retention_for", task["id"], {
                "naming_schema": "auto-%Y-%m-%d-%H-%M-365d",
            })
            assert result == {
                ds: ["auto-2021-04-12-06-30-1y"],
            }

            # The update with fixate_removal_date spawns a background job
            with assert_creates_job("pool.snapshottask.fixate_removal_date") as job:
                call("pool.snapshottask.update", task["id"], {
                    "naming_schema": "auto-%Y-%m-%d-%H-%M-365d",
                    "fixate_removal_date": True,
                })

            call("core.job_wait", job.id, job=True)

            result = call("zfs.snapshot.query", [["id", "=", f"{ds}@auto-2021-04-12-06-30-1y"]],
                          {"get": True, "extra": {"retention": True}})

            # The removal date is now fixed in a zfs user property...
            properties = [v for k, v in result["properties"].items() if k.startswith("org.truenas:destroy_at_")]
            assert properties, result["properties"]
            assert properties[0]["value"] == "2031-04-10T06:30:00"

            # ...and retention is sourced from that property, not the task
            assert result["retention"] == {
                "datetime": ANY,
                "source": "property",
            }
            assert result["retention"]["datetime"].astimezone(tz) == tz.localize(datetime(2031, 4, 10, 6, 30))
def test_delete_retention():
    """
    Deleting a periodic snapshot task with fixate_removal_date=True should
    stamp the 'org.truenas:destroy_at_*' property onto the snapshots the
    task was retaining, so their removal date survives the task's deletion.
    """
    tz = pytz.timezone(call("system.info")["timezone"])

    with dataset("snapshottask-retention-test-2") as ds:
        call("zettarepl.load_removal_dates")
        with snapshot_task({
            "dataset": ds,
            "recursive": True,
            "exclude": [],
            "lifetime_value": 10,
            "lifetime_unit": "YEAR",
            "naming_schema": "auto-%Y-%m-%d-%H-%M-1y",
            "schedule": {
                "minute": "*",
            },
        }) as task:
            call("zfs.snapshot.create", {
                "dataset": ds,
                "name": "auto-2021-04-12-06-30-1y",
            })

            # Dry-run: deleting the task would affect exactly this snapshot
            result = call("pool.snapshottask.delete_will_change_retention_for", task["id"])
            assert result == {
                ds: ["auto-2021-04-12-06-30-1y"],
            }

            # Deleting with fixate_removal_date spawns a background job
            with assert_creates_job("pool.snapshottask.fixate_removal_date") as job:
                call("pool.snapshottask.delete", task["id"], {
                    "fixate_removal_date": True,
                })

            call("core.job_wait", job.id, job=True)

            result = call("zfs.snapshot.query", [["id", "=", f"{ds}@auto-2021-04-12-06-30-1y"]],
                          {"get": True, "extra": {"retention": True}})

            # The removal date is now carried by a zfs user property
            # (2021-04-12 + 3650 days = 2031-04-10)
            properties = [v for k, v in result["properties"].items() if k.startswith("org.truenas:destroy_at_")]
            assert properties, result["properties"]
            assert properties[0]["value"] == "2031-04-10T06:30:00"

            assert result["retention"] == {
                "datetime": ANY,
                "source": "property",
            }
            assert result["retention"]["datetime"].astimezone(tz) == tz.localize(datetime(2031, 4, 10, 6, 30))
| 4,428 | Python | .py | 93 | 34.473118 | 112 | 0.517841 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,273 | test_crud_events.py | truenas_middleware/tests/api2/test_crud_events.py | import contextlib
import threading
import typing
from middlewared.test.integration.assets.crypto import get_cert_params, root_certificate_authority
from middlewared.test.integration.utils import call
from middlewared.test.integration.utils.client import client
def event_thread(event_endpoint: str, context: dict):
    """
    Worker thread body: subscribe to `event_endpoint` on a fresh client
    connection and record the first matching CRUD event into `context`.
    Sets context['subscribed'] once listening, and context['received_result']
    plus context['shutdown_thread'] when a matching event arrives.
    """
    with client(py_exceptions=False) as c:
        def cb(mtype, **message):
            # Ignore anything that is not a CRUD event for our collection:
            # the payload must be exactly {id, msg, collection}, name our
            # collection, and carry one of the three CRUD verbs
            if len(message) != 3 or not all(
                k in message for k in ('id', 'msg', 'collection')
            ) or message['collection'] != event_endpoint or message['msg'] not in (
                'added', 'changed', 'removed'
            ):
                return

            # Keep only the first matching event
            if context['result'] is None:
                context['result'] = message
            context['received_result'].set()
            context['shutdown_thread'].set()

        c.subscribe(event_endpoint, cb)
        context['subscribed'].set()
        # Keep the client connection alive until an event arrives or the
        # timeout elapses, then let the `with client(...)` block close it
        context['shutdown_thread'].wait(context['timeout'])
@contextlib.contextmanager
def wait_for_event(event_endpoint: str, timeout=60):
    """
    Start a background subscriber for `event_endpoint`, yield its shared
    context dict, and on exit wait up to `timeout` seconds for a matching
    event (raising if none arrives). The captured event is then available
    as context['result'].
    """
    context = {
        'subscribed': threading.Event(),
        'result': None,
        'received_result': threading.Event(),
        'shutdown_thread': threading.Event(),
        'timeout': timeout,
    }
    thread = threading.Thread(target=event_thread, args=(event_endpoint, context), daemon=True)
    thread.start()
    # Do not run the body until the subscription is active, otherwise the
    # event could fire before anyone is listening
    if not context['subscribed'].wait(30):
        raise Exception('Timed out waiting for client to subscribe')

    try:
        yield context

        if not context['received_result'].wait(timeout):
            raise Exception('Event not received')
    finally:
        # Always unblock and reap the worker, even if the body raised
        context['shutdown_thread'].set()
        thread.join(timeout=5)
def assert_result(context: dict, event_endpoint: str, oid: typing.Union[int, str], event_type: str) -> None:
    """Assert the captured event matches the expected collection, id and CRUD verb."""
    expected = {'msg': event_type, 'collection': event_endpoint, 'id': oid}
    assert context['result'] == expected
def test_event_create_on_non_job_method():
    """CA creation (a non-job method) must emit an 'added' event on certificateauthority.query."""
    endpoint = 'certificateauthority.query'
    with wait_for_event(endpoint) as context:
        with root_certificate_authority('root_ca_create_event_test') as root_ca:
            assert root_ca['CA_type_internal'] is True, root_ca

    assert_result(context, endpoint, root_ca['id'], 'added')
def test_event_create_on_job_method():
    """Certificate creation (a job method) must emit an 'added' event on certificate.query."""
    with root_certificate_authority('root_ca_create_event_test') as root_ca:
        with wait_for_event('certificate.query') as context:
            cert = call('certificate.create', {
                'name': 'cert_test',
                'signedby': root_ca['id'],
                'create_type': 'CERTIFICATE_CREATE_INTERNAL',
                **get_cert_params(),
            }, job=True)
            try:
                assert cert['cert_type_internal'] is True, cert
            finally:
                # Clean up the certificate even if the assertion above fails
                call('certificate.delete', cert['id'], job=True)

        # wait_for_event captures the first matching event, so the 'added'
        # from the create wins over the later 'removed' from the delete
        assert_result(context, 'certificate.query', cert['id'], 'added')
def test_event_update_on_non_job_method():
    """A plain (non-job) update must emit a 'changed' collection event."""
    with root_certificate_authority('root_ca_update_event_test') as root_ca:
        assert root_ca['CA_type_internal'] is True, root_ca

        with wait_for_event('certificateauthority.query') as context:
            call('certificateauthority.update', root_ca['id'], {})

        assert_result(context, 'certificateauthority.query', root_ca['id'], 'changed')
def test_event_update_on_job_method():
    """A job-based update must emit a 'changed' collection event."""
    # Wrap the create as well so its own 'added' event is consumed first.
    with wait_for_event('tunable.query'):
        tunable = call('tunable.create', {
            'type': 'SYSCTL',
            'var': 'kernel.watchdog',
            'value': '1',
        }, job=True)

    try:
        with wait_for_event('tunable.query') as context:
            call('tunable.update', tunable['id'], {'value': '0'}, job=True)

        assert_result(context, 'tunable.query', tunable['id'], 'changed')
    finally:
        call('tunable.delete', tunable['id'], job=True)
def test_event_delete_on_non_job_method():
    """A plain (non-job) delete must emit a 'removed' collection event."""
    root_ca = call('certificateauthority.create', {
        **get_cert_params(),
        'name': 'test_root_ca_delete_event',
        'create_type': 'CA_CREATE_INTERNAL',
    })
    assert root_ca['CA_type_internal'] is True, root_ca

    with wait_for_event('certificateauthority.query') as context:
        call('certificateauthority.delete', root_ca['id'])

    assert_result(context, 'certificateauthority.query', root_ca['id'], 'removed')
def test_event_delete_on_job_method():
    """A job-based delete must emit a 'removed' collection event."""
    # Wrap the create as well so its own 'added' event is consumed first.
    with wait_for_event('tunable.query'):
        tunable = call('tunable.create', {
            'type': 'SYSCTL',
            'var': 'kernel.watchdog',
            'value': '1',
        }, job=True)

    with wait_for_event('tunable.query') as context:
        call('tunable.delete', tunable['id'], job=True)

    assert_result(context, 'tunable.query', tunable['id'], 'removed')
| 4,844 | Python | .py | 106 | 36.811321 | 108 | 0.621972 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,274 | test_virt_001_global.py | truenas_middleware/tests/api2/test_virt_001_global.py | from middlewared.test.integration.utils.call import call
from middlewared.test.integration.utils.ssh import ssh
from auto_config import pool_name
def test_virt_pool():
    """Selecting a storage pool for virt must create the .ix-virt dataset."""
    call('virt.global.update', {'pool': pool_name}, job=True)
    ssh(f'zfs list {pool_name}/.ix-virt')
def test_virt_no_pool():
    """Clearing the virt pool must remove the default incus storage."""
    call('virt.global.update', {'pool': None}, job=True)
    # incus should report the default storage as missing once unconfigured
    ssh('incus storage show default 2>&1 | grep "not found"')
def test_virt_pool_auto_bridge():
    """With no bridge specified, enabling virt must auto-create incusbr0."""
    call('virt.global.update', {'pool': pool_name, 'bridge': None}, job=True)
    ssh('ifconfig incusbr0')
| 565 | Python | .py | 12 | 43.416667 | 77 | 0.711927 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,275 | test_275_ldap.py | truenas_middleware/tests/api2/test_275_ldap.py | import pytest
from middlewared.test.integration.assets.directory_service import ldap, LDAPUSER, LDAPPASSWORD
from middlewared.test.integration.assets.privilege import privilege
from middlewared.test.integration.assets.product import product_type
from middlewared.test.integration.utils import call, client
pytestmark = [
pytest.mark.skipif(not LDAPUSER, reason='Missing LDAP configuration'),
]
@pytest.fixture(scope="module")
def do_ldap_connection(request):
    """Module-scoped fixture: bind to the test LDAP server and temporarily
    switch the product type for the duration of the module."""
    with ldap() as ldap_conn:
        with product_type():
            yield ldap_conn
def test_ldap_initial_state():
    """Before configuration, directory services must be inactive and LDAP disabled."""
    ds = call("directoryservices.status")
    assert ds["type"] is None
    assert ds["status"] is None

    ldap_config = call("ldap.config")
    assert not ldap_config["enable"]
def test_ldap_schema_choices():
    """ldap.schema_choices must expose exactly the two RFC2307 variants."""
    expected = {"RFC2307", "RFC2307BIS"}

    choices = call("ldap.schema_choices")
    assert set(choices) == expected
def test_get_ldap_ssl_choices():
    """ldap.ssl_choices must expose OFF, ON and START_TLS."""
    expected = {"OFF", "ON", "START_TLS"}

    choices = call("ldap.ssl_choices")
    assert set(choices) == expected
def test_ldap_connection(do_ldap_connection):
    """With the fixture active, directory services must report a healthy LDAP bind."""
    ds = call("directoryservices.status")
    assert ds["type"] == "LDAP"
    assert ds["status"] == "HEALTHY"

    ldap_config = call("ldap.config")
    assert ldap_config["enable"]
    assert ldap_config["server_type"] == "OPENLDAP"
def test_ldap_user_group_cache(do_ldap_connection):
    """The user/group cache must contain non-local (LDAP) entries."""
    assert call("user.query", [["local", "=", False]], {'count': True}) != 0
    assert call("group.query", [["local", "=", False]], {'count': True}) != 0
def test_account_privilege_authentication(do_ldap_connection):
    """An LDAP user granted a privilege via its group must authenticate and
    see only the allowlisted methods."""
    call("system.general.update", {"ds_auth": True})
    try:
        group = call("user.get_user_obj", {"username": LDAPUSER})
        assert group["source"] == "LDAP"
        with privilege({
            "name": "LDAP privilege",
            "local_groups": [],
            "ds_groups": [group["pw_gid"]],
            "allowlist": [{"method": "CALL", "resource": "system.info"}],
            "web_shell": False,
        }):
            with client(auth=(LDAPUSER, LDAPPASSWORD)) as c:
                methods = c.call("core.get_methods")

                # only the allowlisted method should be visible
                assert "system.info" in methods
                assert "pool.create" not in methods
    finally:
        # restore local-only authentication regardless of outcome
        call("system.general.update", {"ds_auth": False})
| 2,336 | Python | .py | 55 | 36.2 | 94 | 0.655157 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,276 | test_initshutdownscript.py | truenas_middleware/tests/api2/test_initshutdownscript.py | import base64
import contextlib
import errno
import stat
import time
import pytest
from middlewared.test.integration.utils import client, ssh
from middlewared.service_exception import ValidationErrors, ValidationError
# Path of the throwaway script uploaded for each test case.
TEST_SCRIPT_FILE = '/root/.TEST_SCRIPT_FILE'
# 0o775: rwx for owner/group, r-x for others.
_775 = stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH
@pytest.fixture(scope='module')
def ws_client():
    """Module-scoped websocket client used by all tests in this file."""
    with client() as c:
        yield c
@contextlib.contextmanager
def initshutudown_script(ws_client, contents, extra=None):
    """Upload *contents* as an executable script and register it as a PREINIT
    init/shutdown task; yields the created task and deletes it on exit.

    *extra* is merged into the task payload (e.g. {'timeout': 2}).
    NOTE(review): name is misspelled ("initshutudown") but kept for callers.
    """
    extra = extra or {}
    ws_client.call(
        'filesystem.file_receive',
        TEST_SCRIPT_FILE,
        base64.b64encode(contents.encode('utf-8')).decode(),
        {'mode': _775},
    )
    script = ws_client.call(
        'initshutdownscript.create',
        {
            'type': 'SCRIPT',
            'script': TEST_SCRIPT_FILE,
            'when': 'PREINIT',
            **extra,
        }
    )
    try:
        yield script
    finally:
        ws_client.call('initshutdownscript.delete', script['id'])
def test_initshutudown_script(ws_client):
    """CRUD round trip: create, verify, update comment, disable, delete."""
    with initshutudown_script(ws_client, 'echo "testing"') as script:
        _id = script['id']
        filters = [['id', '=', _id]]
        opts = {'get': True}

        # verify
        assert ws_client.call('initshutdownscript.query', filters, opts)['script'] == TEST_SCRIPT_FILE

        # add a comment
        ws_client.call('initshutdownscript.update', _id, {'comment': 'test_comment'})
        assert ws_client.call('initshutdownscript.query', filters, opts)['comment'] == 'test_comment'

        # disable it
        ws_client.call('initshutdownscript.update', _id, {'enabled': False})
        assert ws_client.call('initshutdownscript.query', filters, opts)['enabled'] is False

    # context manager deleted the task; it must no longer be queryable
    assert not ws_client.call('initshutdownscript.query', filters)
def test_initshutdown_script_bad(ws_client):
    """Creating a task that points at a nonexistent script must fail
    validation with an ENOENT error on the 'script' field."""
    # plain string: the original used an f-string with no placeholders (F541)
    bad_script = '/root/nonexistent-script'
    with pytest.raises(ValidationErrors) as e:
        ws_client.call(
            'initshutdownscript.create',
            {
                'type': 'SCRIPT',
                'script': bad_script,
                'when': 'PREINIT',
            }
        )
    assert e.value.errors == [
        ValidationError(
            'init_shutdown_script_create.script',
            f'Path {bad_script} not found',
            errno.ENOENT
        )
    ]
def test_initshutdownscript_success(ws_client):
    """A working script must run at PREINIT and leave its flag file behind."""
    ssh("rm /tmp/flag", check=False)
    with initshutudown_script(ws_client, 'echo ok > /tmp/flag'):
        ws_client.call('initshutdownscript.execute_init_tasks', 'PREINIT', job=True)

    assert ssh("cat /tmp/flag") == "ok\n"
def test_initshutdownscript_timeout(ws_client):
    """A script exceeding its timeout must be killed and the timeout logged."""
    ssh("rm /tmp/flag", check=False)
    # script sleeps 10s but the task timeout is 2s
    with initshutudown_script(ws_client, 'sleep 10', {"timeout": 2}):
        start = time.monotonic()
        ws_client.call('initshutdownscript.execute_init_tasks', 'PREINIT', job=True)
        # the job must return well before the script's 10s sleep
        assert time.monotonic() - start < 5

    assert f"Timed out running SCRIPT: {TEST_SCRIPT_FILE!r}" in ssh("cat /var/log/middlewared.log")
def test_initshutdownscript_failure(ws_client):
    """A script that exits non-zero must have its stderr logged by middleware."""
    ssh("rm /tmp/flag", check=False)
    with initshutudown_script(ws_client, 'echo everything went wrong > /dev/stderr; exit 1'):
        ws_client.call('initshutdownscript.execute_init_tasks', 'PREINIT', job=True)

    assert (
        f"Failed to execute 'exec {TEST_SCRIPT_FILE}' with error 'everything went wrong\\n'" in
        ssh("cat /var/log/middlewared.log")
    )
| 3,502 | Python | .py | 88 | 32.545455 | 102 | 0.644504 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,277 | test_cloud_sync_config.py | truenas_middleware/tests/api2/test_cloud_sync_config.py | import time
from middlewared.test.integration.assets.cloud_sync import credential, task
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call
from middlewared.test.integration.utils.mock_rclone import mock_rclone
def test_rclone_config_writer_bool():
    """Boolean task attributes must be serialized into the rclone config as
    lowercase strings ('true'/'false'), not Python bools."""
    with dataset("test_cloud_sync_config") as ds:
        with credential({
            "name": "Google Cloud Storage",
            "provider": "GOOGLE_CLOUD_STORAGE",
            "attributes": {
                "service_account_credentials": "{\"project_id\": 1}",
            },
        }) as c:
            with task({
                "direction": "PUSH",
                "transfer_mode": "COPY",
                "path": f"/mnt/{ds}",
                "credentials": c["id"],
                "attributes": {
                    "bucket": "bucket",
                    "folder": "",
                    "bucket_policy_only": True,
                },
            }) as t:
                with mock_rclone() as mr:
                    call("cloudsync.sync", t["id"])

                    # give the mocked rclone time to capture its config
                    time.sleep(2.5)

                    assert mr.result["config"]["remote"]["bucket_policy_only"] == "true"
| 1,206 | Python | .py | 29 | 28.689655 | 88 | 0.523891 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,278 | test_replication_sudo.py | truenas_middleware/tests/api2/test_replication_sudo.py | import pytest
from middlewared.test.integration.assets.account import user
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call, password, ssh
@pytest.mark.parametrize("task", [
    {"direction": "PUSH", "also_include_naming_schema": ["auto-%Y-%m-%d-%H-%M"]},
    {"direction": "PULL", "naming_schema": ["auto-%Y-%m-%d-%H-%M"]},
])
def test_replication_sudo(task):
    """One-time SSH replication as a non-root user must succeed in both
    directions when sudo is enabled on the connection and the task."""
    with dataset("admin") as admin_homedir:
        with user({
            "username": "admin",
            "full_name": "admin",
            "group_create": True,
            "home": f"/mnt/{admin_homedir}",
            "password": "test1234",
        }):
            # semi-automatic setup creates the keypair and installs it for 'admin'
            ssh_connection = call("keychaincredential.setup_ssh_connection", {
                "private_key": {
                    "generate_key": True,
                    "name": "test key",
                },
                "connection_name": "test",
                "setup_type": "SEMI-AUTOMATIC",
                "semi_automatic_setup": {
                    "url": "http://localhost",
                    "password": password(),
                    "username": "admin",
                    "sudo": True,
                },
            })
            try:
                with dataset("src") as src:
                    ssh(f"touch /mnt/{src}/test")
                    call("zfs.snapshot.create", {"dataset": src, "name": "auto-2023-01-18-16-00"})
                    with dataset("dst") as dst:
                        call("replication.run_onetime", {
                            **task,
                            "transport": "SSH",
                            "ssh_credentials": ssh_connection["id"],
                            "sudo": True,
                            "source_datasets": [src],
                            "target_dataset": dst,
                            "recursive": False,
                            "retention_policy": "NONE",
                        }, job=True)

                        # replicated snapshot must contain the test file
                        assert ssh(f"ls /mnt/{dst}") == "test\n"
            finally:
                # remove the connection and its generated private key
                call("keychaincredential.delete", ssh_connection["id"])
                call("keychaincredential.delete", ssh_connection["attributes"]["private_key"])
| 2,243 | Python | .py | 50 | 28.66 | 98 | 0.463682 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,279 | test_dataset_mount.py | truenas_middleware/tests/api2/test_dataset_mount.py | from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call, ssh
def test_dataset_mount_on_readonly_dataset():
    """Recursive mount must mount readonly children; a child created under a
    readonly parent (so its mountpoint cannot be created) stays unmounted."""
    src_parent_dataset_name = 'parent_src'
    with dataset(src_parent_dataset_name) as parent_src:
        with dataset(f'{src_parent_dataset_name}/child1', {'readonly': 'ON'}) as child1_ds:
            with dataset(f'{src_parent_dataset_name}/child2', {'readonly': 'ON'}) as child2_ds:
                # grandchild under a readonly parent: expected to fail to mount
                call('zfs.dataset.create', {'name': f'{child1_ds}/failed'})
                call('zfs.dataset.umount', parent_src, {'force': True})
                call('zfs.dataset.mount', parent_src, {'recursive': True})
                for source_dataset, mounted in (
                    (parent_src, 'yes'),
                    (child1_ds, 'yes'),
                    (f'{child1_ds}/failed', 'no'),
                    (child2_ds, 'yes'),
                ):
                    assert call('zfs.dataset.get_instance', source_dataset)['properties']['mounted']['value'] == mounted
| 1,038 | Python | .py | 17 | 47.705882 | 120 | 0.57998 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,280 | test_smb_encryption.py | truenas_middleware/tests/api2/test_smb_encryption.py | import os
import pytest
from contextlib import contextmanager
from middlewared.test.integration.assets.account import user
from middlewared.test.integration.assets.smb import smb_share
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call
from protocols import smb_connection
# Credentials and share name shared by all SMB encryption tests below.
SHAREUSER = 'smbuser420'
PASSWD = 'abcd1234'
SMB_NAME = 'enc_share'
@pytest.fixture(scope='module')
def smb_setup(request):
    """Module-scoped fixture: SMB dataset + share user + share, with the cifs
    service running for the duration of the module."""
    with dataset('smb-encrypt', data={'share_type': 'SMB'}) as ds:
        with user({
            'username': SHAREUSER,
            'full_name': SHAREUSER,
            'group_create': True,
            'password': PASSWD
        }, get_instance=False):
            with smb_share(os.path.join('/mnt', ds), SMB_NAME) as s:
                try:
                    call('service.start', 'cifs')
                    yield {'dataset': ds, 'share': s}
                finally:
                    call('service.stop', 'cifs')
@contextmanager
def server_encryption(param):
    """Temporarily set the server-side SMB encryption policy, restoring
    DEFAULT on exit."""
    call('smb.update', {'encryption': param})

    try:
        yield
    finally:
        call('smb.update', {'encryption': 'DEFAULT'})
def test__smb_client_encrypt_default(smb_setup):
    """With client and server both at DEFAULT, no encryption is negotiated."""
    with smb_connection(
        share=smb_setup['share']['name'],
        username=SHAREUSER,
        password=PASSWD,
        encryption='DEFAULT'
    ) as c:
        # perform basic op to fully initialize SMB session
        assert c.get_smb_encryption() == 'DEFAULT'
        c.ls('/')

        smb_status = call('smb.status')[0]

        # check session
        assert smb_status['encryption']['cipher'] == '-'
        assert smb_status['encryption']['degree'] == 'none'

        # check share
        assert smb_status['share_connections'][0]['encryption']['cipher'] == '-'
        assert smb_status['share_connections'][0]['encryption']['degree'] == 'none'
def test__smb_client_encrypt_desired(smb_setup):
    """A client desiring encryption gets AES-128-GCM: full on the share,
    partial on the session."""
    with smb_connection(
        share=smb_setup['share']['name'],
        username=SHAREUSER,
        password=PASSWD,
        encryption='DESIRED'
    ) as c:
        assert c.get_smb_encryption() == 'DESIRED'
        # perform basic op to fully initialize SMB session
        c.ls('/')

        smb_status = call('smb.status')[0]

        # check session
        assert smb_status['encryption']['cipher'] == 'AES-128-GCM'
        assert smb_status['encryption']['degree'] == 'partial'

        # check share
        assert smb_status['share_connections'][0]['encryption']['cipher'] == 'AES-128-GCM'
        assert smb_status['share_connections'][0]['encryption']['degree'] == 'full'
def test__smb_client_encrypt_required(smb_setup):
    """A client requiring encryption gets AES-128-GCM: full on the share,
    partial on the session."""
    with smb_connection(
        share=smb_setup['share']['name'],
        username=SHAREUSER,
        password=PASSWD,
        encryption='REQUIRED'
    ) as c:
        assert c.get_smb_encryption() == 'REQUIRED'
        # perform basic op to fully initialize SMB session
        c.ls('/')

        smb_status = call('smb.status')[0]

        # check session
        assert smb_status['encryption']['cipher'] == 'AES-128-GCM'
        assert smb_status['encryption']['degree'] == 'partial'

        # check share
        assert smb_status['share_connections'][0]['encryption']['cipher'] == 'AES-128-GCM'
        assert smb_status['share_connections'][0]['encryption']['degree'] == 'full'
@pytest.mark.parametrize('enc_param', ('DESIRED', 'REQUIRED'))
def test__smb_client_server_encrypt(smb_setup, enc_param):
    """When the *server* desires/requires encryption, even a DEFAULT client
    session must be fully encrypted with AES-128-GCM."""
    with server_encryption(enc_param):
        with smb_connection(
            share=smb_setup['share']['name'],
            username=SHAREUSER,
            password=PASSWD,
            encryption='DEFAULT'
        ) as c:
            # check that client credential desired encryption is
            # set to expected value
            assert c.get_smb_encryption() == 'DEFAULT'
            # perform basic op to fully initialize SMB session
            c.ls('/')

            smb_status = call('smb.status')[0]

            # check session
            assert smb_status['encryption']['cipher'] == 'AES-128-GCM'
            assert smb_status['encryption']['degree'] == 'full'

            # check share
            assert smb_status['share_connections'][0]['encryption']['cipher'] == 'AES-128-GCM'
            assert smb_status['share_connections'][0]['encryption']['degree'] == 'full'
| 4,374 | Python | .py | 105 | 33.038095 | 94 | 0.608583 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,281 | test_mail.py | truenas_middleware/tests/api2/test_mail.py | from middlewared.test.integration.utils import call
def test_config_settings():
    """mail.update must persist every submitted field verbatim."""
    payload = {
        "fromemail": "william.spam@ixsystems.com",
        "outgoingserver": "mail.ixsystems.com",
        "pass": "changeme",
        "port": 25,
        "security": "PLAIN",
        "smtp": True,
        "user": "william.spam@ixsystems.com"
    }
    call("mail.update", payload)

    config = call("mail.config")
    # test that payload is a subset of config
    assert payload.items() <= config.items()
| 504 | Python | .py | 15 | 27.133333 | 51 | 0.622177 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,282 | test_websocket_local_ip.py | truenas_middleware/tests/api2/test_websocket_local_ip.py | from middlewared.test.integration.utils import call
from middlewared.test.integration.utils.client import truenas_server
def test_websocket_local_ip():
    """This tests to ensure we return the local IP address
    of the TrueNAS system based on the websocket session."""
    assert call("interface.websocket_local_ip") == truenas_server.ip
| 343 | Python | .py | 6 | 53.833333 | 68 | 0.78806 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,283 | test_can_access_as_user.py | truenas_middleware/tests/api2/test_can_access_as_user.py | import contextlib
import pytest
from middlewared.test.integration.assets.pool import dataset, pool
from middlewared.test.integration.utils import call, ssh
import sys
import os
apifolder = os.getcwd()
sys.path.append(apifolder)
@contextlib.contextmanager
def file(name, user, group, permissions):
    """Yield the path of a file *name* created inside a temporary dataset with
    the given ownership and mode.  NOTE(review): shadows the `file` builtin
    name; kept because tests in this module call it by this name."""
    with dataset('test_perms', pool=pool) as test_dataset:
        path = os.path.join('/mnt', test_dataset, name)
        with file_at_path(path, user, group, permissions):
            yield path
@contextlib.contextmanager
def file_at_path(path, user, group, permissions):
    """Create an empty file at *path* with the given owner/group/mode via
    install(1); remove it on exit."""
    ssh(f'install -o {user} -g {group} -m {permissions} /dev/null {path}')
    try:
        yield path
    finally:
        ssh(f'rm -f {path}')
@contextlib.contextmanager
def directory(name, user, group, permissions):
    """Yield the path of a directory *name* created inside a temporary dataset
    with the given ownership and mode; remove it on exit."""
    with dataset('test_perms', pool=pool) as test_dataset:
        path = os.path.join('/mnt', test_dataset, name)
        ssh(f'mkdir -p -m {permissions} {path}')
        ssh(f'chown -R {user}:{group} {path}')
        try:
            yield path
        finally:
            ssh(f'rm -rf {path}')
def test_non_authorized_user_access():
    """A 0700 root-owned file must deny read/write/execute to 'nobody'."""
    with file('test', 'root', 'root', '700') as file_path:
        for perm_check in ('read', 'write', 'execute'):
            assert call('filesystem.can_access_as_user', 'nobody', file_path, {perm_check: True}) is False
def test_authorized_user_access():
    """A 0700 file must grant all access modes to its owner."""
    for user, group in (('apps', 'apps'), ('nobody', 'nogroup')):
        with file('test', user, group, '700') as file_path:
            for perm_check in ('read', 'write', 'execute'):
                assert call('filesystem.can_access_as_user', user, file_path, {perm_check: True}) is True
def test_read_access():
    """Mode 0400 must grant only read to the owner."""
    for user, group in (('apps', 'apps'), ('nobody', 'nogroup')):
        with file('test', user, group, '400') as file_path:
            for perm_check, value in (('read', True), ('write', False), ('execute', False)):
                assert call('filesystem.can_access_as_user', user, file_path, {perm_check: True}) is value
def test_write_access():
    """Mode 0200 must grant only write to the owner."""
    for user, group in (('apps', 'apps'), ('nobody', 'nogroup')):
        with file('test', user, group, '200') as file_path:
            for perm_check, value in (('read', False), ('write', True), ('execute', False)):
                assert call('filesystem.can_access_as_user', user, file_path, {perm_check: True}) is value
def test_execute_access():
    """Mode 0100 must grant only execute to the owner."""
    for user, group in (('apps', 'apps'), ('nobody', 'nogroup')):
        with file('test', user, group, '100') as file_path:
            for perm_check, value in (('read', False), ('write', False), ('execute', True)):
                assert call('filesystem.can_access_as_user', user, file_path, {perm_check: True}) is value
def test_nested_perm_execute_check():
    """A world-accessible file inside a 0700 root directory must still be
    inaccessible: the parent's missing execute bit blocks traversal."""
    with directory('test_dir', 'root', 'root', '700') as dir_path:
        file_path = os.path.join(dir_path, 'testfile')
        with file_at_path(file_path, 'root', 'root', '777'):
            assert call('filesystem.can_access_as_user', 'apps', file_path, {'execute': True}) is False
| 3,076 | Python | .py | 60 | 44.183333 | 106 | 0.626711 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,284 | test_disk_get_dev_size.py | truenas_middleware/tests/api2/test_disk_get_dev_size.py | import json
import pytest
from middlewared.test.integration.utils import call, ssh
@pytest.fixture(scope="session")
def blockdevices():
    """Map of block device name -> lsblk record (name, size in bytes)."""
    return {i['name']: i for i in json.loads(ssh('lsblk -bJ -o NAME,SIZE'))['blockdevices']}
def test_get_dev_size_for_all_disks(blockdevices):
    """disk.get_dev_size must agree with lsblk's byte size for every disk."""
    for disk, disk_info in blockdevices.items():
        assert disk_info['size'] == call('disk.get_dev_size', disk)
| 403 | Python | .py | 9 | 41.333333 | 92 | 0.726804 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,285 | test_310_service_announcement.py | truenas_middleware/tests/api2/test_310_service_announcement.py | import contextlib
import random
import re
import socket
import string
from datetime import datetime, timedelta
from time import sleep
from typing import cast
import pytest
from assets.websocket.server import reboot
from assets.websocket.service import (ensure_service_disabled,
ensure_service_enabled,
ensure_service_started,
ensure_service_stopped)
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call, ssh
from middlewared.test.integration.utils.client import truenas_server
from pytest_dependency import depends
from zeroconf import ServiceBrowser, ServiceStateChange, Zeroconf
from auto_config import ha, password, pool_name, user
from functions import SSH_TEST
from protocols import smb_share
# Random suffix so concurrent/repeated runs don't collide on names.
digits = ''.join(random.choices(string.digits, k=4))
dataset_name = f"smb-cifs{digits}"
SMB_NAME1 = f"TestCifsSMB{digits}"
SMB_PATH1 = f"/mnt/{pool_name}/{dataset_name}"

dataset_name2 = f"other{digits}"
SMB_NAME2 = f"OtherTestSMB{digits}"
SMB_PATH2 = f"/mnt/{pool_name}/{dataset_name2}"

# Service names
TIME_MACHINE = '_adisk._tcp.local.'  # Automatic Disk
DEVICE_INFO = '_device-info._tcp.local.'  # Device Info
HTTP = '_http._tcp.local.'
SMB = '_smb._tcp.local.'
NUT = '_nut._tcp'

DO_MDNS_REBOOT_TEST = False
# When True, discovery shells out to avahi-browse on the NAS instead of
# using the zeroconf library on the test runner.
USE_AVAHI_BROWSE = True
skip_avahi_browse_tests = pytest.mark.skipif(USE_AVAHI_BROWSE, reason="Skip tests broken by use of avahi-browse")
def _get_tm_props(rec, key):
result = {}
for pair in rec['properties'][key].decode('utf-8').split(','):
k, v = pair.split('=')
result[k] = v
return result
def allow_settle(delay=3):
    """Sleep *delay* seconds so freshly published mDNS records can propagate."""
    # Delay slightly to allow things to propagate
    sleep(delay)
@contextlib.contextmanager
def service_announcement_config(config):
    """Temporarily apply a service_announcement *config* (e.g. {'mdns': False}),
    restoring the previous configuration on exit.  A falsy config is a no-op."""
    if not config:
        yield
    else:
        old_config = call('network.configuration.config')['service_announcement']
        call('network.configuration.update', {'service_announcement': config})
        try:
            yield
        finally:
            call('network.configuration.update', {'service_announcement': old_config})
@contextlib.contextmanager
def ensure_aapl_extensions():
    """Ensure SMB AAPL (Apple) extensions are enabled within the context,
    disabling them again on exit only if we were the ones to enable them."""
    # First check
    enabled = call('smb.config')['aapl_extensions']
    if enabled:
        yield
    else:
        call('smb.update', {'aapl_extensions': True})
        try:
            yield
        finally:
            call('smb.update', {'aapl_extensions': False})
def wait_for_avahi_startup(interval=5, timeout=300):
    """When tests are running in a QE environment it can take a long
    time for the service to start up completely, because many systems
    can be configured with the same hostname.

    This function will detect the most recent avahi-daemon startup and
    wait for it to complete.

    Returns True once 'Server startup complete' is seen for the latest
    startup's PID (sleeping 5s extra if completion was very recent, so
    services get published), False if *timeout* elapses first.
    """
    command = 'journalctl --no-pager -u avahi-daemon --since "10 minute ago"'
    # splits 'avahi-daemon[1234]:' into ['avahi-daemon', '1234', ':...']
    brackets = re.compile(r'[\[\]]+')
    while timeout > 0:
        startup = None
        ssh_out = SSH_TEST(command, user, password)
        assert ssh_out['result'], str(ssh_out)
        output = ssh_out['output']
        # First we just look for the most recent startup command
        for line in output.split('\n'):
            if line.endswith('starting up.'):
                startup = line
        if startup:
            pid = brackets.split(startup)[1]
            completion = f'avahi-daemon[{pid}]: Server startup complete.'
            for line in output.split('\n'):
                if completion in line:
                    # Did we just complete
                    finish_plus_five = (datetime.strptime(line.split()[2], "%H:%M:%S") + timedelta(seconds=5)).time()
                    if finish_plus_five > datetime.now().time():
                        # Wait 5 seconds to ensure services are published
                        sleep(5)
                    return True
        sleep(interval)
        timeout -= interval
    return False
class ZeroconfCollector:
    """Collector backend that discovers services with the zeroconf library
    running on the test runner.  Mixed into mDNSAnnounceCollector, which
    supplies self.ip / self.SERVICES / update_internal_hostname."""

    def on_service_state_change(self, zeroconf, service_type, name, state_change):
        # Callback from ServiceBrowser; records only newly added services
        # that are published by the NAS under test (self.ip).
        if state_change is ServiceStateChange.Added:
            info = zeroconf.get_service_info(service_type, name)
            if info:
                item = {}
                item['addresses'] = [addr for addr in info.parsed_scoped_addresses()]
                if self.ip not in item['addresses']:
                    return
                item['port'] = cast(int, info.port)
                item['server'] = info.server
                if info.properties:
                    item['properties'] = {}
                    for key, value in info.properties.items():
                        if key:
                            item['properties'][key] = value
                else:
                    item['properties'] = {}
                self.result[service_type][name] = item
                self.update_internal_hostname(item['server'])

    def find_items(self, service_announcement=None, timeout=5):
        # Browse all SERVICES for *timeout* seconds, optionally under a
        # temporary service_announcement configuration.
        self.result = {}
        for service in self.SERVICES:
            self.result[service] = {}
        with service_announcement_config(service_announcement):
            assert wait_for_avahi_startup(), "Failed to detect avahi-daemon startup"
            zeroconf = Zeroconf()
            ServiceBrowser(zeroconf, self.SERVICES, handlers=[self.on_service_state_change])
            try:
                sleep(timeout)
            finally:
                zeroconf.close()
        return self.result

    def clear_cache(self):
        # No-op for zeroconf collector
        pass
class AvahiBrowserCollector:
    """Collector backend that discovers services by running avahi-browse on
    the NAS itself and parsing its parseable (-p) ';'-separated output.
    Mixed into mDNSAnnounceCollector, which supplies self.ips / self.SERVICES
    / update_internal_hostname."""

    # Maps avahi-browse's human-readable service names to the mDNS types
    # used as keys elsewhere in this module.
    name_to_service = {
        'Device Info': DEVICE_INFO,
        'Web Site': HTTP,
        'Microsoft Windows Network': SMB,
        'Apple TimeMachine': TIME_MACHINE,
        '_nut._tcp': NUT,
    }

    def find_items(self, service_announcement=None, timeout=5):
        self.result = {}
        for service in self.SERVICES:
            self.result[service] = {}
        with service_announcement_config(service_announcement):
            assert wait_for_avahi_startup(), "Failed to detect avahi-daemon startup"
            # ssh_out = SSH_TEST("avahi-browse -v --all -t -p --resolve", user, password)
            # Appears sometimes we need a little more time
            ssh_out = SSH_TEST("timeout --preserve-status 5 avahi-browse -v --all -p --resolve", user, password)
            assert ssh_out['result'], str(ssh_out)
            output = ssh_out['output']
            for line in output.split('\n'):
                item = {}
                items = line.split(';')
                # '=' rows are fully resolved entries; 10 fields when resolved
                if len(items) > 1 and items[0] == '=':
                    if len(items) == 10:
                        server = items[3]
                        pub_ip = items[7]
                        # ignore records not published by the NAS under test
                        if pub_ip not in self.ips:
                            continue
                        item['addresses'] = [pub_ip]
                        item['port'] = items[8]
                        item['server'] = items[6]
                        service_type = AvahiBrowserCollector.name_to_service[items[4]]
                        key = f"{server}.{service_type}"
                        item['properties'] = self.process_properties(items[9], service_type)
                        self.result[service_type][key] = item
                        self.update_internal_hostname(item['server'])
        return self.result

    def process_properties(self, txts, service_type):
        # Extract only the TXT keys the tests care about, re-encoded to bytes
        # so results match the zeroconf backend's {bytes: bytes} shape.
        props = {}
        for txt in txts.split():
            if txt.startswith('"') and txt.endswith('"'):
                txt = txt[1:-1]
            for prop in ['model', 'dk0', 'dk1', 'sys']:
                if txt.startswith(f"{prop}="):
                    props[prop.encode('utf-8')] = txt[len(prop) + 1:].encode('utf-8')
        return props

    def clear_cache(self):
        # We need to restart the avahi-daemon to clear cache
        # print("Clearing cache")
        ssh("systemctl restart avahi-daemon")

    @staticmethod
    def get_ipv6(ip):
        """Given an IPv4 address string, find the IPv6 on the same
        interface (if present). Returns either the IPv6 address as
        a string, or None"""
        ips = call('network.general.summary')['ips']
        for interface in ips:
            matched = False
            if 'IPV4' in ips[interface]:
                for ipv4 in ips[interface]['IPV4']:
                    if ipv4.split('/')[0] == ip:
                        matched = True
                        break
            if matched and 'IPV6' in ips[interface]:
                for ipv6 in ips[interface]['IPV6']:
                    return ipv6.split('/')[0]
        return None
class abstractmDNSAnnounceCollector:
    """
    Class to help in the discovery (and processing/checking)
    of services advertised by a particular IP address/server name.
    """
    SERVICES = [TIME_MACHINE, DEVICE_INFO, HTTP, SMB, NUT]

    def __init__(self, ip, tn_hostname):
        self.ip = socket.gethostbyname(ip)
        # hostname may later diverge from tn_hostname after conflict resolution
        self.hostname = self.tn_hostname = tn_hostname

    def update_internal_hostname(self, published_hostname):
        """If there has been a conflict then it is possible that a derivative
        of the original hostname is being used. Check whether this the
        published name could be a conflict-resolved name and if so,
        update the hostname that will be used during checks.
        """
        if published_hostname == self.tn_hostname:
            return
        possible_new_hostname = published_hostname.split('.')[0]
        if possible_new_hostname == self.hostname:
            return
        # Check whether either 'hostname-...' or '<hostname> #...'
        if possible_new_hostname.split()[0].split('-')[0] == self.tn_hostname:
            self.hostname = possible_new_hostname

    def has_service_type(self, hostname, service_type):
        # falsy hostname falls back to the (possibly conflict-resolved) default
        if not hostname:
            hostname = self.hostname
        key = f"{hostname}.{service_type}"
        return key in self.result[service_type]

    def get_service_type(self, hostname, service_type):
        # returns the discovered record, or None if absent
        if not hostname:
            hostname = self.hostname
        key = f"{hostname}.{service_type}"
        if key in self.result[service_type]:
            return self.result[service_type][key]

    def has_time_machine(self, hostname=None):
        return self.has_service_type(hostname, TIME_MACHINE)

    def has_device_info(self, hostname=None):
        return self.has_service_type(hostname, DEVICE_INFO)

    def has_http(self, hostname=None):
        return self.has_service_type(hostname, HTTP)

    def has_smb(self, hostname=None):
        return self.has_service_type(hostname, SMB)

    def time_machine(self, hostname=None):
        return self.get_service_type(hostname, TIME_MACHINE)

    def check_present(self, device_info=True, http=True, smb=True, time_machine=True, hostname=None):
        """Assert exactly which service types are (not) being advertised."""
        assert self.has_device_info(hostname) == device_info, self.result[DEVICE_INFO]
        assert self.has_http(hostname) == http, self.result[HTTP]
        assert self.has_smb(hostname) == smb, self.result[SMB]
        assert self.has_time_machine(hostname) == time_machine, self.result[TIME_MACHINE]
# Pick the concrete collector implementation at import time based on the
# USE_AVAHI_BROWSE module flag.
if USE_AVAHI_BROWSE:
    class mDNSAnnounceCollector(abstractmDNSAnnounceCollector, AvahiBrowserCollector):
        def __init__(self, ip, tn_hostname):
            abstractmDNSAnnounceCollector.__init__(self, ip, tn_hostname)
            # avahi-browse can report either an IPv4 address or the
            # corresponding IPv6 address if configured on the same interface
            # So we will expand our inclusion check to encompass both.
            ipv6 = AvahiBrowserCollector.get_ipv6(self.ip)
            if ipv6:
                self.ips = [self.ip, ipv6]
            else:
                self.ips = [self.ip]
else:
    class mDNSAnnounceCollector(abstractmDNSAnnounceCollector, ZeroconfCollector):
        pass
@pytest.fixture(autouse=True, scope="module")
def setup_environment():
    """Keep the cifs service disabled and stopped for the whole module; the
    tests start/stop it explicitly where needed."""
    try:
        with ensure_service_disabled('cifs'):
            with ensure_service_stopped('cifs'):
                yield
    finally:
        pass
@pytest.mark.timeout(600)
@pytest.mark.dependency(name="servann_001")
def test_001_initial_config(request):
    """Ensure that the service announcement configuration is as expected."""
    global current_hostname
    network_config = call('network.configuration.config')
    sa = network_config['service_announcement']
    if ha:
        current_hostname = network_config['hostname_virtual']
    else:
        current_hostname = network_config['hostname']
    # At the moment we only care about mdns
    assert sa['mdns'] is True, sa

    # Let's restart avahi (in case we've updated middleware)
    call('service.restart', 'mdns')

    ac = mDNSAnnounceCollector(truenas_server.ip, current_hostname)
    ac.find_items()
    # SMB is not running yet, so neither SMB nor TimeMachine may be advertised
    ac.check_present(smb=False, time_machine=False)
# This test is broken by the use of avahi-browse as when it is
# called it re-activates the avahi-daemon by means of the
# avahi-daemon.socket.
# The DEV and HTTP service files have NOT been deleted upon
# a service stop, so this reactivation causes the test to
# fail.
# Since the test passes when run with zeroconf library on
# a suitably connected test-runner, no real need to chase.
@pytest.mark.timeout(600)
@skip_avahi_browse_tests
def test_002_mdns_disabled(request):
    """With mdns announcement disabled, nothing may be advertised at all."""
    depends(request, ["servann_001"], scope="session")
    ac = mDNSAnnounceCollector(truenas_server.ip, current_hostname)
    ac.clear_cache()
    ac.find_items({'mdns': False, 'wsd': True, 'netbios': False})
    ac.check_present(False, False, False, False)
# Setting a VERY long timeout as when this test is run in isolation
# on jenkins there can be many (20+) hostname clashes which means
# avahi can take a LONG time to settle down/start up.
#
# We could avoid by setting a unique hostname (as is done during a
# full test run), but it also seems worthwhile exercise to be able
# to test in such a unsuitable environment.
@pytest.mark.timeout(900)
def test_003_mdns_smb_share(request):
    """Perform some mDNS tests wrt SMB and ADISK services."""
    depends(request, ["servann_001"], scope="session")
    # SMB is not started originally
    ac = mDNSAnnounceCollector(truenas_server.ip, current_hostname)
    ac.find_items()
    ac.check_present(smb=False, time_machine=False)
    with dataset(dataset_name):
        with smb_share(SMB_PATH1, {'name': SMB_NAME1, 'comment': 'Test SMB Share'}):
            # SMB is still not started
            ac.find_items()
            ac.check_present(smb=False, time_machine=False)
            with ensure_service_started('cifs'):
                allow_settle()
                ac.find_items()
                # Plain (non time-machine) share: SMB record only.
                ac.check_present(time_machine=False)
        # OK, the SMB is stopped again, Ensure we don't advertise SMB anymore
        ac.clear_cache()
        ac.find_items()
        ac.check_present(smb=False, time_machine=False)
        # Now we're going to setup a time machine share
        with ensure_aapl_extensions():
            with ensure_service_started('cifs'):
                allow_settle()
                # Check mDNS before we have a time machine share
                ac.find_items()
                ac.check_present(time_machine=False)
                with smb_share(SMB_PATH1, {'name': SMB_NAME1,
                                           'comment': 'Basic TM SMB Share',
                                           'purpose': 'TIMEMACHINE'}) as shareID1:
                    allow_settle()
                    # Check mDNS now we have a time machine share
                    ac.find_items()
                    ac.check_present()
                    # Now read the share details and then check against what mDNS reported
                    share1 = call('sharing.smb.query', [['id', '=', shareID1]])[0]
                    tm = ac.time_machine()
                    props = _get_tm_props(tm, b'dk0')
                    assert props['adVN'] == SMB_NAME1, props
                    assert props['adVF'] == '0x82', props
                    assert props['adVU'] == share1['vuid'], props
                    # Now make another time machine share
                    with dataset(dataset_name2):
                        with smb_share(SMB_PATH2, {'name': SMB_NAME2,
                                                   'comment': 'Multiuser TM SMB Share',
                                                   'purpose': 'ENHANCED_TIMEMACHINE'}) as shareID2:
                            share2 = call('sharing.smb.query', [['id', '=', shareID2]])[0]
                            allow_settle()
                            ac.find_items()
                            ac.check_present()
                            tm = ac.time_machine()
                            # dk0/dk1: one ADISK record per advertised TM volume.
                            props0 = _get_tm_props(tm, b'dk0')
                            props1 = _get_tm_props(tm, b'dk1')
                            assert props0['adVF'] == '0x82', props0
                            assert props1['adVF'] == '0x82', props1
                            # Let's not make any assumption about which share is which
                            if props0['adVN'] == SMB_NAME1:
                                # SHARE 1 in props0
                                assert props0['adVU'] == share1['vuid'], props0
                                # SHARE 2 in props1
                                assert props1['adVN'] == SMB_NAME2, props1
                                assert props1['adVU'] == share2['vuid'], props1
                            else:
                                # SHARE 1 in props1
                                assert props1['adVN'] == SMB_NAME1, props1
                                assert props1['adVU'] == share1['vuid'], props1
                                # SHARE 2 in props0
                                assert props0['adVN'] == SMB_NAME2, props0
                                assert props0['adVU'] == share2['vuid'], props0
                    # Still have one TM share
                    allow_settle()
                    ac.find_items()
                    ac.check_present()
            # Check mDNS now we no longer have a time machine share
            ac.clear_cache()
            ac.find_items()
            ac.check_present(time_machine=False)
    # Finally check when SMB is stopped again
    ac.clear_cache()
    ac.find_items()
    ac.check_present(smb=False, time_machine=False)
# Reboot-based verification is expensive; only defined when enabled via flag.
if DO_MDNS_REBOOT_TEST:
    def test_004_reboot_with_mdns_smb_share(request):
        """Create a time-machine SMB and check that it is published
        following a reboot."""
        depends(request, ["servann_001"], scope="session")
        # First let's setup a time machine share
        with dataset(dataset_name):
            with smb_share(SMB_PATH1, {'name': SMB_NAME1,
                                       'comment': 'Basic TM SMB Share',
                                       'purpose': 'TIMEMACHINE'}):
                with ensure_service_enabled('cifs'):
                    # Next reboot and then check the expected services
                    # are advertised.
                    reboot(truenas_server.ip, 'cifs')
                    ac = mDNSAnnounceCollector(truenas_server.ip, current_hostname)
                    ac.find_items()
                    ac.check_present()
| 19,493 | Python | .py | 416 | 34.850962 | 117 | 0.582781 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,286 | test_job_result.py | truenas_middleware/tests/api2/test_job_result.py | from middlewared.test.integration.utils import call, mock
def test_job_result():
    # A job whose @returns schema is Password must have its stored result
    # redacted in core.get_jobs output unless raw_result is explicitly requested.
    with mock("test.test1", """
        from middlewared.service import job
        from middlewared.schema import returns, Password
        @job()
        @returns(Password("my_password"))
        def mock(self, job, *args):
            return "canary"
    """):
        job_id = call("test.test1")
        result = call("core.job_wait", job_id, job=True)
        # Waiting for result should give unredacted version
        assert result == "canary"
        # Querying by default should redact
        job = call("core.get_jobs", [["id", "=", job_id]], {"get": True})
        assert job["result"] != "canary"
        # but we should also be able to get unredacted result if needed
        job = call("core.get_jobs", [["id", "=", job_id]], {"get": True, "extra": {"raw_result": True}})
        assert job["result"] == "canary"
| 916 | Python | .py | 20 | 37.45 | 104 | 0.593926 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,287 | test_011_user.py | truenas_middleware/tests/api2/test_011_user.py | import contextlib
import dataclasses
import os
import time
import stat
import pytest
from pytest_dependency import depends
from truenas_api_client import ClientException
from middlewared.service_exception import ValidationErrors
from middlewared.test.integration.assets.account import user as user_asset
from middlewared.test.integration.assets.pool import dataset as dataset_asset
from middlewared.test.integration.utils import call, ssh
from functions import SSH_TEST, wait_on_job
from auto_config import pool_name, password, user
SHELL = '/usr/bin/bash'
VAR_EMPTY = '/var/empty'
ROOT_GROUP = 'root'
DEFAULT_HOMEDIR_OCTAL = 0o40700
SMB_CONFIGURED_SENTINEL = '/var/run/samba/.configured'
@dataclasses.dataclass
class HomeAssets:
    # Fixture data for home-directory tests.
    # Expected files (relative to the home dir) and their octal mode strings.
    HOME_FILES = {
        'depends_name': '',
        'files': {
            '~/': oct(DEFAULT_HOMEDIR_OCTAL),
            '~/.profile': '0o100644',
            '~/.ssh': '0o40700',
            '~/.ssh/authorized_keys': '0o100600',
        }
    }
    # Dataset that hosts user home directories plus the NFSv4 ACL applied
    # to it before the user is created (test_031 expects this ACL to be
    # stripped from the newly created home directory).
    Dataset01 = {
        'depends_name': 'HOME_DS_CREATED',
        'create_payload': {
            'name': f'{pool_name}/test_homes',
            'share_type': 'SMB',
            'acltype': 'NFSV4',
            'aclmode': 'RESTRICTED'
        },
        'home_acl': [
            {
                "tag": "owner@",
                "id": None,
                "type": "ALLOW",
                "perms": {"BASIC": "FULL_CONTROL"},
                "flags": {"BASIC": "INHERIT"}
            },
            {
                "tag": "group@",
                "id": None,
                "type": "ALLOW",
                "perms": {"BASIC": "FULL_CONTROL"},
                "flags": {"BASIC": "INHERIT"}
            },
            {
                "tag": "everyone@",
                "id": None,
                "type": "ALLOW",
                "perms": {"BASIC": "TRAVERSE"},
                "flags": {"BASIC": "NOINHERIT"}
            },
        ],
        # Directory name used when relocating a home dir in test_038/test_043.
        'new_home': 'new_home',
    }
@dataclasses.dataclass
class UserAssets:
    # Mutable fixture state shared across the ordered tests in this module:
    # 'create_payload' is what gets sent to user.create; 'query_response' and
    # 'get_user_obj_response' are filled in by the tests as they run.
    TestUser01 = {
        'depends_name': 'user_01',
        'query_response': dict(),
        'get_user_obj_response': dict(),
        'create_payload': {
            'username': 'testuser',
            'full_name': 'Test User',
            'group_create': True,
            'password': 'test1234',
            'uid': None,
            'smb': False,
            'shell': SHELL
        }
    }
    TestUser02 = {
        'depends_name': 'user_02',
        'query_response': dict(),
        'get_user_obj_response': dict(),
        'create_payload': {
            'username': 'testuser2',
            'full_name': 'Test User2',
            'group_create': True,
            'password': 'test1234',
            'uid': None,
            'shell': SHELL,
            'sshpubkey': 'canary',
            'home': f'/mnt/{HomeAssets.Dataset01["create_payload"]["name"]}',
            'home_mode': f'{stat.S_IMODE(DEFAULT_HOMEDIR_OCTAL):03o}',
            'home_create': True,
        },
        # Name of a file created in the homedir (test_036) and verified after
        # the home move (test_038).
        'filename': 'testfile_01',
    }
    ShareUser01 = {
        'depends_name': 'share_user_01',
        'query_response': dict(),
        # NOTE(review): key below is misspelled ('reasponse') but is never read
        # in this module; left unchanged in case external code references it.
        'get_user_obj_reasponse': dict(),
        'create_payload': {
            'username': 'shareuser',
            'full_name': 'Share User',
            'group_create': True,
            'groups': [],
            'password': 'testing',
            'uid': None,
            'shell': SHELL
        }
    }
def check_config_file(file_name, expected_line):
    """Assert that *expected_line* appears verbatim (as a whole line) in
    *file_name* on the remote test system."""
    ssh_result = SSH_TEST(f'cat {file_name}', user, password)
    assert ssh_result['result'], ssh_result['output']
    file_lines = ssh_result['stdout'].splitlines()
    assert expected_line in file_lines, ssh_result['output']
@contextlib.contextmanager
def create_user_with_dataset(ds_info, user_info):
    """Create a dataset and a user, yielding the user's queried entry.

    If user_info contains a 'path' key, the user's home is placed at that
    path inside the new dataset. Both user (and its primary group) and
    dataset are removed on exit.
    """
    with dataset_asset(ds_info['name'], ds_info.get('options', []), **ds_info.get('kwargs', {})) as ds:
        if 'path' in user_info:
            user_info['payload']['home'] = os.path.join(f'/mnt/{ds}', user_info['path'])
        user_id = None
        try:
            user_id = call('user.create', user_info['payload'])
            yield call('user.query', [['id', '=', user_id]], {'get': True})
        finally:
            # Only delete if user.create actually succeeded.
            if user_id is not None:
                call('user.delete', user_id, {"delete_group": True})
@pytest.mark.dependency(name=UserAssets.TestUser01['depends_name'])
def test_001_create_and_verify_testuser():
    """
    Test for basic user creation. In this case 'smb' is disabled to bypass
    passdb-related code. This is because the passdb add relies on users existing
    in passwd database, and errors during user creation will get masked as
    passdb errors.
    """
    UserAssets.TestUser01['create_payload']['uid'] = call('user.get_next_uid')
    call('user.create', UserAssets.TestUser01['create_payload'])
    username = UserAssets.TestUser01['create_payload']['username']
    qry = call(
        'user.query',
        [['username', '=', username]],
        {'get': True, 'extra': {'additional_information': ['SMB']}}
    )
    UserAssets.TestUser01['query_response'].update(qry)
    # verify basic info
    for key in ('username', 'full_name', 'shell'):
        assert qry[key] == UserAssets.TestUser01['create_payload'][key]
    # verify various /etc files were updated
    for f in (
        {
            'file': '/etc/shadow',
            'value': f'{username}:{qry["unixhash"]}:18397:0:99999:7:::'
        },
        {
            'file': '/etc/passwd',
            'value': f'{username}:x:{qry["uid"]}:{qry["group"]["bsdgrp_gid"]}:{qry["full_name"]}:{qry["home"]}:{qry["shell"]}'
        },
        {
            'file': '/etc/group',
            'value': f'{qry["group"]["bsdgrp_group"]}:x:{qry["group"]["bsdgrp_gid"]}:'
        }
    ):
        check_config_file(f['file'], f['value'])
    # verify password doesn't leak to middlewared.log
    # we do this inside the create and verify function
    # because this is severe enough problem that we should
    # just "fail" at this step so it sets off a bunch of
    # red flags in the CI
    results = SSH_TEST(
        f'grep -R {UserAssets.TestUser01["create_payload"]["password"]!r} /var/log/middlewared.log',
        user, password
    )
    assert results['result'] is False, str(results['output'])
    # non-smb users shouldn't show up in smb's passdb
    assert qry['sid'] is None
def test_002_verify_user_exists_in_pwd(request):
    """
    get_user_obj is a wrapper around the pwd module.
    This check verifies that the user is _actually_ created.
    """
    depends(request, [UserAssets.TestUser01['depends_name']])
    pw = call(
        'user.get_user_obj',
        {'username': UserAssets.TestUser01['create_payload']['username'], 'sid_info': True}
    )
    UserAssets.TestUser01['get_user_obj_response'].update(pw)
    # Verify pwd info
    assert pw['pw_uid'] == UserAssets.TestUser01['query_response']['uid']
    assert pw['pw_shell'] == UserAssets.TestUser01['query_response']['shell']
    assert pw['pw_gecos'] == UserAssets.TestUser01['query_response']['full_name']
    # No home directory was requested, so the placeholder /var/empty is used.
    assert pw['pw_dir'] == VAR_EMPTY
    # At this point, we're not an SMB user
    assert pw['sid'] is None
    assert pw['source'] == 'LOCAL'
    assert pw['local'] is True
def test_003_get_next_uid_again(request):
    """user.get_next_uid should never hand out a uid that is already in use."""
    depends(request, [UserAssets.TestUser01['depends_name']])
    already_allocated_uid = UserAssets.TestUser01['create_payload']['uid']
    next_uid = call('user.get_next_uid')
    assert next_uid != already_allocated_uid
def test_004_update_and_verify_user_groups(request):
    """Add the user to the root users group"""
    depends(request, [UserAssets.TestUser01['depends_name']])
    root_group_info = call(
        'group.query', [['group', '=', ROOT_GROUP]], {'get': True}
    )
    # user.update takes the group's database id, not its gid.
    call(
        'user.update',
        UserAssets.TestUser01['query_response']['id'],
        {'groups': [root_group_info['id']]}
    )
    # get_user_obj with get_groups reflects actual NSS group membership.
    grouplist = call(
        'user.get_user_obj',
        {'username': UserAssets.TestUser01['create_payload']['username'], 'get_groups': True}
    )['grouplist']
    assert root_group_info['gid'] in grouplist
@pytest.mark.dependency(name='SMB_CONVERT')
def test_005_convert_non_smbuser_to_smbuser(request):
    """Convert the non-SMB test user into an SMB user.

    SMB auth for local users relies on a stored NT hash. We only generate this
    hash for SMB users, so converting from non-SMB to SMB requires re-submitting
    the password. If the payload is submitted without a password, a validation
    error _must_ be raised.
    """
    depends(request, [UserAssets.TestUser01['depends_name']])
    with pytest.raises(ValidationErrors):
        # No password supplied: middleware cannot generate the NT hash.
        # (The explanation previously sat here as a bare string expression,
        # which was evaluated and discarded; it now lives in the docstring.)
        call('user.update', UserAssets.TestUser01['query_response']['id'], {'smb': True})
    rv = call(
        'user.update',
        UserAssets.TestUser01['query_response']['id'],
        {'smb': True, 'password': UserAssets.TestUser01['create_payload']['password']}
    )
    assert rv
    # TODO: why sleep here?
    time.sleep(2)
    # verify converted smb user doesn't leak password
    results = SSH_TEST(
        f'grep -R {UserAssets.TestUser01["create_payload"]["password"]!r} /var/log/middlewared.log',
        user, password
    )
    assert results['result'] is False, str(results['output'])
def test_006_verify_converted_smbuser_passdb_entry_exists(request):
    """
    At this point the non-SMB user has been converted to an SMB user. Verify
    that a passdb entry was appropriately generated.
    """
    depends(request, ['SMB_CONVERT', UserAssets.TestUser01['depends_name']])
    qry = call(
        'user.query',
        [['username', '=', UserAssets.TestUser01['create_payload']['username']]],
        {'get': True, 'extra': {'additional_information': ['SMB']}}
    )
    assert qry
    # A non-null SID indicates the passdb entry exists.
    assert qry['sid']
def test_007_add_smbuser_to_sudoers(request):
    """Verify sudo_commands / sudo_commands_nopasswd produce the expected
    /etc/sudoers entries (previously three copy-pasted update/check pairs)."""
    depends(request, ['SMB_CONVERT', UserAssets.TestUser01['depends_name']])
    username = UserAssets.TestUser01['create_payload']['username']
    # (sudo_commands, sudo_commands_nopasswd, expected sudoers line)
    cases = (
        (['ALL'], [], f"{username} ALL=(ALL) ALL"),
        ([], ['ALL'], f"{username} ALL=(ALL) NOPASSWD: ALL"),
        (['ALL'], ['ALL'], f"{username} ALL=(ALL) ALL, NOPASSWD: ALL"),
    )
    for sudo_commands, sudo_commands_nopasswd, expected in cases:
        call(
            'user.update',
            UserAssets.TestUser01['query_response']['id'],
            {'sudo_commands': sudo_commands, 'sudo_commands_nopasswd': sudo_commands_nopasswd}
        )
        check_config_file('/etc/sudoers', expected)
def test_008_disable_smb_and_password(request):
    """Disable password auth and SMB; shadow entry should show a locked password."""
    depends(request, ['SMB_CONVERT', UserAssets.TestUser01['depends_name']])
    username = UserAssets.TestUser01['create_payload']['username']
    call(
        'user.update',
        UserAssets.TestUser01['query_response']['id'],
        {'password_disabled': True, 'smb': False}
    )
    # '*' in the password field means password authentication is disabled.
    check_config_file('/etc/shadow', f'{username}:*:18397:0:99999:7:::')
@pytest.mark.parametrize('username', [UserAssets.TestUser01['create_payload']['username']])
def test_009_delete_user(username, request):
    """Delete the first test user (and its primary group) and verify removal."""
    depends(request, ['SMB_CONVERT', UserAssets.TestUser01['depends_name']])
    # delete the user first
    call(
        'user.delete',
        UserAssets.TestUser01['query_response']['id'],
        {'delete_group': True}
    )
    assert not call(
        'user.query',
        [['username', '=', UserAssets.TestUser01['query_response']['username']]]
    )
# FIXME: why is this being called here randomly in the middle of this test? And why are we using REST?
# def test_25_has_local_administrator_set_up(request):
# depends(request, ["user_02", "user_01"])
# assert GET('/user/has_local_administrator_set_up/', anonymous=True).json() is True
@pytest.mark.dependency(name=UserAssets.ShareUser01['depends_name'])
def test_020_create_and_verify_shareuser():
    """Create the share user (member of the root group) used by later tests."""
    UserAssets.ShareUser01['create_payload']['uid'] = call('user.get_next_uid')
    UserAssets.ShareUser01['create_payload']['groups'].append(
        call('group.query', [['group', '=', ROOT_GROUP]], {'get': True})['id']
    )
    call('user.create', UserAssets.ShareUser01['create_payload'])
    qry = call('user.query', [['username', '=', UserAssets.ShareUser01['create_payload']['username']]], {'get': True})
    UserAssets.ShareUser01['query_response'].update(qry)
    # verify basic info
    for key in ('username', 'full_name', 'shell'):
        assert qry[key] == UserAssets.ShareUser01['create_payload'][key]
    # verify password doesn't leak to middlewared.log
    # we do this inside the create and verify function
    # because this is severe enough problem that we should
    # just "fail" at this step so it sets off a bunch of
    # red flags in the CI
    results = SSH_TEST(
        f'grep -R {UserAssets.ShareUser01["create_payload"]["password"]!r} /var/log/middlewared.log',
        user, password
    )
    assert results['result'] is False, str(results['output'])
@pytest.mark.dependency(name=UserAssets.TestUser02['depends_name'])
def test_031_create_user_with_homedir(request):
    """Create a zfs dataset to be used as a home directory for a
    local user. The user's SMB share_type is selected for this test
    so that we verify that ACL is being stripped properly from the
    newly-created home directory."""
    # create the dataset
    call('pool.dataset.create', HomeAssets.Dataset01['create_payload'])
    call('filesystem.setacl', {
        'path': os.path.join('/mnt', HomeAssets.Dataset01['create_payload']['name']),
        'dacl': HomeAssets.Dataset01['home_acl']
    }, job=True)
    # now create the user
    UserAssets.TestUser02['create_payload']['uid'] = call('user.get_next_uid')
    call('user.create', UserAssets.TestUser02['create_payload'])
    qry = call(
        'user.query',
        [['username', '=', UserAssets.TestUser02['create_payload']['username']]],
        {'get': True, 'extra': {'additional_information': ['SMB']}}
    )
    UserAssets.TestUser02['query_response'].update(qry)
    # verify basic info
    for key in ('username', 'full_name', 'shell'):
        assert qry[key] == UserAssets.TestUser02['create_payload'][key]
    # verify password doesn't leak to middlewared.log
    # we do this here because this is severe enough
    # problem that we should just "fail" at this step
    # so it sets off a bunch of red flags in the CI
    results = SSH_TEST(
        f'grep -R {UserAssets.TestUser02["create_payload"]["password"]!r} /var/log/middlewared.log',
        user, password
    )
    assert results['result'] is False, str(results['output'])
    pw = call(
        'user.get_user_obj',
        {'username': UserAssets.TestUser02['create_payload']['username'], 'sid_info': True}
    )
    UserAssets.TestUser02['get_user_obj_response'].update(pw)
    # verify pwd
    # home_create=True places the homedir at <home>/<username>.
    assert pw['pw_dir'] == os.path.join(
        UserAssets.TestUser02['create_payload']['home'], UserAssets.TestUser02['create_payload']['username']
    )
    assert pw['pw_name'] == UserAssets.TestUser02['query_response']['username']
    assert pw['pw_uid'] == UserAssets.TestUser02['query_response']['uid']
    assert pw['pw_shell'] == UserAssets.TestUser02['query_response']['shell']
    assert pw['pw_gecos'] == UserAssets.TestUser02['query_response']['full_name']
    assert pw['sid'] is not None
    assert pw['source'] == 'LOCAL'
    assert pw['local'] is True
    # verify smb user passdb entry
    assert qry['sid']
    # verify homedir acl is stripped
    st_info = call('filesystem.stat', UserAssets.TestUser02['query_response']['home'])
    assert st_info['acl'] is False
def test_035_check_file_perms_in_homedir(request):
    """Verify mode and ownership of the skeleton files in the new home dir."""
    depends(request, [UserAssets.TestUser02['depends_name']])
    home_path = UserAssets.TestUser02['query_response']['home']
    for file, mode in HomeAssets.HOME_FILES['files'].items():
        st_info = call('filesystem.stat', os.path.join(home_path, file.removeprefix('~/')))
        assert oct(st_info['mode']) == mode, f"{file}: {st_info}"
        assert st_info['uid'] == UserAssets.TestUser02['query_response']['uid']
def test_036_create_testfile_in_homedir(request):
    """Create a file in the user's home directory.

    test_038 later asserts that a file named UserAssets.TestUser02['filename']
    was carried over by the home move, so the file must be created under that
    name. (Bug fix: the path previously hard-coded '(unknown)' and left the
    ``filename`` variable unused.)
    """
    depends(request, [UserAssets.TestUser02['depends_name']])
    filename = UserAssets.TestUser02['filename']
    filepath = f'{UserAssets.TestUser02["query_response"]["home"]}/{filename}'
    # NOTE(review): the file is chowned to TestUser01's uid — presumably to
    # verify foreign-owned files survive the home move; confirm intent.
    results = SSH_TEST(
        f'touch {filepath}; chown {UserAssets.TestUser01["query_response"]["uid"]} {filepath}',
        user, password
    )
    assert results['result'] is True, results['output']
    assert call('filesystem.stat', filepath)
@pytest.mark.dependency(name="HOMEDIR2_EXISTS")
def test_037_move_homedir_to_new_directory(request):
    """Move the user's home twice and verify the background copy job succeeds."""
    depends(request, [UserAssets.TestUser02['depends_name']])
    # Validation of autocreation of homedir during path update
    with dataset_asset('temp_dataset_for_home') as ds:
        new_home = os.path.join('/mnt', ds)
        call(
            'user.update',
            UserAssets.TestUser02['query_response']['id'],
            {'home': new_home, 'home_create': True}
        )
        # The home move is performed asynchronously by a user.do_home_copy job;
        # grab the most recent one and wait for it.
        filters = [['method', '=', 'user.do_home_copy']]
        opts = {'get': True, 'order_by': ['-id']}
        move_job_timeout = 300 # 5 mins
        move_job1 = call('core.get_jobs', filters, opts)
        assert move_job1
        rv = wait_on_job(move_job1['id'], move_job_timeout)
        assert rv['state'] == 'SUCCESS', f'JOB: {move_job1!r}, RESULT: {str(rv["results"])}'
        st_info = call('filesystem.stat', os.path.join(new_home, UserAssets.TestUser02['create_payload']['username']))
        assert st_info['uid'] == UserAssets.TestUser02['query_response']['uid']
        # now kick the can down the road to the root of our pool
        new_home = os.path.join('/mnt', pool_name)
        call(
            'user.update',
            UserAssets.TestUser02['query_response']['id'],
            {'home': new_home, 'home_create': True}
        )
        move_job2 = call('core.get_jobs', filters, opts)
        assert move_job2
        # A distinct job id proves a second copy job was actually spawned.
        assert move_job1['id'] != move_job2['id']
        rv = wait_on_job(move_job2['id'], move_job_timeout)
        assert rv['state'] == 'SUCCESS', f'JOB: {move_job2!r}, RESULT: {str(rv["results"])}'
        st_info = call('filesystem.stat', os.path.join(new_home, UserAssets.TestUser02['create_payload']['username']))
        assert st_info['uid'] == UserAssets.TestUser02['query_response']['uid']
def test_038_change_homedir_to_existing_path(request):
    """Point the user's home at a pre-existing directory and verify the copy."""
    depends(request, [UserAssets.ShareUser01['depends_name'], UserAssets.TestUser01['depends_name']])
    # Manually create a new home dir
    new_home = os.path.join(
        '/mnt',
        HomeAssets.Dataset01['create_payload']['name'],
        HomeAssets.Dataset01['new_home']
    )
    results = SSH_TEST(f'mkdir {new_home}', user, password)
    assert results['result'] is True, results['output']
    # Move the homedir to existing dir
    call(
        'user.update',
        UserAssets.TestUser02['query_response']['id'],
        {'home': new_home}
    )
    filters = [['method', '=', 'user.do_home_copy']]
    opts = {'get': True, 'order_by': ['-id']}
    move_job_timeout = 300 # 5 mins
    home_move_job = call('core.get_jobs', filters, opts)
    rv = wait_on_job(home_move_job['id'], move_job_timeout)
    assert rv['state'] == 'SUCCESS', str(rv['results'])
    # verify files in the homedir that were moved are what we expect
    for file, mode in HomeAssets.HOME_FILES['files'].items():
        st_info = call('filesystem.stat', os.path.join(new_home, file.removeprefix("~/")))
        assert oct(st_info['mode']) == mode, f"{file}: {st_info}"
        assert st_info['uid'] == UserAssets.TestUser02['query_response']['uid']
    # verify the specific file that existed in the previous homedir location was moved over
    # NOTE: this file was created in test_036
    assert call('filesystem.stat', os.path.join(new_home, UserAssets.TestUser02['filename']))
def test_041_lock_smb_user(request):
    """Lock the SMB user: shadow shows a locked password and passdb sets
    the auto-lock ACB flag. (Removed a duplicated ``username`` assignment.)"""
    depends(request, [UserAssets.TestUser02['depends_name']], scope='session')
    assert call('user.update', UserAssets.TestUser02['query_response']['id'], {'locked': True})
    username = UserAssets.TestUser02['create_payload']['username']
    # '!' in the password field marks the account as locked.
    check_config_file('/etc/shadow', f'{username}:!:18397:0:99999:7:::')
    my_entry = call('smb.passdb_list', [['username', '=', username]], {'get': True})
    assert my_entry['acct_ctrl'] & 0x00000400, str(my_entry) # 0x00000400 is AUTO_LOCKED in MS-SAMR
def test_042_disable_smb_user(request):
    """Turning off 'smb' must drop the user's passdb entry (null SID)."""
    depends(request, [UserAssets.TestUser02['depends_name']], scope='session')
    assert call('user.update', UserAssets.TestUser02['query_response']['id'], {'smb': False})
    qry = call(
        'user.query',
        [['username', '=', UserAssets.TestUser02['create_payload']['username']]],
        {'get': True, 'extra': {'additional_information': ['SMB']}}
    )
    assert qry
    assert qry['sid'] is None
def test_043_raise_validation_error_on_homedir_collision(request):
    """
    Verify that validation error is raised if homedir collides with existing one.
    """
    depends(request, ['HOMEDIR2_EXISTS', UserAssets.TestUser02['depends_name']], scope='session')
    # NOTE: this was used in test_038
    existing_home = os.path.join(
        '/mnt',
        HomeAssets.Dataset01['create_payload']['name'],
        HomeAssets.Dataset01['new_home']
    )
    # Assigning another user the same home path must be rejected.
    with pytest.raises(ValidationErrors):
        call(
            'user.update',
            UserAssets.ShareUser01['query_response']['id'],
            {'home': existing_home}
        )
@pytest.mark.parametrize('username', [UserAssets.TestUser02['create_payload']['username']])
def test_046_delete_homedir_user(username, request):
    """Remove the homedir test user, then the dataset that hosted its home."""
    depends(request, [UserAssets.TestUser02['depends_name']], scope='session')
    # delete user first
    assert call(
        'user.delete',
        UserAssets.TestUser02['query_response']['id']
    )
    # now clean-up dataset that was used as homedir
    assert call(
        'pool.dataset.delete',
        UserAssets.TestUser02['create_payload']['home'].removeprefix('/mnt/')
    )
def test_050_verify_no_builtin_smb_users(request):
    """
    We have builtin SMB groups, but should have no builtin users.
    A failure here may point at a problem in the middleware's builtin-user
    synchronization code; missing it could accidentally grant SMB access
    to builtin accounts.
    """
    builtin_smb_filters = [['builtin', '=', True], ['smb', '=', True]]
    builtin_smb_count = call('user.query', builtin_smb_filters, {'count': True})
    assert builtin_smb_count == 0
def test_058_create_new_user_knownfails(request):
    """
    Specifying an existing path without home_create should succeed and set
    mode to the desired value; sharing or nesting home paths must fail.
    """
    ds = {'pool': pool_name, 'name': 'user_test_exising_home_path'}
    user_info = {
        'username': 't1',
        "full_name": 'T1',
        'group_create': True,
        'password': 'test1234',
        'home_mode': '770'
    }
    # Renamed the context variable from `user` to `home_user`: the old name
    # shadowed the module-level `user` (SSH credential) import.
    with create_user_with_dataset(ds, {'payload': user_info, 'path': ''}) as home_user:
        results = call('filesystem.stat', home_user['home'])
        assert results['acl'] is False
        assert f'{stat.S_IMODE(results["mode"]):03o}' == '770'
        user2 = {
            'username': 't2',
            'full_name': 't2',
            'group_create': True,
            'password': 'test1234',
            'home': home_user['home']
        }
        with pytest.raises(ValidationErrors):
            # no two users may share the same home path
            call('user.create', user2)
        with pytest.raises(ValidationErrors):
            # a homedir nested inside an existing homedir must also
            # raise a validation error
            user2.update({'home_create': True})
            call('user.create', user2)
        with pytest.raises(ValidationErrors):
            # creating a user with a non-existing home path must fail
            user2.update({'home': os.path.join(user2['home'], 'canary')})
            call('user.create', user2)
def test_059_create_user_ro_dataset(request):
    """Creating a home directory on a read-only dataset must be rejected."""
    with dataset_asset('ro_user_ds', {'readonly': 'ON'}) as ds:
        with pytest.raises(ValidationErrors):
            call('user.create', {
                'username': 't1',
                'full_name': 'T1',
                'group_create': True,
                'password': 'test1234',
                'home_mode': '770',
                'home_create': True,
                'home': f'/mnt/{ds}'
            })
def test_060_immutable_user_validation(request):
    """Updates to immutable (builtin) accounts must raise a validation error."""
    # the `news` user is immutable
    immutable_id = call('user.query', [['username', '=', 'news']], {'get': True})['id']
    # Each payload touches a field that must be rejected for immutable users.
    to_validate = [
        {'group': 1},
        {'home': '/mnt/tank', 'home_create': True},
        {'smb': True},
        {'username': 'no_way_bad'},
    ]
    for i in to_validate:
        with pytest.raises(ValidationErrors) as ve:
            call('user.update', immutable_id, i)
        assert ve.value.errors[0].errmsg == 'This attribute cannot be changed'
@contextlib.contextmanager
def toggle_smb_configured():
    """Temporarily put SMB into an 'unconfigured' state.

    Removes the samba configured-sentinel file and verifies middleware sees
    SMB as unconfigured; smb.set_configured is always restored on exit.
    """
    ssh(f'rm {SMB_CONFIGURED_SENTINEL}')
    assert call('smb.is_configured') is False
    try:
        yield
    finally:
        call('smb.set_configured')
def test_099_cleanup_share_user():
    """Remove the share user created by this module.

    A later test asserts that no SMB accounts exist, so this account must not
    be left behind.
    """
    share_user_id = UserAssets.ShareUser01['query_response']['id']
    call('user.delete', share_user_id)
| 25,965 | Python | .py | 594 | 36.33165 | 126 | 0.62269 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,288 | test_crud.py | truenas_middleware/tests/api2/test_crud.py | import contextlib
import pytest
from middlewared.test.integration.assets.privilege import privilege
from middlewared.test.integration.utils import client
@pytest.mark.parametrize('offset,limit', [
    (0, 4),
    (1, 4),
    (2, 4),
    (3, 4),
    (2, 5),
    (3, 5),
])
def test_query_filters(request, offset, limit):
    """CRUD query pagination: offset/limit must match plain list slicing."""
    with contextlib.ExitStack() as stack:
        # Create five privileges so there is something to paginate over.
        for i in range(5):
            stack.enter_context(
                privilege({
                    'name': f'Test Privilege {i}',
                    'web_shell': False
                })
            )
        with client() as c:
            query_results = c.call('privilege.query', [], {'select': ['id']})
            expected_result = query_results[offset:offset + limit]
            actual_result = c.call('privilege.query', [], {'offset': offset, 'limit': limit, 'select': ['id']})
            assert actual_result == expected_result
| 904 | Python | .py | 26 | 26.423077 | 111 | 0.568 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,289 | test_audit_permission.py | truenas_middleware/tests/api2/test_audit_permission.py | import os
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call
from middlewared.test.integration.utils.audit import expect_audit_method_calls
JENNY = 8675309
def test_audit_chown():
    """filesystem.chown calls must be recorded in the audit trail."""
    with dataset('audit_chown') as ds:
        target = os.path.join('/mnt', ds)
        chown_args = {'path': target, 'uid': JENNY}
        expected_entry = {
            'method': 'filesystem.chown',
            'params': [chown_args],
            'description': f'Filesystem change owner {target}'
        }
        with expect_audit_method_calls([expected_entry]):
            call('filesystem.chown', chown_args, job=True)
def test_audit_setperm():
    """filesystem.setperm calls must be recorded in the audit trail."""
    with dataset('audit_setperm') as ds:
        mountpoint = os.path.join('/mnt', ds)
        setperm_args = {'path': mountpoint, 'mode': '777'}
        expected_entry = {
            'method': 'filesystem.setperm',
            'params': [setperm_args],
            'description': f'Filesystem set permission {mountpoint}'
        }
        with expect_audit_method_calls([expected_entry]):
            call('filesystem.setperm', setperm_args, job=True)
def test_audit_setacl():
    """filesystem.setacl calls must be recorded in the audit trail."""
    with dataset('audit_setacl', {'share_type': 'SMB'}) as ds:
        path = os.path.join('/mnt', ds)
        # Start from the existing ACL and grant one extra user full control.
        the_acl = call('filesystem.getacl', os.path.join('/mnt', ds))['acl']
        the_acl.append({
            'tag': 'USER',
            'id': JENNY,
            'perms': {'BASIC': 'FULL_CONTROL'},
            'flags': {'BASIC': 'INHERIT'},
            'type': 'ALLOW'
        })
        payload = {'path': path, 'dacl': the_acl}
        with expect_audit_method_calls([{
            'method': 'filesystem.setacl',
            'params': [payload],
            'description': f'Filesystem set ACL {path}'
        }]):
            call('filesystem.setacl', payload, job=True)
| 1,741 | Python | .py | 43 | 31.395349 | 78 | 0.573547 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,290 | test_auth_token.py | truenas_middleware/tests/api2/test_auth_token.py | import io
import json
import pytest
import requests
from middlewared.test.integration.assets.account import unprivileged_user as unprivileged_user_template
from middlewared.test.integration.utils import call, client, ssh
from middlewared.test.integration.utils.client import truenas_server
from middlewared.test.integration.utils.shell import assert_shell_works
@pytest.fixture(scope="module")
def download_token():
    # Token scoped to downloading a specific job's debug file.
    # NOTE(review): trailing positional args are presumably (attrs, match_origin)
    # — confirm against auth.generate_token's signature.
    return call("auth.generate_token", 300, {"filename": "debug.txz", "job": 1020}, True)
def test_download_auth_token_cannot_be_used_for_upload(download_token):
    """A download-scoped token must not authorize the upload endpoint (403)."""
    r = requests.post(
        f"http://{truenas_server.ip}/_upload",
        headers={"Authorization": f"Token {download_token}"},
        data={
            "data": json.dumps({
                "method": "filesystem.put",
                "params": ["/tmp/upload"],
            })
        },
        files={
            "file": io.BytesIO(b"test"),
        },
        timeout=10
    )
    assert r.status_code == 403
def test_download_auth_token_cannot_be_used_for_websocket_auth(download_token):
    """A download-scoped token must be rejected for websocket session auth."""
    with client(auth=None) as anon:
        login_ok = anon.call("auth.login_with_token", download_token)
        assert not login_ok
@pytest.mark.timeout(30)
def test_token_created_by_token_can_use_shell():
    # A session authenticated via token must itself be able to mint a token
    # that works for the web shell.
    with client() as c:
        token = c.call("auth.generate_token", 300, {}, True)
    with client(auth=None) as c2:
        assert c2.call("auth.login_with_token", token)
        token2 = c2.call("auth.generate_token", 300, {}, True)
    assert_shell_works(token2, "root")
@pytest.fixture(scope="module")
def unprivileged_user():
    # Local user limited to calling system.info; used to generate tokens
    # from a non-root session in the match-origin tests below.
    with unprivileged_user_template(
        username="test",
        group_name="test",
        privilege_name="test",
        allowlist=[{"method": "CALL", "resource": "system.info"}],
        web_shell=True,
    ):
        yield
def test_login_with_token_match_origin(unprivileged_user):
    # Token minted over a localhost connection with match_origin=true:
    # using it from the test runner (a different origin) must be refused.
    token = ssh(
        "sudo -u test midclt -u ws://localhost/api/current -U test -P test1234 call auth.generate_token 300 '{}' true"
    ).strip()
    with client(auth=None) as c:
        assert not c.call("auth.login_with_token", token)
def test_login_with_token_no_match_origin(unprivileged_user):
    # Without match_origin the token is usable from any origin, so logging
    # in from the test runner must succeed.
    token = ssh(
        "sudo -u test midclt -u ws://localhost/api/current -U test -P test1234 call auth.generate_token 300"
    ).strip()
    with client(auth=None) as c:
        assert c.call("auth.login_with_token", token)
def test_token_is_for_one_time_use():
    """A token may authenticate a session exactly once."""
    token = call("auth.generate_token", 300)
    with client(auth=None) as first_session:
        assert first_session.call("auth.login_with_token", token)
    with client(auth=None) as second_session:
        # A second use of the same token must be refused.
        assert not second_session.call("auth.login_with_token", token)
def test_kill_all_tokens_on_session_termination():
    """Terminating a session must invalidate every token that session generated."""
    login_token = call("auth.generate_token", 300)
    with client(auth=None) as session_client:
        assert session_client.call("auth.login_with_token", login_token)
        # Token created *by* the token-authenticated session.
        session_token = session_client.call("auth.generate_token")
        current_session = session_client.call(
            "auth.sessions", [["current", "=", True]], {"get": True}
        )
    # Terminate the session out-of-band via the root connection.
    call("auth.terminate_session", current_session["id"])
    with client(auth=None) as anon:
        assert not anon.call("auth.login_with_token", session_token)
| 3,180 | Python | .py | 75 | 35.56 | 118 | 0.653984 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,291 | test_groupmap_migrate_share.py | truenas_middleware/tests/api2/test_groupmap_migrate_share.py | import os
import pytest
import json
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.assets.smb import smb_share
from middlewared.test.integration.utils import call, ssh
SMB_NAME = 'groupmap_migrate'
RO_ADMINS = 'truenas_readonly_administrators'
@pytest.fixture(scope='module')
def do_setup():
    """Create an SMB share whose share ACL grants READ to the read-only admins group."""
    with dataset('groupmap-migrate', data={'share_type': 'SMB'}) as ds:
        with smb_share(os.path.join('/mnt', ds), SMB_NAME) as share:
            ro_group = call('group.query', [['group', '=', RO_ADMINS]], {'get': True})
            share_acl = call('sharing.smb.setacl', {
                'share_name': SMB_NAME,
                'share_acl': [{
                    'ae_who_id': {'id_type': 'GROUP', 'id': ro_group['gid']},
                    'ae_perm': 'READ',
                    'ae_type': 'ALLOWED',
                }],
            })
            yield {'dataset': ds, 'share': share, 'acl': share_acl, 'group': ro_group}
def test_groupmap_migrate(do_setup):
    """Verify share ACL SIDs are auto-migrated when the groupmap SID changes.

    Deleting and re-adding the groupmap entry allocates a new SID; dumping the
    groupmap via smb.groupmap_list must detect the inconsistency and migrate
    the share ACL back to the group's canonical SID.
    """
    assert do_setup['acl']['share_name'] == SMB_NAME
    assert do_setup['acl']['share_acl'][0]['ae_perm'] == 'READ'
    assert do_setup['acl']['share_acl'][0]['ae_who_sid'] == do_setup['group']['sid']

    # first delete existing groupmap
    ssh(f'net groupmap delete ntgroup={RO_ADMINS}')

    # Adding it back will force auto-allocation from low RID range
    ssh(f'net groupmap add ntgroup={RO_ADMINS} unixgroup={RO_ADMINS}')

    groupmap = json.loads(ssh('net groupmap list --json'))
    sid = None
    for entry in groupmap['groupmap']:
        if entry['gid'] == do_setup['group']['gid']:
            sid = entry['sid']
            break

    # Guard against a silent false positive: without this, a missing groupmap
    # entry would leave `sid` as None and the inequality below would pass
    # vacuously, masking the setup failure.
    assert sid is not None, groupmap

    # Make sure we have an actually different sid in the groupmap
    assert sid != do_setup['group']['sid']

    # first update ACL to have mapping to new sid
    call('smb.sharesec.setacl', {'share_name': SMB_NAME, 'share_acl': [{
        'ae_who_sid': sid,
        'ae_perm': 'READ',
        'ae_type': 'ALLOWED'
    }]})

    # make sure it's actually set
    new_acl = call('smb.sharesec.getacl', SMB_NAME)
    assert new_acl['share_acl'][0]['ae_who_sid'] == sid

    # We catch inconsistency when dumping groupmap and auto-migrate at that time
    call('smb.groupmap_list')
    new_acl = call('smb.sharesec.getacl', SMB_NAME)
    assert new_acl['share_acl'][0]['ae_who_sid'] == do_setup['group']['sid']
| 2,335 | Python | .py | 51 | 38.45098 | 84 | 0.609079 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,292 | test_pool_is_upgraded_alert_removal.py | truenas_middleware/tests/api2/test_pool_is_upgraded_alert_removal.py | import contextlib
import time
from middlewared.test.integration.assets.pool import another_pool
from middlewared.test.integration.utils import call, ssh
def assert_has_outdated_pool_alert(pool_name, has):
    """Poll for up to 60s until the presence of a PoolUpgraded alert for
    ``pool_name`` matches ``has``; fail the test with the last alert list."""
    for _ in range(60):
        alerts = call("alert.list")
        present = any(
            alert["klass"] == "PoolUpgraded" and alert["args"] == pool_name
            for alert in alerts
        )
        if present == has:
            return
        time.sleep(1)
    assert False, alerts
@contextlib.contextmanager
def outdated_pool():
    """Yield a pool whose on-disk feature flags are outdated.

    The pool from ``another_pool`` is exported, then a pool is re-created on the
    same device with ``feature@sha512`` disabled, which makes TrueNAS raise a
    PoolUpgraded alert. NOTE(review): the re-created pool is hard-coded as
    'test', presumably matching the name ``another_pool`` used — confirm.
    """
    with another_pool() as pool:
        device = pool["topology"]["data"][0]["path"]
        ssh(f"zpool export {pool['name']}")
        ssh(f"zpool create test -o altroot=/mnt -o feature@sha512=disabled -f {device}")
        assert_has_outdated_pool_alert(pool["name"], True)
        yield pool
def test_outdated_pool_alert_removed_on_pool_upgrade():
    """Upgrading the pool's feature flags must clear its PoolUpgraded alert."""
    with outdated_pool() as pool:
        pool_id, pool_name = pool["id"], pool["name"]
        call("pool.upgrade", pool_id)
        assert_has_outdated_pool_alert(pool_name, False)
def test_outdated_pool_alert_removed_on_pool_delete():
    """Destroying the pool (on context exit) must clear its PoolUpgraded alert."""
    with outdated_pool() as pool:
        pool_name = pool["name"]
    assert_has_outdated_pool_alert(pool_name, False)
| 1,148 | Python | .py | 28 | 34.857143 | 97 | 0.670578 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,293 | test_cloud_sync_storj.py | truenas_middleware/tests/api2/test_cloud_sync_storj.py | import pytest
from config import (
STORJ_IX_AWS_ACCESS_KEY_ID,
STORJ_IX_AWS_SECRET_ACCESS_KEY,
STORJ_IX_BUCKET,
)
from middlewared.test.integration.utils import call, ssh
from middlewared.test.integration.assets.cloud_sync import credential, task, run_task
from middlewared.test.integration.assets.pool import dataset
CREDENTIAL = {
"provider": "STORJ_IX",
"attributes": {
"access_key_id": STORJ_IX_AWS_ACCESS_KEY_ID,
"secret_access_key": STORJ_IX_AWS_SECRET_ACCESS_KEY,
}
}
TASK_ATTRIBUTES = {
"bucket": STORJ_IX_BUCKET,
"folder": "",
}
FILENAME = "a"
def test_storj_verify():
    """The configured Storj credential pair must pass middleware verification."""
    # Reuse the module-level CREDENTIAL definition instead of duplicating the
    # same provider/attributes dict inline.
    result = call("cloudsync.credentials.verify", CREDENTIAL)
    assert result["valid"], result
@pytest.fixture(scope="module")
def storj_credential():
    """Yield a persisted Storj cloud credential, removed when the module finishes."""
    with credential(CREDENTIAL) as cred:
        yield cred
def test_storj_list_buckets(storj_credential):
    """The configured bucket must appear in the remote bucket listing."""
    buckets = call("cloudsync.list_buckets", storj_credential["id"])
    bucket_names = {item["Name"] for item in buckets}
    assert STORJ_IX_BUCKET in bucket_names
@pytest.fixture(scope="module")
def storj_sync(storj_credential):
    """Reset the remote bucket to only contain a single empty file."""
    with dataset("test_storj_sync") as ds:
        ssh(f"touch /mnt/{ds}/{FILENAME}")
        push_task = {
            "direction": "PUSH",
            "transfer_mode": "SYNC",
            "path": f"/mnt/{ds}",
            "credentials": storj_credential["id"],
            "attributes": TASK_ATTRIBUTES,
        }
        # SYNC mode makes the bucket mirror the local dataset exactly.
        with task(push_task) as sync_task:
            run_task(sync_task)
def test_storj_list_directory(storj_credential, storj_sync):
    """After the sync fixture runs, the bucket root must contain exactly FILENAME."""
    listing = call("cloudsync.list_directory", {
        "credentials": storj_credential["id"],
        "attributes": TASK_ATTRIBUTES,
    })
    assert [entry["Name"] for entry in listing] == [FILENAME]
def test_storj_pull(storj_credential, storj_sync):
    """Pulling from the prepared bucket must copy exactly the single file."""
    with dataset("test_storj_sync") as ds:
        pull_task = {
            "direction": "PULL",
            "transfer_mode": "COPY",
            "path": f"/mnt/{ds}",
            "credentials": storj_credential["id"],
            "attributes": TASK_ATTRIBUTES,
        }
        with task(pull_task) as copy_task:
            run_task(copy_task)
        assert ssh(f"ls /mnt/{ds}") == FILENAME + "\n"
| 2,362 | Python | .py | 67 | 28.567164 | 114 | 0.623354 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,294 | test_account.py | truenas_middleware/tests/api2/test_account.py | from middlewared.test.integration.assets.account import user, group
from middlewared.test.integration.utils import call, client
from middlewared.test.integration.utils.audit import expect_audit_method_calls
def test_create_account_audit():
    """user.create must emit an audit entry with the password redacted."""
    # Single source of truth for the payload: the audit record must match it
    # exactly, except the password, which the middleware redacts.
    payload = {
        "username": "sergey",
        "full_name": "Sergey",
        "group_create": True,
        "home": "/nonexistent",
        "password": "password",
    }
    user_id = None
    try:
        with expect_audit_method_calls([{
            "method": "user.create",
            "params": [{**payload, "password": "********"}],
            "description": "Create user sergey",
        }]):
            user_id = call("user.create", payload)
    finally:
        # Clean up the user even if the audit expectation fails.
        if user_id is not None:
            call("user.delete", user_id)
def test_update_account_audit():
    """user.update must emit an audit entry naming the updated user."""
    user_spec = {
        "username": "user2",
        "full_name": "user2",
        "group_create": True,
        "password": "test1234",
    }
    with user(user_spec) as created:
        expected_call = {
            "method": "user.update",
            "params": [created["id"], {}],
            "description": "Update user user2",
        }
        with expect_audit_method_calls([expected_call]):
            call("user.update", created["id"], {})
def test_delete_account_audit():
    """user.delete must emit an audit entry naming the deleted user."""
    user_spec = {
        "username": "user2",
        "full_name": "user2",
        "group_create": True,
        "password": "test1234",
    }
    with user(user_spec) as created:
        expected_call = {
            "method": "user.delete",
            "params": [created["id"], {}],
            "description": "Delete user user2",
        }
        with expect_audit_method_calls([expected_call]):
            call("user.delete", created["id"], {})
def test_create_group_audit():
    """group.create must emit an audit entry describing the new group."""
    # Single source of truth for the payload used both in the audit
    # expectation and in the actual call.
    payload = {
        "name": "group2",
    }
    group_id = None
    try:
        with expect_audit_method_calls([{
            "method": "group.create",
            "params": [{**payload}],
            "description": "Create group group2",
        }]):
            group_id = call("group.create", payload)
    finally:
        # Clean up the group even if the audit expectation fails.
        if group_id is not None:
            call("group.delete", group_id)
def test_update_group_audit():
    """group.update must emit an audit entry naming the updated group."""
    with group({"name": "group2"}) as created:
        expected_call = {
            "method": "group.update",
            "params": [created["id"], {}],
            "description": "Update group group2",
        }
        with expect_audit_method_calls([expected_call]):
            call("group.update", created["id"], {})
def test_delete_group_audit():
    """group.delete must emit an audit entry naming the deleted group."""
    with group({"name": "group2"}) as created:
        expected_call = {
            "method": "group.delete",
            "params": [created["id"]],
            "description": "Delete group group2",
        }
        with expect_audit_method_calls([expected_call]):
            call("group.delete", created["id"])
def test_delete_group_audit_delete_users():
    """group.delete with delete_users must audit the cascading user deletion."""
    with group({"name": "group2"}) as created:
        expected_call = {
            "method": "group.delete",
            "params": [created["id"], {"delete_users": True}],
            "description": "Delete group group2 and all users that have this group as their primary group",
        }
        with expect_audit_method_calls([expected_call]):
            call("group.delete", created["id"], {"delete_users": True})
def test_update_account_using_token():
    """A token-authenticated session must be able to call user.update."""
    auth_token = call("auth.generate_token", 300)
    with client(auth=None) as session:
        assert session.call("auth.login_with_token", auth_token)
        # Raises on failure; completing the call is the assertion.
        session.call("user.update", 1, {})
| 3,677 | Python | .py | 110 | 23.027273 | 107 | 0.49197 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,295 | test_vmware.py | truenas_middleware/tests/api2/test_vmware.py | import contextlib
import ssl
import time
import types
import pytest
from pyVim import connect, task as VimTask
from pyVmomi import vim
from middlewared.test.integration.assets.nfs import nfs_share
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.assets.snapshot_task import snapshot_task
from middlewared.test.integration.assets.vmware import vmware
from middlewared.test.integration.utils import call, ssh
from middlewared.test.integration.utils.client import truenas_server
from middlewared.test.integration.utils.string import random_string
import os
import sys
apifolder = os.getcwd()
sys.path.append(apifolder)
# vCenter connection settings live in the local config.py; when they are
# absent, skip the whole module instead of failing at collection time.
try:
    from config import (
        VCENTER_HOSTNAME,
        VCENTER_USERNAME,
        VCENTER_PASSWORD,
        VCENTER_DATACENTER,
        VCENTER_ESX_HOST,
    )
except ImportError:
    pytestmark = pytest.mark.skip(reason='vCenter credential are missing in config.py')
@contextlib.contextmanager
def vcenter_connection():
    """Yield a pyVmomi ServiceInstance connected to vCenter, disconnecting on exit.

    Certificate verification is disabled because the test vCenter presents a
    self-signed certificate.
    """
    # ssl.PROTOCOL_SSLv23 is a deprecated alias; PROTOCOL_TLS_CLIENT is the
    # modern client-side equivalent. check_hostname must be disabled before
    # verify_mode can be set to CERT_NONE.
    ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ssl_context.check_hostname = False
    ssl_context.verify_mode = ssl.CERT_NONE
    si = connect.SmartConnect(
        host=VCENTER_HOSTNAME,
        user=VCENTER_USERNAME,
        pwd=VCENTER_PASSWORD,
        sslContext=ssl_context,
    )
    try:
        yield si
    finally:
        connect.Disconnect(si)
@contextlib.contextmanager
def datastore(si):
    """Create an NFS datastore on the configured ESX host backed by a TrueNAS share.

    Locates the configured datacenter and ESX host, exports a fresh dataset over
    NFS, mounts it on the host as a NAS datastore, and yields a namespace with
    the datacenter, host, datastore name, and backing dataset. The datastore is
    destroyed on exit; the share/dataset are torn down by their context managers.
    """
    content = si.RetrieveContent()
    # Find the target datacenter by name.
    for datacenter in content.viewManager.CreateContainerView(
        content.rootFolder,
        [vim.Datacenter],
        True,
    ).view:
        if datacenter.name == VCENTER_DATACENTER:
            break
    else:
        raise RuntimeError(f"Datacenter {VCENTER_DATACENTER} not found")
    # Find the target ESX host by name.
    for host in content.viewManager.CreateContainerView(
        content.rootFolder,
        [vim.HostSystem],
        True,
    ).view:
        if host.name == VCENTER_ESX_HOST:
            break
    else:
        raise RuntimeError(f"ESX host {VCENTER_ESX_HOST} not found")
    with dataset(f"vm_{random_string()}") as ds:
        with nfs_share(ds) as share:
            # World-writable so the ESX host's NFS client can create VM files.
            ssh(f"chmod 777 /mnt/{ds}")
            datastore_name = random_string()
            # NOTE: local name shadows this function's name; only the created
            # datastore object is meant past this point.
            datastore = host.configManager.datastoreSystem.CreateNasDatastore(
                vim.host.NasVolume.Specification(
                    remoteHost=truenas_server.ip,
                    remotePath=share["path"],
                    localPath=datastore_name,
                    accessMode=vim.host.MountInfo.AccessMode.readWrite,
                    type=vim.host.FileSystemVolume.FileSystemType.NFS
                )
            )
            try:
                yield types.SimpleNamespace(
                    datacenter=datacenter,
                    host=host,
                    name=datastore_name,
                    dataset=ds,
                )
            finally:
                VimTask.WaitForTask(datastore.Destroy_Task())
@contextlib.contextmanager
def vm(si, datastore):
    """Create a minimal powered-on VM on ``datastore``; power off and destroy on exit."""
    content = si.RetrieveContent()
    vm_name = random_string()

    # Bare-bones VM spec: 1 vCPU, 2 GiB RAM, files placed on the datastore.
    spec = vim.vm.ConfigSpec()
    spec.memoryMB = 2048
    spec.guestId = "ubuntu64Guest"
    spec.name = vm_name
    spec.numCPUs = 1
    spec.files = vim.vm.FileInfo()
    spec.files.vmPathName = f"[{datastore.name}]"

    VimTask.WaitForTask(datastore.datacenter.vmFolder.CreateVm(
        spec,
        pool=datastore.host.parent.resourcePool,
        host=datastore.host,
    ))

    # Look the new VM back up by name to get its managed object.
    for machine in content.viewManager.CreateContainerView(
        content.rootFolder,
        [vim.VirtualMachine],
        True,
    ).view:
        if machine.name == vm_name:
            break
    else:
        raise RuntimeError("Created VM not found")

    try:
        VimTask.WaitForTask(machine.PowerOn())
        try:
            yield vm_name
        finally:
            VimTask.WaitForTask(machine.PowerOff())
    finally:
        VimTask.WaitForTask(machine.Destroy_Task())
def test_vmware():
    """End-to-end VMware snapshot integration test.

    Creates a datastore backed by a TrueNAS NFS share with a running VM, checks
    that vmware.match_datastores_with_datasets maps the datastore to the backing
    dataset, then registers a VMware pairing and runs a periodic snapshot task,
    expecting the resulting ZFS snapshot to be marked freenas:vmsynced=Y.
    """
    with vcenter_connection() as si:
        with datastore(si) as ds:
            with vm(si, ds):
                result = call(
                    "vmware.match_datastores_with_datasets",
                    {
                        "hostname": VCENTER_HOSTNAME,
                        "username": VCENTER_USERNAME,
                        "password": VCENTER_PASSWORD,
                    },
                )
                # The created datastore must be matched back to its dataset.
                for rds in result["datastores"]:
                    if (
                        rds["name"] == ds.name and
                        rds["description"] == f"NFS mount '/mnt/{ds.dataset}' on {truenas_server.ip}" and
                        rds["filesystems"] == [ds.dataset]
                    ):
                        break
                else:
                    assert False, result

                with vmware({
                    "datastore": ds.name,
                    "filesystem": ds.dataset,
                    "hostname": VCENTER_HOSTNAME,
                    "username": VCENTER_USERNAME,
                    "password": VCENTER_PASSWORD,
                }):
                    with snapshot_task({
                        "dataset": ds.dataset,
                        "recursive": False,
                        "lifetime_value": 1,
                        "lifetime_unit": "DAY",
                        "naming_schema": "%Y%m%d%H%M",
                    }) as task:
                        call("pool.snapshottask.run", task["id"])

                        # Poll up to 60s for the snapshot to appear.
                        for i in range(60):
                            time.sleep(1)

                            snapshots = call("zfs.snapshot.query", [["dataset", "=", ds.dataset]])
                            if snapshots:
                                break
                        else:
                            assert False

                        # VMware-coordinated snapshots are tagged vmsynced=Y.
                        assert snapshots[0]["properties"]["freenas:vmsynced"]["value"] == "Y"
| 5,841 | Python | .py | 162 | 24.364198 | 105 | 0.562909 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,296 | test_bootenv.py | truenas_middleware/tests/api2/test_bootenv.py | import errno
import pytest
from middlewared.service_exception import ValidationErrors, ValidationError
from middlewared.test.integration.utils import call, ssh
def test_get_default_environment_and_make_new_one():
    """Duplicate BE names are rejected; a new BE can be created, activated,
    and activation handed back to the original BE."""
    active_be = call('bootenv.query', [['activated', '=', True]], {'get': True})['id']

    # Reusing the active BE's name must fail validation.
    with pytest.raises(ValidationErrors) as ve:
        call('bootenv.create', {'name': active_be, 'source': active_be})
    assert ve.value.errors == [
        ValidationError('bootenv_create.name', f'The name "{active_be}" already exists', errno.EEXIST)
    ]

    # A uniquely named BE cloned from the active one succeeds.
    call('bootenv.create', {'name': 'bootenv01', 'source': active_be})
    call('bootenv.query', [['name', '=', 'bootenv01']], {'get': True})

    # Activate the new BE ('R' marks pending activation on reboot), then revert.
    call('bootenv.activate', 'bootenv01')
    assert call('bootenv.query', [['name', '=', 'bootenv01']], {'get': True})['active'] == 'R'
    call('bootenv.activate', active_be)
    assert call('bootenv.query', [['name', '=', 'bootenv01']], {'get': True})['active'] == ''
def test_change_boot_environment_name_and_attributes_then_delete():
    """Rename a BE, toggle its 'keep' attribute, and delete it.

    NOTE(review): depends on 'bootenv01' created by the previous test in this
    module — these tests are order-sensitive.
    """
    call('bootenv.update', 'bootenv01', {'name': 'bootenv03'})
    call('bootenv.set_attribute', 'bootenv03', {'keep': True})
    renamed = call('bootenv.query', [['name', '=', 'bootenv03']], {'get': True})
    assert renamed['keep'] is True
    call('bootenv.set_attribute', 'bootenv03', {'keep': False})
    call('bootenv.delete', 'bootenv03')
def test_promote_current_be_datasets():
    """bootenv.promote_current_be_datasets must re-promote active BE datasets.

    Manually promotes a clone so the mounted /var/log dataset becomes a
    dependent (its 'origin' points at the clone's snapshot), then verifies the
    middleware call promotes it back (origin '-'). Cleanup order matters: the
    clone must be destroyed before the snapshot it was created from.
    """
    # Dataset currently mounted at /var/log.
    var_log = ssh('df | grep /var/log').split()[0]
    snapshot_name = 'snap-1'
    snapshot = f'{var_log}@{snapshot_name}'
    ssh(f'zfs snapshot {snapshot}')
    try:
        clone = 'boot-pool/ROOT/clone'
        ssh(f'zfs clone {snapshot} {clone}')
        try:
            # Promoting the clone inverts the origin relationship.
            ssh(f'zfs promote {clone}')
            assert ssh(f'zfs get -H -o value origin {var_log}').strip() == f'{clone}@{snapshot_name}'
            call('bootenv.promote_current_be_datasets')
            # After re-promotion the active dataset has no origin.
            assert ssh(f'zfs get -H -o value origin {var_log}').strip() == '-'
        finally:
            ssh(f'zfs destroy {clone}')
    finally:
        ssh(f'zfs destroy {snapshot}')
| 2,241 | Python | .py | 43 | 45.860465 | 105 | 0.635073 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,297 | test_pool_dataset_snapshot_count.py | truenas_middleware/tests/api2/test_pool_dataset_snapshot_count.py | import textwrap
import pytest
from middlewared.test.integration.utils import call, mock
from middlewared.test.integration.assets.pool import dataset
import os
import sys
sys.path.append(os.getcwd())
def test_empty_for_locked_root_dataset():
    """snapshot_count must use a fast counting path, never zfs.snapshot.query."""
    with dataset("test_pool_dataset_snapshot_count") as ds:
        snapshot_total = 7
        for index in range(snapshot_total):
            call("zfs.snapshot.create", {"dataset": ds, "name": f"snap-{index}"})
        # If the implementation falls back to zfs.snapshot.query, the mock raises.
        with mock("zfs.snapshot.query", textwrap.dedent("""\
            def mock(self, *args):
                raise Exception("Should not be called")
        """)):
            assert call("pool.dataset.snapshot_count", ds) == snapshot_total
| 643 | Python | .py | 16 | 33.8125 | 77 | 0.666667 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,298 | test_account_duplicate_uid_gid.py | truenas_middleware/tests/api2/test_account_duplicate_uid_gid.py | import errno
import pytest
from middlewared.service_exception import ValidationErrors, ValidationError
from middlewared.test.integration.assets.account import user, group
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call
@pytest.fixture(scope="module")
def uid_1234():
    """Yield a user created with the fixed UID 1234 and a dedicated home dataset."""
    # Plain string literal: the original used an f-string with no placeholders.
    with dataset("user1_homedir") as user1_homedir:
        with user({
            "username": "user1",
            "full_name": "user1",
            "group_create": True,
            "groups": [],
            "home": f"/mnt/{user1_homedir}",
            "password": "test1234",
            "uid": 1234,
        }) as uid_1234:
            yield uid_1234
@pytest.fixture(scope="module")
def gid_1234():
    """Yield a group created with the fixed GID 1234."""
    group_spec = {"name": "group1", "gid": 1234}
    with group(group_spec) as created:
        yield created
def test_create_duplicate_uid(uid_1234):
    """Creating a second user with an already-used UID must fail validation."""
    # Plain string literal: the original used an f-string with no placeholders.
    with dataset("user2_homedir") as user2_homedir:
        with pytest.raises(ValidationErrors) as ve:
            with user({
                "username": "user2",
                "full_name": "user2",
                "group_create": True,
                "groups": [],
                "home": f"/mnt/{user2_homedir}",
                "password": "test1234",
                "uid": 1234,
            }):
                pass
        assert ve.value.errors == [
            ValidationError('user_create.uid', 'Uid 1234 is already used (user user1 has it)', errno.EEXIST),
        ]
def test_create_duplicate_gid(gid_1234):
    """Creating a second group with an already-used GID must fail validation."""
    duplicate_spec = {"name": "group2", "gid": 1234}
    with pytest.raises(ValidationErrors) as ve:
        with group(duplicate_spec):
            pass
    expected = ValidationError(
        'group_create.gid', 'Gid 1234 is already used (group group1 has it)', errno.EEXIST,
    )
    assert ve.value.errors == [expected]
| 1,821 | Python | .py | 52 | 25.865385 | 109 | 0.575982 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,299 | test_pool_expand.py | truenas_middleware/tests/api2/test_pool_expand.py | import time
from middlewared.test.integration.assets.pool import another_pool
from middlewared.test.integration.utils import call, ssh
def retry_get_parts_on_disk(disk, max_tries=10):
    """Poll disk.list_partitions until it returns a non-empty result.

    Retries up to ``max_tries`` times, sleeping one second between attempts —
    but not after the last one, which the original version did, wasting a
    second on the failure path. Fails the test if no partitions appear.
    """
    for attempt in range(max_tries):
        if parts := call('disk.list_partitions', disk):
            return parts
        if attempt < max_tries - 1:
            time.sleep(1)
    assert False, f'Failed after {max_tries} seconds for partition info on {disk!r}'
def test_expand_pool():
    """pool.expand must grow the vdev partition and the pool, preserving data.

    Shrinks the test pool's backing partition to 2 GiB by exporting the pool
    and re-creating it by hand, then verifies pool.expand restores the full
    partition/pool size without destroying the test file. The raw sgdisk/zpool
    steps are order-critical.
    """
    with another_pool() as pool:
        disk = pool["topology"]["data"][0]["disk"]
        original_partition_size = call("disk.list_partitions", disk)[-1]["size"]
        # Ensure that the test pool vdev is way larger than 2 GiB
        assert original_partition_size > 2147483648 * 2

        # Transform this pool into a pool on a vdev with a partition that is only 2 GiB
        ssh(f"zpool export {pool['name']}")
        ssh(f"sgdisk -d 1 /dev/{disk}")
        ssh(f"sgdisk -n 1:0:+2GiB -t 1:BF01 /dev/{disk}")
        small_partition = retry_get_parts_on_disk(disk)[-1]
        # Allow 1% slack on the 2 GiB partition size.
        assert small_partition["size"] < 2147483648 * 1.01
        device = "disk/by-partuuid/" + small_partition["partition_uuid"]
        ssh(f"zpool create {pool['name']} -o altroot=/mnt -f {device}")

        # Ensure that the pool size is small now
        assert call("pool.get_instance", pool["id"])["size"] < 2147483648 * 1.01

        # Marker file to prove expansion does not destroy data.
        ssh(f"touch /mnt/{pool['name']}/test")

        call("pool.expand", pool["id"], job=True)

        new_partition = call("disk.list_partitions", disk)[-1]
        # Ensure that the partition size is way larger than 2 GiB
        assert new_partition["size"] > 2147483648 * 2
        # Ensure that the pool size was increased
        assert call("pool.get_instance", pool["id"])["size"] > 2147483648 * 2
        # Ensure that data was not destroyed
        assert ssh(f"ls /mnt/{pool['name']}") == "test\n"
def test_expand_partition_keeps_initial_offset():
    """Expanding a partition must grow it in place without moving its start."""
    disk = call("disk.get_unused")[0]["name"]
    call("disk.wipe", disk, "QUICK", job=True)
    ssh(f"sgdisk -n 0:8192:1GiB /dev/{disk}")

    before = retry_get_parts_on_disk(disk)[0]
    call("pool.expand_partition", before)
    after = retry_get_parts_on_disk(disk)[0]

    assert after["size"] > before["size"]
    assert after["start"] == before["start"]
| 2,332 | Python | .py | 44 | 45.795455 | 88 | 0.646775 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |