id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
25,100 | conftest.py | truenas_middleware/tests/conftest.py | import os
import pytest
from middlewared.test.integration.assets.roles import unprivileged_user_fixture # noqa
from middlewared.test.integration.utils.client import truenas_server
from middlewared.test.integration.utils.pytest import failed
pytest.register_assert_rewrite("middlewared.test")
@pytest.fixture(autouse=True)
def fail_fixture():
    """Abort the whole session before the next test if a fatal failure was flagged.

    `failed[0]` is set by middlewared test utilities when a prior test hit an
    unrecoverable error; exiting early avoids running doomed follow-on tests.
    """
    reason = failed[0]
    if reason is not None:
        pytest.exit(reason, pytest.ExitCode.TESTS_FAILED)
@pytest.hookimpl(trylast=True)
def pytest_sessionfinish(session, exitstatus):
    """Override the session exit status when the run was aborted early.

    Called after the test run finishes, right before returning control to the
    system. A custom error code (EX_SOFTWARE) tells Jenkins that the junit
    results shouldn't be trusted, since the run aborted early — but we still
    want to present whatever results exist to the developer.
    """
    if failed[0] is not None:
        session.exitstatus = os.EX_SOFTWARE
@pytest.fixture(autouse=True)
def log_test_name_to_middlewared_log(request):
    """Bracket each test with start/end markers in the middlewared log.

    NOTE: this runs after session/package/module/class fixtures are applied,
    so the log boundaries are approximate. Anything logged between a
    test-end marker and the next test-start marker comes from those
    broader-scoped fixtures' setup code.
    """
    name = request.node.name
    truenas_server.client.call("test.notify_test_start", name)
    yield
    truenas_server.client.call("test.notify_test_end", name)
| 1,518 | Python | .py | 31 | 44.967742 | 87 | 0.764547 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,101 | runtest.py | truenas_middleware/tests/runtest.py | #!/usr/bin/env python3
# Author: Eric Turgeon
# License: BSD
from middlewared.test.integration.utils import client
from ipaddress import ip_interface
from subprocess import run, call
from sys import argv, exit
import os
import getopt
import random
import socket
import sys
import secrets
import string
workdir = os.getcwd()
sys.path.append(workdir)
workdir = os.getcwd()
results_xml = f'{workdir}/results/'
localHome = os.path.expanduser('~')
dotsshPath = localHome + '/.ssh'
keyPath = localHome + '/.ssh/test_id_rsa'
isns_ip = '10.234.24.50' # isns01.qe.ixsystems.net
pool_name = "tank"
ixautomation_dot_conf_url = "https://raw.githubusercontent.com/iXsystems/" \
"ixautomation/master/src/etc/ixautomation.conf.dist"
config_file_msg = "Please add config.py to freenas/tests which can be empty " \
f"or contain settings from {ixautomation_dot_conf_url}"
if not os.path.exists('config.py'):
print(config_file_msg)
exit(1)
error_msg = f"""Usage for %s:
Mandatory option
--ip <###.###.###.###> - IP of the TrueNAS
--password <root password> - Password of the TrueNAS root user
--interface <interface> - The interface that TrueNAS is run one
Optional option
--ip2 - B controller IPv4 of TrueNAS HA machine
--vip - VIP (ipv4) of TrueNAS HA machine
--test <test name> - Test name (Network, ALL)
--tests <test1>[,test2,...] - List of tests to be supplied to pytest
--vm-name <VM_NAME> - Name the the Bhyve VM
--ha - Run test for HA
--ha_license - The base64 encoded string of an HA license
--isns_ip <###.###.###.###> - IP of the iSNS server (default: {isns_ip})
--pool <POOL_NAME> - Name of the ZFS pool (default: {pool_name})
""" % argv[0]
# if have no argument stop
if len(argv) == 1:
print(error_msg)
exit()
option_list = [
"ip=",
"ip2=",
"vip=",
"password=",
"interface=",
'test=',
"vm-name=",
"ha",
"update",
"dev-test",
"log-cli-level=",
"returncode",
"isns_ip=",
"pool=",
"tests=",
"ha_license=",
"hostname=",
"show_locals"
]
# look if all the argument are there.
try:
myopts, args = getopt.getopt(argv[1:], 'aipItk:vxs', option_list)
except getopt.GetoptError as e:
print(str(e))
print(error_msg)
exit()
vm_name = None
testName = ''
testexpr = None
ha = False
update = False
verbose = 0
exitfirst = ''
returncode = False
callargs = []
tests = []
ip2 = vip = ''
netmask = None
gateway = None
ha_license = ''
hostname = None
show_locals = False
for output, arg in myopts:
if output in ('-i', '--ip'):
ip = arg
elif output == '--ip2':
ip2 = arg
elif output == '--vip':
vip = arg
elif output in ('-p', '--password'):
passwd = arg
elif output in ('-I', '--interface'):
interface = arg
elif output in ('-t', '--test'):
testName = arg
elif output == '-k':
testexpr = arg
elif output in ('--vm-name',):
vm_name = f"'{arg}'"
elif output == '--ha':
ha = True
elif output == '--hostname':
hostname = arg
elif output == '--update':
update = True
elif output == '-v':
verbose += 1
elif output == '-x':
exitfirst = True
elif output == '--log-cli-level':
callargs.append('--log-cli-level')
callargs.append(arg)
elif output == '--returncode':
returncode = True
elif output == '--isns_ip':
isns_ip = arg
elif output == '--pool':
pool_name = arg
elif output == '-s':
callargs.append('-s')
elif output == '--tests':
tests.extend(arg.split(','))
elif output == '--ha_license':
ha_license = arg
elif output == '--show_locals':
show_locals = True
if 'ip' not in locals() and 'passwd' not in locals() and 'interface' not in locals():
print("Mandatory option missing!\n")
print(error_msg)
exit()
# create random hostname and random fake domain
digit = ''.join(secrets.choice((string.ascii_uppercase + string.digits)) for i in range(10))
if not hostname:
hostname = f'test{digit}'
domain = f'{hostname}.nb.ixsystems.com'
artifacts = f"{workdir}/artifacts/"
if not os.path.exists(artifacts):
os.makedirs(artifacts)
os.environ["MIDDLEWARE_TEST_IP"] = ip
os.environ["MIDDLEWARE_TEST_PASSWORD"] = passwd
os.environ["SERVER_TYPE"] = "ENTERPRISE_HA" if ha else "STANDARD"
ip_to_use = ip
if ha and ip2:
domain = 'tn.ixsystems.com'
os.environ['controller1_ip'] = ip
os.environ['controller2_ip'] = ip2
def get_ipinfo(ip_to_use):
    # Query the target TrueNAS for its network facts.
    # Returns (interface_id, netmask, default_gateway, nameserver1, nameserver2);
    # any element may be None when it could not be determined.
    iface = net = gate = ns1 = ns2 = None
    with client(host_ip=ip_to_use) as c:
        net_config = c.call('network.configuration.config')
        ns1 = net_config.get('nameserver1')
        ns2 = net_config.get('nameserver2')
        # ip_to_use may be a hostname; resolve it so we can match aliases below
        _ip_to_use = socket.gethostbyname(ip_to_use)
        for i in c.call('interface.query'):
            for j in i['state']['aliases']:
                if j.get('address') == _ip_to_use:
                    iface = i['id']
                    net = j['netmask']
        # first default route (network 0.0.0.0) with a gateway wins
        for k in c.call('route.system_routes'):
            if k.get('network') == '0.0.0.0' and k.get('gateway'):
                return iface, net, k['gateway'], ns1, ns2
    return iface, net, gate, ns1, ns2
interface, netmask, gateway, ns1, ns2 = get_ipinfo(ip_to_use)
if not all((interface, netmask, gateway)):
print(f'Unable to determine interface ({interface!r}), netmask ({netmask!r}) and gateway ({gateway!r}) for {ip_to_use!r}')
exit()
if ha:
if vip:
os.environ['virtual_ip'] = vip
elif os.environ.get('virtual_ip'):
vip = os.environ['virtual_ip']
else:
# reduce risk of trying to assign same VIP to two VMs
# starting at roughly the same time
vip_pool = list(ip_interface(f'{ip}/{netmask}').network)
random.shuffle(vip_pool)
for i in vip_pool:
last_octet = int(i.compressed.split('.')[-1])
if last_octet < 15 or last_octet >= 250:
# addresses like *.255, *.0 and any of them that
# are < *.15 we'll ignore. Those are typically
# reserved for routing/switch devices anyways
continue
elif run(['ping', '-c', '2', '-w', '4', i.compressed]).returncode != 0:
# sent 2 packets to the address and got no response so assume
# it's safe to use
os.environ['virtual_ip'] = i.compressed
vip = i.compressed
break
# Set various env variables for HA, if not already set
if not os.environ.get('domain'):
os.environ['domain'] = domain
if not os.environ.get('hostname_virtual'):
os.environ['hostname_virtual'] = hostname
if not os.environ.get('hostname'):
os.environ['hostname'] = f'{hostname}-nodea'
if not os.environ.get('hostname_b'):
os.environ['hostname_b'] = f'{hostname}-nodeb'
if not os.environ.get('primary_dns'):
os.environ['primary_dns'] = ns1 or '10.230.0.10'
if not os.environ.get('secondary_dns'):
os.environ['secondary_dns'] = ns2 or '10.230.0.11'
cfg_content = f"""#!{sys.executable}
user = "root"
password = "{passwd}"
netmask = "{netmask}"
gateway = "{gateway}"
vip = "{vip}"
vm_name = {vm_name}
hostname = "{hostname}"
domain = "{domain}"
api_url = 'http://{ip}/api/v2.0'
interface = "{interface}"
badNtpServer = "10.20.20.122"
localHome = "{localHome}"
keyPath = "{keyPath}"
pool_name = "{pool_name}"
ha_pool_name = "ha"
ha = {ha}
ha_license = "{ha_license}"
update = {update}
artifacts = "{artifacts}"
isns_ip = "{isns_ip}"
"""
cfg_file = open("auto_config.py", 'w')
cfg_file.writelines(cfg_content)
cfg_file.close()
from functions import setup_ssh_agent, create_key, add_ssh_key, get_folder
from functions import SSH_TEST
# Setup ssh agent before starting test.
setup_ssh_agent()
if os.path.isdir(dotsshPath) is False:
os.makedirs(dotsshPath)
if os.path.exists(keyPath) is False:
create_key(keyPath)
add_ssh_key(keyPath)
f = open(keyPath + '.pub', 'r')
Key = f.readlines()[0].rstrip()
cfg_file = open("auto_config.py", 'a')
cfg_file.writelines(f'sshKey = "{Key}"\n')
cfg_file.close()
if verbose:
callargs.append("-" + "v" * verbose)
if exitfirst:
callargs.append("-x")
if show_locals:
callargs.append('--showlocals')
# Use the right python version to start pytest with sys.executable
# So that we can support virtualenv python pytest.
pytest_command = [
sys.executable,
'-m',
'pytest'
] + callargs + [
"-o", "junit_family=xunit2",
'--timeout=300',
"--junitxml",
'results/api_v2_tests_result.xml',
]
if testexpr:
pytest_command.extend(['-k', testexpr])
def parse_test_name(test):
    """Normalize a test specifier into a pytest-usable node id.

    Accepts either a plain path ("api2/test_smb.py"), a dotted Jenkins-style
    name ("test_smb.test_share"), or an already-formed node id, and strips
    any leading "api2/" or "api2." prefix.
    """
    test = test.removeprefix("api2/")
    test = test.removeprefix("api2.")
    if ".py" not in test and test.count(".") == 1:
        # Jenkins passes "<module>.<testname>"; convert to pytest's
        # "<module>.py::<testname>" node-id form.
        # BUG FIX: the module name must come from `filename`, not a literal.
        filename, testname = test.split(".")
        return f"{filename}.py::{testname}"
    return test
if tests:
pytest_command.extend(list(map(parse_test_name, tests)))
else:
pytest_command.append(f"api2/{parse_test_name(testName)}")
proc_returncode = call(pytest_command)
# get useful logs
logs_list = [
"/var/log/daemon.log",
"/var/log/debug",
"/var/log/middlewared.log",
"/var/log/messages",
"/var/log/syslog",
]
get_folder('/var/log', f'{artifacts}/log', 'root', 'testing', ip)
# get dmesg and put it in artifacts
results = SSH_TEST('dmesg', 'root', 'testing', ip)
dmsg = open(f'{artifacts}/dmesg', 'w')
dmsg.writelines(results['output'])
dmsg.close()
# get core.get_jobs and put it in artifacts
results = SSH_TEST('midclt call core.get_jobs | jq .', 'root', 'testing', ip)
core_get_jobs = open(f'{artifacts}/core.get_jobs', 'w')
core_get_jobs.writelines(results['output'])
core_get_jobs.close()
if returncode:
exit(proc_returncode)
| 10,057 | Python | .py | 306 | 28.124183 | 126 | 0.620803 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,102 | utils.py | truenas_middleware/tests/utils.py | import contextlib
import urllib.parse
from functions import POST, SSH_TEST, DELETE, wait_on_job
from time import sleep
@contextlib.contextmanager
def create_dataset(dataset, options=None, acl=None, mode=None):
    """Context manager: create a pool dataset, optionally set perms/ACL, delete on exit.

    :param dataset: dataset name (e.g. "tank/test")
    :param options: extra keys merged into the pool.dataset.create payload
    :param acl: ACL to apply via filesystem.setacl (ignored if ``mode`` given)
    :param mode: octal mode string applied via filesystem.setperm
    """
    perm_job = None
    result = POST("/pool/dataset/", {"name": dataset, **(options or {})})
    assert result.status_code == 200, result.text
    if mode is not None:
        perm_job = POST("/filesystem/setperm/", {'path': f"/mnt/{dataset}", "mode": mode})
    elif acl is not None:
        perm_job = POST("/filesystem/setacl/", {'path': f"/mnt/{dataset}", "dacl": acl})
    if perm_job:
        # BUG FIX: report the perm job's error text, not the dataset-create response
        assert perm_job.status_code == 200, perm_job.text
        job_status = wait_on_job(perm_job.json(), 180)
        assert job_status["state"] == "SUCCESS", str(job_status["results"])
    try:
        yield dataset
    finally:
        # dataset may still be busy right after use; give services time to release it
        sleep(5)
        result = DELETE(f"/pool/dataset/id/{urllib.parse.quote(dataset, '')}/",
                        {'recursive': True})
        assert result.status_code == 200, result.text
| 1,064 | Python | .py | 25 | 35.92 | 90 | 0.630561 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,103 | functions.py | truenas_middleware/tests/functions.py | #!/usr/bin/env python3
# Author: Eric Turgeon
# License: BSD
import enum
import json
import os
import re
from subprocess import PIPE, Popen, TimeoutExpired, run
from time import sleep
from urllib.parse import urlparse
import requests
from auto_config import password, user
from middlewared.test.integration.utils import host
global header
header = {'Content-Type': 'application/json', 'Vary': 'accept'}
global authentication
authentication = (user, password)
RE_HTTPS = re.compile(r'^http(:.*)')
class SRVTarget(enum.Enum):
    # Which server/controller an HTTP or SSH helper should talk to.
    DEFAULT = enum.auto()  # the primary IP reported by host()
    NODEA = enum.auto()  # HA controller A
    NODEB = enum.auto()  # HA controller B
def get_host_ip(target):
    """Resolve a SRVTarget member to the matching IP of the test host.

    Raises ValueError for anything that is not a known SRVTarget member.
    """
    server = host()
    if target is SRVTarget.DEFAULT:
        return server.ip
    if target is SRVTarget.NODEA:
        return server.nodea_ip
    if target is SRVTarget.NODEB:
        return server.nodeb_ip
    raise ValueError(f'{target}: unexpected target')
def controller_url(target=SRVTarget.DEFAULT):
    """Return the REST API v2.0 base URL for the given server target."""
    ip_addr = get_host_ip(target)
    return f'http://{ip_addr}/api/v2.0'
def GET(testpath, payload=None, controller_a=False, **optional):
    # HTTP GET helper. `testpath` is either an API path (joined to the
    # controller base URL) or a full URL (used verbatim, unauthenticated).
    # Recognized `optional` keys: force_ssl, timeout, anonymous, auth, headers.
    data = {} if payload is None else payload
    url = controller_url(SRVTarget.NODEA if controller_a else SRVTarget.DEFAULT)
    complete_uri = testpath if testpath.startswith('http') else f'{url}{testpath}'
    if optional.get('force_ssl', False):
        # rewrite http:// -> https:// via the module-level RE_HTTPS pattern
        complete_uri = RE_HTTPS.sub(r'https\1', complete_uri)
    timeout = optional.get('timeout', None)
    if testpath.startswith('http'):
        # full URL: no auth, no headers, no body
        getit = requests.get(complete_uri, timeout=timeout)
    else:
        if optional.pop("anonymous", False):
            auth = None
        else:
            auth = optional.pop("auth", authentication)
        # NOTE: body is sent on GET (middleware API accepts filter payloads);
        # verify=False because test systems use self-signed certs
        getit = requests.get(complete_uri, headers=dict(header, **optional.get("headers", {})),
                             auth=auth, data=json.dumps(data), verify=False, timeout=timeout)
    return getit
def POST(testpath, payload=None, controller_a=False, **optional):
    # HTTP POST helper. Recognized `optional` keys: use_ip_only, anonymous,
    # files (multipart upload), force_new_headers, headers.
    data = {} if payload is None else payload
    url = controller_url(SRVTarget.NODEA if controller_a else SRVTarget.DEFAULT)
    if optional.get("use_ip_only"):
        # strip the /api/v2.0 suffix, keeping scheme://host only
        parsed = urlparse(url)
        url = f"{parsed.scheme}://{parsed.netloc}"
    if optional.pop("anonymous", False):
        auth = None
    else:
        auth = authentication
    files = optional.get("files")
    headers = dict(({} if optional.get("force_new_headers") else header), **optional.get("headers", {}))
    if payload is None:
        # no JSON body at all — e.g. multipart uploads rely on `files` instead
        postit = requests.post(
            f'{url}{testpath}', headers=headers, auth=auth, files=files)
    else:
        postit = requests.post(
            f'{url}{testpath}', headers=headers, auth=auth,
            data=json.dumps(data), files=files
        )
    return postit
def PUT(testpath, payload=None, controller_a=False, **optional):
    """Send an HTTP PUT with a JSON body to the middleware REST API.

    `anonymous=True` disables basic auth; `headers` merges extra headers
    over the module defaults.
    """
    body = {} if payload is None else payload
    target = SRVTarget.NODEA if controller_a else SRVTarget.DEFAULT
    auth = None if optional.pop("anonymous", False) else authentication
    request_headers = {**header, **optional.get("headers", {})}
    return requests.put(f'{controller_url(target)}{testpath}',
                        headers=request_headers, auth=auth, data=json.dumps(body))
def DELETE(testpath, payload=None, controller_a=False, **optional):
    """Send an HTTP DELETE with an optional JSON body to the middleware REST API.

    `anonymous=True` disables basic auth; `headers` merges extra headers
    over the module defaults.
    """
    body = {} if payload is None else payload
    target = SRVTarget.NODEA if controller_a else SRVTarget.DEFAULT
    auth = None if optional.pop("anonymous", False) else authentication
    request_headers = {**header, **optional.get("headers", {})}
    return requests.delete(f'{controller_url(target)}{testpath}',
                           headers=request_headers, auth=auth,
                           data=json.dumps(body))
def SSH_TEST(command, username, passwrd, host=None, timeout=120):
    """Run `command` over ssh on the target host and capture its output.

    Returns a dict with stdout, stderr, combined output, returncode, and a
    boolean `result` (True when the command exited 0). When `passwrd` is
    None, key-based auth via the running ssh-agent is assumed.
    """
    target = host or get_host_ip(SRVTarget.DEFAULT)
    cmd = [] if passwrd is None else ["sshpass", "-p", passwrd]
    cmd.extend([
        "ssh",
        "-o", "StrictHostKeyChecking=no",
        "-o", "UserKnownHostsFile=/dev/null",
        "-o", "VerifyHostKeyDNS=no",
        f"{username}@{target}",
        command,
    ])
    # timeout (default 120s) guards against hung SSH connections
    proc = run(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True,
               timeout=timeout)
    return {
        'stdout': proc.stdout,
        'stderr': proc.stderr,
        'output': proc.stdout + proc.stderr,
        'returncode': proc.returncode,
        'result': proc.returncode == 0,
    }
def async_SSH_start(command, username, passwrd, host):
    """Start `command` over ssh without waiting; returns the Popen handle.

    Pair with async_SSH_done() to collect the output later. When `passwrd`
    is None, key-based auth via the running ssh-agent is assumed.
    """
    cmd = [] if passwrd is None else ["sshpass", "-p", passwrd]
    cmd.extend([
        "ssh",
        "-o", "StrictHostKeyChecking=no",
        "-o", "UserKnownHostsFile=/dev/null",
        "-o", "VerifyHostKeyDNS=no",
        "-o", "LogLevel=quiet",
        f"{username}@{host}",
        command,
    ])
    return Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
def async_SSH_done(proc, timeout=120):
    """Wait for a process started by async_SSH_start and return (stdout, stderr).

    If the process does not finish within `timeout` seconds it is killed and
    whatever output it produced so far is returned.
    """
    try:
        stdout_data, stderr_data = proc.communicate(timeout=timeout)
    except TimeoutExpired:
        proc.kill()
        stdout_data, stderr_data = proc.communicate()
    return stdout_data, stderr_data
def send_file(file, destination, username, passwrd, host):
    """Copy a local file to `destination` on `host` via scp.

    Returns {'result': bool, 'output': str} with scp's stdout.
    """
    cmd = [] if passwrd is None else ["sshpass", "-p", passwrd]
    cmd.extend([
        "scp",
        "-o", "StrictHostKeyChecking=no",
        "-o", "UserKnownHostsFile=/dev/null",
        "-o", "VerifyHostKeyDNS=no",
        file,
        f"{username}@{host}:{destination}",
    ])
    proc = run(cmd, stdout=PIPE, universal_newlines=True)
    return {'result': proc.returncode == 0, 'output': proc.stdout}
def get_file(file, destination, username, passwrd, host):
    """Copy a remote file from `host` to the local `destination` via scp.

    Returns {'result': bool, 'output': str} with scp's stdout.
    """
    cmd = [] if passwrd is None else ["sshpass", "-p", passwrd]
    cmd.extend([
        "scp",
        "-o", "StrictHostKeyChecking=no",
        "-o", "UserKnownHostsFile=/dev/null",
        "-o", "VerifyHostKeyDNS=no",
        f"{username}@{host}:{file}",
        destination,
    ])
    proc = run(cmd, stdout=PIPE, universal_newlines=True)
    return {'result': proc.returncode == 0, 'output': proc.stdout}
def get_folder(folder, destination, username, passwrd, host):
    """Recursively copy a remote directory from `host` to `destination` via scp -r.

    Returns {'result': bool, 'output': str} with scp's stdout.
    """
    cmd = [] if passwrd is None else ["sshpass", "-p", passwrd]
    cmd.extend([
        "scp",
        "-o", "StrictHostKeyChecking=no",
        "-o", "UserKnownHostsFile=/dev/null",
        "-o", "VerifyHostKeyDNS=no",
        "-r",
        f"{username}@{host}:{folder}",
        destination,
    ])
    proc = run(cmd, stdout=PIPE, universal_newlines=True)
    return {'result': proc.returncode == 0, 'output': proc.stdout}
def RC_TEST(command):
    """Run `command` through the shell; True iff it exited with status 0."""
    return run(command, shell=True).returncode == 0
def return_output(command):
    """Run `command` via the shell and return its first stdout line stripped,
    or None when the command produced no output."""
    with Popen(command, shell=True, stdout=PIPE, universal_newlines=True) as proc:
        lines = proc.stdout.readlines()
    return lines[0].strip() if lines else None
def cmd_test(command):
    """Run a shell command locally, capturing output.

    Returns {'result': True, 'output': stdout} on success, and additionally
    includes 'stderr' when the command fails.
    """
    proc = run(command, shell=True, stdout=PIPE, stderr=PIPE, universal_newlines=True)
    if proc.returncode == 0:
        return {'result': True, 'output': proc.stdout}
    return {'result': False, 'output': proc.stdout, 'stderr': proc.stderr}
def start_ssh_agent():
    """Launch a new ssh-agent and export its socket/pid into os.environ.

    Returns True when the agent's output was parsed successfully.
    """
    proc = run(['ssh-agent', '-s'], stdout=PIPE, universal_newlines=True)
    pattern = re.compile(
        r'SSH_AUTH_SOCK=(?P<socket>[^;]+).*SSH_AGENT_PID=(?P<pid>\d+)',
        re.MULTILINE | re.DOTALL,
    )
    match = pattern.search(proc.stdout)
    if match is None:
        return False
    agent = match.groupdict()
    os.environ['SSH_AUTH_SOCK'] = agent['socket']
    os.environ['SSH_AGENT_PID'] = agent['pid']
    return True
def is_agent_setup():
    """True when an ssh-agent socket is already exported in the environment."""
    return 'SSH_AUTH_SOCK' in os.environ
def setup_ssh_agent():
    """Ensure an ssh-agent is available, starting one if none is exported."""
    return is_agent_setup() or start_ssh_agent()
def create_key(keyPath):
    """Generate a passwordless RSA keypair at keyPath; True on success."""
    return run('ssh-keygen -t rsa -f %s -q -N ""' % keyPath, shell=True).returncode == 0
def if_key_listed():
    """True when the running ssh-agent reports at least one loaded key."""
    return run('ssh-add -L', shell=True).returncode == 0
def add_ssh_key(keyPath):
    """Load the private key at keyPath into the running ssh-agent; True on success."""
    return run(['ssh-add', keyPath]).returncode == 0
def vm_state(vm_name):
    """Return the state string reported by `vm info <vm_name>`."""
    proc = run(f'vm info {vm_name} | grep state:', shell=True, stdout=PIPE,
               universal_newlines=True)
    # output looks like "state: running"; take everything after the colon
    return proc.stdout.partition(':')[2].strip()
def vm_start(vm_name):
    """Start a bhyve VM by name; True when `vm start` exited 0."""
    return run(['vm', 'start', vm_name]).returncode == 0
def ping_host(host, count, timeout=None):
    """Ping `host` `count` times (linux iputils syntax).

    Without `timeout`, success is ping's exit code. With `timeout` (seconds),
    success means at least one reply arrived — useful when waiting for a
    rebooting system to disappear from or reappear on the network.
    """
    cmd = ['ping', f'-c{count}']
    if timeout is not None:
        cmd.append(f'-W{timeout}')
    cmd.append(host)
    proc = run(cmd, check=False, capture_output=True)
    if timeout is None:
        return proc.returncode == 0
    # only total packet loss counts as failure in the timeout mode
    return b'100% packet loss' not in proc.stdout
def wait_on_job(job_id, max_timeout):
    """Poll a middleware job every 5s until it finishes or max_timeout elapses.

    Returns {'state': 'SUCCESS'|'FAILED'|'TIMEOUT', 'results': <job dict>}.
    """
    # kept module-global for callers that inspect the last raw response
    global job_results
    elapsed = 0
    while True:
        job_results = GET(f'/core/get_jobs/?id={job_id}')
        state = job_results.json()[0]['state']
        if state in ('SUCCESS', 'FAILED'):
            return {'state': state, 'results': job_results.json()[0]}
        if state in ('RUNNING', 'WAITING'):
            sleep(5)
        if elapsed >= max_timeout:
            return {'state': 'TIMEOUT', 'results': job_results.json()[0]}
        elapsed += 5
| 10,479 | Python | .py | 293 | 29 | 108 | 0.626889 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,104 | run_unit_tests.py | truenas_middleware/tests/run_unit_tests.py | # This script should be run locally from a TrueNAS VM. It runs all tests
# contained within the tests/unit directory as well as middleware specific unit
# tests contained within src/middlewared/middlewared/pytest/unit.
#
# NOTE: this requires `make install_tests` to have been run on the TrueNAS VM.
import argparse
import middlewared
import os
import pytest
import sys
from contextlib import contextmanager
from collections.abc import Generator
from dataclasses import dataclass
from junitparser import JUnitXml
from shutil import copytree, rmtree
from truenas_api_client import Client
from uuid import uuid4
DESCRIPTION = (
'Run unit tests from the specified middleware git repository on the '
'current TrueNAS server (version 25.04 or later). Exit code is one of '
'pytest exit codes with zero indicating success.'
)
UNIT_TESTS = 'tests/unit'
MIDDLEWARE_MODULE_PATH = '/usr/lib/python3/dist-packages/middlewared'
MIDDLEWARE_PYTEST = 'src/middlewared/middlewared/pytest'
MIDDLEWARE_UNIT_TESTS = os.path.join(MIDDLEWARE_PYTEST, 'unit')
MIDDLEWARE_PYTEST_MODULE = os.path.join(MIDDLEWARE_MODULE_PATH, 'pytest')
RESULT_FILE = 'unit_tests_result.xml'
PYTEST_CONFTEST_FILE = 'tests/conftest.py'
if not os.path.exists(MIDDLEWARE_MODULE_PATH):
# If middlware has been reinstalled then we should try to find where it's located
MIDDLEWARE_MODULE_PATH = os.path.dirname(os.path.abspath(middlewared.__file__))
@dataclass()
class UnitTestRun:
    # Directory of tests passed to a single pytest invocation.
    tests_dir: str
    # Aggregate pytest exit code for this run (set by run_tests()).
    exit_code: pytest.ExitCode = pytest.ExitCode.NO_TESTS_COLLECTED
    # Path of the junit xml file the run wrote (set by run_tests()).
    junit_file: str | None = None
def run_tests(data: UnitTestRun) -> UnitTestRun:
junit_file = f'unit_tests_result_{uuid4()}.xml'
data.exit_code = pytest.main([
'--disable-warnings', '-vv',
'-o', 'junit_family=xunit2',
'--junitxml', junit_file,
data.tests_dir
])
if data.exit_code is not pytest.ExitCode.OK:
print(
f'{data.tests_dir}: tests failed with code: {data.exit_code}',
file=sys.stderr
)
data.junit_file = junit_file
return data
def run_unit_tests(repo_dir: str) -> pytest.ExitCode:
"""
Iterate through our unit test sources and create a unified junit xml file
for the overall test results.
"""
xml_out = JUnitXml()
exit_code = pytest.ExitCode.NO_TESTS_COLLECTED
for test_dir in (
os.path.join(repo_dir, UNIT_TESTS),
os.path.join(repo_dir, MIDDLEWARE_UNIT_TESTS),
):
if not os.path.exists(test_dir):
raise FileNotFoundError(f'{test_dir}: unit test directory does not exist')
data = run_tests(UnitTestRun(tests_dir=test_dir))
xml_out += JUnitXml.fromfile(data.junit_file)
try:
os.remove(data.junit_file)
except Exception:
pass
match data.exit_code:
case pytest.ExitCode.NO_TESTS_COLLECTED:
# We'll treat this as a partial failure because we still want our
# test results from other runs, but don't want an overall misleading
# result.
print(
f'{test_dir}: not tests collected. Treating as partial failure.',
file=sys.stderr
)
if exit_code is pytest.ExitCode.OK:
exit_code = pytest.ExitCode.TESTS_FAILED
case pytest.ExitCode.OK:
# If this is our first OK test, set exit code
# otherwise preserve existing
if exit_code is pytest.ExitCode.NO_TESTS_COLLECTED:
exit_code = data.exit_code
case _:
# exit codes are an IntEnum. Preserve worst case
if exit_code < data.exit_code:
exit_code = data.exit_code
xml_out.write(RESULT_FILE)
return exit_code
@contextmanager
def disable_api_test_config(path: str) -> Generator[None, None, None]:
    """Temporarily rename tests/conftest.py so the API-test conftest is not applied."""
    conftest = os.path.join(path, PYTEST_CONFTEST_FILE)
    parked = os.path.join(path, f'{PYTEST_CONFTEST_FILE}.tmp')
    os.rename(conftest, parked)
    try:
        yield
    finally:
        # always restore the original conftest, even on error
        os.rename(parked, conftest)
@contextmanager
def setup_middleware_tests(path: str) -> Generator[None, None, None]:
    """Temporarily copy the repo's pytest tree into the installed middlewared module."""
    try:
        copytree(
            os.path.join(path, MIDDLEWARE_PYTEST),
            os.path.join(MIDDLEWARE_PYTEST_MODULE)
        )
        yield
    finally:
        # remove the copied tree even if copytree itself failed partway
        rmtree(MIDDLEWARE_PYTEST_MODULE)
def main() -> None:
    """Entry point: verify we're on a TrueNAS box, then run all unit tests."""
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument(
        '-p', '--path',
        help='Path to local copy of middleware git repository',
        default='./middleware'
    )
    # lazy check to verify we're running on a TrueNAS server
    with Client() as c:
        assert c.call('system.ready')
    args = parser.parse_args()
    with disable_api_test_config(args.path):
        with setup_middleware_tests(args.path):
            exit_code = run_unit_tests(args.path)
    sys.exit(exit_code)
if __name__ == '__main__':
main()
| 5,265 | Python | .py | 135 | 31.696296 | 86 | 0.657972 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,105 | test_directory.py | truenas_middleware/tests/unit/test_directory.py | import gc
import os
import pytest
import stat
from middlewared.utils import filter_list
from middlewared.utils.filesystem import constants
from middlewared.utils.filesystem import directory
TEST_FILES = [
'testfile1',
'testfile2',
'canary',
'1234_bob'
]
TEST_DIRS = [
'testdir1',
'testdir2',
'1234_larry'
]
@pytest.fixture(scope="function")
def directory_for_test(tmpdir):
    """Populate tmpdir with test files, test dirs, and one "<name>_sl" symlink each.

    The symlink-per-entry layout is what the length/symlink tests below count on.
    """
    for filename in TEST_FILES:
        path = os.path.join(tmpdir, filename)
        with open(path, 'w'):
            pass
        # BUG FIX: each file gets its own "<filename>_sl" link
        # (mirrors the directory loop below)
        os.symlink(path, os.path.join(tmpdir, f'{filename}_sl'))
    for dirname in TEST_DIRS:
        path = os.path.join(tmpdir, dirname)
        os.mkdir(path)
        os.symlink(path, os.path.join(tmpdir, f'{dirname}_sl'))
    return tmpdir
def get_fd_count():
    """Return this process's open file-descriptor count after forcing a GC pass.

    Collecting first ensures fds held by unreferenced objects don't inflate
    the count used for leak checks.
    """
    gc.collect()
    return len(os.listdir('/proc/self/fd'))
@pytest.fixture(scope="module")
def fd_count():
    """Baseline fd count taken once per module; tests compare against it for leaks."""
    return get_fd_count()
def validate_attributes(dirent):
assert dirent['name'] is not None
assert dirent['path'] is not None
assert dirent['realpath'] is not None
st = os.stat(dirent['realpath'])
assert dirent['size'] == st.st_size
assert dirent['mode'] == st.st_mode
assert dirent['uid'] == st.st_uid
assert dirent['gid'] == st.st_gid
assert dirent['allocation_size'] == st.st_blocks * 512
match dirent['type']:
case 'DIRECTORY':
assert stat.S_ISDIR(dirent['mode'])
assert dirent['name'] == os.path.basename(dirent['realpath'])
assert dirent['path'] == dirent['realpath']
case 'FILE':
assert stat.S_ISREG(dirent['mode'])
assert dirent['name'] == os.path.basename(dirent['realpath'])
assert dirent['path'] == dirent['realpath']
case 'SYMLINK':
assert dirent['name'] != os.path.basename(dirent['realpath'])
assert dirent['path'] != dirent['realpath']
# we do not check mode here because we follow symlink for stat output
# for directory entry
case _:
raise ValueError(f'{dirent["type"]}: unexpected dirent type')
def test__length_no_filters(directory_for_test, fd_count):
    """Unfiltered iteration yields every entry plus its symlink; close() frees the fd."""
    it = directory.DirectoryIterator(directory_for_test)
    assert len(filter_list(it, [], {})) == 2 * len(TEST_FILES + TEST_DIRS)
    # the iterator still holds its DirectoryFd until explicitly closed
    assert get_fd_count() == fd_count + 1
    it.close()
    assert get_fd_count() == fd_count
def test__length_iter_dirs(directory_for_test, fd_count):
    """Directories can be selected via the file_type kwarg or a type filter."""
    by_kwarg = filter_list(
        directory.DirectoryIterator(directory_for_test, file_type=constants.FileType.DIRECTORY),
        [], {}
    )
    assert len(by_kwarg) == len(TEST_DIRS)
    by_filter = filter_list(
        directory.DirectoryIterator(directory_for_test),
        [['type', '=', 'DIRECTORY']], {}
    )
    assert len(by_filter) == len(TEST_DIRS)
    gc.collect()
    assert get_fd_count() == fd_count
def test__length_iter_files(directory_for_test):
    """Regular files can be selected via the file_type kwarg or a type filter."""
    by_kwarg = filter_list(
        directory.DirectoryIterator(directory_for_test, file_type=constants.FileType.FILE),
        [], {}
    )
    assert len(by_kwarg) == len(TEST_FILES)
    by_filter = filter_list(
        directory.DirectoryIterator(directory_for_test),
        [['type', '=', 'FILE']], {}
    )
    assert len(by_filter) == len(TEST_FILES)
def test__length_iter_symlink(directory_for_test):
    """Each test file and dir has exactly one symlink, selectable both ways."""
    expected = len(TEST_FILES) + len(TEST_DIRS)
    by_kwarg = filter_list(
        directory.DirectoryIterator(directory_for_test, file_type=constants.FileType.SYMLINK),
        [], {}
    )
    assert len(by_kwarg) == expected
    by_filter = filter_list(
        directory.DirectoryIterator(directory_for_test),
        [['type', '=', 'SYMLINK']], {}
    )
    assert len(by_filter) == expected
def test__stat_attributes_dirents(directory_for_test):
    """Every dirent produced by the iterator passes attribute validation."""
    for entry in directory.DirectoryIterator(directory_for_test):
        validate_attributes(entry)
def test__directory_zero_request_mask(directory_for_test, fd_count):
    """A zero request mask suppresses all optional attributes; no fds leak."""
    it = directory.DirectoryIterator(directory_for_test, request_mask=0)
    for entry in it:
        for key in ('realpath', 'is_ctldir', 'zfs_attrs', 'xattrs', 'acl'):
            assert entry[key] is None
    del it
    gc.collect()
    assert get_fd_count() == fd_count
def test__directory_realpath_request_mask(directory_for_test):
    """The REALPATH mask populates realpath and nothing else."""
    mask = directory.DirectoryRequestMask.REALPATH
    for entry in directory.DirectoryIterator(directory_for_test, request_mask=mask):
        assert entry['realpath'] is not None
        for key in ('is_ctldir', 'zfs_attrs', 'xattrs', 'acl'):
            assert entry[key] is None
def test__directory_xattrs_request_mask(directory_for_test):
    """The XATTRS mask populates xattrs and nothing else."""
    mask = directory.DirectoryRequestMask.XATTRS
    for entry in directory.DirectoryIterator(directory_for_test, request_mask=mask):
        assert entry['xattrs'] is not None
        for key in ('realpath', 'is_ctldir', 'zfs_attrs', 'acl'):
            assert entry[key] is None
def test__directory_acl_request_mask(directory_for_test):
    """The ACL mask populates acl only (iterator used as a context manager)."""
    mask = directory.DirectoryRequestMask.ACL
    with directory.DirectoryIterator(directory_for_test, request_mask=mask) as it:
        for entry in it:
            for key in ('realpath', 'is_ctldir', 'zfs_attrs', 'xattrs'):
                assert entry[key] is None
            assert entry['acl'] is not None
def test__directory_request_mask():
    """DirectoryRequestMask and ALL_ATTRS agree in both directions."""
    for member in directory.DirectoryRequestMask:
        assert member in directory.ALL_ATTRS
    for attr in directory.ALL_ATTRS:
        assert directory.DirectoryRequestMask(attr)
def test__directory_is_empty(tmpdir, fd_count):
    """directory_is_empty flips once an entry exists, and leaks no fds."""
    gc.collect()
    assert directory.directory_is_empty(tmpdir)
    os.mkdir(os.path.join(tmpdir, 'testfile'))
    assert not directory.directory_is_empty(tmpdir)
    assert get_fd_count() == fd_count
def test__directory_fd(directory_for_test, fd_count):
    """Smoke-test DirectoryFd open/openat/__repr__/close semantics."""
    # open(2) path: absolute path, no dir_fd
    dfd = directory.DirectoryFd(directory_for_test)
    assert str(directory_for_test) in repr(dfd)
    # openat(2) path: relative name resolved against an existing DirectoryFd
    dfd2 = directory.DirectoryFd('testdir1', dir_fd=dfd.fileno)
    assert 'testdir1' in repr(dfd2)
    dfd.close()
    assert dfd.fileno is None
    with pytest.raises(NotADirectoryError):
        directory.DirectoryFd(os.path.join(directory_for_test, 'testfile1'))
    # dfd2 is still open, so exactly one extra fd remains
    assert get_fd_count() == fd_count + 1
25,106 | test_tdb.py | truenas_middleware/tests/unit/test_tdb.py | import gc
import os
import pytest
from base64 import b64encode
from contextlib import closing
from middlewared.plugins.system_dataset.utils import SYSDATASET_PATH
from middlewared.utils.tdb import (
close_sysdataset_tdb_handles,
get_tdb_handle,
TDBBatchAction,
TDBBatchOperation,
TDBDataType,
TDBHandle,
TDBOptions,
TDBPathType,
)
from middlewared.service_exception import MatchNotFound
@pytest.fixture(scope='module')
def tdbdirs():
    """ Ensure the directories backing each TDB path type exist. """
    for tdb_dir in (TDBPathType.PERSISTENT.value, TDBPathType.VOLATILE.value, SYSDATASET_PATH):
        os.makedirs(tdb_dir, exist_ok=True)
    yield
def get_fd_count():
    """ Return the number of file descriptors currently open in this process.

    Garbage is collected first so descriptors owned by unreachable objects
    are released and do not inflate the authoritative count.
    """
    gc.collect()
    open_fds = os.listdir('/proc/self/fd')
    return len(open_fds)
@pytest.fixture(scope="module")
def fd_count():
    # Baseline open-fd count, taken once per module; tests compare against
    # this value to detect leaked file descriptors.
    return get_fd_count()
def basic_tdb_ops(hdl: TDBHandle, datatype: TDBDataType):
    """ Round-trip a single key through store / get / entries / delete
    on an open handle, using a payload appropriate for `datatype`. """
    match datatype:
        case TDBDataType.JSON:
            data = {'foo': 'bar'}
        case TDBDataType.BYTES:
            # BYTES payloads are passed around as base64-encoded strings
            data = b64encode(b'foobar_bytes').decode()
        case TDBDataType.STRING:
            data = 'foobar'
        case _:
            raise ValueError(f'{datatype}: unknown data type')
    hdl.store('test_key', data)
    val = hdl.get('test_key')
    assert val == data
    entries = list(hdl.entries())
    assert entries == [{'key': 'test_key', 'value': data}]
    hdl.delete('test_key')
    # a deleted key must no longer resolve
    with pytest.raises(MatchNotFound):
        hdl.get('test_key')
    assert len(list(hdl.entries())) == 0
def batched_tdb_ops(hdl: TDBHandle, datatype: TDBDataType):
    """ Exercise batch_op(): SET three keys under one transaction, read
    them back, then GET one + DEL two in a second batch, and clear(). """
    match datatype:
        case TDBDataType.JSON:
            data1 = {'foo1': 'bar1'}
            data2 = {'foo2': 'bar2'}
            data3 = {'foo3': 'bar3'}
        case TDBDataType.BYTES:
            data1 = b64encode(b'foobar_bytes1').decode()
            data2 = b64encode(b'foobar_bytes2').decode()
            data3 = b64encode(b'foobar_bytes3').decode()
        case TDBDataType.STRING:
            data1 = 'foobar1'
            data2 = 'foobar2'
            data3 = 'foobar3'
        case _:
            raise ValueError(f'{datatype}: unknown data type')
    # first try setting three under a lock
    batched_ops = [
        TDBBatchOperation(TDBBatchAction.SET, key='test_key1', value=data1),
        TDBBatchOperation(TDBBatchAction.SET, key='test_key2', value=data2),
        TDBBatchOperation(TDBBatchAction.SET, key='test_key3', value=data3),
    ]
    hdl.batch_op(batched_ops)
    # check that we get same three back
    for key, value in [
        ('test_key1', data1),
        ('test_key2', data2),
        ('test_key3', data3)
    ]:
        assert hdl.get(key) == value
    # now fetch one and delete the other two
    batched_ops = [
        TDBBatchOperation(TDBBatchAction.GET, key='test_key1', value=data1),
        TDBBatchOperation(TDBBatchAction.DEL, key='test_key2'),
        TDBBatchOperation(TDBBatchAction.DEL, key='test_key3'),
    ]
    out = hdl.batch_op(batched_ops)
    # GET results come back keyed by the requested key
    assert out['test_key1'] == data1
    assert len(list(hdl.entries())) == 1
    hdl.clear()
    assert len(list(hdl.entries())) == 0
@pytest.mark.parametrize('datatype', TDBDataType)
def test__persistent_tdb(fd_count, tdbdirs, datatype):
    """ Basic and batched operations against a persistent TDB file for
    every supported data type; the handle must not leak an fd. """
    tdb_name = 'TEST_PERSISTENT'
    expected_path = os.path.join(TDBPathType.PERSISTENT.value, f'{tdb_name}.tdb')
    tdb_options = TDBOptions(TDBPathType.PERSISTENT, datatype)
    with closing(TDBHandle(tdb_name, tdb_options)) as handle:
        basic_tdb_ops(handle, datatype)
        batched_tdb_ops(handle, datatype)
        # persistent handles resolve name -> <PERSISTENT dir>/<name>.tdb
        assert handle.full_path == expected_path
    os.remove(expected_path)
    assert get_fd_count() == fd_count
@pytest.mark.parametrize('datatype', TDBDataType)
def test__volatile_tdb(fd_count, tdbdirs, datatype):
    """ Basic and batched operations against a volatile (runtime) TDB file
    for every supported data type; the handle must not leak an fd.

    Mirrors test__persistent_tdb for TDBPathType.VOLATILE.
    """
    tdb_name = 'TEST_VOLATILE'
    expected_path = os.path.join(TDBPathType.VOLATILE.value, f'{tdb_name}.tdb')
    tdb_options = TDBOptions(TDBPathType.VOLATILE, datatype)
    # pass tdb_name rather than repeating the literal so the handle name and
    # expected_path cannot drift apart (consistent with the persistent test)
    with closing(TDBHandle(tdb_name, tdb_options)) as handle:
        basic_tdb_ops(handle, datatype)
        batched_tdb_ops(handle, datatype)
        assert handle.full_path == expected_path
    os.remove(expected_path)
    assert get_fd_count() == fd_count
@pytest.mark.parametrize('datatype', TDBDataType)
def test__custom_tdb(fd_count, tmpdir, datatype):
    """ test that creating a custom TDB file works as expected """
    custom_file = os.path.join(tmpdir, 'custom.tdb')
    tdb_options = TDBOptions(TDBPathType.CUSTOM, datatype)
    with closing(TDBHandle(custom_file, tdb_options)) as handle:
        # for CUSTOM path type the name passed in is used verbatim as path
        assert handle.full_path == custom_file
        basic_tdb_ops(handle, datatype)
        batched_tdb_ops(handle, datatype)
    os.remove(custom_file)
    # handle must not leak a file descriptor
    assert get_fd_count() == fd_count
def test__tdb_connection_caching(fd_count):
    """ check that TDB handle caching works as expected """
    custom_file = os.path.join(SYSDATASET_PATH, 'sysds.tdb')
    tdb_options = TDBOptions(TDBPathType.CUSTOM, TDBDataType.JSON)
    hdl_id = None
    with get_tdb_handle(custom_file, tdb_options) as hdl:
        hdl_id = id(hdl)
        basic_tdb_ops(hdl, TDBDataType.JSON)
    # second acquisition must return the same cached handle object
    with get_tdb_handle(custom_file, tdb_options) as hdl:
        assert id(hdl) == hdl_id
        hdl.close()
    assert get_fd_count() == fd_count
    # after an explicit close() a fresh handle is created
    with get_tdb_handle(custom_file, tdb_options) as hdl:
        assert id(hdl) != hdl_id
        hdl.close()
    assert get_fd_count() == fd_count
    os.remove(custom_file)
    # leave this handle open on purpose: fd count should differ ...
    with get_tdb_handle(custom_file, tdb_options) as hdl:
        pass
    assert get_fd_count() != fd_count
    # ... until the sysdataset handle cache is torn down
    close_sysdataset_tdb_handles()
    assert get_fd_count() == fd_count
def test__tdb_handle_invalidated_by_delete(fd_count):
    """ check that file being deleted is properly detected and does not leak """
    assert get_fd_count() == fd_count
    custom_file = os.path.join(SYSDATASET_PATH, 'sysds_del.tdb')
    tdb_options = TDBOptions(TDBPathType.CUSTOM, TDBDataType.JSON)
    with get_tdb_handle(custom_file, tdb_options) as hdl:
        os.remove(custom_file)
        # the handle must notice its backing file has vanished
        assert not hdl.validate_handle()
    close_sysdataset_tdb_handles()
    assert get_fd_count() == fd_count
def test__tdb_handle_invalidated_by_rename(fd_count):
    """ Renaming a TDB file over another must invalidate cached handles for
    both names, and stale handles must be auto-closed without leaking fds. """
    tdb_options = TDBOptions(TDBPathType.PERSISTENT, TDBDataType.JSON)
    test_payload = {'foo': 'bar'}
    with get_tdb_handle('HANDLE1', tdb_options) as hdl:
        # Intentionally avoid closing this handle because we're
        # testing auto-close of stale handles
        hdl_1_path = hdl.full_path
        hdl_1_id = id(hdl)
        hdl.store('test_key', test_payload)
    with get_tdb_handle('HANDLE2', tdb_options) as hdl:
        hdl_2_id = id(hdl)
        # HANDLE1's file now replaces HANDLE2's on disk
        os.rename(hdl_1_path, hdl.full_path)
        assert not hdl.validate_handle()
    with get_tdb_handle('HANDLE1', tdb_options) as hdl:
        # verify we have new TDB file / handle
        assert id(hdl) != hdl_1_id
        # intentionally close so that our final count is correct
        hdl.close()
    with get_tdb_handle('HANDLE2', tdb_options) as hdl:
        # verify we have new TDB file / handle
        assert id(hdl) != hdl_2_id
        # verify we have correct key (data followed the renamed file)
        assert hdl.get('test_key') == test_payload
        # intentionally close so that our final count is correct
        hdl.close()
    assert get_fd_count() == fd_count
| 7,460 | Python | .py | 182 | 34.406593 | 81 | 0.668281 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,107 | test_api.py | truenas_middleware/tests/unit/test_api.py | from pydantic import Field
from middlewared.api.base import BaseModel
from middlewared.api.base.handler.result import serialize_result
def test_dump_by_alias():
    """ serialize_result() must emit fields under their API alias (not the
    python attribute name) and include alias fields filled from defaults. """
    class AliasModel(BaseModel):
        field1_: int = Field(..., alias='field1')
        field2: str
        field3_: bool = Field(alias='field3', default=False)
    class AliasModelResult(BaseModel):
        result: AliasModel
    # field3 omitted on purpose: it must fall back to its default
    result = {'field1': 1, 'field2': 'two'}
    dump = serialize_result(AliasModelResult, result, False)
    assert dump == {'field1': 1, 'field2': 'two', 'field3': False}
| 569 | Python | .py | 13 | 38.307692 | 66 | 0.692727 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,108 | test_sid.py | truenas_middleware/tests/unit/test_sid.py | import pytest
from middlewared.plugins.idmap_.idmap_constants import (
BASE_SYNTHETIC_DATASTORE_ID,
IDType
)
from middlewared.utils.sid import (
db_id_to_rid,
get_domain_rid,
random_sid,
sid_is_valid,
BASE_RID_GROUP,
BASE_RID_USER,
)
@pytest.fixture(scope='module')
def local_sid():
    """ One randomly-generated SID shared by every test in this module. """
    sid = random_sid()
    yield sid
@pytest.mark.parametrize('id_type,db_id,expected_rid,valid', [
    (IDType.USER, 1000, 1000 + BASE_RID_USER, True),
    (IDType.GROUP, 1000, 1000 + BASE_RID_GROUP, True),
    (IDType.USER, 1000 + BASE_SYNTHETIC_DATASTORE_ID, None, False),
])
def test__db_id_to_rid(id_type, db_id, expected_rid, valid):
    """ Datastore ids map to RIDs via a per-id-type base offset; ids at or
    above the synthetic datastore base must be rejected with ValueError. """
    if valid:
        assert db_id_to_rid(id_type, db_id) == expected_rid
    else:
        with pytest.raises(ValueError):
            db_id_to_rid(id_type, db_id)
@pytest.mark.parametrize('sid,valid', [
    ('S-1-5-21-3510196835-1033636670-2319939847-200108', True),
    ('S-1-5-32-544', True),
    ('S-1-2-0', False), # technically valid SID but we don't permit it
    ('S-1-5-21-3510196835-1033636670-2319939847-200108-200108', False), # too many subauthorities
    ('S-1-5-21-3510196835-200108', False), # too few subauthorities
    ('S-1-5-21-3510196835-1033636670-231993008847-200108', False), # subauthority out of 32-bit range
    ('S-1-5-21-351019683b-1033636670-2319939847-200108', False), # non-numeric subauthority
])
def test__sid_is_valid(sid, valid):
    """ sid_is_valid() accepts well-formed domain / builtin SIDs only. """
    assert sid_is_valid(sid) is valid
@pytest.mark.parametrize('sid,rid,valid', [
    ('S-1-5-21-3510196835-1033636670-2319939847-200108', 200108, True),
    ('S-1-5-21-3510196835-1033636670-2319939847', None, False),
    ('S-1-5-32-544', None, False),
])
def test__get_domain_rid(sid, rid, valid):
    """ get_domain_rid() extracts the trailing RID from a full domain SID
    and raises ValueError for domain-only or well-known SIDs. """
    if not valid:
        with pytest.raises(ValueError):
            get_domain_rid(sid)
        return
    assert get_domain_rid(sid) == rid
def test__random_sid_is_valid(local_sid):
    # sanity-check that our own SID generator produces SIDs that pass the
    # same validator used for externally-supplied SIDs
    assert sid_is_valid(local_sid)
| 1,821 | Python | .py | 51 | 31.196078 | 71 | 0.673678 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,109 | test_passdb.py | truenas_middleware/tests/unit/test_passdb.py | import os
import pytest
import secrets
import string
import subprocess
from dataclasses import asdict
from middlewared.plugins.smb_ import util_passdb
from middlewared.utils.crypto import generate_nt_hash
from time import sleep, time
# NetBIOS-style domain name and a fixed domain SID used for passdb entries
PDB_DOMAIN = 'CANARY'
PDB_DOM_SID = 'S-1-5-21-710078819-430336432-4106732522'
# Baseline keyword arguments for util_passdb.PDBEntry; tests override the
# per-user fields (username, user_rid, nt_pw, times, ...)
PDB_DICT_DEFAULTS = {
    'username': None,
    'nt_username': '',
    'domain': PDB_DOMAIN,
    'full_name': None,
    'comment': '',
    'home_dir': '',
    'dir_drive': '',
    'logon_script': '',
    'profile_path': '',
    'user_rid': None,
    'group_rid': 513, # samba default -- domain users rid
    'acct_desc': '',
    'acct_ctrl': util_passdb.UserAccountControl.NORMAL_ACCOUNT,
    'nt_pw': None,
    'logon_count': 0,
    'bad_pw_count': 0,
    'times': None
}
# A user entry shaped like middleware's user table output, used as input
# for user_entry_to_passdb_entry / user_entry_to_uac_flags tests
SAMPLE_USER = {
    'id': 75,
    'uid': 3000,
    'username': 'pdbuser',
    'unixhash': '$6$rounds=656000$oYArtLuJhcfrwkkX$uUcNk1VdH7jHWZLd6HXT1svD3MXtS578sBx2oDrag3ZTxVFm41y1mIvpCHcR1/dcGiTiT/lhIyVD8m1QHgovq0', # noqa
    'smbhash': '05BC65787F63B56CF6D47F16E32E3ABF',
    'home': '/var/empty',
    'shell': '/usr/sbin/nologin',
    'full_name': 'pdbuser_name',
    'builtin': False,
    'smb': True,
    'password_disabled': False,
    'ssh_password_enabled': False,
    'locked': False,
    'sudo_commands': [],
    'sudo_commands_nopasswd': [],
    'email': None,
    'group': {
        'id': 107,
        'bsdgrp_gid': 3000,
        'bsdgrp_group': 'pdbuser',
        'bsdgrp_builtin': False,
        'bsdgrp_sudo_commands': [],
        'bsdgrp_sudo_commands_nopasswd': [],
        'bsdgrp_smb': False
    },
    'groups': [
        90
    ],
    'sshpubkey': None,
    'immutable': False,
    'twofactor_auth_configured': False,
    'local': True,
    'id_type_both': False,
    'sid': 'S-1-5-21-710078819-430336432-4106732522-20075',
    'roles': []
}
# NOTE: PDB_DOMAIN is already defined above; the redundant re-definition of
# the same value was removed. Below are the common MS-SAMU user account
# control flag combinations used by the parametrized tests.
NORMAL_ACCOUNT = util_passdb.UserAccountControl.NORMAL_ACCOUNT
LOCKED_ACCOUNT = NORMAL_ACCOUNT | util_passdb.UserAccountControl.AUTO_LOCKED
DISABLED_ACCOUNT = NORMAL_ACCOUNT | util_passdb.UserAccountControl.DISABLED
EXPIRED_ACCOUNT = NORMAL_ACCOUNT | util_passdb.UserAccountControl.PASSWORD_EXPIRED
@pytest.fixture(scope='module')
def passdb_dir():
    """ Create the directories samba expects for passdb/private files and a
    minimal smb4.conf (the pdbedit cross-check refuses to run without one). """
    os.makedirs(util_passdb.SMBPath.PASSDB_DIR.path, mode=0o700, exist_ok=True)
    os.makedirs(util_passdb.SMBPath.PRIVATEDIR.path, mode=0o700, exist_ok=True)
    # valid smb.conf is required for pdbedit command
    with open('/etc/smb4.conf', 'w') as f:
        f.write('[global]\n')
        f.flush()
@pytest.fixture(scope='function')
def pdb_times():
    # Fresh PDBTimes per test: password set "now", logoff/kickoff and
    # pass_must_change pinned to the PASSDB_TIME_T_MAX sentinel (never).
    yield util_passdb.PDBTimes(
        logon=0,
        logoff=util_passdb.PASSDB_TIME_T_MAX,
        kickoff=util_passdb.PASSDB_TIME_T_MAX,
        bad_password=0,
        pass_last_set=int(time()),
        pass_can_change=0,
        pass_must_change=util_passdb.PASSDB_TIME_T_MAX
    )
@pytest.fixture(scope='function')
def pdb_user(pdb_times):
    # A PDBEntry built from the defaults plus SAMPLE_USER's identifying
    # fields; the RID matches the trailing component of SAMPLE_USER's sid.
    payload = PDB_DICT_DEFAULTS | {
        'username': SAMPLE_USER['username'],
        'full_name': SAMPLE_USER['full_name'],
        'user_rid': 20075,
        'times': pdb_times,
        'nt_pw': SAMPLE_USER['smbhash']
    }
    yield util_passdb.PDBEntry(**payload)
def check_pdbedit(usernames):
    """
    validate that standard Samba tools see same users. This can fail if for
    instance we fail to insert major / minor versions into passdb.tdb because
    samba will interpret it as containing struct samu version 0.
    """
    pdbedit = subprocess.run(['pdbedit', '-L'], capture_output=True)
    assert pdbedit.returncode == 0, pdbedit.stderr.decode()
    # pdbedit -L prints one "<username>:<...>" line per account
    found = [line.split(':')[0] for line in pdbedit.stdout.decode().strip().splitlines()]
    assert len(found) == len(usernames), f'expected: {usernames}, found: {found}'
    expected = set(usernames)
    found = set(found)
    missing = expected - found
    assert missing == set(), str(missing)
    extra = found - expected
    assert extra == set(), str(extra)
@pytest.mark.parametrize('pdbentrydict', [
    PDB_DICT_DEFAULTS | {'username': 'user1', 'full_name': 'user1', 'user_rid': 20071},
    PDB_DICT_DEFAULTS | {'username': 'user2', 'full_name': 'user2', 'user_rid': 20072, 'acct_ctrl': LOCKED_ACCOUNT},
    PDB_DICT_DEFAULTS | {'username': 'user3', 'full_name': 'user3', 'user_rid': 20073, 'acct_ctrl': DISABLED_ACCOUNT},
    PDB_DICT_DEFAULTS | {'username': 'user4', 'full_name': 'user4', 'user_rid': 20074, 'acct_ctrl': EXPIRED_ACCOUNT},
])
def test__insert_query_delete(pdbentrydict, passdb_dir, pdb_times):
    """ Add user, verify it shows in our query, verify that it shows in pdbedit, then delete it """
    # generate NT hash from randomized password
    nt_hash = generate_nt_hash(''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10)))
    payload = pdbentrydict | {'times': pdb_times, 'nt_pw': nt_hash}
    entry = util_passdb.PDBEntry(**payload)
    util_passdb.insert_passdb_entries([entry])
    try:
        contents = util_passdb.query_passdb_entries([], {})
        assert len(contents) == 1
        check_pdbedit([entry.username])
    finally:
        # always remove the entry so one failing case can't poison the next
        util_passdb.delete_passdb_entry(payload['username'], payload['user_rid'])
    # compares against `contents` captured before the delete above; only
    # reached when the try body succeeded
    assert asdict(entry) == contents[0]
    contents = util_passdb.query_passdb_entries([], {})
    assert len(contents) == 0, str(contents)
def test__smbhash_parser():
    """
    The `smbhash` field in samba has changed from being a smbpasswd string to simply
    containing the NT hash for user. This test ensures that we properly extract NT hash
    from both entry types.
    """
    # legacy format is a full smbpasswd line; modern format is the bare hash
    smb_hash_legacy = "smbuser:3000:XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:05BC65787F63B56CF6D47F16E32E3ABF:[U ]:LCT-66C7190E:" # noqa
    nt_hash = "05BC65787F63B56CF6D47F16E32E3ABF"
    assert util_passdb.user_smbhash_to_nt_pw('smbuser', smb_hash_legacy) == nt_hash
    assert util_passdb.user_smbhash_to_nt_pw('smbuser', nt_hash) == nt_hash
@pytest.mark.parametrize('user_data,expected', [
    ({'locked': False, 'password_disabled': False}, util_passdb.UserAccountControl.NORMAL_ACCOUNT),
    ({'locked': True, 'password_disabled': False}, LOCKED_ACCOUNT),
    ({'locked': False, 'password_disabled': True}, DISABLED_ACCOUNT),
    ({'locked': True, 'password_disabled': True}, DISABLED_ACCOUNT | LOCKED_ACCOUNT),
])
def test__uac_flags_parser(user_data, expected):
    """
    This test validates mapping of account status parameters from our user table entries to
    MS-SAMU user account control flags
    """
    assert util_passdb.user_entry_to_uac_flags(user_data) == expected
@pytest.mark.parametrize('changes', [
    {'locked': True, 'full_name': 'bob'},
    {'smbhash': generate_nt_hash('Cats')},
])
def test__pdb_update(pdb_user, changes):
    """
    This test validates the helper function that generates updated PDBEntry based on user
    specified data. For example, if there is an existing entry we should preserve its
    timestamps.
    """
    # sleep so that `now` is guaranteed to differ from the fixture's
    # pass_last_set timestamp taken when pdb_times was created
    sleep(1)
    now = int(time())
    user_entry = SAMPLE_USER | changes
    new_entry = util_passdb.user_entry_to_passdb_entry(PDB_DOMAIN, user_entry, asdict(pdb_user))
    # validate that timestamps were preserved
    assert now != new_entry.times.pass_last_set
    assert pdb_user.times == new_entry.times
    assert new_entry.nt_pw == user_entry['smbhash']
    assert new_entry.domain == PDB_DOMAIN
    assert util_passdb.user_entry_to_uac_flags(user_entry) == new_entry.acct_ctrl
| 7,465 | Python | .py | 183 | 35.825137 | 147 | 0.676738 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,110 | test_copytree.py | truenas_middleware/tests/unit/test_copytree.py | import errno
import gc
import os
import pytest
import random
import stat
from middlewared.utils.filesystem import copy
from operator import eq, ne
from unittest.mock import Mock, patch
# payload sizes for generated test files and their xattr values
TEST_FILE_DATASZ = 128 * 1024
TEST_XATTR_DATASZ = 1024
# (name, random payload) pairs written into each generated directory
TEST_FILES = [
    ('testfile1', random.randbytes(TEST_FILE_DATASZ)),
    ('testfile2', random.randbytes(TEST_FILE_DATASZ)),
    ('canary', random.randbytes(TEST_FILE_DATASZ)),
    ('1234_bob', random.randbytes(TEST_FILE_DATASZ))
]
# user-namespace xattrs applied to every generated file
TEST_FILE_XATTRS = [
    ('user.filexat1', random.randbytes(TEST_XATTR_DATASZ)),
    ('user.filexat2', random.randbytes(TEST_XATTR_DATASZ)),
    ('user.filexat3', random.randbytes(TEST_XATTR_DATASZ)),
]
TEST_DIRS = [
    'testdir1',
    'testdir2',
    '1234_larry'
]
# user-namespace xattrs applied to every generated directory
TEST_DIR_XATTRS = [
    ('user.dirxat1', random.randbytes(TEST_XATTR_DATASZ)),
    ('user.dirxat2', random.randbytes(TEST_XATTR_DATASZ)),
    ('user.dirxat3', random.randbytes(TEST_XATTR_DATASZ)),
]
# arbitrary fixed uid/gid/timestamp base used to make ownership and
# atime/mtime values recognizable in assertions
JENNY = 8675309
class Job:
    """ Minimal stand-in for a middleware job object: records progress
    percentage and log messages passed to set_progress(). """
    def __init__(self):
        # Instance-level state. The previous class-level `log = []` was a
        # single list shared by every Job instance, so messages leaked
        # between tests that each created their own Job.
        self.log = []
        self.progress = 0
    def set_progress(self, progress: int, msg: str):
        self.progress = progress
        self.log.append(msg)
def create_test_files(target: str, symlink_target_path: str) -> None:
    """ Populate `target` with the TEST_FILES payloads.

    Each file gets fixed mode / ownership, the TEST_FILE_XATTRS set, a
    companion symlink pointing at `symlink_target_path`, and forced
    atime / mtime values.
    """
    for filename, data in TEST_FILES:
        path = os.path.join(target, filename)
        with open(path, 'wb') as f:
            f.write(data)
            os.fchmod(f.fileno(), 0o666)
            os.fchown(f.fileno(), JENNY, JENNY + 1)
            f.flush()
        for xat_name, xat_data in TEST_FILE_XATTRS:
            os.setxattr(path, xat_name, xat_data)
        # symlink target outside of dirs to be copied around. Name the link
        # after the file it accompanies: a fixed name would collide and
        # raise FileExistsError on the second loop iteration.
        sl = f'{filename}_sl'
        os.symlink(symlink_target_path, os.path.join(target, sl))
        # this needs to be last op on file to avoid having other
        # changes affect atime / mtime
        os.utime(path, ns=(JENNY + 1, JENNY + 2))
def create_test_data(target: str, symlink_target_path) -> None:
    """ generate test data in randomized temporary directory
    Basic tree of files and directories including some symlinks
    """
    source = os.path.join(target, 'SOURCE')
    os.mkdir(source)
    for xat_name, xat_data in TEST_DIR_XATTRS:
        os.setxattr(source, xat_name, xat_data)
    # distinct owner/mode on SOURCE itself so attribute copies are checkable
    os.chown(source, JENNY + 10, JENNY + 11)
    os.chmod(source, 0o777)
    create_test_files(source, symlink_target_path)
    for dirname in TEST_DIRS:
        path = os.path.join(source, dirname)
        os.mkdir(path)
        os.chmod(path, 0o777)
        os.chown(path, JENNY, JENNY)
        for xat_name, xat_data in TEST_DIR_XATTRS:
            os.setxattr(path, xat_name, xat_data)
        # force atime and mtime to some value other than
        # current timestamp
        os.utime(path, ns=(JENNY + 3, JENNY + 4))
        # symlink target outside of dirs to be copied around
        sl = f'{dirname}_sl'
        os.symlink(symlink_target_path, os.path.join(path, sl))
        # create separate symlink dir for our test files
        # _outside_ SOURCE
        os.mkdir(os.path.join(target, dirname))
        create_test_files(path, os.path.join(target, dirname))
        # re-apply timestamps clobbered by file creation inside the dir
        os.utime(path, ns=(JENNY + 3, JENNY + 4))
    os.utime(source, ns=(JENNY + 5, JENNY + 6))
@pytest.fixture(scope="function")
def directory_for_test(tmpdir):
    """ generate test data in randomized temporary directory
    Basic tree of files and directories including some symlinks
    """
    # tmpdir doubles as the root for the out-of-tree symlink targets
    create_test_data(tmpdir, tmpdir)
    return tmpdir
def get_fd_count() -> int:
    """ Return how many file descriptors this process currently has open. """
    # Run a garbage-collection pass first so fds owned by unreachable
    # objects are closed and do not pollute the authoritative count.
    gc.collect()
    fd_entries = os.listdir('/proc/self/fd')
    return len(fd_entries)
@pytest.fixture(scope="module")
def fd_count() -> int:
    # baseline open-fd count; tests assert against this to detect leaks
    return get_fd_count()
def validate_attributes(
src: str,
dst: str,
flags: copy.CopyFlags
) -> None:
st_src = os.lstat(src)
st_dst = os.lstat(dst)
assert st_src.st_size == st_dst.st_size
match (file_type := stat.S_IFMT(st_src.st_mode)):
case stat.S_IFREG | stat.S_IFDIR:
pass
# validate we set owner / group when requested
op = eq if flags & copy.CopyFlags.OWNER else ne
assert op(st_src.st_uid, st_dst.st_uid)
assert op(st_src.st_gid, st_dst.st_gid)
# validate we preserve file mode when requested
op = eq if flags & copy.CopyFlags.PERMISSIONS else ne
assert op(st_src.st_mode, st_dst.st_mode)
# validate we preserve timestamps when requested
op = eq if flags & copy.CopyFlags.TIMESTAMPS else ne
# checking mtime is sufficient. Atime in test runner
# is enabled and so it will get reset on source when
# we're copying data around.
assert op(st_src.st_mtime_ns, st_dst.st_mtime_ns)
case stat.S_IFLNK:
src_tgt = os.readlink(src)
dst_tgt = os.readlink(dst)
assert eq(src_tgt, dst_tgt)
return
case _:
raise ValueError(f'{src}: unexpected file type: {file_type}')
# validate we set owner / group when requested
op = eq if flags & copy.CopyFlags.OWNER else ne
assert op(st_src.st_uid, st_dst.st_uid)
assert op(st_src.st_gid, st_dst.st_gid)
# validate we preserve file mode when requested
op = eq if flags & copy.CopyFlags.PERMISSIONS else ne
assert op(st_src.st_mode, st_dst.st_mode)
# validate we preserve timestamps when requested
# NOTE: futimens on linux only allows setting atime + mtime
op = eq if flags & copy.CopyFlags.TIMESTAMPS else ne
assert op(st_src.st_mtime_ns, st_dst.st_mtime_ns)
def validate_xattrs(
    src: str,
    dst: str,
    flags: copy.CopyFlags
) -> None:
    """ Assert that xattrs were copied iff CopyFlags.XATTRS was set. """
    if stat.S_ISLNK(os.lstat(src).st_mode):
        # Nothing to do since we don't follow symlinks
        return
    xat_src = os.listxattr(src)
    xat_dst = os.listxattr(dst)
    if flags & copy.CopyFlags.XATTRS:
        # names and data must match exactly on both sides
        assert len(xat_src) > 0
        assert len(xat_dst) > 0
        assert xat_src == xat_dst
        for xat_name in xat_src:
            xat_data_src = os.getxattr(src, xat_name)
            xat_data_dst = os.getxattr(dst, xat_name)
            assert len(xat_data_src) > 0
            assert xat_data_src == xat_data_dst
    else:
        # source always has xattrs (test fixture sets them); dest must not
        assert len(xat_src) > 0
        assert len(xat_dst) == 0
def validate_data(
    src: str,
    dst: str,
    flags: copy.CopyFlags
) -> None:
    """ Assert file contents match: directory listings for dirs, raw bytes
    for regular files; symlinks are handled by validate_attributes. """
    match (file_type := stat.S_IFMT(os.lstat(src).st_mode)):
        case stat.S_IFLNK:
            # readlink performed in validate_attributes
            return
        case stat.S_IFDIR:
            assert os.listdir(src) == os.listdir(dst)
            return
        case stat.S_IFREG:
            # validation performed below
            pass
        case _:
            raise ValueError(f'{src}: unexpected file type: {file_type}')
    with open(src, 'rb') as f:
        src_data = f.read()
    with open(dst, 'rb') as f:
        dst_data = f.read()
    assert src_data == dst_data
def validate_the_things(
    src: str,
    dst: str,
    flags: copy.CopyFlags
) -> None:
    """ Run every validator (data, xattrs, attributes) on a src/dst pair. """
    validate_data(src, dst, flags)
    validate_xattrs(src, dst, flags)
    validate_attributes(src, dst, flags)
def validate_copy_tree(
    src: str,
    dst: str,
    flags: copy.CopyFlags
):
    """ Recursively validate that `dst` mirrors `src` per `flags`. """
    with os.scandir(src) as it:
        for f in it:
            if f.name == 'CHILD':
                # skip validation of bind mountpoint
                continue
            new_src = os.path.join(src, f.name)
            new_dst = os.path.join(dst, f.name)
            validate_the_things(new_src, new_dst, flags)
            # recurse into real subdirectories only (not dir symlinks)
            if f.is_dir() and not f.is_symlink():
                validate_copy_tree(new_src, new_dst, flags)
    # finally validate the roots themselves
    validate_the_things(src, dst, flags)
def test__copytree_default(directory_for_test, fd_count):
    """ test basic behavior of copytree """
    src = os.path.join(directory_for_test, 'SOURCE')
    dst = os.path.join(directory_for_test, 'DEST')
    config = copy.CopyTreeConfig()
    # default configuration uses the module's default flag set
    assert config.flags == copy.DEF_CP_FLAGS
    stats = copy.copytree(src, dst, config)
    validate_copy_tree(src, dst, config.flags)
    # returned stats must reflect that files, dirs and symlinks were copied
    assert stats.files != 0
    assert stats.dirs != 0
    assert stats.symlinks != 0
    assert get_fd_count() == fd_count
@pytest.mark.parametrize('is_ctldir', [True, False])
def test__copytree_exclude_ctldir(directory_for_test, fd_count, is_ctldir):
    """ test that we do not recurse into ZFS ctldir """
    src = os.path.join(directory_for_test, 'SOURCE')
    dst = os.path.join(directory_for_test, 'DEST')
    snapdir = os.path.join(src, '.zfs', 'snapshot', 'now')
    os.makedirs(snapdir)
    with open(os.path.join(snapdir, 'canary'), 'w'):
        pass
    if is_ctldir:
        # Mock over method to determine whether path is in actual .zfs
        with patch(
            'middlewared.utils.filesystem.copy.path_in_ctldir', Mock(
                return_value=True
            )
        ):
            copy.copytree(src, dst, copy.CopyTreeConfig())
            # We should automatically exclude a real .zfs directory
            assert not os.path.exists(os.path.join(dst, '.zfs'))
    else:
        # This .zfs directory does not have special inode number
        # and so we know we can copy it.
        copy.copytree(src, dst, copy.CopyTreeConfig())
        assert os.path.exists(os.path.join(dst, '.zfs'))
@pytest.mark.parametrize('existok', [True, False])
def test__copytree_existok(directory_for_test, fd_count, existok):
    """ test behavior of `exist_ok` configuration option """
    src = os.path.join(directory_for_test, 'SOURCE')
    dst = os.path.join(directory_for_test, 'DEST')
    config = copy.CopyTreeConfig(exist_ok=existok)
    # pre-create the destination to trigger the exist_ok logic
    os.mkdir(dst)
    if existok:
        copy.copytree(src, dst, config)
        validate_copy_tree(src, dst, config.flags)
    else:
        with pytest.raises(FileExistsError):
            copy.copytree(src, dst, config)
    # no fd may leak in either the success or failure path
    assert get_fd_count() == fd_count
@pytest.mark.parametrize('flag', [
    copy.CopyFlags.XATTRS,
    copy.CopyFlags.PERMISSIONS,
    copy.CopyFlags.TIMESTAMPS,
    copy.CopyFlags.OWNER
])
def test__copytree_flags(directory_for_test, fd_count, flag):
    """
    copytree allows user to specify what types of metadata to
    preserve on copy similar to robocopy on Windows. This tests
    that setting individual flags results in copy of _only_
    the specified metadata.
    """
    src = os.path.join(directory_for_test, 'SOURCE')
    dst = os.path.join(directory_for_test, 'DEST')
    copy.copytree(src, dst, copy.CopyTreeConfig(flags=flag))
    # validators require non-requested metadata to differ on the dest
    validate_copy_tree(src, dst, flag)
    assert get_fd_count() == fd_count
def test__force_userspace_copy(directory_for_test, fd_count):
    """ force use of shutil.copyfileobj wrapper instead of copy_file_range """
    src = os.path.join(directory_for_test, 'SOURCE')
    dst = os.path.join(directory_for_test, 'DEST')
    flags = copy.DEF_CP_FLAGS
    # the resulting tree must be identical regardless of the copy backend
    copy.copytree(src, dst, copy.CopyTreeConfig(flags=flags, op=copy.CopyTreeOp.USERSPACE))
    validate_copy_tree(src, dst, flags)
    assert get_fd_count() == fd_count
def test__copytree_into_itself_simple(directory_for_test, fd_count):
    """ perform a basic copy of a tree into a subdirectory of itself.
    This simulates case where user has mistakenly set homedir to FOO
    and performs an update of homedir to switch it to FOO/username.
    If logic breaks then we'll end up with this test failing due to
    infinite recursion.
    """
    src = os.path.join(directory_for_test, 'SOURCE')
    dst = os.path.join(directory_for_test, 'SOURCE', 'DEST')
    copy.copytree(src, dst, copy.CopyTreeConfig())
    # the destination must not have been recursively copied into itself
    assert not os.path.exists(os.path.join(directory_for_test, 'SOURCE', 'DEST', 'DEST'))
    assert get_fd_count() == fd_count
def test__copytree_into_itself_complex(directory_for_test, fd_count):
    """ check recursion guard against deeper nested target """
    src = os.path.join(directory_for_test, 'SOURCE')
    dst = os.path.join(directory_for_test, 'SOURCE', 'FOO', 'BAR', 'DEST')
    os.makedirs(os.path.join(directory_for_test, 'SOURCE', 'FOO', 'BAR'))
    copy.copytree(src, dst, copy.CopyTreeConfig())
    # we expect to copy everything up to the point where we'd start infinite
    # recursion
    assert os.path.exists(os.path.join(dst, 'FOO', 'BAR'))
    # but not quite get there
    assert not os.path.exists(os.path.join(dst, 'FOO', 'BAR', 'DEST'))
    assert get_fd_count() == fd_count
def test__copytree_job_log(directory_for_test, fd_count):
    """ check that providing job object causes progress to be written properly """
    src = os.path.join(directory_for_test, 'SOURCE')
    dst = os.path.join(directory_for_test, 'DEST')
    job = Job()
    # job_msg_inc=1 forces a progress message per percentage point
    config = copy.CopyTreeConfig(job=job, job_msg_inc=1)
    copy.copytree(src, dst, config)
    assert job.progress == 100
    assert len(job.log) > 0
    last = job.log[-1]
    # final message reports overall success
    assert last.startswith('Successfully copied')
def test__copytree_job_log_prefix(directory_for_test, fd_count):
    """ check that log message prefix gets written as expected """
    src = os.path.join(directory_for_test, 'SOURCE')
    dst = os.path.join(directory_for_test, 'DEST')
    job = Job()
    config = copy.CopyTreeConfig(job=job, job_msg_inc=1, job_msg_prefix='Canary: ')
    copy.copytree(src, dst, config)
    assert job.progress == 100
    assert len(job.log) > 0
    last = job.log[-1]
    # the configured prefix must be prepended to every progress message
    assert last.startswith('Canary: Successfully copied')
def test__clone_file_somewhat_large(tmpdir):
    """ clone_file() must reproduce a 128 MiB file chunk-for-chunk. """
    src_fd = os.open(os.path.join(tmpdir, 'test_large_clone_src'), os.O_CREAT | os.O_RDWR)
    dst_fd = os.open(os.path.join(tmpdir, 'test_large_clone_dst'), os.O_CREAT | os.O_RDWR)
    chunk_sz = 1024 ** 2
    try:
        # write 128 x 1 MiB of random data into the source
        for i in range(0, 128):
            payload = random.randbytes(chunk_sz)
            os.pwrite(src_fd, payload, i * chunk_sz)
        copy.clone_file(src_fd, dst_fd)
        # verify every chunk matches after the clone
        for i in range(0, 128):
            src = os.pread(src_fd, chunk_sz, i * chunk_sz)
            dst = os.pread(dst_fd, chunk_sz, i * chunk_sz)
            assert src == dst
    finally:
        os.close(src_fd)
        os.close(dst_fd)
        os.unlink(os.path.join(tmpdir, 'test_large_clone_src'))
        os.unlink(os.path.join(tmpdir, 'test_large_clone_dst'))
def test__copy_default_fallthrough(tmpdir):
    """ verify we can fallthrough from CLONE to USERSPACE """
    src_fd = os.open(os.path.join(tmpdir, 'test_default_fallthrough_src'), os.O_CREAT | os.O_RDWR)
    dst_fd = os.open(os.path.join(tmpdir, 'test_default_fallthrough_dst'), os.O_CREAT | os.O_RDWR)
    chunk_sz = 1024 ** 2
    try:
        for i in range(0, 128):
            payload = random.randbytes(chunk_sz)
            os.pwrite(src_fd, payload, i * chunk_sz)
        # return value of 0 triggers fallthrough code
        with patch('os.sendfile', Mock(return_value=0)):
            # raising EXDEV triggers clone fallthrough
            with patch('middlewared.utils.filesystem.copy.clone_file', Mock(side_effect=OSError(errno.EXDEV, 'MOCK'))):
                copy.clone_or_copy_file(src_fd, dst_fd)
        # data must survive the CLONE -> sendfile -> userspace chain intact
        for i in range(0, 128):
            src = os.pread(src_fd, chunk_sz, i * chunk_sz)
            dst = os.pread(dst_fd, chunk_sz, i * chunk_sz)
            assert src == dst
    finally:
        os.close(src_fd)
        os.close(dst_fd)
        os.unlink(os.path.join(tmpdir, 'test_default_fallthrough_src'))
        os.unlink(os.path.join(tmpdir, 'test_default_fallthrough_dst'))
def test__copy_sendfile_fallthrough(tmpdir):
    """ verify that fallthrough to userspace copy from copy_sendfile works """
    src_fd = os.open(os.path.join(tmpdir, 'test_sendfile_fallthrough_src'), os.O_CREAT | os.O_RDWR)
    dst_fd = os.open(os.path.join(tmpdir, 'test_sendfile_fallthrough_dst'), os.O_CREAT | os.O_RDWR)
    chunk_sz = 1024 ** 2
    try:
        for i in range(0, 128):
            payload = random.randbytes(chunk_sz)
            os.pwrite(src_fd, payload, i * chunk_sz)
        # return value of 0 triggers fallthrough code
        with patch('os.sendfile', Mock(return_value=0)):
            copy.copy_sendfile(src_fd, dst_fd)
        # destination must be a byte-for-byte copy despite the fallthrough
        for i in range(0, 128):
            src = os.pread(src_fd, chunk_sz, i * chunk_sz)
            dst = os.pread(dst_fd, chunk_sz, i * chunk_sz)
            assert src == dst
    finally:
        os.close(src_fd)
        os.close(dst_fd)
        os.unlink(os.path.join(tmpdir, 'test_sendfile_fallthrough_src'))
        os.unlink(os.path.join(tmpdir, 'test_sendfile_fallthrough_dst'))
def test__copy_sendfile(tmpdir):
    """ verify that copy.sendfile preserves file data and does not by default fall through to userspace """
    src_fd = os.open(os.path.join(tmpdir, 'test_large_sendfile_src'), os.O_CREAT | os.O_RDWR)
    dst_fd = os.open(os.path.join(tmpdir, 'test_large_sendfile_dst'), os.O_CREAT | os.O_RDWR)
    chunk_sz = 1024 ** 2
    try:
        for i in range(0, 128):
            payload = random.randbytes(chunk_sz)
            os.pwrite(src_fd, payload, i * chunk_sz)
        # mock the userspace path to blow up if sendfile falls through
        with patch(
            'middlewared.utils.filesystem.copy.copy_file_userspace', Mock(
                side_effect=Exception('Unexpected fallthrough to copy_userspace')
            )
        ):
            copy.copy_sendfile(src_fd, dst_fd)
        for i in range(0, 128):
            src = os.pread(src_fd, chunk_sz, i * chunk_sz)
            dst = os.pread(dst_fd, chunk_sz, i * chunk_sz)
            assert src == dst
    finally:
        os.close(src_fd)
        os.close(dst_fd)
        os.unlink(os.path.join(tmpdir, 'test_large_sendfile_src'))
        os.unlink(os.path.join(tmpdir, 'test_large_sendfile_dst'))
| 17,721 | Python | .py | 417 | 35.266187 | 119 | 0.642791 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,111 | test_statx.py | truenas_middleware/tests/unit/test_statx.py | import os
import pytest
import stat
from middlewared.utils.filesystem import stat_x as sx
from middlewared.utils.filesystem.utils import timespec_convert_float
# stat properties compared between os.stat and our statx wrapper; each
# name is handled by a match case in validate_stat() below
BASIC_STAT_ATTRS = [
    'MODE',
    'UID',
    'GID',
    'ATIME',
    'MTIME',
    'CTIME',
    'DEV',
    'INO',
    'SIZE',
    'BLOCKS',
    'BLKSIZE',
    'NLINK',
]
def do_stat(filename, isdir):
    """ Return (os.stat result, statx result) for `filename`; unless
    `isdir` indicates the path is a directory, create it first as an
    empty regular file. """
    if not isdir:
        # create the regular file we are about to stat
        open(filename, "w").close()
    return (os.stat(filename), sx.statx(filename))
def validate_stat(stat_prop, st1, st2):
match stat_prop:
case 'MODE':
assert st1.st_mode == st2.stx_mode
case 'UID':
assert st1.st_uid == st2.stx_uid
case 'GID':
assert st1.st_gid == st2.stx_gid
case 'ATIME':
assert st1.st_atime == timespec_convert_float(st2.stx_atime)
case 'MTIME':
assert st1.st_mtime == timespec_convert_float(st2.stx_mtime)
case 'CTIME':
assert st1.st_ctime == timespec_convert_float(st2.stx_ctime)
case 'INO':
assert st1.st_ino == st2.stx_ino
case 'DEV':
assert st1.st_dev == os.makedev(st2.stx_dev_major, st2.stx_dev_minor)
case 'BLOCKS':
assert st1.st_blocks == st2.stx_blocks
case 'BLKSIZE':
assert st1.st_blksize == st2.stx_blksize
case 'NLINK':
assert st1.st_nlink == st2.stx_nlink
case 'SIZE':
assert st1.st_size == st2.stx_size
case _:
raise ValueError(f'{stat_prop}: unknown stat property')
@pytest.mark.parametrize('stat_prop', BASIC_STAT_ATTRS)
def test__check_statx_vs_stat_file(tmpdir, stat_prop):
st1, st2 = do_stat(os.path.join(tmpdir, 'testfile'), False)
validate_stat(stat_prop, st1, st2)
@pytest.mark.parametrize('stat_prop', BASIC_STAT_ATTRS)
def test__check_statx_vs_stat_dir(tmpdir, stat_prop):
st1, st2 = do_stat(str(tmpdir), True)
validate_stat(stat_prop, st1, st2)
def test__check_dirfd(tmpdir):
testfile = os.path.join(tmpdir, 'testfile')
with open(testfile, 'w'):
pass
stx1 = sx.statx(testfile)
try:
dirfd = os.open(tmpdir, os.O_PATH)
stx2 = sx.statx('testfile', dir_fd=dirfd)
finally:
os.close(dirfd)
assert stx1.stx_ino == stx2.stx_ino
def test__check_statx_empty_path(tmpdir):
# test fstat equivalent via statx interface
testfile = os.path.join(tmpdir, 'testfile')
with open(testfile, 'w'):
pass
stx1 = sx.statx(testfile)
try:
fd = os.open(testfile, os.O_PATH)
stx2 = sx.statx('', dir_fd=fd, flags=sx.ATFlags.EMPTY_PATH.value)
finally:
os.close(fd)
assert stx1.stx_ino == stx2.stx_ino
| 2,733 | Python | .py | 83 | 26 | 81 | 0.613688 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,112 | test_fenced.py | truenas_middleware/tests/unit/test_fenced.py | import pytest
from fenced.main import parse_ed
from fenced.utils import load_disks_impl
@pytest.mark.parametrize(
"exclude,expected",
[
("sda,", ("sda",)),
("", ()),
("sda,sdb", ("sda", "sdb")),
("sda, sdb", ("sda", "sdb")),
("sda,sdb sdc", ("sda", "sdb", "sdc")),
("sda sdb sdc", ("sda", "sdb", "sdc")),
("sda sdb sdc", ("sda", "sdb", "sdc")),
],
)
def test_parse_ed(exclude, expected):
assert parse_ed(exclude) == expected
@pytest.mark.parametrize("exclude", [tuple(), ("sda"), ("sda,sdb")])
def test_load_disks(exclude):
"""We need to make sure that fenced always enumerates
a list of disks."""
disks = load_disks_impl(exclude)
assert disks
for disk in exclude:
assert disk not in disks
| 805 | Python | .py | 25 | 27 | 68 | 0.569032 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,113 | test_groupmap.py | truenas_middleware/tests/unit/test_groupmap.py | import os
import pytest
from middlewared.plugins.smb_.util_groupmap import (
insert_groupmap_entries,
delete_groupmap_entry,
list_foreign_group_memberships,
query_groupmap_entries,
SMBGroupMap,
SMBGroupMembership,
GroupmapEntryType,
GroupmapFile,
)
from middlewared.service_exception import MatchNotFound
from middlewared.utils.sid import (
lsa_sidtype,
random_sid
)
from middlewared.utils.tdb import close_sysdataset_tdb_handles
@pytest.fixture(scope='module')
def groupmap_dir():
os.makedirs('/var/db/system/samba4', exist_ok=True)
try:
# pre-emptively delete in case we're running on a TrueNAS VM
os.unlink('/var/db/system/samba4/group_mapping.tdb')
except FileNotFoundError:
pass
@pytest.fixture(scope='module')
def local_sid():
try:
yield random_sid()
finally:
# cleanup our tdb handles
close_sysdataset_tdb_handles()
def test__insert_groupmap(groupmap_dir, local_sid):
""" Test that we can properly insert and retrieve UNIXGROUP TDB entries """
entries = [
SMBGroupMap(
sid=f'{local_sid}-2000010',
gid=3000,
sid_type=lsa_sidtype.ALIAS,
name='bob',
comment=''
),
SMBGroupMap(
sid=f'{local_sid}-2000011',
gid=3001,
sid_type=lsa_sidtype.ALIAS,
name='larry',
comment=''
)
]
insert_groupmap_entries(GroupmapFile.DEFAULT, entries)
bob = query_groupmap_entries(GroupmapFile.DEFAULT, [
['entry_type', '=', GroupmapEntryType.GROUP_MAPPING.name],
['name', '=', 'bob']
], {'get': True})
assert bob['sid'] == f'{local_sid}-2000010'
assert bob['gid'] == 3000
assert bob['sid_type'] == lsa_sidtype.ALIAS
assert bob['name'] == 'bob'
assert bob['comment'] == ''
larry = query_groupmap_entries(GroupmapFile.DEFAULT, [
['entry_type', '=', GroupmapEntryType.GROUP_MAPPING.name],
['name', '=', 'larry']
], {'get': True})
assert larry['sid'] == f'{local_sid}-2000011'
assert larry['gid'] == 3001
assert larry['sid_type'] == lsa_sidtype.ALIAS
assert larry['name'] == 'larry'
assert larry['comment'] == ''
delete_groupmap_entry(
GroupmapFile.DEFAULT,
GroupmapEntryType.GROUP_MAPPING,
f'{local_sid}-2000010'
)
entry = query_groupmap_entries(GroupmapFile.DEFAULT, [
['entry_type', '=', GroupmapEntryType.GROUP_MAPPING.name],
], {'get': True})
assert entry['name'] == 'larry'
delete_groupmap_entry(
GroupmapFile.DEFAULT,
GroupmapEntryType.GROUP_MAPPING,
f'{local_sid}-2000011'
)
with pytest.raises(MatchNotFound):
query_groupmap_entries(GroupmapFile.DEFAULT, [
['entry_type', '=', GroupmapEntryType.GROUP_MAPPING.name],
['name', '=', 'larry']
], {'get': True})
groupmaps = query_groupmap_entries(GroupmapFile.DEFAULT, [
['entry_type', '=', GroupmapEntryType.GROUP_MAPPING.name],
], {})
assert len(groupmaps) == 0
def test__insert_group_membership(groupmap_dir, local_sid):
""" test that we can insert, retrive, and delete MEMBEROF TDB entries """
# Create mutiple entries that are members of same set of groups
# so that we can test reverse lookups.
entries = [
SMBGroupMembership(
sid=f'{local_sid}-2000010',
groups=('S-1-5-32-544',)
),
SMBGroupMembership(
sid=f'{local_sid}-2000011',
groups=('S-1-5-32-544',)
),
SMBGroupMembership(
sid=f'{local_sid}-2000012',
groups=('S-1-5-32-545',)
),
SMBGroupMembership(
sid=f'{local_sid}-2000013',
groups=('S-1-5-32-545',)
),
]
# Validate we can set multiple entries
insert_groupmap_entries(GroupmapFile.DEFAULT, entries)
res = query_groupmap_entries(GroupmapFile.DEFAULT, [
['entry_type', '=', GroupmapEntryType.MEMBERSHIP.name],
], {})
for entry in res:
# Validate that the values are associated with expected keys
if entry['sid'] in (f'{local_sid}-2000010', f'{local_sid}-2000011'):
assert set(entry['groups']) == {'S-1-5-32-544'}
elif entry['sid'] in (f'{local_sid}-2000012', f'{local_sid}-2000013'):
assert set(entry['groups']) == {'S-1-5-32-545'}
else:
raise ValueError(f'Unexpected entry: {entry}')
# validate that the reverse lookups by SID also work correctly and return
# expected set of SIDs.
res = list_foreign_group_memberships(GroupmapFile.DEFAULT, 'S-1-5-32-544')
assert set(res) == {f'{local_sid}-2000010', f'{local_sid}-2000011'}
res = list_foreign_group_memberships(GroupmapFile.DEFAULT, 'S-1-5-32-545')
assert set(res) == {f'{local_sid}-2000012', f'{local_sid}-2000013'}
# Validate that deleting MEMBEROF entries works correctly
for entry in entries:
delete_groupmap_entry(
GroupmapFile.DEFAULT,
GroupmapEntryType.MEMBERSHIP,
entry.sid
)
with pytest.raises(MatchNotFound):
query_groupmap_entries(GroupmapFile.DEFAULT, [
['entry_type', '=', GroupmapEntryType.MEMBERSHIP.name],
['sid', '=', entry.sid]
], {'get': True})
entries = query_groupmap_entries(GroupmapFile.DEFAULT, [
['entry_type', '=', GroupmapEntryType.MEMBERSHIP.name],
], {})
assert len(entries) == 0, str(entries)
| 5,599 | Python | .py | 150 | 29.773333 | 79 | 0.618406 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,114 | test_pam_tdb.py | truenas_middleware/tests/unit/test_pam_tdb.py | import os
import pam
import pwd
import pytest
import tdb
from collections.abc import Generator
from contextlib import contextmanager
from middlewared.utils import crypto
from middlewared.utils import user_api_key
from time import monotonic
EXPIRED_TS = 1
BASE_ID = 1325
LEGACY_ENTRY_KEY = 'rtpz6u16l42XJJGy5KMJOVfkiQH7CyitaoplXy7TqFTmY7zHqaPXuA1ob07B9bcB'
LEGACY_ENTRY_HASH = '$pbkdf2-sha256$29000$CyGktHYOwXgvBYDQOqc05g$nK1MMvVuPGHMvUENyR01qNsaZjgGmlt3k08CRuC4aTI'
INVALID_HASH_TYPE = '$pbkdf2-canary$29000$CyGktHYOwXgvBYDQOqc05g$nK1MMvVuPGHMvUENyR01qNsaZjgGmlt3k08CRuC4aTI'
INVALID_SALT = '$pbkdf2-sha256$29000$CyGktHYOwXgvBYDQOqc0*g$nK1MMvVuPGHMvUENyR01qNsaZjgGmlt3k08CRuC4aTI'
INVALID_HASH = '$pbkdf2-sha256$29000$CyGktHYOwXgvBYDQOqc05g$nK1MMvVuPGHMvUENyR01qNsaZjgGmlt3k08CRuC4a*I'
MISSING_SALT = '$pbkdf2-sha256$29000$$nK1MMvVuPGHMvUENyR01qNsaZjgGmlt3k08CRuC4aTI'
MISSING_HASH = '$pbkdf2-sha256$29000$CyGktHYOwXgvBYDQOqc05g$'
EMPTY_HASH_STRING = ''
PAM_DIR = '/etc/pam.d'
PAM_FILE = 'middleware-api-key'
PAM_AUTH_LINE = 'auth [success=1 default=die] pam_tdb.so debug '
PAM_FAIL_DELAY = 1 # pam_fail_delay is 2s, but we need a little wiggle-room
PAM_FILE_REMAINING_CONTENTS = """
@include common-auth-unix
@include common-account
password required pam_deny.so
session required pam_deny.so
"""
def write_tdb_file(
username: str,
hashlist: list[str],
expired: bool = False
) -> int:
"""
Generate a tdb file based on the specified parameters
The resulting TDB will have one entry for `username` and
a varying amount of hashes.
Although each hash supports a separate expiry, we are only
concerned in these tests expired hashes generate PAM_AUTH_ERR
as expected.
"""
keys = []
idx = 0
for idx, thehash in enumerate(hashlist):
keys.append(user_api_key.UserApiKey(
userhash=thehash,
dbid=BASE_ID + idx,
expiry=EXPIRED_TS if expired else 0
))
entry = user_api_key.PamTdbEntry(username=username, keys=keys)
user_api_key.flush_user_api_keys([entry])
return BASE_ID + idx
def truncate_tdb_file(username: str) -> None:
"""
Truncate tdb entry to make pascal string point off end of buffer
If this sets PAM_AUTH_ERR then we need to look closely to make
sure we don't have parser issues in pam_tdb.c
"""
hdl = tdb.open(user_api_key.PAM_TDB_FILE)
try:
data = hdl.get(username.encode())
hdl.store(username.encode(), data[0:len(data) - 5])
finally:
hdl.close()
def make_tdb_garbage(username: str) -> None:
""" fill entry with non-api-key data """
hdl = tdb.open(user_api_key.PAM_TDB_FILE)
try:
hdl.get(username.encode())
hdl.store(username.encode(), b'meow')
finally:
hdl.close()
def make_null_tdb_entry(username: str) -> None:
""" throw some nulls into the mix for fun """
hdl = tdb.open(user_api_key.PAM_TDB_FILE)
try:
hdl.get(username.encode())
hdl.store(username.encode(), b'\x00' * 128)
finally:
hdl.close()
@contextmanager
def pam_service(
file_name: str = PAM_FILE,
admin_user: str | None = None,
) -> Generator[str, None, None]:
""" Create a pam service file for pam_tdb.so """
auth_entry = PAM_AUTH_LINE
if admin_user:
auth_entry += f'truenas_admin={admin_user}'
pam_service_path = os.path.join(PAM_DIR, file_name)
with open(pam_service_path, 'w') as f:
f.write(auth_entry)
f.write(PAM_FILE_REMAINING_CONTENTS)
f.flush()
try:
yield file_name
finally:
os.remove(pam_service_path)
@contextmanager
def fail_delay() -> Generator[None, None, None]:
"""
Assert if failure case finishes faster than our expected fail delay, which
is an amount of time randomly distributed (by up to 25%) about the longest
value set. In our case, this is 2 seconds and so the can be anywhere
between 1.5 seconds and 2.5 seconds. We are only concerned in these tests
about the lower margin to ensure that we aren't immediately failing. If
we don't insert a failure delay then this may introduce vulnerability to
timing attacks on passwords.
"""
now = monotonic()
yield
elapsed = monotonic() - now
assert elapsed > PAM_FAIL_DELAY
@pytest.fixture(scope='module')
def current_username():
""" for simplicity sake we'll test against current user """
return pwd.getpwuid(os.geteuid()).pw_name
def test_unknown_user(current_username):
"""
A user without an entry in the file should fail with appropriate error
and generate a fail delay
"""
db_id = write_tdb_file(current_username, [LEGACY_ENTRY_HASH])
with pam_service(admin_user=current_username) as svc:
p = pam.pam()
with fail_delay():
authd = p.authenticate('canary', f'{db_id}-{LEGACY_ENTRY_KEY}', service=svc)
assert authd is False
assert p.code == pam.PAM_USER_UNKNOWN
def test_legacy_auth_admin(current_username):
""" This should succeed for specified admin user """
db_id = write_tdb_file(current_username, [LEGACY_ENTRY_HASH])
with pam_service(admin_user=current_username) as svc:
p = pam.pam()
authd = p.authenticate(current_username, f'{db_id}-{LEGACY_ENTRY_KEY}', service=svc)
assert authd is True
assert p.code == pam.PAM_SUCCESS
with fail_delay():
# attempt to authenticate with invalid key should trigger a fail delay
authd = p.authenticate(current_username, f'{db_id}-{LEGACY_ENTRY_KEY[0:-1]}', service=svc)
assert authd is False
assert p.code == pam.PAM_AUTH_ERR
def test_legacy_auth_admin_expired_key(current_username):
""" Verify that an expired key results in PAM_CRED_EXPIRED """
db_id = write_tdb_file(current_username, [LEGACY_ENTRY_HASH], True)
with pam_service(admin_user=current_username) as svc:
p = pam.pam()
authd = p.authenticate(current_username, f'{db_id}-{LEGACY_ENTRY_KEY}', service=svc)
assert authd is False
assert p.code == pam.PAM_CRED_EXPIRED
def test_legacy_auth_non_admin(current_username):
""" Test that legacy hash doesn't work for non-admin user
We really want to deprecate these legacy keys.
"""
write_tdb_file(current_username, [LEGACY_ENTRY_HASH])
with pam_service() as svc:
with fail_delay():
p = pam.pam()
authd = p.authenticate(current_username, LEGACY_ENTRY_KEY, service=svc)
assert authd is False
assert p.code == pam.PAM_AUTH_ERR
def test_legacy_auth_multiple_entries(current_username):
""" verify last entry in hash list can be used to auth
We allow multiple keys per user. Ensure that we can use more than the
first key.
"""
hashes = [crypto.generate_pbkdf2_512('canary') for i in range(0, 5)]
hashes.append(LEGACY_ENTRY_HASH)
db_id = write_tdb_file(current_username, hashes)
with pam_service(admin_user=current_username) as svc:
p = pam.pam()
authd = p.authenticate(current_username, f'{db_id}-{LEGACY_ENTRY_KEY}', service=svc)
assert authd is True
assert p.code == pam.PAM_SUCCESS
def test_new_auth(current_username):
""" verify that that new hash works as expected """
key = crypto.generate_string(string_size=64)
db_id = write_tdb_file(current_username, [crypto.generate_pbkdf2_512(key)])
with pam_service() as svc:
p = pam.pam()
# verify that using correct key succeeds
authd = p.authenticate(current_username, f'{db_id}-{key}', service=svc)
assert authd is True
assert p.code == pam.PAM_SUCCESS
# verify that using incorrect key fails
with fail_delay():
authd = p.authenticate(current_username, f'{db_id}-{key[0:-1]}', service=svc)
assert authd is False
assert p.code == pam.PAM_AUTH_ERR
def test_new_auth_truncated_password(current_username):
""" Verify that truncated password generates auth error """
key = crypto.generate_string(string_size=64)
db_id = write_tdb_file(current_username, [crypto.generate_pbkdf2_512(key)])
with pam_service() as svc:
p = pam.pam()
with fail_delay():
authd = p.authenticate(current_username, f'{db_id}-', service=svc)
assert authd is False
assert p.code == pam.PAM_AUTH_ERR
def test_new_auth_multi(current_username):
""" verify that second key works with newer hash """
key = crypto.generate_string(string_size=64)
db_id = write_tdb_file(current_username, [
LEGACY_ENTRY_HASH,
crypto.generate_pbkdf2_512(key)
])
with pam_service() as svc:
p = pam.pam()
# verify that using correct key succeeds
authd = p.authenticate(current_username, f'{db_id}-{key}', service=svc)
assert authd is True
assert p.code == pam.PAM_SUCCESS
# verify that using incorrect key fails
with fail_delay():
authd = p.authenticate(current_username, f'{db_id}-{key[0:-1]}', service=svc)
assert authd is False
assert p.code == pam.PAM_AUTH_ERR
def test_new_auth_timeout(current_username):
""" verify that valid but expired key denies auth with expected error code """
key = crypto.generate_string(string_size=64)
db_id = write_tdb_file(current_username, [crypto.generate_pbkdf2_512(key)], True)
with pam_service() as svc:
p = pam.pam()
with fail_delay():
authd = p.authenticate(current_username, f'{db_id}-{key}', service=svc)
assert authd is False
assert p.code == pam.PAM_CRED_EXPIRED
def test_unsupported_service_file_name(current_username):
""" pam_tdb has strict check that it can't be used for other services """
key = crypto.generate_string(string_size=64)
db_id = write_tdb_file(current_username, [crypto.generate_pbkdf2_512(key)])
with pam_service(file_name='canary') as svc:
p = pam.pam()
# verify that using correct key succeeds
authd = p.authenticate(current_username, f'{db_id}-{key}', service=svc)
assert authd is False
assert p.code == pam.PAM_SYSTEM_ERR
@pytest.mark.parametrize('thehash,pam_error', [
(INVALID_HASH_TYPE, pam.PAM_AUTH_ERR),
(INVALID_SALT, pam.PAM_AUTH_ERR),
(INVALID_HASH, pam.PAM_AUTH_ERR),
(MISSING_SALT, pam.PAM_AUTH_ERR),
(MISSING_HASH, pam.PAM_AUTH_ERR),
(EMPTY_HASH_STRING, pam.PAM_AUTHINFO_UNAVAIL),
])
def test_invalid_hash(current_username, thehash, pam_error):
""" Check that variations of broken hash entries generate expected error """
db_id = write_tdb_file(current_username, [thehash])
with pam_service(admin_user=current_username) as svc:
p = pam.pam()
# verify that using correct key succeeds
authd = p.authenticate(current_username, f'{db_id}-{LEGACY_ENTRY_KEY}', service=svc)
assert authd is False
assert p.code == pam_error
@pytest.mark.parametrize('fuzz_fn', [
truncate_tdb_file,
make_tdb_garbage,
make_null_tdb_entry,
])
def test_invalid_tdb_data(current_username, fuzz_fn):
""" verify we detect garbage tdb entry and flag for reinit"""
key = crypto.generate_string(string_size=64)
db_id = write_tdb_file(current_username, [crypto.generate_pbkdf2_512(key)], True)
fuzz_fn(current_username)
with pam_service() as svc:
p = pam.pam()
authd = p.authenticate(current_username, f'{db_id}-{key}', service=svc)
assert authd is False
assert p.code == pam.PAM_AUTHINFO_UNAVAIL
| 11,711 | Python | .py | 271 | 37.103321 | 109 | 0.678995 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,115 | test_cloud_sync.py | truenas_middleware/tests/api2/test_cloud_sync.py | import re
import time
import pytest
from middlewared.test.integration.assets.cloud_sync import (
credential, task, local_ftp_credential, local_ftp_task, run_task,
)
from middlewared.test.integration.assets.ftp import anonymous_ftp_server, ftp_server_with_user_account
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call, pool, ssh
from middlewared.test.integration.utils.client import truenas_server
import sys
import os
apifolder = os.getcwd()
sys.path.append(apifolder)
from auto_config import ha
def test_include():
with local_ftp_task({
"include": ["/office/**", "/work/**"],
}) as task:
ssh(f'mkdir {task["path"]}/office')
ssh(f'touch {task["path"]}/office/paper')
ssh(f'mkdir {task["path"]}/work')
ssh(f'touch {task["path"]}/work/code')
ssh(f'mkdir {task["path"]}/games')
ssh(f'touch {task["path"]}/games/minecraft')
ssh(f'touch {task["path"]}/fun')
run_task(task)
assert ssh(f'ls /mnt/{pool}/cloudsync_remote') == 'office\nwork\n'
def test_exclude_recycle_bin():
with local_ftp_task({
"exclude": ["$RECYCLE.BIN/"],
}) as task:
ssh(f'mkdir {task["path"]}/\'$RECYCLE.BIN\'')
ssh(f'touch {task["path"]}/\'$RECYCLE.BIN\'/garbage')
ssh(f'touch {task["path"]}/file')
run_task(task)
assert ssh(f'ls /mnt/{pool}/cloudsync_remote') == 'file\n'
@pytest.mark.flaky(reruns=5, reruns_delay=5)
@pytest.mark.parametrize("anonymous", [True, False])
@pytest.mark.parametrize("defaultroot", [True, False])
@pytest.mark.parametrize("has_leading_slash", [True, False])
def test_ftp_subfolder(anonymous, defaultroot, has_leading_slash):
with dataset("cloudsync_local") as local_dataset:
config = {"defaultroot": defaultroot}
with (anonymous_ftp_server if anonymous else ftp_server_with_user_account)(config) as ftp:
remote_dataset = ftp.dataset
ssh(f"touch /mnt/{remote_dataset}/bad-file")
ssh(f"mkdir /mnt/{remote_dataset}/data")
ssh(f"touch /mnt/{remote_dataset}/data/another-bad-file")
ssh(f"mkdir /mnt/{remote_dataset}/data/child")
ssh(f"touch /mnt/{remote_dataset}/data/child/good-file")
with credential({
"name": "Test",
"provider": "FTP",
"attributes": {
"host": "localhost",
"port": 21,
"user": ftp.username,
"pass": ftp.password,
},
}) as c:
folder = f"{'/' if has_leading_slash else ''}data/child"
if not anonymous and not defaultroot:
# We have access to the FTP server root directory
if has_leading_slash:
# A path with a leading slash should be complete path in this case
folder = f"/mnt/{ftp.dataset}/data/child"
with task({
"direction": "PULL",
"transfer_mode": "MOVE",
"path": f"/mnt/{local_dataset}",
"credentials": c["id"],
"attributes": {
"folder": folder,
},
}) as t:
run_task(t)
assert ssh(f'ls /mnt/{local_dataset}') == 'good-file\n'
@pytest.mark.parametrize("has_zvol_sibling", [True, False])
def test_snapshot(has_zvol_sibling):
with dataset("test_cloudsync_snapshot") as ds:
ssh(f"mkdir -p /mnt/{ds}/dir1/dir2")
ssh(f"dd if=/dev/urandom of=/mnt/{ds}/dir1/dir2/blob bs=1M count=1")
if has_zvol_sibling:
ssh(f"zfs create -V 1gb {pool}/zvol")
try:
with local_ftp_task({
"path": f"/mnt/{ds}/dir1/dir2",
"bwlimit": [{"time": "00:00", "bandwidth": 1024 * 200}], # So it'll take 5 seconds
"snapshot": True,
}) as task:
job_id = call("cloudsync.sync", task["id"])
time.sleep(2.5)
ps_ax = ssh("ps ax | grep rclone")
call("core.job_wait", job_id, job=True)
assert re.search(rf"rclone .+ /mnt/{ds}/.zfs/snapshot/cloud_sync-[0-9]+-[0-9]+/dir1/dir2", ps_ax)
time.sleep(1)
assert call("zfs.snapshot.query", [["dataset", "=", ds]]) == []
finally:
if has_zvol_sibling:
ssh(f"zfs destroy -r {pool}/zvol")
def test_sync_onetime():
with dataset("cloudsync_local") as local_dataset:
with local_ftp_credential() as c:
call("cloudsync.sync_onetime", {
"direction": "PUSH",
"transfer_mode": "COPY",
"path": f"/mnt/{local_dataset}",
"credentials": c["id"],
"attributes": {
"folder": "",
},
}, job=True)
def test_abort():
with dataset("test_cloudsync_abort") as ds:
ssh(f"dd if=/dev/urandom of=/mnt/{ds}/blob bs=1M count=1")
with local_ftp_task({
"path": f"/mnt/{ds}",
"bwlimit": [{"time": "00:00", "bandwidth": 1024 * 100}], # So it'll take 10 seconds
}) as task:
job_id = call("cloudsync.sync", task["id"])
time.sleep(2.5)
call("core.job_abort", job_id)
for i in range(10):
time.sleep(1)
state = call("cloudsync.query", [["id", "=", task["id"]]], {"get": True})["job"]["state"]
if state == "RUNNING":
continue
elif state == "ABORTED":
break
else:
assert False, f"Cloud sync task is {state}"
else:
assert False, "Cloud sync task was not aborted"
assert "rclone" not in ssh("ps ax")
@pytest.mark.flaky(reruns=5, reruns_delay=5)
@pytest.mark.parametrize("create_empty_src_dirs", [True, False])
def test_create_empty_src_dirs(create_empty_src_dirs):
with dataset("cloudsync_local") as local_dataset:
ssh(f"mkdir /mnt/{local_dataset}/empty-dir")
ssh(f"mkdir /mnt/{local_dataset}/non-empty-dir")
ssh(f"touch /mnt/{local_dataset}/non-empty-dir/file")
with anonymous_ftp_server() as ftp:
with credential({
"name": "Test",
"provider": "FTP",
"attributes": {
"host": "localhost",
"port": 21,
"user": ftp.username,
"pass": ftp.password,
},
}) as c:
with task({
"direction": "PUSH",
"transfer_mode": "SYNC",
"path": f"/mnt/{local_dataset}",
"credentials": c["id"],
"attributes": {
"folder": "",
},
"create_empty_src_dirs": create_empty_src_dirs,
}) as t:
run_task(t)
if create_empty_src_dirs:
assert ssh(f'ls /mnt/{ftp.dataset}') == 'empty-dir\nnon-empty-dir\n'
else:
assert ssh(f'ls /mnt/{ftp.dataset}') == 'non-empty-dir\n'
def test_state_persist():
with dataset("test_cloudsync_state_persist") as ds:
with local_ftp_task({
"path": f"/mnt/{ds}",
}) as task:
call("cloudsync.sync", task["id"], job=True)
row = call("datastore.query", "tasks.cloudsync", [["id", "=", task["id"]]], {"get": True})
assert row["job"]["state"] == "SUCCESS"
if ha:
def test_state_failover():
assert call("failover.status") == "MASTER"
ha_ips = truenas_server.ha_ips()
with dataset("test_cloudsync_state_failover") as ds:
with local_ftp_task({"path": f"/mnt/{ds}"}) as task:
call("cloudsync.sync", task["id"], job=True)
time.sleep(5) # Job sending is not synchronous, allow it to propagate
file1_path = call("cloudsync.get_instance", task["id"])["job"]["logs_path"]
file1_contents = ssh(f'cat {file1_path}', ip=ha_ips['active'])
assert file1_contents
file2_path = call("failover.call_remote", "cloudsync.get_instance", [task["id"]])["job"]["logs_path"]
file2_contents = ssh(f'cat {file2_path}', ip=ha_ips['standby'])
assert file2_contents
assert file1_contents == file2_contents
| 8,740 | Python | .py | 191 | 33.010471 | 117 | 0.524997 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,116 | test_dataset_encryption_keys_in_replication.py | truenas_middleware/tests/api2/test_dataset_encryption_keys_in_replication.py | import pytest
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.assets.replication import replication_task
from middlewared.test.integration.utils import call
BASE_REPLICATION = {
'direction': 'PUSH',
'transport': 'LOCAL',
'source_datasets': [],
'target_dataset': None,
'recursive': False,
'auto': False,
'retention_policy': 'NONE',
}
def encryption_props():
return {
'encryption_options': {'generate_key': True},
'encryption': True,
'inherit_encryption': False
}
def make_assertions(source_datasets, task_id, target_dataset, unlocked_datasets):
for source_ds in source_datasets:
call('zfs.snapshot.create', {'dataset': source_ds, 'name': 'snaptest-1', 'recursive': True})
call('replication.run', task_id, job=True)
keys = call('pool.dataset.export_keys_for_replication_internal', task_id)
unlocked_info = call(
'pool.dataset.unlock', target_dataset.split('/', 1)[0], {
'datasets': [{'name': name, 'key': key} for name, key in keys.items()],
'recursive': True,
}, job=True
)
assert set(unlocked_info['unlocked']) == set(unlocked_datasets), unlocked_info
def test_single_source_replication():
with dataset('source_test', encryption_props()) as src:
with dataset('parent_destination', encryption_props()) as parent_ds:
with dataset(f'{parent_ds.rsplit("/", 1)[-1]}/destination_test') as dst:
with replication_task({
**BASE_REPLICATION,
'name': 'encryption_replication_test',
'source_datasets': [src],
'target_dataset': dst,
'name_regex': '.+',
'auto': False,
}) as task:
make_assertions([src], task['id'], dst, [dst])
def test_single_source_recursive_replication():
with dataset('source_test', encryption_props()) as src:
with dataset(f'{src.rsplit("/", 1)[-1]}/child_source_test', encryption_props()) as child_src:
with dataset('parent_destination', encryption_props()) as parent_ds:
with dataset(f'{parent_ds.rsplit("/", 1)[-1]}/destination_test') as dst:
with replication_task({
**BASE_REPLICATION,
'name': 'encryption_replication_test',
'source_datasets': [src],
'target_dataset': dst,
'name_regex': '.+',
'auto': False,
'recursive': True,
}) as task:
make_assertions([src], task['id'], dst, [dst, f'{dst}/{child_src.rsplit("/", 1)[-1]}'])
def test_single_source_child_encrypted_replication():
with dataset('source_test', encryption_props()) as src:
with dataset(f'{src.rsplit("/", 1)[-1]}/child_source_test', encryption_props()) as child_src:
with dataset('parent_destination', encryption_props()) as parent_ds:
with dataset(f'{parent_ds.rsplit("/", 1)[-1]}/destination_test') as dst:
with replication_task({
**BASE_REPLICATION,
'name': 'encryption_replication_test',
'source_datasets': [child_src],
'target_dataset': dst,
'name_regex': '.+',
'auto': False,
'recursive': True,
}) as task:
make_assertions([child_src], task['id'], dst, [dst])
def test_multiple_source_replication():
with dataset('source_test1', encryption_props()) as src1:
with dataset('source_test2', encryption_props()) as src2:
with dataset('parent_destination', encryption_props()) as parent_ds:
with dataset(f'{parent_ds.rsplit("/", 1)[-1]}/destination_test') as dst:
with replication_task({
**BASE_REPLICATION,
'name': 'encryption_replication_test',
'source_datasets': [src1, src2],
'target_dataset': dst,
'name_regex': '.+',
'auto': False,
}) as task:
make_assertions(
[src1, src2], task['id'], dst, [f'{dst}/{k.rsplit("/", 1)[-1]}' for k in [src1, src2]]
)
def test_multiple_source_recursive_replication():
with dataset('source_test1', encryption_props()) as src1:
with dataset(f'{src1.rsplit("/", 1)[-1]}/child_source_test1', encryption_props()) as child_src1:
with dataset('source_test2', encryption_props()) as src2:
with dataset(f'{src2.rsplit("/", 1)[-1]}/child_source_test2', encryption_props()) as child_src2:
with dataset('parent_destination', encryption_props()) as parent_ds:
with dataset(f'{parent_ds.rsplit("/", 1)[-1]}/destination_test') as dst:
with replication_task({
**BASE_REPLICATION,
'name': 'encryption_replication_test',
'source_datasets': [src1, src2],
'target_dataset': dst,
'name_regex': '.+',
'auto': False,
'recursive': True,
}) as task:
make_assertions(
[src1, src2], task['id'], dst, [
f'{dst}/{"/".join(k.rsplit("/")[-abs(n):])}' for k, n in [
(src1, 1), (src2, 1), (child_src1, 2), (child_src2, 2),
]
]
)
@pytest.mark.parametrize('keys_available_for_download', [False, True])
def test_replication_task_reports_keys_available_for_download(keys_available_for_download):
with dataset('source_test', encryption_props() if keys_available_for_download else {}) as src:
with dataset('parent_destination', encryption_props() if keys_available_for_download else {}) as parent_ds:
with dataset(f'{parent_ds.rsplit("/", 1)[-1]}/destination_test') as dst:
with replication_task({
**BASE_REPLICATION,
'name': 'encryption_replication_test',
'source_datasets': [src],
'target_dataset': dst,
'name_regex': '.+',
'auto': False,
}) as task:
task = call(
'replication.get_instance', task['id'], {'extra': {'check_dataset_encryption_keys': True}}
)
assert task['has_encrypted_dataset_keys'] is keys_available_for_download, task
| 7,138 | Python | .py | 130 | 37.930769 | 115 | 0.507514 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,117 | test_pool_dataset_create.py | truenas_middleware/tests/api2/test_pool_dataset_create.py | import pytest
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call
@pytest.mark.parametrize("child", ["a/b", "a/b/c"])
def test_pool_dataset_create_ancestors(child):
with dataset("ancestors_create_test") as test_ds:
name = f"{test_ds}/{child}"
call("pool.dataset.create", {"name": name, "create_ancestors": True})
call("pool.dataset.get_instance", name)
| 445 | Python | .py | 9 | 45 | 77 | 0.718245 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,118 | test_password_reset.py | truenas_middleware/tests/api2/test_password_reset.py | #!/usr/bin/env python3
import errno
import pytest
import secrets
import string
from middlewared.service_exception import CallError, ValidationErrors
from middlewared.test.integration.assets.account import user
from middlewared.test.integration.assets.account import unprivileged_user
from middlewared.test.integration.utils import call, client
from middlewared.test.integration.utils.audit import expect_audit_method_calls
TEST_USERNAME = 'testpasswduser'
TEST_USERNAME_2 = 'testpasswduser2'
TEST_GROUPNAME = 'testpasswdgroup'
TEST_PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
TEST_PASSWORD_2 = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
TEST_PASSWORD2 = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
TEST_PASSWORD2_2 = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
REDACTED = '********'
def test_restricted_user_set_password():
    """Exercise user.set_password authorization rules.

    Covers: a READONLY_ADMIN user resetting their own password (with audit
    entry), FULL_ADMIN resetting anyone's password (with or without the old
    password), and the failures expected for limited users changing other
    accounts, skipping the old-password check, supplying a wrong old password,
    or resetting a password-disabled / locked account.
    """
    with unprivileged_user(
        username=TEST_USERNAME,
        group_name=TEST_GROUPNAME,
        privilege_name='TEST_PASSWD_RESET_PRIVILEGE',
        allowlist=[],
        web_shell=False,
        roles=['READONLY_ADMIN']
    ) as acct:
        with client(auth=(acct.username, acct.password)) as c:
            payload = {
                'username': acct.username,
                'old_password': acct.password,
                'new_password': TEST_PASSWORD
            }
            # Password reset using existing password and current user should work.
            # Both password fields must appear redacted in the audit trail.
            with expect_audit_method_calls([{
                'method': 'user.set_password',
                'params': [{
                    'username': acct.username,
                    'old_password': REDACTED,
                    'new_password': REDACTED
                }],
                'description': f'Set account password {acct.username}',
            }]):
                c.call('user.set_password', payload)
            # Should be able to create new client session with new password
            with client(auth=(acct.username, TEST_PASSWORD)) as c2:
                c2.call('auth.me')
            # FULL_ADMIN privileges should also allow password reset:
            call('user.set_password', {
                'username': acct.username,
                'old_password': TEST_PASSWORD,
                'new_password': TEST_PASSWORD_2
            })
            # FULL_ADMIN should also be able to skip password checks
            call('user.set_password', {
                'username': acct.username,
                'new_password': TEST_PASSWORD_2,
            })
            group_id = call('group.query', [['group', '=', TEST_GROUPNAME]], {'get': True})['id']
            # Create additional user with READONLY privilege
            with user({
                'username': TEST_USERNAME_2,
                'full_name': TEST_USERNAME_2,
                'group_create': True,
                'groups': [group_id],
                'smb': False,
                'password': TEST_PASSWORD2
            }) as u:
                with client(auth=(TEST_USERNAME_2, TEST_PASSWORD2)) as c2:
                    # Limited users should not be able to change other
                    # passwords of other users
                    with pytest.raises(CallError) as ve:
                        c2.call('user.set_password', {
                            'username': acct.username,
                            'old_password': TEST_PASSWORD_2,
                            'new_password': 'CANARY'
                        })
                    assert ve.value.errno == errno.EPERM
                    with pytest.raises(ValidationErrors) as ve:
                        # Limited users should not be able to skip password checks
                        c2.call('user.set_password', {
                            'username': TEST_USERNAME_2,
                            'new_password': 'CANARY',
                        })
                    with pytest.raises(ValidationErrors) as ve:
                        # Providing invalid old password for a limited user
                        # should raise an error
                        c2.call('user.set_password', {
                            'username': TEST_USERNAME_2,
                            'old_password': 'ANOTHER CANARY',
                            'new_password': 'CANARY',
                        })
                call("user.update", u['id'], {'password_disabled': True})
                with pytest.raises(ValidationErrors) as ve:
                    # This should fail because we've disabled password auth
                    call('user.set_password', {
                        'username': TEST_USERNAME_2,
                        'old_password': TEST_PASSWORD2,
                        'new_password': 'CANARY'
                    })
                call("user.update", u['id'], {
                    'password_disabled': False,
                    'locked': True
                })
                with pytest.raises(ValidationErrors) as ve:
                    # This should fail because we've locked account
                    call('user.set_password', {
                        'username': TEST_USERNAME_2,
                        'old_password': TEST_PASSWORD2,
                        'new_password': 'CANARY'
                    })
                call("user.update", u['id'], {
                    'password_disabled': False,
                    'locked': False
                })
                # Unlocking user should allow password reset to succeed
                with client(auth=(TEST_USERNAME_2, TEST_PASSWORD2)) as c2:
                    c2.call('user.set_password', {
                        'username': TEST_USERNAME_2,
                        'old_password': TEST_PASSWORD2,
                        'new_password': TEST_PASSWORD2_2
                    })
| 5,607 | Python | .py | 122 | 32.360656 | 99 | 0.551775 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,119 | test_user_create_dir.py | truenas_middleware/tests/api2/test_user_create_dir.py | import errno
import os
import pytest
from middlewared.service_exception import CallError
from middlewared.test.integration.assets.account import user
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call
DS_NAME = 'user-create-homedir'
@pytest.fixture(scope='function')
def setup_user():
    """Yield a fresh user whose home is an SMB-type dataset.

    The yielded dict is the created user's payload extended with the backing
    dataset name under the 'dataset' key.
    """
    with dataset(DS_NAME, data={'share_type': 'SMB'}) as ds:
        user_config = {
            'username': 'usercreate',
            'full_name': 'usercreate',
            'group_create': True,
            'home': os.path.join('/mnt', ds),
            'home_create': False,
            'password': 'ABCD1234'
        }
        with user(user_config) as u:
            yield dict(u, dataset=ds)
def test_create_homedir(setup_user):
    """home_create=True makes a new homedir inside the currently set homedir."""
    uid = setup_user['id']
    expected = os.path.join(setup_user['home'], setup_user['username'])

    call('user.update', uid, {
        'home': setup_user['home'],
        'home_create': True
    })
    first = call('user.query', [['id', '=', uid]], {'get': True})
    assert first['home'] == expected

    # Repeating the same update must not nest yet another homedir inside the
    # freshly created one (protects API users from endless nesting).
    call('user.update', uid, {
        'home': setup_user['home'],
        'home_create': True
    })
    second = call('user.query', [['id', '=', uid]], {'get': True})
    assert second['home'] == first['home']
def test_user_change_homedir_no_traverse(setup_user):
    """Moving a homedir must not recurse into child datasets.

    Fix: the dataset context manager's yielded name was bound to an unused
    local (`as subds`); the binding is dropped.
    """
    with dataset(f'{DS_NAME}/subds'):
        # Verify that the new child dataset is visible inside the source homedir
        call('filesystem.listdir', setup_user['home'], [['name', '=', 'subds']], {'get': True})
        with dataset('new-path', data={'share_type': 'SMB'}) as ds:
            call('user.update', setup_user['id'], {
                'home': os.path.join('/mnt', ds),
                'home_create': True
            })
            new = call('user.query', [['id', '=', setup_user['id']]], {'get': True})
            # Verify that we did not try to copy over the dataset
            with pytest.raises(CallError) as ce:
                call('filesystem.stat', os.path.join(new['home'], 'subds'))
            assert ce.value.errno == errno.ENOENT
def test_user_change_homedir_no_zfs_ctldir(setup_user):
    """The .zfs control directory must not be copied when snapdir is VISIBLE."""
    uid = setup_user['id']
    call('pool.dataset.update', setup_user['dataset'], {'snapdir': 'VISIBLE'})
    call('user.update', uid, {
        'home': setup_user['home'],
        'home_create': True
    })
    updated = call('user.query', [['id', '=', uid]], {'get': True})
    assert updated['home'] == os.path.join(setup_user['home'], setup_user['username'])
    # The new homedir must not contain a copied-in .zfs directory
    with pytest.raises(CallError) as ce:
        call('filesystem.stat', os.path.join(updated['home'], '.zfs'))
    assert ce.value.errno == errno.ENOENT
def test_user_change_homedir_acl_preserve(setup_user):
    """An ACL set on files within the homedir must survive the copy to a new homedir."""
    ACL = [{
        'tag': 'owner@',
        'id': -1,
        'perms': {'BASIC': 'FULL_CONTROL'},
        'flags': {'BASIC': 'INHERIT'},
        'type': 'ALLOW'
    }]
    canary = os.path.join(setup_user['home'], 'canary')
    call('filesystem.mkdir', {'path': canary})
    call('filesystem.setacl', {
        'path': canary,
        'dacl': ACL
    }, job=True)
    call('user.update', setup_user['id'], {
        'home': setup_user['home'],
        'home_create': True
    })
    moved = call('user.query', [['id', '=', setup_user['id']]], {'get': True})
    # The ACL on the copied canary directory must match what we set
    copied_acl = call('filesystem.getacl', os.path.join(moved['home'], 'canary'))['acl']
    assert copied_acl == ACL
| 3,818 | Python | .py | 84 | 38.047619 | 96 | 0.592002 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,120 | test_iscsi_extent_crud_roles.py | truenas_middleware/tests/api2/test_iscsi_extent_crud_roles.py | import pytest
from middlewared.test.integration.assets.roles import common_checks
@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_EXTENT_READ"])
def test_read_role_can_read(unprivileged_user_fixture, role):
common_checks(unprivileged_user_fixture, "iscsi.extent.query", role, True, valid_role_exception=False)
@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_EXTENT_READ"])
def test_read_role_cant_write(unprivileged_user_fixture, role):
common_checks(unprivileged_user_fixture, "iscsi.extent.create", role, False)
common_checks(unprivileged_user_fixture, "iscsi.extent.update", role, False)
common_checks(unprivileged_user_fixture, "iscsi.extent.delete", role, False)
@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_ISCSI_WRITE", "SHARING_ISCSI_EXTENT_WRITE"])
def test_write_role_can_write(unprivileged_user_fixture, role):
common_checks(unprivileged_user_fixture, "iscsi.extent.create", role, True)
common_checks(unprivileged_user_fixture, "iscsi.extent.update", role, True)
common_checks(unprivileged_user_fixture, "iscsi.extent.delete", role, True)
| 1,178 | Python | .py | 15 | 75.2 | 106 | 0.771626 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,121 | test_truenas_verify.py | truenas_middleware/tests/api2/test_truenas_verify.py | from middlewared.test.integration.utils import ssh
def test_truenas_verify():
    """truenas_verify must report mismatches on a CI-modified system and log them."""
    response = ssh('truenas_verify', check=False, complete_response=True)
    # Jenkins vms alter the system files for setup, so truenas_verify should generate errors.
    assert not response['result']
    log_head = ssh('head /var/log/truenas_verify.log')
    assert log_head, 'Test environment should log file verification errors.'
| 391 | Python | .py | 6 | 61 | 107 | 0.769634 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,122 | test_root_session_alert.py | truenas_middleware/tests/api2/test_root_session_alert.py | import pytest
from middlewared.test.integration.assets.product import product_type
from middlewared.test.integration.utils.client import client, truenas_server
from middlewared.test.integration.utils import call
from time import sleep
@pytest.fixture(scope="function")
def set_product_type(request):
# force SCALE_ENTERPRISE product type
with product_type():
yield
def get_session_alert(call_fn, session_id):
    """Assert the AdminSession alert source reports *session_id* as active."""
    # Give the auth event a moment to be logged before polling the alert source
    sleep(5)
    alerts = call_fn('alert.run_source', 'AdminSession')
    assert alerts
    assert session_id in alerts[0]['args']['sessions'], str(alerts[0]['args'])
def check_session_alert(call_fn):
    """Look up the caller's current session id and verify the alert reports it."""
    current = call_fn('auth.sessions', [['current', '=', True]], {'get': True})
    get_session_alert(call_fn, current['id'])
def test_root_session(set_product_type):
    """Admin sessions must raise the AdminSession alert, including newly opened ones."""
    # First check with our regular persistent session
    check_session_alert(call)
    # Then open a second session and verify it is picked up as well
    with client(host_ip=truenas_server.ip) as extra:
        check_session_alert(extra.call)
| 1,082 | Python | .py | 25 | 38.92 | 88 | 0.730144 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,123 | test_system_advanced.py | truenas_middleware/tests/api2/test_system_advanced.py | import pytest
from middlewared.service_exception import ValidationErrors, ValidationError
from middlewared.test.integration.utils import call, ssh
@pytest.mark.parametrize(
    'key,value,grep_file,sshd_config_cmd,validation_error', [
        ('motd', 'TrueNAS Message Of The Day', '/etc/motd', None, ''),
        ('login_banner', 'TrueNAS Login Banner', '/etc/login_banner', 'grep Banner /etc/ssh/sshd_config', ''),
        ('kernel_extra_options', 'zfs_arc_min=21474836480', None, None, ''),
        ('kernel_extra_options', '', None, None, ''),
        ('kernel_extra_options', 'zfs_arc_min=<21474836480>', None, None, 'Invalid syntax'),
    ],
    ids=[
        'Test MOTD',
        'Test Login Banner',
        'Test Valid Kernel Extra Options 1',
        'Test Valid Kernel Extra Options 2',
        'Test Invalid Kernel Extra Options 1',
    ],
)
def test_(key, value, grep_file, sshd_config_cmd, validation_error):
    """Update a system.advanced field and verify persistence or the expected rejection.

    When validation_error is empty the update must succeed, the config must
    echo the value back, and (when given) the value must appear in grep_file
    / satisfy sshd_config_cmd on disk. Otherwise the update must raise a
    ValidationError with exactly that message.
    """
    if not validation_error:
        call('system.advanced.update', {key: value})
        assert call('system.advanced.config')[key] == value
        if grep_file is not None:
            # The value must have been written to the backing file
            assert ssh(f'grep "{value}" {grep_file}', complete_response=True)['result']
        if sshd_config_cmd is not None:
            assert ssh(sshd_config_cmd, complete_response=True)['result']
    else:
        with pytest.raises(ValidationErrors) as ve:
            call('system.advanced.update', {key: value})
        assert ve.value.errors == [ValidationError(key, validation_error)]
| 1,498 | Python | .py | 31 | 41.16129 | 110 | 0.64959 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,124 | test_enable_disable_services.py | truenas_middleware/tests/api2/test_enable_disable_services.py | from middlewared.test.integration.utils import call
def test_01_enable_services():
    """Enable every service that is currently disabled."""
    for svc in call('service.query'):
        if not svc['enable']:
            call('service.update', svc['id'], {'enable': True})
def test_02_disable_services():
    """Disable every service that is currently enabled."""
    for svc in call('service.query'):
        if svc['enable']:
            call('service.update', svc['id'], {'enable': False})
| 382 | Python | .py | 7 | 49.571429 | 72 | 0.668464 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,125 | test_listdir_request_mask.py | truenas_middleware/tests/api2/test_listdir_request_mask.py | import enum
import pytest
from middlewared.test.integration.utils import call
class DirectoryRequestMask(enum.IntFlag):
    """Bit flags mirroring the middleware's filesystem.listdir request mask.

    Values are spelled out explicitly; they match the declaration order of the
    middleware-side enum (auto-assigned powers of two).
    """
    ACL = 1
    CTLDIR = 2
    REALPATH = 4
    XATTRS = 8
    ZFS_ATTRS = 16
@pytest.mark.parametrize('select_key,request_mask', [
    ('realpath', DirectoryRequestMask.REALPATH.value),
    ('acl', DirectoryRequestMask.ACL.value),
    ('zfs_attrs', DirectoryRequestMask.ZFS_ATTRS.value),
    ('is_ctldir', DirectoryRequestMask.CTLDIR.value),
    ('xattrs', DirectoryRequestMask.XATTRS.value),
    (['xattrs', 'user_xattrs'], DirectoryRequestMask.XATTRS.value),
    ([], None),
    ('name', 0)
])
def test__select_to_request_mask(select_key, request_mask):
    """Each listdir select key must map to the expected request-mask bit."""
    if select_key == []:
        # An empty select means "everything" and yields no mask at all
        assert call('filesystem.listdir_request_mask', []) is None
    else:
        assert call('filesystem.listdir_request_mask', [select_key]) == request_mask
| 954 | Python | .py | 26 | 31.961538 | 67 | 0.687974 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,126 | test_rsync_ssh_authentication.py | truenas_middleware/tests/api2/test_rsync_ssh_authentication.py | import base64
import contextlib
import errno
from unittest.mock import ANY
import pytest
from middlewared.service_exception import ValidationErrors, ValidationError
from middlewared.test.integration.assets.account import user
from middlewared.test.integration.assets.keychain import localhost_ssh_credentials
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call, ssh
from middlewared.test.integration.utils.unittest import RegexString
@contextlib.contextmanager
def task(data):
    """Create an rsync task for the duration of the context, deleting it on exit."""
    payload = dict(data)  # shallow copy so the caller's dict is never mutated
    created = call("rsynctask.create", payload)
    try:
        yield created
    finally:
        call("rsynctask.delete", created["id"])
def run_task(task, timeout=120):
    """Run the rsync task as a middleware job and block up to *timeout* seconds."""
    call("rsynctask.run", task["id"], job=True, timeout=timeout)
@pytest.fixture(scope="module")
def localuser():
with dataset("localuser_homedir") as localuser_homedir:
with user({
"username": "localuser",
"full_name": "Local User",
"group_create": True,
"home": f"/mnt/{localuser_homedir}",
"password": "test1234",
}) as u:
yield u
@pytest.fixture(scope="module")
def remoteuser():
with dataset("remoteuser_homedir") as remoteuser_homedir:
with user({
"username": "remoteuser",
"full_name": "Remote User",
"group_create": True,
"home": f"/mnt/{remoteuser_homedir}",
"password": "test1234",
}) as u:
yield u
@pytest.fixture(scope="module")
def src(localuser):
with dataset("src") as src:
path = f"/mnt/{src}"
yield path
@pytest.fixture(scope="module")
def dst(remoteuser):
with dataset("dst") as dst:
path = f"/mnt/{dst}"
ssh(f"chown -R remoteuser:remoteuser {path}")
yield path
@pytest.fixture(scope="module")
def ssh_credentials(remoteuser):
with localhost_ssh_credentials(username="remoteuser") as c:
yield c
@pytest.fixture(scope="module")
def ipv6_ssh_credentials(remoteuser):
with localhost_ssh_credentials(url="http://[::1]", username="remoteuser") as c:
yield c
@pytest.fixture(scope="function")
def cleanup(localuser, src, dst):
ssh(f"rm -rf {localuser['home']}/.ssh")
ssh(f"rm -rf {src}/*", check=False)
ssh(f"touch {src}/test")
ssh(f"chown -R localuser:localuser {src}")
ssh(f"rm -rf {dst}/*", check=False)
def test_no_credential_provided_create(cleanup, localuser, remoteuser, src, dst):
    """Creating an SSH-mode rsync task must fail when the user has no usable private key."""
    with pytest.raises(ValidationErrors) as e:
        with task({
            "path": f"{src}/",
            "user": "localuser",
            "remotehost": "remoteuser@localhost",
            "remoteport": 22,
            "mode": "SSH",
            "remotepath": dst,
        }):
            pass
    # Validation must point at the task user and mention the missing key
    assert e.value.errors == [
        ValidationError(
            "rsync_task_create.user",
            RegexString(".*you need a user with a private key.*"),
            errno.EINVAL,
        )
    ]
def test_home_directory_key_invalid_permissions(cleanup, localuser, remoteuser, src, dst, ssh_credentials):
    """A home-directory private key with mode 0644 must be rejected as too open."""
    ssh(f"mkdir {localuser['home']}/.ssh")
    call(
        "filesystem.file_receive",
        f"{localuser['home']}/.ssh/id_rsa",
        base64.b64encode(ssh_credentials["keypair"]["attributes"]["private_key"].encode("ascii")).decode("ascii"),
        {"mode": 0o0644},  # deliberately too permissive for an SSH key
    )
    ssh(f"chown -R localuser:localuser {localuser['home']}/.ssh")
    with pytest.raises(ValidationErrors) as e:
        with task({
            "path": f"{src}/",
            "user": "localuser",
            "remotehost": "remoteuser@localhost",
            "remoteport": 22,
            "mode": "SSH",
            "remotepath": dst,
        }):
            pass
    assert e.value.errors == [
        ValidationError(
            "rsync_task_create.user",
            RegexString("Permissions 644 .* are too open.*"),
            errno.EINVAL,
        )
    ]
@pytest.mark.parametrize("validate_rpath", [True, False])
def test_home_directory_key_not_in_known_hosts(cleanup, localuser, remoteuser, src, dst, ssh_credentials,
                                               validate_rpath):
    """Task creation must fail host-key verification when known_hosts lacks the target."""
    ssh(f"mkdir {localuser['home']}/.ssh")
    call(
        "filesystem.file_receive",
        f"{localuser['home']}/.ssh/id_rsa",
        base64.b64encode(ssh_credentials["keypair"]["attributes"]["private_key"].encode("ascii")).decode("ascii"),
        {"mode": 0o600},
    )
    ssh(f"chown -R localuser:localuser {localuser['home']}/.ssh")
    with pytest.raises(ValidationErrors) as e:
        with task({
            "path": f"{src}/",
            "user": "localuser",
            "remotehost": "remoteuser@localhost",
            "remoteport": 22,
            "mode": "SSH",
            "remotepath": dst,
            "validate_rpath": validate_rpath,
        }):
            pass
    # The error surfaces on remotehost regardless of remote-path validation
    assert e.value.errors == [
        ValidationError(
            "rsync_task_create.remotehost",
            ANY,
            ValidationError.ESSLCERTVERIFICATIONERROR,
        )
    ]
def test_ssh_keyscan_does_not_duplicate_host_keys(cleanup, localuser, remoteuser, src, dst, ssh_credentials):
    """With ssh_keyscan=True, creation must not re-append host keys already in known_hosts.

    Fix: the task context manager's yielded value was bound to an unused
    local (`as t`); the binding is dropped.
    """
    ssh(f"mkdir {localuser['home']}/.ssh")
    # Seed known_hosts with the localhost keys up front
    ssh(f"ssh-keyscan localhost >> {localuser['home']}/.ssh/known_hosts")
    call(
        "filesystem.file_receive",
        f"{localuser['home']}/.ssh/id_rsa",
        base64.b64encode(ssh_credentials["keypair"]["attributes"]["private_key"].encode("ascii")).decode("ascii"),
        {"mode": 0o600},
    )
    ssh(f"chown -R localuser:localuser {localuser['home']}/.ssh")
    known_hosts = ssh(f"cat {localuser['home']}/.ssh/known_hosts")
    with task({
        "path": f"{src}/",
        "user": "localuser",
        "remotehost": "remoteuser@localhost",
        "remoteport": 22,
        "mode": "SSH",
        "remotepath": dst,
        "ssh_keyscan": True,
    }):
        pass
    # known_hosts must be byte-for-byte unchanged after task creation
    assert ssh(f"cat {localuser['home']}/.ssh/known_hosts") == known_hosts
def test_home_directory_key(cleanup, localuser, remoteuser, src, dst, ssh_credentials):
    """A properly installed 0600 home-directory key lets the task run and sync files."""
    ssh(f"mkdir {localuser['home']}/.ssh")
    call(
        "filesystem.file_receive",
        f"{localuser['home']}/.ssh/id_rsa",
        base64.b64encode(ssh_credentials["keypair"]["attributes"]["private_key"].encode("ascii")).decode("ascii"),
        {"mode": 0o600},
    )
    ssh(f"chown -R localuser:localuser {localuser['home']}/.ssh")
    with task({
        "path": f"{src}/",
        "user": "localuser",
        "remotehost": "remoteuser@localhost",
        "remoteport": 22,
        "mode": "SSH",
        "remotepath": dst,
        "ssh_keyscan": True,
    }) as t:
        run_task(t)
        # The single seeded 'test' file must have arrived at the destination
        assert ssh(f"ls -1 {dst}") == "test\n"
def test_ssh_credentials_key(cleanup, localuser, remoteuser, src, dst, ssh_credentials):
    """An rsync task bound to keychain SSH credentials syncs src to dst."""
    config = {
        "path": f"{src}/",
        "user": "localuser",
        "ssh_credentials": ssh_credentials["credentials"]["id"],
        "mode": "SSH",
        "remotepath": dst,
    }
    with task(config) as created:
        run_task(created)
        assert ssh(f"ls -1 {dst}") == "test\n"
def test_ssh_credentials_delete(cleanup, localuser, remoteuser, src, dst):
    """Cascade-deleting keychain credentials must disable the rsync task using them."""
    with localhost_ssh_credentials(username="remoteuser") as c:
        path = f"{src}/"
        with task({
            "path": path,
            "user": "localuser",
            "ssh_credentials": c["credentials"]["id"],
            "mode": "SSH",
            "remotepath": dst,
        }) as t:
            # The credential must report the task as its consumer
            assert call("keychaincredential.used_by", c["credentials"]["id"]) == [
                {"title": f"Rsync task for {path!r}", "unbind_method": "disable"},
            ]
            call("keychaincredential.delete", c["credentials"]["id"], {"cascade": True})
            # The cascade unbind disables (not deletes) the dependent task
            t = call("rsynctask.get_instance", t["id"])
            assert not t["enabled"]
def test_state_persist(cleanup, localuser, remoteuser, src, dst, ssh_credentials):
    """A finished run's SUCCESS state must be persisted on the datastore row."""
    config = {
        "path": f"{src}/",
        "user": "localuser",
        "ssh_credentials": ssh_credentials["credentials"]["id"],
        "mode": "SSH",
        "remotepath": dst,
    }
    with task(config) as created:
        run_task(created)
        row = call("datastore.query", "tasks.rsync", [["id", "=", created["id"]]], {"get": True})
        assert row["rsync_job"]["state"] == "SUCCESS"
def test_local_path_with_whitespace(cleanup, localuser, remoteuser, src, dst, ssh_credentials):
    """rsync over SSH must handle a local source path containing whitespace."""
    spaced = f"{src}/work stuff"
    ssh(f"mkdir '{spaced}'")
    ssh(f"touch '{spaced}/test2'")
    ssh(f"chown -R localuser:localuser '{spaced}'")
    with task({
        "path": f"{spaced}/",
        "user": "localuser",
        "ssh_credentials": ssh_credentials["credentials"]["id"],
        "mode": "SSH",
        "remotepath": dst,
    }) as created:
        run_task(created)
        assert ssh(f"ls -1 '{dst}'") == "test2\n"
def test_remotepath_with_whitespace(cleanup, localuser, remoteuser, src, dst, ssh_credentials):
    """rsync over SSH must handle a remote destination path containing whitespace."""
    spaced = f"{dst}/work stuff"
    ssh(f"mkdir '{spaced}'")
    ssh(f"chown remoteuser:remoteuser '{spaced}'")
    with task({
        "path": f"{src}/",
        "user": "localuser",
        "ssh_credentials": ssh_credentials["credentials"]["id"],
        "mode": "SSH",
        "remotepath": spaced,
    }) as created:
        run_task(created)
        assert ssh(f"ls -1 '{spaced}'") == "test\n"
def test_ipv6_ssh_credentials(cleanup, localuser, remoteuser, src, dst, ipv6_ssh_credentials):
    """The sync must also work when the SSH connection targets ::1 (IPv6)."""
    config = {
        "path": f"{src}/",
        "user": "localuser",
        "ssh_credentials": ipv6_ssh_credentials["credentials"]["id"],
        "mode": "SSH",
        "remotepath": dst,
    }
    with task(config) as created:
        run_task(created)
        assert ssh(f"ls -1 {dst}") == "test\n"
| 9,637 | Python | .py | 260 | 29.173077 | 114 | 0.588128 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,127 | test_iscsi_portal_crud_roles.py | truenas_middleware/tests/api2/test_iscsi_portal_crud_roles.py | import pytest
from middlewared.test.integration.assets.roles import common_checks
@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_PORTAL_READ"])
def test_read_role_can_read(unprivileged_user_fixture, role):
common_checks(unprivileged_user_fixture, "iscsi.portal.query", role, True, valid_role_exception=False)
@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_PORTAL_READ"])
def test_read_role_cant_write(unprivileged_user_fixture, role):
common_checks(unprivileged_user_fixture, "iscsi.portal.create", role, False)
common_checks(unprivileged_user_fixture, "iscsi.portal.update", role, False)
common_checks(unprivileged_user_fixture, "iscsi.portal.delete", role, False)
@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_ISCSI_WRITE", "SHARING_ISCSI_PORTAL_WRITE"])
def test_write_role_can_write(unprivileged_user_fixture, role):
common_checks(unprivileged_user_fixture, "iscsi.portal.create", role, True)
common_checks(unprivileged_user_fixture, "iscsi.portal.update", role, True)
common_checks(unprivileged_user_fixture, "iscsi.portal.delete", role, True)
| 1,178 | Python | .py | 15 | 75.2 | 106 | 0.771626 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,128 | test_events.py | truenas_middleware/tests/api2/test_events.py | import errno
import pytest
from middlewared.service_exception import CallError
from middlewared.test.integration.utils import client
def test_can_subscribe_to_failover_status_event_without_authorization():
    """failover.status must be subscribable on an unauthenticated connection."""
    with client(auth=None) as unauthenticated:
        unauthenticated.subscribe("failover.status", lambda *args, **kwargs: None)
def test_can_not_subscribe_to_an_event_without_authorization():
    """Other events must be rejected with EACCES on an unauthenticated connection."""
    with client(auth=None) as unauthenticated:
        with pytest.raises(CallError) as ve:
            unauthenticated.subscribe("core.get_jobs", lambda *args, **kwargs: None)
        assert ve.value.errno == errno.EACCES
| 574 | Python | .py | 12 | 42.583333 | 72 | 0.742342 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,129 | test_zfs_snapshot_events.py | truenas_middleware/tests/api2/test_zfs_snapshot_events.py | import errno
import pprint
import pytest
from unittest.mock import ANY
from middlewared.service_exception import InstanceNotFound, ValidationErrors, ValidationError
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import client
def test_create():
    """Creating a snapshot must emit exactly one ADDED event on zfs.snapshot.query."""
    with dataset("test_snapshot_events_create") as ds:
        with client() as c:
            events = []

            def callback(type, **message):
                events.append((type, message))

            # sync=True delivers events synchronously so they arrive before the asserts
            c.subscribe("zfs.snapshot.query", callback, sync=True)
            c.call("zfs.snapshot.create", {"dataset": ds, "name": "test"})

            assert len(events) == 1, pprint.pformat(events, indent=2)
            assert events[0][0] == "ADDED"
            assert events[0][1] == {"collection": "zfs.snapshot.query", "msg": "added", "id": f"{ds}@test",
                                    "fields": ANY}
def test_delete():
    """Deleting a snapshot must emit one REMOVED event with recursive=False extra."""
    with dataset("test_snapshot_events_delete") as ds:
        with client() as c:
            c.call("zfs.snapshot.create", {"dataset": ds, "name": "test"})

            events = []

            def callback(type, **message):
                events.append((type, message))

            # Subscribe only after creation so only the delete is captured
            c.subscribe("zfs.snapshot.query", callback, sync=True)
            c.call("zfs.snapshot.delete", f"{ds}@test")

            assert len(events) == 1, pprint.pformat(events, indent=2)
            assert events[0][0] == "REMOVED"
            assert events[0][1] == {"collection": "zfs.snapshot.query", "msg": "removed", "id": f"{ds}@test",
                                    "extra": {"recursive": False}}
def test_delete_with_dependent_clone():
    """Deleting a snapshot with a dependent clone must fail unless options.defer is set."""
    with dataset("test_snapshot_events_dependent_clone") as ds:
        with client() as c:
            c.call("zfs.snapshot.create", {"dataset": ds, "name": "test"})
            c.call("zfs.snapshot.clone", {"snapshot": f"{ds}@test", "dataset_dst": f"{ds}/clone01"})
            with pytest.raises(ValidationErrors) as ve:
                c.call("zfs.snapshot.delete", f"{ds}@test")
            # The error must name the defer option and the dependent clone
            assert ve.value.errors == [
                ValidationError(
                    "options.defer",
                    f"Please set this attribute as '{ds}@test' snapshot has dependent clones: {ds}/clone01",
                    errno.EINVAL
                ),
            ]
def test_delete_nonexistent_snapshot():
    """Deleting a snapshot that does not exist must raise InstanceNotFound."""
    with dataset("test_snapshot_events_nonexistent_snapshot") as ds:
        with client() as conn:
            conn.call("zfs.snapshot.create", {"dataset": ds, "name": "test"})
            missing = f"{ds}@testing"
            with pytest.raises(InstanceNotFound) as exc_info:
                conn.call("zfs.snapshot.delete", missing)
            assert str(exc_info.value) == f"[ENOENT] None: Snapshot {missing} not found"
| 2,761 | Python | .py | 53 | 40.283019 | 109 | 0.58169 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,130 | test_replication_utils.py | truenas_middleware/tests/api2/test_replication_utils.py | import pytest
from middlewared.test.integration.utils import call, pool
@pytest.fixture(scope="module")
def localhost_ssh_connection():
credential = call("keychaincredential.create", {
"name": "key",
"type": "SSH_KEY_PAIR",
"attributes": call("keychaincredential.generate_ssh_key_pair"),
})
try:
token = call("auth.generate_token")
connection = call("keychaincredential.remote_ssh_semiautomatic_setup", {
"name": "localhost",
"url": "http://localhost",
"token": token,
"private_key": credential["id"],
})
try:
yield connection["id"]
finally:
call("keychaincredential.delete", connection["id"])
finally:
call("keychaincredential.delete", credential["id"])
@pytest.mark.parametrize("transport", ["SSH", "SSH+NETCAT"])
def test_list_datasets_ssh(localhost_ssh_connection, transport):
assert pool in call("replication.list_datasets", transport, localhost_ssh_connection)
| 1,036 | Python | .py | 26 | 32.346154 | 89 | 0.643781 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,131 | test_audit_audit.py | truenas_middleware/tests/api2/test_audit_audit.py | import os
import requests
import time
import operator
import pytest
from middlewared.service_exception import ValidationErrors
from middlewared.test.integration.utils import call, url
from middlewared.test.integration.utils.audit import expect_audit_log, expect_audit_method_calls
from unittest.mock import ANY
# =====================================================================
# Fixtures and utilities
# =====================================================================
@pytest.fixture(scope='class')
def report_exists(request):
    """Yield the export report path cached by the export test; fail if it is absent."""
    pathname = request.config.cache.get('report_pathname', None)
    assert pathname is not None
    yield pathname
# =====================================================================
# Tests
# =====================================================================
@pytest.mark.parametrize('payload,success', [
    ({'retention': 20}, True),
    ({'retention': 0}, False)
])
def test_audit_config_audit(payload, success):
    '''
    Test the auditing of Audit configuration changes.

    A valid retention update must succeed and produce a matching METHOD_CALL
    audit record; an invalid one must raise ValidationErrors and be recorded
    with success=False. The original config is restored afterwards.

    Fix: removed the dead local ``rest_operator`` which was assigned but
    never used.
    '''
    initial_audit_config = call('audit.config')
    expected_log_template = {
        'service_data': {
            'vers': {
                'major': 0,
                'minor': 1,
            },
            'origin': ANY,
            'protocol': 'WEBSOCKET',
            'credentials': {
                'credentials': 'LOGIN_PASSWORD',
                'credentials_data': {'username': 'root', 'login_at': ANY},
            },
        },
        'event': 'METHOD_CALL',
        'event_data': {
            'authenticated': True,
            'authorized': True,
            'method': 'audit.update',
            'params': [payload],
            'description': 'Update Audit Configuration',
        },
        'success': success
    }
    try:
        with expect_audit_log([expected_log_template]):
            if success:
                call('audit.update', payload)
            else:
                with pytest.raises(ValidationErrors):
                    call('audit.update', payload)
    finally:
        # Restore initial state
        restore_payload = {
            'retention': initial_audit_config['retention'],
        }
        call('audit.update', restore_payload)
def test_audit_export_audit(request):
    '''
    Test the auditing of the audit export function
    '''
    payload = {
        'export_format': 'CSV'
    }
    # The export itself must be recorded as an audited method call
    with expect_audit_method_calls([{
        'method': 'audit.export',
        'params': [payload],
        'description': 'Export Audit Data',
    }]):
        report_pathname = call('audit.export', payload, job=True)
    # Cache the report path for the class-scoped download test below
    request.config.cache.set('report_pathname', report_pathname)
class TestAuditDownload:
    '''
    Wrap these tests in a class for the 'report_exists' fixture
    '''
    def test_audit_download_audit(self, report_exists):
        '''
        Test the auditing of the audit download function
        '''
        report_pathname = report_exists
        st = call('filesystem.stat', report_pathname)
        # Baseline count of download_report audit entries before downloading
        init_audit_query = call('audit.query', {
            'query-filters': [['event_data.method', '=', 'audit.download_report']],
            'query-options': {'select': ['event_data', 'success']}
        })
        init_len = len(init_audit_query)
        report_name = os.path.basename(report_pathname)
        payload = {
            'report_name': report_name
        }
        job_id, download_data = call(
            'core.download', 'audit.download_report', [payload], 'report.csv'
        )
        r = requests.get(f'{url()}{download_data}')
        r.raise_for_status()
        # Downloaded size must match the on-disk report
        assert len(r.content) == st['size']
        post_audit_query = call('audit.query', {
            'query-filters': [['event_data.method', '=', 'audit.download_report']],
            'query-options': {'select': ['event_data', 'success']}
        })
        post_len = len(post_audit_query)
        # This usually requires only one cycle
        count_down = 10
        while count_down > 0 and post_len == init_len:
            time.sleep(1)
            count_down -= 1
            post_audit_query = call('audit.query', {
                'query-filters': [['event_data.method', '=', 'audit.download_report']],
                'query-options': {'select': ['event_data', 'success']}
            })
            post_len = len(post_audit_query)
        assert count_down > 0, 'Timed out waiting for the audit entry'
        assert post_len > init_len
        # Confirm this download is recorded
        entry = post_audit_query[-1]
        event_data = entry['event_data']
        params = event_data['params'][0]
        assert report_name in params['report_name']
25,132 | test_job_logs.py | truenas_middleware/tests/api2/test_job_logs.py | import requests
from middlewared.test.integration.assets.account import unprivileged_user_client
from middlewared.test.integration.utils import mock, url
def test_job_download_logs():
    """core.job_download_logs must serve a finished job's log file as an attachment."""
    with mock("test.test1", """
        from middlewared.service import job

        @job(logs=True)
        def mock(self, job, *args):
            job.logs_fd.write(b'Job logs')
    """):
        # An allowlisted unprivileged user may run the mocked job and fetch its logs
        with unprivileged_user_client(allowlist=[{"method": "CALL", "resource": "test.test1"}]) as c:
            jid = c.call("test.test1")

            c.call("core.job_wait", jid, job=True)

            path = c.call("core.job_download_logs", jid, 'logs.txt')

            r = requests.get(f"{url()}{path}")
            r.raise_for_status()
            assert r.headers["Content-Disposition"] == "attachment; filename=\"logs.txt\""
            assert r.headers["Content-Type"] == "application/octet-stream"
            assert r.text == "Job logs"
| 932 | Python | .py | 19 | 39.631579 | 101 | 0.61547 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,133 | test_900_docs.py | truenas_middleware/tests/api2/test_900_docs.py | #!/usr/bin/env python3
# License: BSD
import pytest
import sys
import os
from pytest_dependency import depends
apifolder = os.getcwd()
sys.path.append(apifolder)
from functions import SSH_TEST
from auto_config import user, password
def test_core_get_methods(request):
    """Smoke-test that `midclt call core.get_methods` succeeds over SSH."""
    outcome = SSH_TEST("midclt call core.get_methods", user, password)
    assert outcome['result'] is True, outcome
| 389 | Python | .py | 13 | 28 | 70 | 0.793011 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,134 | test_apps_roles.py | truenas_middleware/tests/api2/test_apps_roles.py | import pytest
from middlewared.test.integration.assets.roles import common_checks
@pytest.mark.parametrize('method, role, valid_role, valid_role_exception', (
    ('app.query', 'APPS_READ', True, False),
    ('app.query', 'APPS_WRITE', True, False),
    ('app.query', 'DOCKER_READ', False, False),
    ('app.config', 'APPS_READ', True, True),
    ('app.config', 'APPS_WRITE', True, True),
    ('app.config', 'DOCKER_READ', False, False),
    ('app.update', 'APPS_READ', False, False),
    ('app.update', 'APPS_WRITE', True, True),
    ('app.create', 'APPS_READ', False, False),
    ('app.create', 'APPS_WRITE', True, True),
    ('app.delete', 'APPS_READ', False, False),
    ('app.delete', 'APPS_WRITE', True, True),
    ('app.convert_to_custom', 'APPS_READ', False, False),
    ('app.convert_to_custom', 'APPS_WRITE', True, True),
))
def test_apps_roles(unprivileged_user_fixture, method, role, valid_role, valid_role_exception):
    """Check that each app.* method is permitted/denied for the given role.
    `valid_role` states whether the role should be allowed to call `method`;
    `valid_role_exception` is forwarded to `common_checks` (whether a valid
    call is still expected to raise for other reasons, e.g. bad arguments).
    """
    common_checks(unprivileged_user_fixture, method, role, valid_role, valid_role_exception=valid_role_exception)
| 1,048 | Python | .py | 20 | 48.25 | 113 | 0.667317 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,135 | test_run_as_user_impl.py | truenas_middleware/tests/api2/test_run_as_user_impl.py | import sys
import os
from contextlib import contextmanager
apifolder = os.getcwd()
sys.path.append(apifolder)
from functions import wait_on_job
from middlewared.test.integration.utils import call, ssh
@contextmanager
def create_cron_job(owner, ownerGroup, user):
    """Create a cron job touching a file in a temp dir owned by *owner*:*ownerGroup*.
    The job is configured to run as *user*.  The temp directory is removed on
    exit; the cron job itself is deleted by `run_cron_job`.
    """
    workdir = ssh('mktemp -d').strip()
    ssh(f'chown -R {owner}:{ownerGroup} {workdir}')
    entry = call(
        'cronjob.create', {
            'command': f'touch {workdir}/test.txt', 'user': user, 'stderr': False, 'stdout': False}
    )
    try:
        yield entry
    finally:
        ssh(f'rm -rf {workdir}')
@contextmanager
def run_cron_job(cron_id):
    """Run cron job *cron_id*, yield the finished job state, delete the job on exit."""
    queued = call('cronjob.run', cron_id)
    try:
        yield wait_on_job(queued, 300)
    finally:
        call('cronjob.delete', cron_id)
def test_01_running_as_valid_user():
    """A cron job run as the user that owns the work directory succeeds."""
    with create_cron_job(owner='apps', ownerGroup='apps', user='apps') as cron_job:
        with run_cron_job(cron_job['id']) as job_detail:
            assert job_detail['results']['error'] is None
def test_02_running_as_invalid_user():
    """A cron job run as a user without write access to the work dir fails with exit 1."""
    with create_cron_job(owner='root', ownerGroup='root', user='apps') as cron_job:
        with run_cron_job(cron_job['id']) as job_detail:
            assert f'"{cron_job["command"]}" exited with 1' in job_detail['results']['error'], job_detail
| 1,312 | Python | .py | 34 | 33.323529 | 105 | 0.662727 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,136 | test_system_general_ui_allowlist.py | truenas_middleware/tests/api2/test_system_general_ui_allowlist.py | import socket
import time
import requests
import websocket
from middlewared.test.integration.utils import call, host, mock, ssh, url, websocket_url
def test_system_general_ui_allowlist():
    """Exercise the ``ui_allowlist`` system.general setting end-to-end.
    - Baseline: the protected HTTP/websocket endpoints do not return 403.
    - With our own IP allowlisted everything stays reachable.
    - With only a foreign IP ('8.8.8.8') allowlisted, the UI root still loads
      but every protected endpoint returns 403 and websockets are closed.
    Cleanup is done over SSH because the API itself may be unreachable once
    the allowlist excludes us.
    """
    # Discover which local IP we reach the target host from.  connect() on a
    # UDP socket doesn't send packets.  Use a context manager so the socket is
    # closed immediately (the previous version leaked it for the whole test).
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.connect((host().ip, 1))
        local_ip = s.getsockname()[0]
    # Mock a VM with a display device so the /vm/display/1 endpoint exists.
    with mock("vm.query", return_value=[
        {"id": 1, "name": ""},
    ]):
        with mock("vm.device.query", return_value=[
            {"id": 1, "attributes": {"bind": "127.0.0.1", "port": 1, "web_port": 1}, "vm": 1}
        ]):
            try:
                protected_endpoints = (
                    "/_download",
                    "/_upload",
                    "/_plugins",
                    "/api/docs",
                    "/api/v2.0",
                    "/progress",
                    "/vm/display/1",
                )
                protected_ws_endpoints = (
                    ("/websocket", '{"msg": "connect", "version": "1"}'),
                    ("/websocket/shell", '{"token": "invalid"}'),
                )
                # Ensure we are testing endpoints that do not give 403 by default
                for endpoint in protected_endpoints:
                    r = requests.get(url() + endpoint, timeout=10)
                    assert r.status_code != 403
                for endpoint, message in protected_ws_endpoints:
                    ws = websocket.create_connection(websocket_url() + endpoint)
                    ws.send(message)
                    resp_opcode, msg = ws.recv_data()
                    assert resp_opcode == 1, msg
                # Set `ui_allowlist` to IP we are using
                call("system.general.update", {"ui_allowlist": [local_ip]})
                call("system.general.ui_restart", 0)
                time.sleep(10)
                # Check everything still works
                for endpoint in protected_endpoints:
                    r = requests.get(url() + endpoint, timeout=10)
                    assert r.status_code != 403
                for endpoint, message in protected_ws_endpoints:
                    ws = websocket.create_connection(websocket_url() + endpoint)
                    ws.send(message)
                    resp_opcode, msg = ws.recv_data()
                    assert resp_opcode == 1, msg
                # Set it to an invalid IP
                call("system.general.update", {"ui_allowlist": ["8.8.8.8"]})
                call("system.general.ui_restart", 0)
                time.sleep(10)
                # Ensure we are still able to open the UI
                r = requests.get(url(), timeout=10)
                assert r.status_code == 200
                # Ensure that we can't access API
                for endpoint in protected_endpoints:
                    r = requests.get(url() + endpoint, timeout=10)
                    assert r.status_code == 403, (endpoint, r.text)
                for endpoint, message in protected_ws_endpoints:
                    ws = websocket.create_connection(websocket_url() + endpoint)
                    ws.send(message)
                    resp_opcode, msg = ws.recv_data()
                    # Opcode 8 is a websocket close frame.
                    assert resp_opcode == 8, msg
                    assert msg[2:].decode("utf-8") == "You are not allowed to access this resource"
            finally:
                # We are not allowed to access API, bring things back to normal via SSH
                ssh("midclt call system.general.update '{\"ui_allowlist\": []}'")
                ssh("midclt call system.general.ui_restart 0")
                time.sleep(10)
| 3,631 | Python | .py | 73 | 33.684932 | 99 | 0.503242 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,137 | test_261_iscsi_cmd.py | truenas_middleware/tests/api2/test_261_iscsi_cmd.py | import contextlib
import enum
import errno
import ipaddress
import os
import random
import socket
import string
from time import sleep
import iscsi
import pyscsi
import pytest
import requests
from assets.websocket.iscsi import (alua_enabled, initiator, initiator_portal,
portal, read_capacity16, target,
target_extent_associate, verify_capacity,
verify_luns, verify_ha_inquiry, verify_ha_device_identification, TUR)
from middlewared.service_exception import CallError, InstanceNotFound, ValidationError, ValidationErrors
from middlewared.test.integration.assets.iscsi import target_login_test
from middlewared.test.integration.assets.pool import dataset, snapshot
from middlewared.test.integration.utils import call, ssh
from middlewared.test.integration.utils.client import truenas_server
from pyscsi.pyscsi.scsi_sense import sense_ascq_dict
from pytest_dependency import depends
from auto_config import ha, hostname, isns_ip, password, pool_name, user
from functions import SSH_TEST
from protocols import (initiator_name_supported, iscsi_scsi_connection,
isns_connection)
# Setup some flags that will enable/disable tests based upon the capabilities of the
# python-scsi package in use
try:
    from pyscsi.pyscsi.scsi_cdb_persistentreservein import PR_SCOPE, PR_TYPE
    pyscsi_has_persistent_reservations = 'PersistentReserveOut' in dir(pyscsi.pyscsi.scsi)
    LU_SCOPE = PR_SCOPE.LU_SCOPE
except ImportError:
    pyscsi_has_persistent_reservations = False
    LU_SCOPE = 0
skip_persistent_reservations = pytest.mark.skipif(not pyscsi_has_persistent_reservations,
                                                  reason="PYSCSI does not support persistent reservations")
# BUGFIX: the reason previously read "persistent reservations" (copy/paste
# from the mark above); the condition actually gates initiator-name support.
skip_multi_initiator = pytest.mark.skipif(not initiator_name_supported(),
                                          reason="PYSCSI does not support multiple initiator names")
skip_ha_tests = pytest.mark.skipif(not (ha and "virtual_ip" in os.environ), reason="Skip HA tests")
skip_invalid_initiatorname = pytest.mark.skipif(not initiator_name_supported(),
                                                reason="Invalid initiatorname will be presented")
pyscsi_has_report_target_port_groups = 'ReportTargetPortGroups' in dir(pyscsi.pyscsi.scsi)
# See: https://github.com/python-scsi/cython-iscsi/pull/8
pyscsi_supports_check_condition = hasattr(iscsi.Task, 'raw_sense')
# BUGFIX: the reason string was previously passed positionally.  pytest treats
# positional string args to skipif as *condition expressions* to evaluate, not
# as the reason, which breaks collection of tests using this mark.
skip_no_check_condition = pytest.mark.skipif(not pyscsi_supports_check_condition,
                                             reason="PYSCSI does not support CHECK CONDITION")
# The following strings are taken from pyscsi/pyscsi/scsi_exception
class CheckType(enum.Enum):
    """SCSI task status names, matching the exception class names raised by
    pyscsi/pyscsi/scsi_exception (compared against in expect_check_condition)."""
    CHECK_CONDITION = "CheckCondition"
    CONDITIONS_MET = "ConditionsMet"
    BUSY_STATUS = "BusyStatus"
    RESERVATION_CONFLICT = "ReservationConflict"
    TASK_SET_FULL = "TaskSetFull"
    ACA_ACTIVE = "ACAActive"
    TASK_ABORTED = "TaskAborted"
    def __str__(self):
        # Render as the pyscsi exception class name, e.g. "CheckCondition".
        return self.value
# Some constants
MB = 1024 * 1024
MB_100 = 100 * MB
MB_200 = 200 * MB
MB_256 = 256 * MB
MB_512 = 512 * MB
# Keys used for SCSI Persistent Reservation operations.
PR_KEY1 = 0xABCDEFAABBCCDDEE
PR_KEY2 = 0x00000000DEADBEEF
# Target port group IDs for HA controllers A and B.
CONTROLLER_A_TARGET_PORT_GROUP_ID = 101
CONTROLLER_B_TARGET_PORT_GROUP_ID = 102
# Some variables
# Two random digits are appended to resource names so each test run creates
# uniquely named datasets/targets/zvols.
digit = ''.join(random.choices(string.digits, k=2))
file_mountpoint = f'/tmp/iscsi-file-{hostname}'
zvol_mountpoint = f'/tmp/iscsi-zvol-{hostname}'
target_name = f"target{digit}"
dataset_name = f"iscsids{digit}"
file_name = f"iscsi{digit}"
basename = "iqn.2005-10.org.freenas.ctl"
zvol_name = f"ds{digit}"
zvol = f'{pool_name}/{zvol_name}'
def snapshot_rollback(snapshot_id):
    """Roll back to ZFS snapshot *snapshot_id* via the middleware API."""
    call('zfs.snapshot.rollback', snapshot_id)
def other_node(node):
    """Return the peer HA node name ('A' <-> 'B'); raise ValueError otherwise."""
    peers = {'A': 'B', 'B': 'A'}
    if node not in peers:
        raise ValueError("Invalid node supplied")
    return peers[node]
def get_ip_addr(ip):
    """Return *ip* unchanged if it is a literal IP address, otherwise resolve
    it as a hostname and return (and validate) the resolved address."""
    try:
        ipaddress.ip_address(ip)
    except ValueError:
        # Not a literal address: resolve and sanity-check the result.
        resolved = socket.gethostbyname(ip)
        ipaddress.ip_address(resolved)
        return resolved
    return ip
@contextlib.contextmanager
def iscsi_auth(tag, user, secret, peeruser=None, peersecret=None):
    """Create an iSCSI authorized-access entry for the duration of the context.
    If both *peeruser* and *peersecret* are supplied the entry also carries
    mutual-CHAP peer credentials.
    """
    payload = {'tag': tag, 'user': user, 'secret': secret}
    if peeruser and peersecret:
        payload['peeruser'] = peeruser
        payload['peersecret'] = peersecret
    config = call('iscsi.auth.create', payload)
    try:
        yield config
    finally:
        call('iscsi.auth.delete', config['id'])
@contextlib.contextmanager
def iscsi_discovery_auth(authmethod, authgroup):
    """Create an iSCSI discovery-auth entry for the duration of the context."""
    entry = call('iscsi.discoveryauth.create', {'authmethod': authmethod, 'authgroup': authgroup})
    try:
        yield entry
    finally:
        call('iscsi.discoveryauth.delete', entry['id'])
@contextlib.contextmanager
def file_extent(pool_name, dataset_name, file_name, filesize=MB_512, extent_name='extent', serial=None):
    """Create a FILE-type iSCSI extent for the duration of the context."""
    payload = {
        'type': 'FILE',
        'name': extent_name,
        'filesize': filesize,
        'path': f'/mnt/{pool_name}/{dataset_name}/{file_name}',
    }
    # We want to allow any non-None serial to be specified (even '')
    if serial is not None:
        payload['serial'] = serial
    config = call('iscsi.extent.create', payload)
    try:
        yield config
    finally:
        call('iscsi.extent.delete', config['id'], True, True)
@contextlib.contextmanager
def zvol_dataset(zvol, volsize=MB_512, recursive=False, force=False):
    """Create a 16K-blocksize zvol named *zvol* for the duration of the context."""
    config = call('pool.dataset.create', {
        'name': zvol,
        'type': 'VOLUME',
        'volsize': volsize,
        'volblocksize': '16K',
    })
    try:
        yield config
    finally:
        try:
            call('pool.dataset.delete', config['id'], {'recursive': recursive, 'force': force})
        except InstanceNotFound:
            # Tolerate the zvol having already been destroyed.
            pass
def modify_extent(ident, payload):
    """Update iSCSI extent *ident* with the fields in *payload*."""
    call('iscsi.extent.update', ident, payload)
def file_extent_resize(ident, filesize):
    """Resize the file-based iSCSI extent *ident* to *filesize* bytes."""
    modify_extent(ident, {'filesize': filesize})
def extent_disable(ident):
    """Mark iSCSI extent *ident* as disabled."""
    modify_extent(ident, {'enabled': False})
def extent_enable(ident):
    """Mark iSCSI extent *ident* as enabled."""
    modify_extent(ident, {'enabled': True})
def zvol_resize(zvol, volsize):
    """Set the volsize of zvol *zvol* to *volsize* bytes."""
    call('pool.dataset.update', zvol, {'volsize': volsize})
def _get_iscsi_sessions(filters=None):
    """Return iSCSI session data, optionally restricted by *filters*."""
    args = [filters] if filters else []
    return call('iscsi.global.sessions', *args)
def get_iscsi_sessions(filters=None, check_length=None):
    """Return iSCSI session data, optionally waiting (up to ~10s) until the
    number of sessions equals *check_length*.
    Asserts if the expected count is never reached.
    """
    if not isinstance(check_length, int):
        return _get_iscsi_sessions(filters)
    for _attempt in range(10):
        data = _get_iscsi_sessions(filters)
        if len(data) == check_length:
            return data
        sleep(1)
    # Final, failing check produces a useful assertion message.
    assert len(data) == check_length, data
def get_client_count():
    """Return the iSCSI client count reported by the middleware."""
    return call('iscsi.global.client_count')
def get_volthreading(zvolid):
    """Return the 'volthreading' ZFS property value of the zvol *zvolid*."""
    return call('zfs.dataset.query', [['id', '=', zvolid]], {'get': True})['properties']['volthreading']['value']
def verify_client_count(count, retries=10):
    """Assert the iSCSI client count equals *count*, retrying for up to
    *retries* seconds to let things settle."""
    assert retries > 0
    for _ in range(retries):
        if get_client_count() == count:
            # All is good.
            return
        sleep(1)
    # One last check so a mismatch produces an assertion failure.
    assert get_client_count() == count
@contextlib.contextmanager
def zvol_extent(zvol, extent_name='zvol_extent'):
    """Create a DISK-type iSCSI extent backed by *zvol* for the duration of the context."""
    config = call('iscsi.extent.create', {
        'type': 'DISK',
        'disk': f'zvol/{zvol}',
        'name': extent_name,
    })
    try:
        yield config
    finally:
        try:
            call('iscsi.extent.delete', config['id'], True, True)
        except InstanceNotFound:
            # Tolerate the extent having already been deleted.
            pass
@contextlib.contextmanager
def configured_target_to_file_extent(config, target_name, pool_name, dataset_name, file_name, alias=None, filesize=MB_512, extent_name='extent'):
    """Create target + dataset + file extent and associate them.
    *config* must already contain a 'portal' entry (see initiator_portal).
    Yields a copy of *config* augmented with 'target', 'dataset' and 'extent'
    keys; everything is torn down in reverse order on exit.
    """
    portal_id = config['portal']['id']
    with target(target_name, [{'portal': portal_id}], alias) as target_config:
        target_id = target_config['id']
        with dataset(dataset_name) as dataset_config:
            with file_extent(pool_name, dataset_name, file_name, filesize=filesize, extent_name=extent_name) as extent_config:
                extent_id = extent_config['id']
                with target_extent_associate(target_id, extent_id):
                    newconfig = config.copy()
                    newconfig.update({
                        'target': target_config,
                        'dataset': dataset_config,
                        'extent': extent_config,
                    })
                    yield newconfig
@contextlib.contextmanager
def add_file_extent_target_lun(config, lun, filesize=MB_512, extent_name=None):
    """Add a file-backed extent as LUN *lun* to the target in *config*.
    The backing file lives in the dataset named after the target (as created
    by configured_target); yields a copy of *config* with an
    'extent_lun<lun>' key added.
    """
    name = config['target']['name']
    target_id = config['target']['id']
    # Dataset name must match the one configured_target created for this target.
    dataset_name = f"iscsids{name}"
    lun_file_name = f'{name}_lun{lun}'
    if not extent_name:
        extent_name = lun_file_name
    with file_extent(pool_name, dataset_name, lun_file_name, filesize=filesize, extent_name=extent_name) as extent_config:
        extent_id = extent_config['id']
        with target_extent_associate(target_id, extent_id, lun):
            newconfig = config.copy()
            newconfig.update({
                f'extent_lun{lun}': extent_config,
            })
            yield newconfig
@contextlib.contextmanager
def configured_target_to_zvol_extent(config, target_name, zvol, alias=None, extent_name='zvol_extent', volsize=MB_512):
    """Create target + zvol + zvol extent and associate them.
    *config* must already contain a 'portal' entry.  Yields a copy of *config*
    augmented with 'associate', 'target', 'dataset' and 'extent' keys.
    NOTE: unlike the file-extent variant, 'dataset' here holds only the
    dataset *id*, not the full dataset config.
    """
    portal_id = config['portal']['id']
    with target(target_name, [{'portal': portal_id}], alias) as target_config:
        target_id = target_config['id']
        with zvol_dataset(zvol, volsize) as dataset_config:
            with zvol_extent(zvol, extent_name=extent_name) as extent_config:
                extent_id = extent_config['id']
                with target_extent_associate(target_id, extent_id) as associate_config:
                    newconfig = config.copy()
                    newconfig.update({
                        'associate': associate_config,
                        'target': target_config,
                        'dataset': dataset_config['id'],
                        'extent': extent_config,
                    })
                    yield newconfig
@contextlib.contextmanager
def add_zvol_extent_target_lun(config, lun, volsize=MB_512, extent_name=None):
    """Add a zvol-backed extent as LUN *lun* to the target in *config*.
    Creates a new zvol named after the target and the LUN; yields a copy of
    *config* with 'dataset_lun<lun>', 'associate_lun<lun>' and
    'extent_lun<lun>' keys added.
    """
    name = config['target']['name']
    zvol_name = f"ds{name}"
    zvol = f'{pool_name}/{zvol_name}_lun{lun}'
    target_id = config['target']['id']
    lun_file_name = f'{name}_lun{lun}'
    if not extent_name:
        extent_name = lun_file_name
    with zvol_dataset(zvol, volsize) as dataset_config:
        with zvol_extent(zvol, extent_name=extent_name) as extent_config:
            extent_id = extent_config['id']
            with target_extent_associate(target_id, extent_id, lun) as associate_config:
                newconfig = config.copy()
                newconfig.update({
                    f'dataset_lun{lun}': dataset_config,
                    f'associate_lun{lun}': associate_config,
                    f'extent_lun{lun}': extent_config,
                })
                yield newconfig
@contextlib.contextmanager
def configured_target(config, name, extent_type, alias=None, extent_size=MB_512):
    """Create a fully wired-up target named *name* backed by either a FILE or
    a VOLUME extent, and yield the augmented config."""
    assert extent_type in ["FILE", "VOLUME"]
    if extent_type == "FILE":
        with configured_target_to_file_extent(config, name, pool_name, f"iscsids{name}",
                                              file_name, alias, extent_size, name) as cfg:
            yield cfg
    elif extent_type == "VOLUME":
        with configured_target_to_zvol_extent(config, name, f'{pool_name}/ds{name}',
                                              alias, name, extent_size) as cfg:
            yield cfg
@contextlib.contextmanager
def isns_enabled(delay=5):
    """Register the test iSNS server for the duration of the context, then
    deregister it and optionally sleep *delay* seconds to let things settle."""
    call('iscsi.global.update', {'isns_servers': [isns_ip]})
    try:
        yield
    finally:
        call('iscsi.global.update', {'isns_servers': []})
        if delay:
            print(f'Sleeping for {delay} seconds after turning off iSNS')
            sleep(delay)
def expect_check_condition(s, text=None, check_type=CheckType.CHECK_CONDITION):
    """
    Expect a CHECK CONDITION containing the specified text.
    :param s: a pyscsi.SCSI instance
    :param text: string expected as part of the CHECK CONDITION
    :param check_type: CheckType enum of the expected CHECK_CONDITION
    Issue a TEST UNIT READY and verify that the expected CHECK CONDITION is raised.
    If this version of pyscsi(/cython-iscsi) does not support CHECK CONDITION
    then just swallow the condition by issuing another TEST UNIT READY.
    """
    assert check_type in CheckType, f"Parameter '{check_type}' is not a CheckType"
    if pyscsi_supports_check_condition:
        with pytest.raises(Exception) as excinfo:
            s.testunitready()
        e = excinfo.value
        # pyscsi raises exception classes named after the SCSI status (see CheckType).
        assert e.__class__.__name__ == str(check_type), f"Unexpected CHECK CONDITION type. Got '{e.__class__.__name__}', expected {str(check_type)}"
        if text:
            assert text in str(e), f"Exception did not match: {text}"
    else:
        # If we cannot detect a CHECK CONDITION, then swallow it by retrying a TUR
        try:
            s.testunitready()
        except TypeError:
            s.testunitready()
def _verify_inquiry(s):
    """
    Verify that the supplied SCSI has the expected INQUIRY response.
    :param s: a pyscsi.SCSI instance
    """
    TUR(s)
    data = s.inquiry().result
    vendor = data['t10_vendor_identification'].decode('utf-8')
    product = data['product_identification'].decode('utf-8')
    assert vendor.startswith("TrueNAS"), str(data)
    assert product.startswith("iSCSI Disk"), str(data)
def get_target(targetid):
    """
    Return target JSON data.
    :param targetid: id of the target (anything acceptable to int())
    """
    return call('iscsi.target.get_instance', int(targetid))
def get_targets():
    """
    Return a dictionary of target JSON data, keyed by target name.
    """
    by_name = {}
    for entry in call('iscsi.target.query'):
        by_name[entry['name']] = entry
    return by_name
def modify_target(targetid, payload):
    """Update target *targetid* with the fields in *payload*."""
    call('iscsi.target.update', targetid, payload)
def set_target_alias(targetid, newalias):
    """Set the alias of target *targetid* to *newalias*."""
    modify_target(targetid, {'alias': newalias})
def set_target_initiator_id(targetid, initiatorid):
    """Point the target's single group at initiator group *initiatorid*."""
    target_data = get_target(targetid)
    assert 'groups' in target_data, target_data
    groups = target_data['groups']
    # Only targets with exactly one group are supported by this helper.
    assert len(groups) == 1, target_data
    groups[0]['initiator'] = initiatorid
    modify_target(targetid, {'groups': groups})
def _get_service(service_name='iscsitarget'):
    """Return the service.query entry for *service_name*."""
    return call('service.query', [['service', '=', service_name]], {'get': True})
@pytest.mark.dependency(name="iscsi_cmd_00")
def test_00_setup(request):
    """Enable and start the iSCSI service; most other tests depend on this."""
    # Enable iSCSI service
    payload = {"enable": True}
    call('service.update', 'iscsitarget', payload)
    # Start iSCSI service
    call('service.start', 'iscsitarget')
    sleep(1)
    # Verify running
    service = _get_service()
    assert service['state'] == "RUNNING", service
def test_01_inquiry(request):
    """
    This tests the Vendor and Product information in an INQUIRY response
    are 'TrueNAS' and 'iSCSI Disk' respectively.
    """
    depends(request, ["iscsi_cmd_00"], scope="session")
    # Build portal -> target -> dataset -> file extent -> association,
    # then log in and issue the INQUIRY.
    with initiator():
        with portal() as portal_config:
            portal_id = portal_config['id']
            with target(target_name, [{'portal': portal_id}]) as target_config:
                target_id = target_config['id']
                with dataset(dataset_name):
                    with file_extent(pool_name, dataset_name, file_name) as extent_config:
                        extent_id = extent_config['id']
                        with target_extent_associate(target_id, extent_id):
                            iqn = f'{basename}:{target_name}'
                            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
                                _verify_inquiry(s)
def test_02_read_capacity16(request):
    """
    This tests that the target created returns the correct size to READ CAPACITY (16).
    It performs this test with a couple of sizes (100 MB and 512 MB) for both
    file & zvol based extents, reusing the same target/portal throughout.
    """
    depends(request, ["iscsi_cmd_00"], scope="session")
    with initiator():
        with portal() as portal_config:
            portal_id = portal_config['id']
            with target(target_name, [{'portal': portal_id}]) as target_config:
                target_id = target_config['id']
                with dataset(dataset_name):
                    # 100 MB file extent
                    with file_extent(pool_name, dataset_name, file_name, MB_100) as extent_config:
                        extent_id = extent_config['id']
                        with target_extent_associate(target_id, extent_id):
                            iqn = f'{basename}:{target_name}'
                            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
                                verify_capacity(s, MB_100)
                    # 512 MB file extent
                    with file_extent(pool_name, dataset_name, file_name, MB_512) as extent_config:
                        extent_id = extent_config['id']
                        with target_extent_associate(target_id, extent_id):
                            iqn = f'{basename}:{target_name}'
                            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
                                verify_capacity(s, MB_512)
                # 100 MB zvol extent
                with zvol_dataset(zvol, MB_100):
                    with zvol_extent(zvol) as extent_config:
                        extent_id = extent_config['id']
                        with target_extent_associate(target_id, extent_id):
                            iqn = f'{basename}:{target_name}'
                            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
                                verify_capacity(s, MB_100)
                # 512 MB zvol extent
                with zvol_dataset(zvol):
                    with zvol_extent(zvol) as extent_config:
                        extent_id = extent_config['id']
                        with target_extent_associate(target_id, extent_id):
                            iqn = f'{basename}:{target_name}'
                            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
                                verify_capacity(s, MB_512)
def target_test_readwrite16(ip, iqn):
    """
    This tests WRITE SAME (16), READ (16) and WRITE (16)
    operations on the specified target.
    :param ip: IP address of the iSCSI portal
    :param iqn: full IQN of the target to log in to
    Data written in the first session is verified again after a fresh login,
    demonstrating it persisted across iSCSI sessions.
    """
    zeros = bytearray(512)
    deadbeef = bytearray.fromhex('deadbeef') * 128
    deadbeef_lbas = [1, 5, 7]
    with iscsi_scsi_connection(ip, iqn) as s:
        TUR(s)
        # First let's write zeros to the first 12 blocks using WRITE SAME (16)
        s.writesame16(0, 12, zeros)
        # Check results using READ (16)
        for lba in range(0, 12):
            r = s.read16(lba, 1)
            assert r.datain == zeros, r.datain
        # Now let's write DEADBEEF to a few LBAs using WRITE (16)
        for lba in deadbeef_lbas:
            s.write16(lba, 1, deadbeef)
        # Check results using READ (16)
        for lba in range(0, 12):
            r = s.read16(lba, 1)
            if lba in deadbeef_lbas:
                assert r.datain == deadbeef, r.datain
            else:
                assert r.datain == zeros, r.datain
    # Drop the iSCSI connection and login again
    with iscsi_scsi_connection(ip, iqn) as s:
        TUR(s)
        # Check results using READ (16)
        for lba in range(0, 12):
            r = s.read16(lba, 1)
            if lba in deadbeef_lbas:
                assert r.datain == deadbeef, r.datain
            else:
                assert r.datain == zeros, r.datain
        # Do a WRITE for > 1 LBA
        s.write16(10, 2, deadbeef * 2)
        # Check results using READ (16)
        deadbeef_lbas.extend([10, 11])
        for lba in range(0, 12):
            r = s.read16(lba, 1)
            if lba in deadbeef_lbas:
                assert r.datain == deadbeef, r.datain
            else:
                assert r.datain == zeros, r.datain
        # Do a couple of READ (16) for > 1 LBA
        # At this stage we have written deadbeef to LBAs 1,5,7,10,11
        r = s.read16(0, 2)
        assert r.datain == zeros + deadbeef, r.datain
        r = s.read16(1, 2)
        assert r.datain == deadbeef + zeros, r.datain
        r = s.read16(2, 2)
        assert r.datain == zeros * 2, r.datain
        r = s.read16(10, 2)
        assert r.datain == deadbeef * 2, r.datain
def test_03_readwrite16_file_extent(request):
    """
    This tests WRITE SAME (16), READ (16) and WRITE (16) operations with
    a file extent based iSCSI target.
    """
    depends(request, ["iscsi_cmd_00"], scope="session")
    with initiator_portal() as config:
        with configured_target_to_file_extent(config, target_name, pool_name, dataset_name, file_name):
            target_test_readwrite16(truenas_server.ip, f'{basename}:{target_name}')
def test_04_readwrite16_zvol_extent(request):
    """
    This tests WRITE SAME (16), READ (16) and WRITE (16) operations with
    a zvol extent based iSCSI target.
    """
    depends(request, ["iscsi_cmd_00"], scope="session")
    with initiator_portal() as config:
        with configured_target_to_zvol_extent(config, target_name, zvol):
            target_test_readwrite16(truenas_server.ip, f'{basename}:{target_name}')
@skip_invalid_initiatorname
def test_05_chap(request):
    """
    This tests that CHAP auth operates as expected:
    login fails with no credentials, fails with a wrong secret, and
    succeeds with the correct user/secret pair.
    """
    depends(request, ["iscsi_cmd_00"], scope="session")
    user = "user1"
    secret = 'sec1' + ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=10))
    with initiator():
        with portal() as portal_config:
            portal_id = portal_config['id']
            auth_tag = 1
            with iscsi_auth(auth_tag, user, secret):
                with target(target_name, [{'portal': portal_id, 'authmethod': 'CHAP', 'auth': auth_tag}]) as target_config:
                    target_id = target_config['id']
                    with dataset(dataset_name):
                        with file_extent(pool_name, dataset_name, file_name) as extent_config:
                            extent_id = extent_config['id']
                            with target_extent_associate(target_id, extent_id):
                                iqn = f'{basename}:{target_name}'
                                # Try and fail to connect without supplying CHAP creds
                                with pytest.raises(RuntimeError) as ve:
                                    with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
                                        TUR(s)
                                        assert False, "Should not have been able to connect without CHAP credentials."
                                assert 'Unable to connect to' in str(ve), ve
                                # Try and fail to connect supplying incorrect CHAP creds
                                with pytest.raises(RuntimeError) as ve:
                                    with iscsi_scsi_connection(truenas_server.ip, iqn, 0, user, "WrongSecret") as s:
                                        TUR(s)
                                        assert False, "Should not have been able to connect without CHAP credentials."
                                assert 'Unable to connect to' in str(ve), ve
                                # Finally ensure we can connect with the right CHAP creds
                                with iscsi_scsi_connection(truenas_server.ip, iqn, 0, user, secret) as s:
                                    _verify_inquiry(s)
@skip_invalid_initiatorname
def test_06_mutual_chap(request):
    """
    This tests that Mutual CHAP auth operates as expected:
    login fails with missing or wrong credentials (one-way or mutual),
    and succeeds with the correct user/secret and peer user/secret pairs.
    """
    depends(request, ["iscsi_cmd_00"], scope="session")
    user = "user1"
    secret = 'sec1' + ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=10))
    peer_user = "user2"
    peer_secret = 'sec2' + ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=10))
    with initiator():
        with portal() as portal_config:
            portal_id = portal_config['id']
            auth_tag = 1
            with iscsi_auth(auth_tag, user, secret, peer_user, peer_secret):
                with target(target_name, [{'portal': portal_id, 'authmethod': 'CHAP_MUTUAL', 'auth': auth_tag}]) as target_config:
                    target_id = target_config['id']
                    with dataset(dataset_name):
                        with file_extent(pool_name, dataset_name, file_name) as extent_config:
                            extent_id = extent_config['id']
                            with target_extent_associate(target_id, extent_id):
                                iqn = f'{basename}:{target_name}'
                                # Try and fail to connect without supplying Mutual CHAP creds
                                with pytest.raises(RuntimeError) as ve:
                                    with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
                                        TUR(s)
                                        assert False, "Should not have been able to connect without CHAP credentials."
                                assert 'Unable to connect to' in str(ve), ve
                                # Try and fail to connect supplying incorrect CHAP creds (not mutual)
                                with pytest.raises(RuntimeError) as ve:
                                    with iscsi_scsi_connection(truenas_server.ip, iqn, 0, user, "WrongSecret") as s:
                                        TUR(s)
                                        assert False, "Should not have been able to connect with incorrect CHAP credentials."
                                assert 'Unable to connect to' in str(ve), ve
                                # Ensure we can connect with the right CHAP creds, if we *choose* not
                                # to validate things.
                                with iscsi_scsi_connection(truenas_server.ip, iqn, 0, user, secret) as s:
                                    _verify_inquiry(s)
                                # Try and fail to connect supplying incorrect Mutual CHAP creds
                                with pytest.raises(RuntimeError) as ve:
                                    with iscsi_scsi_connection(truenas_server.ip, iqn, 0, user, secret, peer_user, "WrongSecret") as s:
                                        TUR(s)
                                        assert False, "Should not have been able to connect with incorrect Mutual CHAP credentials."
                                assert 'Unable to connect to' in str(ve), ve
                                # Finally ensure we can connect with the right Mutual CHAP creds
                                with iscsi_scsi_connection(truenas_server.ip, iqn, 0, user, secret, peer_user, peer_secret) as s:
                                    _verify_inquiry(s)
def test_06_discovery_auth():
    """
    Test Discovery Auth validation:
    - empty authgroups and invalid authmethods are rejected;
    - an authgroup may only be used by one discovery-auth entry;
    - an auth entry in use by discovery auth cannot be deleted.
    """
    # No discovery auth entries exist at the start.
    assert [] == call('iscsi.discoveryauth.query')
    # An authgroup with no auth entries is rejected.
    with pytest.raises(ValidationErrors) as ve:
        call('iscsi.discoveryauth.create', {'authmethod': 'CHAP', 'authgroup': 100})
    assert ve.value.errors == [
        ValidationError(
            'iscsi_discoveryauth_create.authgroup',
            'The specified authgroup does not contain any entries.'
        )]
    # 'None' is not a valid discovery authmethod.
    with pytest.raises(ValidationErrors) as ve:
        call('iscsi.discoveryauth.create', {'authmethod': 'None', 'authgroup': 0})
    assert ve.value.errors == [
        ValidationError(
            'iscsi_discoveryauth_create.authmethod',
            'Invalid choice: None',
            errno.EINVAL
        )]
    randsec = ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=10))
    with iscsi_auth(1, 'user1', 'sec1' + randsec) as auth_config:
        with iscsi_discovery_auth('CHAP', 1) as item:
            assert item['authmethod'] == 'CHAP'
            assert item['authgroup'] == 1
            # The same authgroup cannot be used by a second discovery-auth entry.
            with pytest.raises(ValidationErrors) as ve:
                call('iscsi.discoveryauth.create', {'authmethod': 'CHAP', 'authgroup': 1})
            assert ve.value.errors == [
                ValidationError(
                    'iscsi_discoveryauth_create.authgroup',
                    'The specified authgroup is already in use.'
                )]
            # Now that the auth is in use, we should NOT be able to delete it
            with pytest.raises(CallError) as e:
                call('iscsi.auth.delete', auth_config['id'])
            assert f'Authorized access of {auth_config["id"]} is being used by discovery auth(s): {item["id"]}' in str(e), e
    # Same restriction applies to mutual-CHAP discovery auth.
    with iscsi_auth(2, 'user2', 'sec2' + randsec, 'peeruser2', 'psec2' + randsec) as auth_config:
        with iscsi_discovery_auth('CHAP_MUTUAL', 2) as item:
            with pytest.raises(ValidationErrors) as ve:
                call('iscsi.discoveryauth.create', {'authmethod': 'CHAP', 'authgroup': 2})
            assert ve.value.errors == [
                ValidationError(
                    'iscsi_discoveryauth_create.authgroup',
                    'The specified authgroup is already in use.'
                )]
def test_07_report_luns(request):
    """
    This tests REPORT LUNS and accessing multiple LUNs on a target:
    first with a single file-backed LUN 0, then with a zvol-backed LUN 1
    added, and finally after LUN 1 has been removed again.
    """
    depends(request, ["iscsi_cmd_00"], scope="session")
    iqn = f'{basename}:{target_name}'
    with initiator():
        with portal() as portal_config:
            portal_id = portal_config['id']
            with target(target_name, [{'portal': portal_id}]) as target_config:
                target_id = target_config['id']
                with dataset(dataset_name):
                    # LUN 0 (100 MB file extent)
                    with file_extent(pool_name, dataset_name, file_name, MB_100) as extent_config:
                        extent_id = extent_config['id']
                        with target_extent_associate(target_id, extent_id):
                            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
                                verify_luns(s, [0])
                                verify_capacity(s, MB_100)
                            # Now create a 512 MB zvol and associate with LUN 1
                            with zvol_dataset(zvol):
                                with zvol_extent(zvol) as extent_config:
                                    extent_id = extent_config['id']
                                    with target_extent_associate(target_id, extent_id, 1):
                                        # Connect to LUN 0
                                        with iscsi_scsi_connection(truenas_server.ip, iqn, 0) as s0:
                                            verify_luns(s0, [0, 1])
                                            verify_capacity(s0, MB_100)
                                        # Connect to LUN 1
                                        with iscsi_scsi_connection(truenas_server.ip, iqn, 1) as s1:
                                            verify_luns(s1, [0, 1])
                                            verify_capacity(s1, MB_512)
                            # Check again now that LUN 1 has been removed again.
                            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
                                verify_luns(s, [0])
                                verify_capacity(s, MB_100)
def target_test_snapshot_single_login(ip, iqn, dataset_id):
    """
    This tests snapshots with an iSCSI target using a single
    iSCSI session.
    :param ip: IP address of the iSCSI portal
    :param iqn: full IQN of the target to log in to
    :param dataset_id: id of the dataset backing the target, used to take
        and roll back snapshots while the session stays logged in
    """
    zeros = bytearray(512)
    deadbeef = bytearray.fromhex('deadbeef') * 128
    deadbeef_lbas = [1, 5, 7]
    all_deadbeef_lbas = [1, 5, 7, 10, 11]
    with iscsi_scsi_connection(ip, iqn) as s:
        TUR(s)
        # First let's write zeros to the first 12 blocks using WRITE SAME (16)
        s.writesame16(0, 12, zeros)
        # Check results using READ (16)
        for lba in range(0, 12):
            r = s.read16(lba, 1)
            assert r.datain == zeros, r.datain
        # Take snap0
        with snapshot(dataset_id, "snap0", get=True) as snap0_config:
            # Now let's write DEADBEEF to a few LBAs using WRITE (16)
            for lba in deadbeef_lbas:
                s.write16(lba, 1, deadbeef)
            # Check results using READ (16)
            for lba in range(0, 12):
                r = s.read16(lba, 1)
                if lba in deadbeef_lbas:
                    assert r.datain == deadbeef, r.datain
                else:
                    assert r.datain == zeros, r.datain
            # Take snap1
            with snapshot(dataset_id, "snap1", get=True) as snap1_config:
                # Do a WRITE for > 1 LBA
                s.write16(10, 2, deadbeef * 2)
                # Check results using READ (16)
                for lba in range(0, 12):
                    r = s.read16(lba, 1)
                    if lba in all_deadbeef_lbas:
                        assert r.datain == deadbeef, r.datain
                    else:
                        assert r.datain == zeros, r.datain
                # Now revert to snap1
                snapshot_rollback(snap1_config['id'])
                # Check results using READ (16)
                for lba in range(0, 12):
                    r = s.read16(lba, 1)
                    if lba in deadbeef_lbas:
                        assert r.datain == deadbeef, r.datain
                    else:
                        assert r.datain == zeros, r.datain
            # Now revert to snap0
            snapshot_rollback(snap0_config['id'])
            # Check results using READ (16)
            for lba in range(0, 12):
                r = s.read16(lba, 1)
                assert r.datain == zeros, r.datain
def target_test_snapshot_multiple_login(ip, iqn, dataset_id):
    """
    Test snapshots with an iSCSI target using multiple iSCSI sessions.

    Same data/snapshot/rollback sequence as
    target_test_snapshot_single_login, but a fresh iSCSI login is used for
    each stage to ensure state survives across sessions.

    :param ip: address of the iSCSI portal
    :param iqn: IQN of the target to log in to
    :param dataset_id: dataset/zvol identifier used for snapshot operations
    """
    zeros = bytearray(512)
    deadbeef = bytearray.fromhex('deadbeef') * 128
    deadbeef_lbas = [1, 5, 7]
    all_deadbeef_lbas = [1, 5, 7, 10, 11]

    def _verify_first_12_lbas(sess, beef_lbas):
        # Read the first 12 LBAs one at a time and check that each block
        # contains DEADBEEF (if listed in beef_lbas) or zeros otherwise.
        for lba in range(0, 12):
            r = sess.read16(lba, 1)
            if lba in beef_lbas:
                assert r.datain == deadbeef, r.datain
            else:
                assert r.datain == zeros, r.datain

    with iscsi_scsi_connection(ip, iqn) as s:
        TUR(s)
        # First let's write zeros to the first 12 blocks using WRITE SAME (16)
        s.writesame16(0, 12, zeros)
        # Check results using READ (16)
        _verify_first_12_lbas(s, [])
    # Take snap0
    with snapshot(dataset_id, "snap0", get=True) as snap0_config:
        with iscsi_scsi_connection(ip, iqn) as s:
            TUR(s)
            # Now let's write DEADBEEF to a few LBAs using WRITE (16)
            for lba in deadbeef_lbas:
                s.write16(lba, 1, deadbeef)
            # Check results using READ (16)
            _verify_first_12_lbas(s, deadbeef_lbas)
        # Take snap1
        with snapshot(dataset_id, "snap1", get=True) as snap1_config:
            with iscsi_scsi_connection(ip, iqn) as s:
                TUR(s)
                # Do a WRITE for > 1 LBA
                s.write16(10, 2, deadbeef * 2)
                # Check results using READ (16)
                _verify_first_12_lbas(s, all_deadbeef_lbas)
            # Now revert to snap1
            snapshot_rollback(snap1_config['id'])
            with iscsi_scsi_connection(ip, iqn) as s:
                TUR(s)
                # Check results using READ (16)
                _verify_first_12_lbas(s, deadbeef_lbas)
        # Now revert to snap0 (snap1 has been cleaned up by its context)
        snapshot_rollback(snap0_config['id'])
        with iscsi_scsi_connection(ip, iqn) as s:
            TUR(s)
            # Check results using READ (16)
            _verify_first_12_lbas(s, [])
def test_08_snapshot_zvol_extent(request):
    """
    This tests snapshots with a zvol extent based iSCSI target.

    Runs the single-login scenario, tears the target down, then recreates it
    and runs the multiple-login scenario.
    """
    depends(request, ["iscsi_cmd_00"], scope="session")
    iqn = f'{basename}:{target_name}'
    with initiator_portal() as config:
        with configured_target_to_zvol_extent(config, target_name, zvol) as iscsi_config:
            target_test_snapshot_single_login(truenas_server.ip, iqn, iscsi_config['dataset'])
        with configured_target_to_zvol_extent(config, target_name, zvol) as iscsi_config:
            target_test_snapshot_multiple_login(truenas_server.ip, iqn, iscsi_config['dataset'])
def test_09_snapshot_file_extent(request):
    """
    This tests snapshots with a file extent based iSCSI target.

    Runs the single-login scenario, tears the target down, then recreates it
    and runs the multiple-login scenario.
    """
    depends(request, ["iscsi_cmd_00"], scope="session")
    iqn = f'{basename}:{target_name}'
    with initiator_portal() as config:
        with configured_target_to_file_extent(config, target_name, pool_name, dataset_name, file_name) as iscsi_config:
            target_test_snapshot_single_login(truenas_server.ip, iqn, iscsi_config['dataset'])
        # BUGFIX: this used configured_target_to_zvol_extent, which merely
        # duplicated test_08 coverage; this test is about FILE extents.
        with configured_target_to_file_extent(config, target_name, pool_name, dataset_name, file_name) as iscsi_config:
            target_test_snapshot_multiple_login(truenas_server.ip, iqn, iscsi_config['dataset'])
def test_10_target_alias(request):
    """
    This tests iSCSI target alias.

    At the moment SCST does not use the alias usefully (e.g. TargetAlias in
    LOGIN response). When this is rectified this test should be extended.
    """
    depends(request, ["iscsi_cmd_00"], scope="session")
    # Build per-target metadata: name, alias and backing file name.
    data = {}
    for t in ["A", "B"]:
        data[t] = {}
        data[t]['name'] = f"{target_name}{t.lower()}"
        data[t]['alias'] = f"{target_name}{t}_alias"
        data[t]['file'] = f"{target_name}{t}_file"
    A = data['A']
    B = data['B']
    with initiator_portal() as config:
        # Target A is created WITH an alias, target B without one.
        with configured_target_to_file_extent(config, A['name'], pool_name, dataset_name, A['file'], A['alias']) as iscsi_config:
            with target(B['name'], [{'portal': iscsi_config['portal']['id']}]) as targetB_config:
                with file_extent(pool_name, dataset_name, B['file'], extent_name="extentB") as extentB_config:
                    with target_extent_associate(targetB_config['id'], extentB_config['id']):
                        # Created two targets, one with an alias, one without. Check them.
                        targets = get_targets()
                        assert targets[A['name']]['alias'] == A['alias'], targets[A['name']]['alias']
                        assert targets[B['name']]['alias'] is None, targets[B['name']]['alias']
                        # Update alias for B
                        set_target_alias(targets[B['name']]['id'], B['alias'])
                        targets = get_targets()
                        assert targets[A['name']]['alias'] == A['alias'], targets[A['name']]['alias']
                        assert targets[B['name']]['alias'] == B['alias'], targets[B['name']]['alias']
                        # Clear alias for A (empty string maps to None)
                        set_target_alias(targets[A['name']]['id'], "")
                        targets = get_targets()
                        assert targets[A['name']]['alias'] is None, targets[A['name']]['alias']
                        assert targets[B['name']]['alias'] == B['alias'], targets[B['name']]['alias']
                        # Clear alias for B
                        set_target_alias(targets[B['name']]['id'], "")
                        targets = get_targets()
                        assert targets[A['name']]['alias'] is None, targets[A['name']]['alias']
                        assert targets[B['name']]['alias'] is None, targets[B['name']]['alias']
def test_11_modify_portal(request):
    """
    Test that we can modify a target portal.
    """
    depends(request, ["iscsi_cmd_00"], scope="session")
    with portal() as portal_config:
        assert portal_config['comment'] == 'Default portal', portal_config
        # First just change the comment
        payload = {'comment': 'New comment'}
        call('iscsi.portal.update', portal_config['id'], payload)
        new_config = call('iscsi.portal.get_instance', portal_config['id'])
        assert new_config['comment'] == 'New comment', new_config
        # Then try to reapply everything
        payload = {'comment': 'test1', 'discovery_authmethod': 'NONE', 'discovery_authgroup': None, 'listen': [{'ip': '0.0.0.0'}]}
        # payload = {'comment': 'test1', 'discovery_authmethod': 'NONE', 'discovery_authgroup': None, 'listen': [{'ip': '0.0.0.0'}, {'ip': '::'}]}
        call('iscsi.portal.update', portal_config['id'], payload)
        new_config = call('iscsi.portal.get_instance', portal_config['id'])
        assert new_config['comment'] == 'test1', new_config
def test_12_pblocksize_setting(request):
    """
    This tests whether toggling pblocksize has the desired result on READ CAPACITY 16, i.e.
    whether setting it results in LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT being zero.
    """
    depends(request, ["iscsi_cmd_00"], scope="session")
    iqn = f'{basename}:{target_name}'
    with initiator_portal() as config:
        # Round 1: file-backed extent.
        with configured_target_to_file_extent(config, target_name, pool_name, dataset_name, file_name) as iscsi_config:
            extent_config = iscsi_config['extent']
            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
                TUR(s)
                data = s.readcapacity16().result
                # By default 512 << 3 == 4096
                assert data['lbppbe'] == 3, data
                # First let's just change the blocksize to 2K
                payload = {'blocksize': 2048}
                call('iscsi.extent.update', extent_config['id'], payload)
                expect_check_condition(s, sense_ascq_dict[0x2900])  # "POWER ON, RESET, OR BUS DEVICE RESET OCCURRED"
                data = s.readcapacity16().result
                assert data['block_length'] == 2048, data
                assert data['lbppbe'] == 1, data
                # Now let's change it back to 512, but also set pblocksize
                payload = {'blocksize': 512, 'pblocksize': True}
                call('iscsi.extent.update', extent_config['id'], payload)
                expect_check_condition(s, sense_ascq_dict[0x2900])  # "POWER ON, RESET, OR BUS DEVICE RESET OCCURRED"
                data = s.readcapacity16().result
                assert data['block_length'] == 512, data
                # pblocksize=True means logical blocks per physical block exponent is 0
                assert data['lbppbe'] == 0, data
        # Round 2: zvol-backed extent.
        with configured_target_to_zvol_extent(config, target_name, zvol) as iscsi_config:
            extent_config = iscsi_config['extent']
            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
                TUR(s)
                data = s.readcapacity16().result
                # We created a vol with volblocksize == 16K (512 << 5)
                assert data['lbppbe'] == 5, data
                # First let's just change the blocksize to 4K
                payload = {'blocksize': 4096}
                call('iscsi.extent.update', extent_config['id'], payload)
                expect_check_condition(s, sense_ascq_dict[0x2900])  # "POWER ON, RESET, OR BUS DEVICE RESET OCCURRED"
                data = s.readcapacity16().result
                assert data['block_length'] == 4096, data
                assert data['lbppbe'] == 2, data
                # Now let's also set pblocksize
                payload = {'pblocksize': True}
                call('iscsi.extent.update', extent_config['id'], payload)
                # NOTE(review): unlike the blocksize changes above this uses a
                # plain TUR rather than expect_check_condition — presumably
                # changing pblocksize alone raises no unit attention; confirm.
                TUR(s)
                data = s.readcapacity16().result
                assert data['block_length'] == 4096, data
                assert data['lbppbe'] == 0, data
def generate_name(length, base="target"):
    """
    Return a pseudo-random name of exactly *length* characters.

    The name has the form '<base>-<length>-<random suffix>' where the suffix
    is lowercase letters and digits.  Asserts if *length* is too short to
    hold the prefix.
    """
    prefix = f"{base}-{length}-"
    padding = length - len(prefix)
    assert padding >= 0, f"Function not suitable for such a short length: {length}"
    suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=padding))
    return prefix + suffix
@pytest.mark.parametrize('extent_type', ["FILE", "VOLUME"])
def test_13_test_target_name(request, extent_type):
    """
    Test the user-supplied target name.

    A 64 character name is the maximum allowed; 65 characters must be
    rejected with a validation error.
    """
    depends(request, ["iscsi_cmd_00"], scope="session")
    with initiator_portal() as config:
        # 64 characters is OK — target must be usable.
        name64 = generate_name(64)
        with configured_target(config, name64, extent_type):
            iqn = f'{basename}:{name64}'
            target_test_readwrite16(truenas_server.ip, iqn)
        # 65 characters must be rejected at creation time.
        name65 = generate_name(65)
        with pytest.raises(ValidationErrors) as ve:
            with configured_target(config, name65, extent_type):
                assert False, f"Should not have been able to create a target with name length {len(name65)}."
        assert ve.value.errors == [
            ValidationError('iscsi_extent_create.name', 'The value may not be longer than 64 characters', errno.EINVAL),
        ]
@pytest.mark.parametrize('extent_type', ["FILE", "VOLUME"])
def test_14_target_lun_extent_modify(request, extent_type):
    """
    Perform some tests of the iscsi.targetextent.update API, including
    trying to provide invalid parameters.
    """
    depends(request, ["iscsi_cmd_00"], scope="session")
    name1 = f'{target_name}1'
    name2 = f'{target_name}2'
    name3 = f'{target_name}3'
    name4 = f'{target_name}4'

    @contextlib.contextmanager
    def expect_lun_in_use_failure():
        # The wrapped body must raise ValidationErrors complaining the LUN is taken.
        with pytest.raises(ValidationErrors) as ve:
            yield
            assert False, "Should not be able to associate because LUN in use"
        assert "LUN ID is already being used for this target." in str(ve.value)

    @contextlib.contextmanager
    def expect_extent_in_use_failure():
        # The wrapped body must raise ValidationErrors complaining the extent is taken.
        with pytest.raises(ValidationErrors) as ve:
            yield
            assert False, "Should not be able to associate because extent in use"
        assert "Extent is already in use" in str(ve.value)

    # The following will create the extents with the same name as the target.
    with initiator_portal() as config:
        with configured_target(config, name1, extent_type) as config1:
            with configured_target(config, name2, extent_type) as config2:
                with configured_target(config, name3, extent_type) as config3:
                    # Create an extra extent to 'play' with
                    with zvol_dataset(zvol):
                        with zvol_extent(zvol, extent_name=name4) as config4:
                            # First we will attempt some new, but invalid associations
                            # LUN in use
                            with expect_lun_in_use_failure():
                                payload = {
                                    'target': config1['target']['id'],
                                    'lunid': 0,
                                    'extent': config4['id']
                                }
                                call('iscsi.targetextent.create', payload)
                            # extent in use
                            with expect_extent_in_use_failure():
                                payload = {
                                    'target': config1['target']['id'],
                                    'lunid': 1,
                                    'extent': config2['extent']['id']
                                }
                                call('iscsi.targetextent.create', payload)
                            # Now succeed in creating a new target/lun/extent association
                            payload = {
                                'target': config1['target']['id'],
                                'lunid': 1,
                                'extent': config4['id']
                            }
                            call('iscsi.targetextent.create', payload)
                            # Get the current config
                            textents = call('iscsi.targetextent.query')
                            # Now perform some updates that will not succeed
                            textent4 = next(textent for textent in textents if textent['extent'] == config4['id'])
                            # Attempt some invalid updates
                            # LUN in use
                            with expect_lun_in_use_failure():
                                payload = {
                                    'target': textent4['target'],
                                    'lunid': 0,
                                    'extent': textent4['extent']
                                }
                                call('iscsi.targetextent.update', textent4['id'], payload)
                            # extent in use in another target
                            with expect_extent_in_use_failure():
                                payload = {
                                    'target': textent4['target'],
                                    'lunid': textent4['lunid'],
                                    'extent': config3['extent']['id']
                                }
                                call('iscsi.targetextent.update', textent4['id'], payload)
                            # extent in use in this target
                            with expect_extent_in_use_failure():
                                payload = {
                                    'target': textent4['target'],
                                    'lunid': textent4['lunid'],
                                    'extent': config1['extent']['id']
                                }
                                call('iscsi.targetextent.update', textent4['id'], payload)
                            # Move a target to LUN 1
                            textent2 = next(textent for textent in textents if textent['extent'] == config2['extent']['id'])
                            payload = {
                                'target': textent2['target'],
                                'lunid': 1,
                                'extent': textent2['extent']
                            }
                            call('iscsi.targetextent.update', textent2['id'], payload)
                            # Try to move it (to target1) just by changing the target, will clash
                            with expect_lun_in_use_failure():
                                payload = {
                                    'target': config1['target']['id'],
                                    'lunid': 1,
                                    'extent': textent2['extent']
                                }
                                call('iscsi.targetextent.update', textent2['id'], payload)
                            # But can move it elsewhere (target3)
                            payload = {
                                'target': config3['target']['id'],
                                'lunid': 1,
                                'extent': textent2['extent']
                            }
                            call('iscsi.targetextent.update', textent2['id'], payload)
                            # Delete textent4 association
                            call('iscsi.targetextent.delete', textent4['id'])
                            # Now can do the move that previously failed
                            payload = {
                                'target': config1['target']['id'],
                                'lunid': 1,
                                'extent': textent2['extent']
                            }
                            call('iscsi.targetextent.update', textent2['id'], payload)
                            # Restore it
                            payload = {
                                'target': config2['target']['id'],
                                'lunid': 0,
                                'extent': textent2['extent']
                            }
                            call('iscsi.targetextent.update', textent2['id'], payload)
def _isns_wait_for_iqn(isns_client, iqn, timeout=10):
iqns = set(isns_client.list_targets())
while timeout > 0 and iqn not in iqns:
sleep(1)
iqns = set(isns_client.list_targets())
return iqns
def test_15_test_isns(request):
    """
    Test ability to register targets with iSNS.
    """
    # Will use a more unique target name than usual, just in case several test
    # runs are hitting the same iSNS server at the same time.
    depends(request, ["iscsi_cmd_00"], scope="session")
    _host = socket.gethostname()
    _rand = ''.join(random.choices(string.digits + string.ascii_lowercase, k=12))
    _name_base = f'isnstest:{_host}:{_rand}'
    _target1 = f'{_name_base}:1'
    _target2 = f'{_name_base}:2'
    _initiator = f'iqn.2005-10.org.freenas.ctl:isnstest:{_name_base}:initiator'
    _iqn1 = f'{basename}:{_target1}'
    # BUGFIX: _iqn2 was built from _target1, duplicating _iqn1, so the second
    # target's iSNS registration was never really being checked.
    _iqn2 = f'{basename}:{_target2}'
    with isns_connection(isns_ip, _initiator) as isns_client:
        # First let's ensure that the targets are not already present.
        base_iqns = set(isns_client.list_targets())
        for iqn in [_iqn1, _iqn2]:
            assert iqn not in base_iqns, iqn
        # Create target1 and ensure it is still not present (because we
        # haven't switched on iSNS yet).
        with initiator_portal() as config:
            with configured_target_to_file_extent(config,
                                                  _target1,
                                                  pool_name,
                                                  dataset_name,
                                                  file_name) as iscsi_config:
                iqns = set(isns_client.list_targets())
                assert _iqn1 not in iqns, _iqn1
                # Now turn on the iSNS server
                with isns_enabled():
                    iqns = _isns_wait_for_iqn(isns_client, _iqn1)
                    assert _iqn1 in iqns, _iqn1
                    # Create another target and ensure it shows up too
                    with target(_target2,
                                [{'portal': iscsi_config['portal']['id']}]
                                ) as target2_config:
                        target_id = target2_config['id']
                        with zvol_dataset(zvol):
                            with zvol_extent(zvol) as extent_config:
                                extent_id = extent_config['id']
                                with target_extent_associate(target_id, extent_id):
                                    iqns = _isns_wait_for_iqn(isns_client, _iqn2)
                                    # BUGFIX: loop variable was 'inq' but the assert
                                    # tested the stale 'iqn' from the loop above, so
                                    # _iqn2 was never actually verified.
                                    for an_iqn in [_iqn1, _iqn2]:
                                        assert an_iqn in iqns, an_iqn
                # Now that iSNS is disabled again, ensure that our target is
                # no longer advertised
                iqns = set(isns_client.list_targets())
                assert _iqn1 not in iqns, _iqn1
        # Finally let's ensure that neither target is present.
        base_iqns = set(isns_client.list_targets())
        for iqn in [_iqn1, _iqn2]:
            assert iqn not in base_iqns, iqn
class TestFixtureInitiatorName:
    """Fixture for test_16_invalid_initiator_name"""

    # IQN of the shared target created once for the whole class.
    iqn = f'{basename}:{target_name}'

    @pytest.fixture(scope='class')
    def create_target(self):
        # Create a single FILE-backed target shared by all parametrized cases.
        with initiator_portal() as config:
            with configured_target(config, target_name, "FILE"):
                yield

    # (initiator_name, expected-to-connect) pairs.  None means let the
    # library pick a default initiator name.
    params = [
        (None, True),
        ("iqn.1991-05.com.microsoft:fake-host", True),
        ("iqn.1991-05.com.microsoft:fake-/-host", False),
        ("iqn.1991-05.com.microsoft:fake-#-host", False),
        ("iqn.1991-05.com.microsoft:fake-%s-host", False),
        ("iqn.1991-05.com.microsoft:unicode-\u6d4b\u8bd5-ok", True),  # 测试
        ("iqn.1991-05.com.microsoft:unicode-\u30c6\u30b9\u30c8-ok", True),  # テスト
        ("iqn.1991-05.com.microsoft:unicode-\u180E-bad", False),  # Mongolian vowel separator
        ("iqn.1991-05.com.microsoft:unicode-\u2009-bad", False),  # Thin Space
        ("iqn.1991-05.com.microsoft:unicode-\uFEFF-bad", False),  # Zero width no-break space
    ]

    @pytest.mark.parametrize("initiator_name, expected", params)
    def test_16_invalid_initiator_name(self, request, create_target, initiator_name, expected):
        """
        Deliberately send SCST some invalid initiator names and ensure it behaves OK.
        """
        depends(request, ["iscsi_cmd_00"], scope="session")
        if expected:
            # Valid name: login should succeed and INQUIRY should look sane.
            with iscsi_scsi_connection(truenas_server.ip, TestFixtureInitiatorName.iqn, initiator_name=initiator_name) as s:
                _verify_inquiry(s)
        else:
            # Invalid name: login must fail before the body ever runs.
            with pytest.raises(RuntimeError) as ve:
                with iscsi_scsi_connection(truenas_server.ip, TestFixtureInitiatorName.iqn, initiator_name=initiator_name) as s:
                    assert False, "Should not have been able to connect with invalid initiator name."
            assert 'Unable to connect to' in str(ve), ve
def _pr_check_registered_keys(s, expected=[]):
opcodes = s.device.opcodes
data = s.persistentreservein(opcodes.PERSISTENT_RESERVE_IN.serviceaction.READ_KEYS)
assert len(data.result['reservation_keys']) == len(expected), data.result
if len(expected):
expected_set = set(expected)
received_set = set(data.result['reservation_keys'])
assert expected_set == received_set, received_set
return data.result
def _pr_check_reservation(s, expected={'reservation_key': None, 'scope': None, 'type': None}):
opcodes = s.device.opcodes
data = s.persistentreservein(opcodes.PERSISTENT_RESERVE_IN.serviceaction.READ_RESERVATION)
for key, value in expected.items():
actual_value = data.result.get(key)
assert value == actual_value, data.result
return data.result
def _pr_register_key(s, value):
    """Register persistent-reservation key *value* on session *s* (PERSISTENT RESERVE OUT / REGISTER)."""
    opcodes = s.device.opcodes
    s.persistentreserveout(opcodes.PERSISTENT_RESERVE_OUT.serviceaction.REGISTER,
                           service_action_reservation_key=value)
def _pr_unregister_key(s, value):
    """Unregister key *value* on session *s* by REGISTERing a zero service action key."""
    opcodes = s.device.opcodes
    s.persistentreserveout(opcodes.PERSISTENT_RESERVE_OUT.serviceaction.REGISTER,
                           reservation_key=value,
                           service_action_reservation_key=0)
def _pr_reserve(s, pr_type, scope=LU_SCOPE, **kwargs):
    """Take a persistent reservation of *pr_type* on session *s* (PERSISTENT RESERVE OUT / RESERVE).

    kwargs typically carries reservation_key=<registered key>.
    """
    opcodes = s.device.opcodes
    s.persistentreserveout(opcodes.PERSISTENT_RESERVE_OUT.serviceaction.RESERVE,
                           scope=scope,
                           pr_type=pr_type,
                           **kwargs)
def _pr_release(s, pr_type, scope=LU_SCOPE, **kwargs):
    """Release a persistent reservation of *pr_type* on session *s* (PERSISTENT RESERVE OUT / RELEASE)."""
    opcodes = s.device.opcodes
    s.persistentreserveout(opcodes.PERSISTENT_RESERVE_OUT.serviceaction.RELEASE,
                           scope=scope,
                           pr_type=pr_type,
                           **kwargs)
@contextlib.contextmanager
def _pr_registration(s, key):
    """Context manager: register PR *key* on session *s*, unregister on exit."""
    _pr_register_key(s, key)
    try:
        yield
    finally:
        _pr_unregister_key(s, key)
# There is room for improvement here wrt SPC-5 5.14.11.2.3, but it is not urgent
# as we are hygienic wrt releasing reservations before unregistering keys.
@contextlib.contextmanager
def _pr_reservation(s, pr_type, scope=LU_SCOPE, other_connections=(), **kwargs):
    """
    Context manager: take a persistent reservation of *pr_type* on session
    *s*, release it on exit.

    :param other_connections: other open sessions to the same LU; after a
        registrants-only / all-registrants release they are each expected to
        raise the "RESERVATIONS RELEASED" unit attention.

    Note: the default for other_connections was a mutable list literal;
    replaced with an immutable empty tuple (behavior unchanged).
    """
    assert s not in other_connections, "Invalid parameter mix"
    _pr_reserve(s, pr_type, scope, **kwargs)
    try:
        yield
    finally:
        _pr_release(s, pr_type, scope, **kwargs)
        # Do processing as specified by SPC-5 5.14.11.2.2 Releasing
        # For the time being we will ignore the NUAR bit from SPC-5 7.5.11 Control mode page
        if pr_type in [PR_TYPE.WRITE_EXCLUSIVE_REGISTRANTS_ONLY,
                       PR_TYPE.EXCLUSIVE_ACCESS_REGISTRANTS_ONLY,
                       PR_TYPE.WRITE_EXCLUSIVE_ALL_REGISTRANTS,
                       PR_TYPE.EXCLUSIVE_ACCESS_ALL_REGISTRANTS]:
            # Give the target time to post the unit attention to the other sessions.
            sleep(5)
            for s2 in other_connections:
                expect_check_condition(s2, sense_ascq_dict[0x2A04])  # "RESERVATIONS RELEASED"
@skip_persistent_reservations
@pytest.mark.dependency(name="iscsi_basic_persistent_reservation")
def test_17_basic_persistent_reservation(request):
    """
    Single-session sanity check of persistent reservations: register a key,
    reserve WRITE EXCLUSIVE, then verify state is cleaned up as each context
    exits.
    """
    depends(request, ["iscsi_cmd_00"], scope="session")
    with initiator_portal() as config:
        with configured_target_to_zvol_extent(config, target_name, zvol):
            iqn = f'{basename}:{target_name}'
            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
                TUR(s)
                # Initially: no keys, no reservation.
                _pr_check_registered_keys(s, [])
                _pr_check_reservation(s)
                with _pr_registration(s, PR_KEY1):
                    _pr_check_registered_keys(s, [PR_KEY1])
                    _pr_check_reservation(s)
                    with _pr_reservation(s, PR_TYPE.WRITE_EXCLUSIVE, reservation_key=PR_KEY1):
                        _pr_check_registered_keys(s, [PR_KEY1])
                        _pr_check_reservation(s, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE})
                    # Reservation released, key still registered.
                    _pr_check_registered_keys(s, [PR_KEY1])
                    _pr_check_reservation(s)
                # Key unregistered.
                _pr_check_registered_keys(s, [])
                _pr_check_reservation(s)
@contextlib.contextmanager
def _pr_expect_reservation_conflict(s):
    """
    Context manager: assert that the wrapped SCSI operation on session *s*
    fails with a RESERVATION CONFLICT check; any other exception (or no
    exception at all) is a test failure.
    """
    try:
        yield
        assert False, "Failed to get expected PERSISTENT CONFLICT"
    except Exception as e:
        # Match on the exception class name against the expected check type.
        if e.__class__.__name__ != str(CheckType.RESERVATION_CONFLICT):
            raise e
def _check_target_rw_paths(s1, s2):
"""
Check that the two supplied paths can read/write data, and they point at the same LUN.
"""
zeros = bytearray(512)
deadbeef = bytearray.fromhex('deadbeef') * 128
abba = bytearray.fromhex('abbaabba') * 128
# First let's write zeros to the first 12 blocks using WRITE SAME (16)
s1.writesame16(0, 12, zeros)
# Check results using READ (16)
for s in (s1, s2):
for lba in range(0, 12):
r = s.read16(lba, 1)
assert r.datain == zeros, r.datain
# Update some blocks from each initiator using WRITE SAME
s1.writesame16(0, 6, deadbeef)
s2.writesame16(6, 6, abba)
# Check results using READ (16)
for s in (s1, s2):
for lba in range(0, 6):
r = s.read16(lba, 1)
assert r.datain == deadbeef, r.datain
for lba in range(6, 12):
r = s.read16(lba, 1)
assert r.datain == abba, r.datain
def _check_persistent_reservations(s1, s2):
    """
    Exercise persistent reservations across two sessions *s1*/*s2* to the
    same LU: basic register/reserve/release/unregister visibility, then
    conflict behavior per SPC-5 5.14 Table 66 and SBC-4 4.17 Table 13.
    """
    #
    # First just do a some basic tests (register key, reserve, release, unregister key)
    #
    _pr_check_registered_keys(s1, [])
    _pr_check_reservation(s1)
    _pr_check_registered_keys(s2, [])
    _pr_check_reservation(s2)
    with _pr_registration(s1, PR_KEY1):
        # Registration is visible from both sessions; no reservation yet.
        _pr_check_registered_keys(s1, [PR_KEY1])
        _pr_check_reservation(s1)
        _pr_check_registered_keys(s2, [PR_KEY1])
        _pr_check_reservation(s2)
        with _pr_reservation(s1, PR_TYPE.WRITE_EXCLUSIVE, reservation_key=PR_KEY1, other_connections=[s2]):
            # Both sessions see the WRITE EXCLUSIVE reservation held by PR_KEY1.
            _pr_check_registered_keys(s1, [PR_KEY1])
            _pr_check_reservation(s1, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE})
            _pr_check_registered_keys(s2, [PR_KEY1])
            _pr_check_reservation(s2, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE})
        # Reservation released; key remains.
        _pr_check_registered_keys(s1, [PR_KEY1])
        _pr_check_reservation(s1)
        _pr_check_registered_keys(s2, [PR_KEY1])
        _pr_check_reservation(s2)
        with _pr_registration(s2, PR_KEY2):
            # Both keys now registered, visible from both sessions.
            _pr_check_registered_keys(s1, [PR_KEY1, PR_KEY2])
            _pr_check_reservation(s1)
            _pr_check_registered_keys(s2, [PR_KEY1, PR_KEY2])
            _pr_check_reservation(s2)
            with _pr_reservation(s1, PR_TYPE.WRITE_EXCLUSIVE, reservation_key=PR_KEY1, other_connections=[s2]):
                _pr_check_registered_keys(s1, [PR_KEY1, PR_KEY2])
                _pr_check_reservation(s1, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE})
                _pr_check_registered_keys(s2, [PR_KEY1, PR_KEY2])
                _pr_check_reservation(s2, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE})
            _pr_check_registered_keys(s1, [PR_KEY1, PR_KEY2])
            _pr_check_reservation(s1)
            _pr_check_registered_keys(s2, [PR_KEY1, PR_KEY2])
            _pr_check_reservation(s2)
            with _pr_reservation(s2, PR_TYPE.WRITE_EXCLUSIVE_REGISTRANTS_ONLY, reservation_key=PR_KEY2, other_connections=[s1]):
                _pr_check_registered_keys(s1, [PR_KEY1, PR_KEY2])
                _pr_check_reservation(s1, {'reservation_key': PR_KEY2, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE_REGISTRANTS_ONLY})
                _pr_check_registered_keys(s2, [PR_KEY1, PR_KEY2])
                _pr_check_reservation(s2, {'reservation_key': PR_KEY2, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE_REGISTRANTS_ONLY})
            _pr_check_registered_keys(s1, [PR_KEY1, PR_KEY2])
            _pr_check_reservation(s1)
            _pr_check_registered_keys(s2, [PR_KEY1, PR_KEY2])
            _pr_check_reservation(s2)
        # PR_KEY2 unregistered.
        _pr_check_registered_keys(s1, [PR_KEY1])
        _pr_check_reservation(s1)
        _pr_check_registered_keys(s2, [PR_KEY1])
        _pr_check_reservation(s2)
    # All keys unregistered.
    _pr_check_registered_keys(s1, [])
    _pr_check_reservation(s1)
    _pr_check_registered_keys(s2, [])
    _pr_check_reservation(s2)
    #
    # Now let's fail some stuff
    # See:
    # - SPC-5 5.14 Table 66
    # - SBC-4 4.17 Table 13
    #
    zeros = bytearray(512)
    dancing_queen = bytearray.fromhex('00abba00') * 128
    deadbeef = bytearray.fromhex('deadbeef') * 128
    with _pr_registration(s1, PR_KEY1):
        with _pr_registration(s2, PR_KEY2):
            # With registrations only, both initiators can write
            s1.write16(0, 1, deadbeef)
            s2.write16(1, 1, dancing_queen)
            r = s1.read16(1, 1)
            assert r.datain == dancing_queen, r.datain
            r = s2.read16(0, 1)
            assert r.datain == deadbeef, r.datain
            with _pr_reservation(s1, PR_TYPE.WRITE_EXCLUSIVE, reservation_key=PR_KEY1, other_connections=[s2]):
                # Holder can write; the other registrant can read but not write.
                s1.writesame16(0, 2, zeros)
                r = s2.read16(0, 2)
                assert r.datain == zeros + zeros, r.datain
                with _pr_expect_reservation_conflict(s2):
                    s2.write16(1, 1, dancing_queen)
                # Data unchanged by the conflicted write.
                r = s2.read16(0, 2)
                assert r.datain == zeros + zeros, r.datain
                # A competing RESERVE from s2 must also conflict.
                with _pr_expect_reservation_conflict(s2):
                    with _pr_reservation(s2, PR_TYPE.WRITE_EXCLUSIVE, reservation_key=PR_KEY2):
                        pass
            with _pr_reservation(s1, PR_TYPE.EXCLUSIVE_ACCESS, reservation_key=PR_KEY1, other_connections=[s2]):
                # EXCLUSIVE ACCESS: even reads from the other registrant conflict.
                with _pr_expect_reservation_conflict(s2):
                    r = s2.read16(0, 2)
                    assert r.datain == zeros + zeros, r.datain
            with _pr_reservation(s1, PR_TYPE.EXCLUSIVE_ACCESS_REGISTRANTS_ONLY, reservation_key=PR_KEY1, other_connections=[s2]):
                # Registrants-only: s2 is registered, so its reads succeed.
                r = s2.read16(0, 2)
                assert r.datain == zeros + zeros, r.datain
        # s2 no longer is registered
        with _pr_reservation(s1, PR_TYPE.EXCLUSIVE_ACCESS_REGISTRANTS_ONLY, reservation_key=PR_KEY1):
            # Unregistered s2 now conflicts on read.
            with _pr_expect_reservation_conflict(s2):
                r = s2.read16(0, 2)
                assert r.datain == zeros + zeros, r.datain
        with _pr_reservation(s1, PR_TYPE.WRITE_EXCLUSIVE_REGISTRANTS_ONLY, reservation_key=PR_KEY1):
            # WRITE EXCLUSIVE registrants-only still permits reads from anyone.
            r = s2.read16(0, 2)
            assert r.datain == zeros + zeros, r.datain
@skip_persistent_reservations
@skip_multi_initiator
def test_18_persistent_reservation_two_initiators(request):
    """
    Run the full two-session persistent-reservation suite using two
    different initiator names logged in to the same target.
    """
    depends(request, ["iscsi_cmd_00"], scope="session")
    with initiator_portal() as config:
        with configured_target_to_zvol_extent(config, target_name, zvol):
            iqn = f'{basename}:{target_name}'
            with iscsi_scsi_connection(truenas_server.ip, iqn) as s1:
                TUR(s1)
                # Second session uses a distinct initiator name so the target
                # sees two separate initiators.
                initiator_name2 = f"iqn.2018-01.org.pyscsi:{socket.gethostname()}:second"
                with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=initiator_name2) as s2:
                    TUR(s2)
                    _check_persistent_reservations(s1, s2)
def _get_node(timeout=None):
    """Return which HA node we are talking to ('A' or 'B').

    NOTE(review): the *timeout* parameter is accepted but ignored here, yet
    _ha_reboot_master passes timeout=20 expecting it to bound the call —
    confirm whether it should be forwarded to call().
    """
    return call('failover.node')
def _get_ha_failover_status():
    """Return the failover status (e.g. 'MASTER'/'BACKUP') of the node we are connected to."""
    # Make sure we're talking to the master
    return call('failover.status')
def _get_ha_remote_failover_status():
    """Return the failover status of the OTHER (remote) HA node."""
    return call('failover.call_remote', 'failover.status')
def _get_ha_failover_in_progress():
    """Return True while a failover is in progress on the connected node."""
    # Make sure we're talking to the master
    return call('failover.in_progress')
def _check_master():
    """Assert that the node we are connected to is the MASTER."""
    status = _get_ha_failover_status()
    assert status == 'MASTER'
def _check_ha_node_configuration():
    """
    Sanity-check the HA pair: we are connected to the MASTER, and the
    expected per-node IP addresses (truenas_server.nodea_ip /
    truenas_server.nodeb_ip) are configured on nodes A and B respectively.
    """
    both_nodes = ['A', 'B']
    # Let's perform some sanity checking wrt controller and IP address
    # First get node and calculate othernode
    node = _get_node()
    assert node in both_nodes
    _check_master()
    # Now let's get IPs and ensure that
    # - Node A has truenas_server.nodea_ip
    # - Node B has truenas_server.nodeb_ip
    # We will need this later when we start checking TPG, etc
    ips = {}
    for anode in both_nodes:
        ips[anode] = set()
        # Query interfaces locally for our own node, remotely for the other.
        if anode == node:
            interfaces = call('interface.query')
        else:
            interfaces = call('failover.call_remote', 'interface.query')
        for i in interfaces:
            for alias in i['state']['aliases']:
                if alias.get('type') == 'INET':
                    ips[anode].add(alias['address'])
    # Ensure that truenas_server.nodea_ip and truenas_server.nodeb_ip are what we expect
    assert truenas_server.nodea_ip in ips['A']
    assert truenas_server.nodea_ip not in ips['B']
    assert truenas_server.nodeb_ip in ips['B']
    assert truenas_server.nodeb_ip not in ips['A']
def _verify_ha_report_target_port_groups(s, tpgs, active_tpg):
    """
    Verify that the REPORT TARGET PORT GROUPS command returns the expected
    results.

    :param s: open SCSI session to issue the command on
    :param tpgs: dict mapping TPG id -> expected relative target port ids
    :param active_tpg: the TPG id expected to be Active/Optimized
    """
    x = s.reporttargetportgroups()
    for tpg_desc in x.result['target_port_group_descriptors']:
        tpg_id = tpg_desc['target_port_group']
        # Compare the set of relative target port ids for this TPG.
        ids = set([x['relative_target_port_id'] for x in tpg_desc['target_ports']])
        assert ids == set(tpgs[tpg_id]), ids
        # See SPC-5 6.36 REPORT TARGET PORT GROUPS
        # Active/Optimized is 0
        # Active/Non-optimized is 1
        if tpg_id == active_tpg:
            assert tpg_desc['asymmetric_access_state'] == 0, tpg_desc
        else:
            assert tpg_desc['asymmetric_access_state'] == 1, tpg_desc
def _get_active_target_portal_group():
    """
    Return the target port group id of the currently ACTIVE (MASTER) node,
    or None if the node cannot be determined.
    """
    _check_master()
    return {
        'A': CONTROLLER_A_TARGET_PORT_GROUP_ID,
        'B': CONTROLLER_B_TARGET_PORT_GROUP_ID,
    }.get(_get_node())
def _wait_for_alua_settle(retries=20):
    """
    Poll iscsi.alua.settled every 5 seconds until it reports settled, or
    *retries* attempts are exhausted.

    NOTE(review): if the retries are exhausted this returns silently without
    raising — confirm callers are happy to proceed with ALUA unsettled.
    """
    print("Checking ALUA status...")
    while retries:
        if call('iscsi.alua.settled'):
            print("ALUA is settled")
            break
        retries -= 1
        print("Waiting for ALUA to settle")
        sleep(5)
def _ha_reboot_master(delay=900):
    """
    Reboot the MASTER node and wait for both the new MASTER
    and new BACKUP to become available.

    :param delay: overall time budget in seconds, consumed across all three
        wait phases (new master, new backup, failover completion).
    """
    get_node_timeout = 20
    orig_master_node = _get_node()
    new_master_node = other_node(orig_master_node)
    call('system.reboot', 'iSCSI test')
    # First we'll loop until the node is no longer the orig_node
    new_master = False
    while not new_master:
        try:
            # There are times when we don't get a response at all (albeit
            # in a bhyte HA-VM pair), so add a timeout to catch this situation.
            if _get_node(timeout=get_node_timeout) == new_master_node:
                new_master = True
                break
        except requests.exceptions.Timeout:
            delay = delay - get_node_timeout
        except Exception:
            delay = delay - 1
        # NOTE(review): delay only shrinks when an exception occurs; if
        # _get_node keeps succeeding but returns the old node this loop
        # relies on the reboot eventually making the call fail — confirm.
        if delay <= 0:
            break
        print("Waiting for MASTER")
        sleep(1)
    if not new_master:
        raise RuntimeError('Did not switch to new controller.')
    # OK, we're on the new master, now wait for the other controller
    # to become BACKUP.
    new_backup = False
    while not new_backup:
        try:
            if _get_ha_remote_failover_status() == 'BACKUP':
                new_backup = True
                break
        except Exception:
            pass
        delay = delay - 5
        if delay <= 0:
            break
        print("Waiting for BACKUP")
        sleep(5)
    if not new_backup:
        raise RuntimeError('Backup controller did not surface.')
    # Ensure that a failover is still not in progress
    in_progress = True
    while in_progress:
        try:
            in_progress = _get_ha_failover_in_progress()
            if not in_progress:
                break
        except Exception:
            pass
        delay = delay - 5
        if delay <= 0:
            break
        print("Waiting while in progress")
        sleep(5)
    if in_progress:
        raise RuntimeError('Failover never completed.')
    # Finally check the ALUA status
    _wait_for_alua_settle()
def _ensure_alua_state(state):
    """Assert that the iSCSI global ALUA setting equals *state* (True/False)."""
    config = call('iscsi.global.config')
    assert config['alua'] == state, config
@pytest.mark.dependency(name="iscsi_alua_config")
@pytest.mark.timeout(900)
def test_19_alua_config(request):
    """
    Test various aspects of ALUA configuration.
    When run against a HA system this test will perform TWO reboots to
    test failover wrt iSCSI ALUA targets.
    The second reboot was added to return the system to the original ACTIVE
    node. This means that subsequent tests will run on the same node that
    the previous tests started on, thereby simplifying log analysis.
    """
    # First ensure ALUA is off
    _ensure_alua_state(False)
    if ha:
        _check_ha_node_configuration()
    # Next create a target
    with initiator_portal() as config:
        with configured_target_to_file_extent(config,
                                              target_name,
                                              pool_name,
                                              dataset_name,
                                              file_name
                                              ) as iscsi_config:
            # Login to the target and ensure that things look reasonable.
            iqn = f'{basename}:{target_name}'
            api_serial_number = iscsi_config['extent']['serial']
            api_naa = iscsi_config['extent']['naa']
            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
                verify_ha_inquiry(s, api_serial_number, api_naa)
            if ha:
                # Only perform this section on a HA system
                with alua_enabled():
                    _ensure_alua_state(True)
                    _wait_for_alua_settle()
                    # We will login to the target on BOTH controllers and make sure
                    # we see the same target.  Observe that we supply tpgs=1 as
                    # part of the check
                    with iscsi_scsi_connection(truenas_server.nodea_ip, iqn) as s1:
                        verify_ha_inquiry(s1, api_serial_number, api_naa, 1)
                        with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn) as s2:
                            verify_ha_inquiry(s2, api_serial_number, api_naa, 1)
                            # Relative target port id 1 on controller A, 32001 on B.
                            verify_ha_device_identification(s1, api_naa, 1, CONTROLLER_A_TARGET_PORT_GROUP_ID)
                            verify_ha_device_identification(s2, api_naa, 32001, CONTROLLER_B_TARGET_PORT_GROUP_ID)
                            # Map target port group id -> expected relative target port ids.
                            tpgs = {
                                CONTROLLER_A_TARGET_PORT_GROUP_ID: [1],
                                CONTROLLER_B_TARGET_PORT_GROUP_ID: [32001]
                            }
                            active_tpg = _get_active_target_portal_group()
                            _verify_ha_report_target_port_groups(s1, tpgs, active_tpg)
                            _verify_ha_report_target_port_groups(s2, tpgs, active_tpg)
                # Ensure ALUA is off again
                _ensure_alua_state(False)
        # At this point we have no targets and ALUA is off
        if ha:
            # Now turn on ALUA again
            with alua_enabled():
                _ensure_alua_state(True)
                # Then create a target (with ALUA already enabled)
                with configured_target_to_file_extent(config,
                                                      target_name,
                                                      pool_name,
                                                      dataset_name,
                                                      file_name
                                                      ) as iscsi_config:
                    iqn = f'{basename}:{target_name}'
                    api_serial_number = iscsi_config['extent']['serial']
                    api_naa = iscsi_config['extent']['naa']
                    # Login to the target and ensure that things look reasonable.
                    with iscsi_scsi_connection(truenas_server.nodea_ip, iqn) as s1:
                        verify_ha_inquiry(s1, api_serial_number, api_naa, 1)
                        with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn) as s2:
                            verify_ha_inquiry(s2, api_serial_number, api_naa, 1)
                            verify_ha_device_identification(s1, api_naa, 1, CONTROLLER_A_TARGET_PORT_GROUP_ID)
                            verify_ha_device_identification(s2, api_naa, 32001, CONTROLLER_B_TARGET_PORT_GROUP_ID)
                            # Use the tpgs & active_tpg from above
                            _verify_ha_report_target_port_groups(s1, tpgs, active_tpg)
                            _verify_ha_report_target_port_groups(s2, tpgs, active_tpg)
                            _check_target_rw_paths(s1, s2)
                            # Let's failover
                            _ha_reboot_master()
                            expect_check_condition(s1, sense_ascq_dict[0x2900])  # "POWER ON, RESET, OR BUS DEVICE RESET OCCURRED"
                            expect_check_condition(s2, sense_ascq_dict[0x2900])  # "POWER ON, RESET, OR BUS DEVICE RESET OCCURRED"
                            _check_ha_node_configuration()
                            # The reboot must have moved the active path to the other TPG.
                            new_active_tpg = _get_active_target_portal_group()
                            assert new_active_tpg != active_tpg
                            verify_ha_device_identification(s1, api_naa, 1, CONTROLLER_A_TARGET_PORT_GROUP_ID)
                            verify_ha_device_identification(s2, api_naa, 32001, CONTROLLER_B_TARGET_PORT_GROUP_ID)
                            _verify_ha_report_target_port_groups(s1, tpgs, new_active_tpg)
                            _verify_ha_report_target_port_groups(s2, tpgs, new_active_tpg)
                            _check_target_rw_paths(s1, s2)
                            # Create a new target
                            with configured_target_to_zvol_extent(config, f'{target_name}b', zvol) as iscsi_config2:
                                iqn2 = f'{basename}:{target_name}b'
                                api_serial_number2 = iscsi_config2['extent']['serial']
                                api_naa2 = iscsi_config2['extent']['naa']
                                # Two targets now: rel ids 1/2 on controller A, 32001/32002 on B.
                                tpgs2 = {
                                    CONTROLLER_A_TARGET_PORT_GROUP_ID: [1, 2],
                                    CONTROLLER_B_TARGET_PORT_GROUP_ID: [32001, 32002]
                                }
                                # Wait until ALUA settles, so that we know the target is available on the STANDBY node.
                                _wait_for_alua_settle()
                                # Login to the target on each controller
                                with iscsi_scsi_connection(truenas_server.nodea_ip, iqn2) as s3:
                                    verify_ha_inquiry(s3, api_serial_number2, api_naa2, 1)
                                    initiator_name3 = f"iqn.2018-01.org.pyscsi:{socket.gethostname()}:third"
                                    with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn2, initiator_name=initiator_name3) as s4:
                                        verify_ha_inquiry(s4, api_serial_number2, api_naa2, 1)
                                        verify_ha_device_identification(s3, api_naa2, 2, CONTROLLER_A_TARGET_PORT_GROUP_ID)
                                        verify_ha_device_identification(s4, api_naa2, 32002, CONTROLLER_B_TARGET_PORT_GROUP_ID)
                                        _verify_ha_report_target_port_groups(s3, tpgs2, new_active_tpg)
                                        _verify_ha_report_target_port_groups(s4, tpgs2, new_active_tpg)
                                        _check_target_rw_paths(s3, s4)
                                        # Reboot again (to failback to the original ACTIVE node)
                                        _ha_reboot_master()
                                        for s in [s1, s2, s3, s4]:
                                            expect_check_condition(s, sense_ascq_dict[0x2900])  # "POWER ON, RESET, OR BUS DEVICE RESET OCCURRED"
                                        # After the 2nd reboot we will switch back to using the original active_tpg
                                        # Check the new target again
                                        verify_ha_inquiry(s3, api_serial_number2, api_naa2, 1)
                                        verify_ha_inquiry(s4, api_serial_number2, api_naa2, 1)
                                        verify_ha_device_identification(s3, api_naa2, 2, CONTROLLER_A_TARGET_PORT_GROUP_ID)
                                        verify_ha_device_identification(s4, api_naa2, 32002, CONTROLLER_B_TARGET_PORT_GROUP_ID)
                                        _verify_ha_report_target_port_groups(s3, tpgs2, active_tpg)
                                        _verify_ha_report_target_port_groups(s4, tpgs2, active_tpg)
                                        _check_target_rw_paths(s3, s4)
                                        # Check the original target
                                        verify_ha_inquiry(s1, api_serial_number, api_naa, 1)
                                        verify_ha_inquiry(s2, api_serial_number, api_naa, 1)
                                        verify_ha_device_identification(s1, api_naa, 1, CONTROLLER_A_TARGET_PORT_GROUP_ID)
                                        verify_ha_device_identification(s2, api_naa, 32001, CONTROLLER_B_TARGET_PORT_GROUP_ID)
                                        _verify_ha_report_target_port_groups(s1, tpgs2, active_tpg)
                                        _verify_ha_report_target_port_groups(s2, tpgs2, active_tpg)
                                        _check_target_rw_paths(s1, s2)
                            # Second target has been removed again
                            _wait_for_alua_settle()
                            _verify_ha_report_target_port_groups(s1, tpgs, active_tpg)
                            _verify_ha_report_target_port_groups(s2, tpgs, active_tpg)
            # Ensure ALUA is off again
            _ensure_alua_state(False)
@skip_persistent_reservations
@skip_multi_initiator
@skip_ha_tests
def test_20_alua_basic_persistent_reservation(request):
    """Check basic SCSI-3 persistent reservations on a HA ALUA target.

    A key registered through one controller must be visible through the
    other, and a reservation taken on one path must be reported on both.
    """
    # Don't need to specify "iscsi_cmd_00" here
    depends(request, ["iscsi_alua_config", "iscsi_basic_persistent_reservation"], scope="session")
    # Turn on ALUA
    with alua_enabled():
        with initiator_portal() as config:
            with configured_target_to_file_extent(config, target_name, pool_name, dataset_name, file_name):
                iqn = f'{basename}:{target_name}'
                # Login to the target on each controller
                with iscsi_scsi_connection(truenas_server.nodea_ip, iqn) as s1:
                    with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn) as s2:
                        # Now we can do some basic tests
                        # Initially: no keys registered, no reservation, on either path.
                        _pr_check_registered_keys(s1, [])
                        _pr_check_registered_keys(s2, [])
                        _pr_check_reservation(s1)
                        _pr_check_reservation(s2)
                        with _pr_registration(s1, PR_KEY1):
                            # Key registered via controller A is visible via B too.
                            _pr_check_registered_keys(s1, [PR_KEY1])
                            _pr_check_registered_keys(s2, [PR_KEY1])
                            _pr_check_reservation(s1)
                            _pr_check_reservation(s2)
                            with _pr_reservation(s1, PR_TYPE.WRITE_EXCLUSIVE, reservation_key=PR_KEY1, other_connections=[s2]):
                                # Reservation must be reported identically on both paths.
                                _pr_check_registered_keys(s1, [PR_KEY1])
                                _pr_check_registered_keys(s2, [PR_KEY1])
                                _pr_check_reservation(s1, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE})
                                _pr_check_reservation(s2, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE})
                            # Reservation released: key still registered, no reservation.
                            _pr_check_registered_keys(s1, [PR_KEY1])
                            _pr_check_registered_keys(s2, [PR_KEY1])
                            _pr_check_reservation(s1)
                            _pr_check_reservation(s2)
                        # Registration gone: clean slate again on both paths.
                        _pr_check_registered_keys(s1, [])
                        _pr_check_registered_keys(s2, [])
                        _pr_check_reservation(s1)
                        _pr_check_reservation(s2)
    # Ensure ALUA is off again
    _ensure_alua_state(False)
@skip_persistent_reservations
@skip_multi_initiator
@skip_ha_tests
def test_21_alua_persistent_reservation_two_initiators(request):
    """Check persistent reservations between two initiators on a HA ALUA target."""
    depends(request, ["iscsi_alua_config", "iscsi_basic_persistent_reservation"], scope="session")
    with alua_enabled(), initiator_portal() as cfg, configured_target_to_zvol_extent(cfg, target_name, zvol):
        iqn = f'{basename}:{target_name}'
        # Log in to the target on each controller, giving the second
        # session a distinct initiator name.
        with iscsi_scsi_connection(truenas_server.nodea_ip, iqn) as sess_a:
            TUR(sess_a)
            second_initiator = f"iqn.2018-01.org.pyscsi:{socket.gethostname()}:second"
            with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn, initiator_name=second_initiator) as sess_b:
                TUR(sess_b)
                # Exercise the reservation logic in both directions.
                _check_persistent_reservations(sess_a, sess_b)
                _check_persistent_reservations(sess_b, sess_a)
def _get_designator(s, designator_type):
x = s.inquiry(evpd=1, page_code=0x83)
for designator in x.result["designator_descriptors"]:
if designator["designator_type"] == designator_type:
del designator["piv"]
return designator
def _xcopy_test(s1, s2, adds1=None, adds2=None):
    """Exercise SCSI EXTENDED COPY (XCOPY) from *s1* (source) to *s2* (destination).

    :param s1: SCSI connection used as the copy source.
    :param s2: SCSI connection used as the copy destination.
    :param adds1: optional additional connection to the same LU as *s1*
        (e.g. via the other HA controller); its view is validated too.
    :param adds2: optional additional connection to the same LU as *s2*.
    """
    zeros = bytearray(512)
    # A recognisable 512-byte pattern: "deadbeef" repeated to fill one block.
    deadbeef = bytearray.fromhex("deadbeef") * 128

    def validate_blocks(s, start, end, beefy_list):
        # Every LBA in [start, end) must hold the deadbeef pattern when it
        # appears in beefy_list, and zeros otherwise.
        for lba in range(start, end):
            r = s.read16(lba, 1)
            if lba in beefy_list:
                assert r.datain == deadbeef, r.datain
            else:
                assert r.datain == zeros, r.datain

    # Type 3 (NAA) designators identify the source and destination LUs
    # in the XCOPY target descriptor list below.
    d1 = _get_designator(s1, 3)
    d2 = _get_designator(s2, 3)
    # First let's write zeros to the first 20 blocks using WRITE SAME (16)
    s1.writesame16(0, 20, zeros)
    s2.writesame16(0, 20, zeros)
    # Write some deadbeef
    s1.write16(1, 1, deadbeef)
    s1.write16(3, 1, deadbeef)
    s1.write16(4, 1, deadbeef)
    # Check that the blocks were written correctly
    validate_blocks(s1, 0, 20, [1, 3, 4])
    validate_blocks(s2, 0, 20, [])
    if adds1:
        validate_blocks(adds1, 0, 20, [1, 3, 4])
    if adds2:
        validate_blocks(adds2, 0, 20, [])
    # XCOPY: copy 4 blocks starting at source LBA 1 to destination LBA 10,
    # so the deadbeef at source LBAs 1, 3, 4 lands at 10, 12, 13.
    s1.extendedcopy4(
        priority=1,
        list_identifier=0x34,
        target_descriptor_list=[
            {
                "descriptor_type_code": "Identification descriptor target descriptor",
                "peripheral_device_type": 0x00,
                "target_descriptor_parameters": d1,
                "device_type_specific_parameters": {"disk_block_length": 512},
            },
            {
                "descriptor_type_code": "Identification descriptor target descriptor",
                "peripheral_device_type": 0x00,
                "target_descriptor_parameters": d2,
                "device_type_specific_parameters": {"disk_block_length": 512},
            },
        ],
        segment_descriptor_list=[
            {
                "descriptor_type_code": "Copy from block device to block device",
                "dc": 1,
                "source_target_descriptor_id": 0,
                "destination_target_descriptor_id": 1,
                "block_device_number_of_blocks": 4,
                "source_block_device_logical_block_address": 1,
                "destination_block_device_logical_block_address": 10,
            }
        ],
    )
    # Source unchanged; destination received the copied pattern.
    validate_blocks(s1, 0, 20, [1, 3, 4])
    validate_blocks(s2, 0, 20, [10, 12, 13])
    if adds1:
        validate_blocks(adds1, 0, 20, [1, 3, 4])
    if adds2:
        validate_blocks(adds2, 0, 20, [10, 12, 13])
@pytest.mark.parametrize('extent2', ["FILE", "VOLUME"])
@pytest.mark.parametrize('extent1', ["FILE", "VOLUME"])
def test_22_extended_copy(request, extent1, extent2):
    """Run the EXTENDED COPY test for every combination of extent types."""
    depends(request, ["iscsi_cmd_00"], scope="session")
    src_name = f"{target_name}x1"
    dst_name = f"{target_name}x2"
    src_iqn = f'{basename}:{src_name}'
    dst_iqn = f'{basename}:{dst_name}'
    with initiator_portal() as cfg:
        with configured_target(cfg, src_name, extent1), configured_target(cfg, dst_name, extent2):
            with iscsi_scsi_connection(truenas_server.ip, src_iqn) as src_sess:
                with iscsi_scsi_connection(truenas_server.ip, dst_iqn) as dst_sess:
                    src_sess.testunitready()
                    dst_sess.testunitready()
                    _xcopy_test(src_sess, dst_sess)
@skip_ha_tests
@pytest.mark.parametrize('extent2', ["FILE", "VOLUME"])
@pytest.mark.parametrize('extent1', ["FILE", "VOLUME"])
def test_23_ha_extended_copy(request, extent1, extent2):
    """Run the EXTENDED COPY test on a HA system via both controllers."""
    depends(request, ["iscsi_alua_config"], scope="session")
    name_a = f"{target_name}x1"
    name_b = f"{target_name}x2"
    iqn_a = f'{basename}:{name_a}'
    iqn_b = f'{basename}:{name_b}'
    with alua_enabled(), initiator_portal() as cfg:
        with configured_target(cfg, name_a, extent1), configured_target(cfg, name_b, extent2):
            with iscsi_scsi_connection(truenas_server.nodea_ip, iqn_a) as sa1, \
                    iscsi_scsi_connection(truenas_server.nodea_ip, iqn_b) as sa2, \
                    iscsi_scsi_connection(truenas_server.nodeb_ip, iqn_a) as sb1, \
                    iscsi_scsi_connection(truenas_server.nodeb_ip, iqn_b) as sb2:
                for sess in (sa1, sa2, sb1, sb2):
                    sess.testunitready()
                # Exercise XCOPY from the perspective of each controller,
                # validating the view through the other controller as well.
                _xcopy_test(sa1, sa2, sb1, sb2)
                _xcopy_test(sb1, sb2, sa1, sa2)
def test_24_iscsi_target_disk_login(request):
    """
    Tests whether a logged in iSCSI target shows up in disks.

    More precisely, it must NOT show up: the disk lists reported by
    `failover.get_disks_local` and `disk.get_unused` should be unchanged
    by creating an iSCSI target, or (non-HA) logging in to one locally.
    """
    depends(request, ["iscsi_cmd_00"], scope="session")
    iqn = f'{basename}:{target_name}'

    def fetch_disk_data(fetch_remote=False):
        # Snapshot the disk lists, either from this node or (fetch_remote)
        # from the other HA controller via failover.call_remote.
        data = {}
        if fetch_remote:
            data['failover.get_disks_local'] = set(call('failover.call_remote', 'failover.get_disks_local'))
            data['disk.get_unused'] = set([d['devname'] for d in call('failover.call_remote', 'disk.get_unused')])
        else:
            data['failover.get_disks_local'] = set(call('failover.get_disks_local'))
            data['disk.get_unused'] = set([d['devname'] for d in call('disk.get_unused')])
        return data

    def check_disk_data(old, new, whenstr, internode_check=False):
        # There are some items that we can't compare between 2 HA nodes
        SINGLE_NODE_COMPARE_ONLY = ['disk.get_unused']
        for key in old:
            if internode_check and key in SINGLE_NODE_COMPARE_ONLY:
                continue
            assert old[key] == new[key], f"{key} does not match {whenstr}: {old[key]} {new[key]}"

    if ha:
        # In HA we will create an ALUA target and check the STANDBY node
        data_before_l = fetch_disk_data()
        data_before_r = fetch_disk_data(True)
        check_disk_data(data_before_l, data_before_r, "initially", True)
        with alua_enabled():
            with initiator_portal() as config:
                with configured_target_to_zvol_extent(config, target_name, zvol):
                    # Give the standby node a moment to catch up.
                    sleep(5)
                    data_after_l = fetch_disk_data()
                    data_after_r = fetch_disk_data(True)
                    check_disk_data(data_before_l, data_after_l, "after iSCSI ALUA target creation (Active)")
                    check_disk_data(data_before_r, data_after_r, "after iSCSI ALUA target creation (Standby)")
    else:
        # In non-HA we will create a target and login to it from the same TrueNAS system
        # Just in case IP was supplied as a hostname use actual_ip
        actual_ip = get_ip_addr(truenas_server.ip)
        data_before = fetch_disk_data()
        with initiator_portal() as config:
            with configured_target_to_zvol_extent(config, target_name, zvol):
                data_after = fetch_disk_data()
                check_disk_data(data_before, data_after, "after iSCSI target creation")
                # Discover the target (loopback)
                results = SSH_TEST(f"iscsiadm -m discovery -t st -p {actual_ip}", user, password)
                assert results['result'] is True, f'out: {results["output"]}, err: {results["stderr"]}'
                # Make SURE we find the target at the ip we expect
                found_iqn = False
                for line in results['stdout'].split('\n'):
                    if not line.startswith(f'{actual_ip}:'):
                        continue
                    if line.split()[1] == iqn:
                        found_iqn = True
                assert found_iqn, f'Failed to find IQN {iqn}: out: {results["output"]}'
                # Login the target
                results = SSH_TEST(f"iscsiadm -m node -T {iqn} -p {actual_ip}:3260 --login", user, password)
                assert results['result'] is True, f'out: {results["output"]}, err: {results["stderr"]}'
                # Allow some time for the disk to surface
                sleep(5)
                # Then check that everything looks OK
                try:
                    data_after = fetch_disk_data()
                    check_disk_data(data_before, data_after, "after iSCSI target login")
                finally:
                    # Always log out again, even if the checks above failed.
                    results = SSH_TEST(f"iscsiadm -m node -T {iqn} -p {actual_ip}:3260 --logout", user, password)
                    assert results['result'] is True, f'out: {results["output"]}, err: {results["stderr"]}'
def test_25_resize_target_zvol(request):
    """
    Verify that an iSCSI client is notified when the size of a ZVOL underlying
    an iSCSI extent is modified.
    """
    depends(request, ["iscsi_cmd_00"], scope="session")
    with initiator_portal() as config:
        # NOTE: previously the context value was bound with `as config`,
        # shadowing the portal config above; the value was never used, so
        # drop the binding entirely.
        with configured_target_to_zvol_extent(config, target_name, zvol, volsize=MB_100):
            iqn = f'{basename}:{target_name}'
            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
                TUR(s)
                assert MB_100 == read_capacity16(s)
                # Have checked using tcpdump/wireshark that a SCSI Asynchronous Event Notification
                # gets sent 0x2A09: "CAPACITY DATA HAS CHANGED"
                zvol_resize(zvol, MB_256)
                assert MB_256 == read_capacity16(s)
                # But we can do better (in terms of test) ... turn AEN off,
                # which means we will get a CHECK CONDITION on the next resize
                SSH_TEST(f"echo 1 > /sys/kernel/scst_tgt/targets/iscsi/{iqn}/aen_disabled", user, password)
                zvol_resize(zvol, MB_512)
                expect_check_condition(s, sense_ascq_dict[0x2A09])  # "CAPACITY DATA HAS CHANGED"
                assert MB_512 == read_capacity16(s)
                # Try to shrink the ZVOL again. Expect an error
                with pytest.raises(ValidationErrors):
                    zvol_resize(zvol, MB_256)
                assert MB_512 == read_capacity16(s)
def test_26_resize_target_file(request):
    """
    Verify that an iSCSI client is notified when the size of a file-based
    iSCSI extent is modified.
    """
    depends(request, ["iscsi_cmd_00"], scope="session")
    with initiator_portal() as config:
        # Bind the target/extent details to a distinct name rather than
        # shadowing `config` (the portal/initiator configuration).
        with configured_target_to_file_extent(config,
                                              target_name,
                                              pool_name,
                                              dataset_name,
                                              file_name,
                                              filesize=MB_100) as iscsi_config:
            iqn = f'{basename}:{target_name}'
            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
                extent_id = iscsi_config['extent']['id']
                TUR(s)
                assert MB_100 == read_capacity16(s)
                file_extent_resize(extent_id, MB_256)
                assert MB_256 == read_capacity16(s)
                # Turn AEN off so that we will get a CHECK CONDITION on the next resize
                SSH_TEST(f"echo 1 > /sys/kernel/scst_tgt/targets/iscsi/{iqn}/aen_disabled", user, password)
                file_extent_resize(extent_id, MB_512)
                expect_check_condition(s, sense_ascq_dict[0x2A09])  # "CAPACITY DATA HAS CHANGED"
                assert MB_512 == read_capacity16(s)
                # Try to shrink the file again. Expect an error
                with pytest.raises(ValidationErrors):
                    file_extent_resize(extent_id, MB_256)
                assert MB_512 == read_capacity16(s)
@skip_multi_initiator
def test_27_initiator_group(request):
    """Validate that target access can be restricted to an initiator group."""
    depends(request, ["iscsi_cmd_00"], scope="session")
    base = f"iqn.2018-01.org.pyscsi:{socket.gethostname()}"
    iqn_one, iqn_two, iqn_three = (f"{base}:{tag}" for tag in ("one", "two", "three"))

    def assert_can_connect(*initiators):
        # Each named initiator should be able to log in and issue a TUR.
        for ini in initiators:
            with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=ini) as sess:
                TUR(sess)

    # First create a target without an initiator group specified
    with initiator_portal() as config1:
        with configured_target_to_zvol_extent(config1, target_name, zvol) as config:
            iqn = f'{basename}:{target_name}'
            # Ensure we can access from all initiators
            assert_can_connect(iqn_one, iqn_two, iqn_three)
            # Point the target at the empty (Allow All Initiators) group
            # created above; everybody can still connect.
            set_target_initiator_id(config['target']['id'], config['initiator']['id'])
            assert_can_connect(iqn_one, iqn_two, iqn_three)
            # Restrict the target to a group holding only the first two
            # initiators.
            with initiator("two initiators only", [iqn_one, iqn_two]) as twoinit_config:
                set_target_initiator_id(config['target']['id'], twoinit_config['id'])
                # First two initiators can connect to the target
                assert_can_connect(iqn_one, iqn_two)
                # Third initiator cannot connect to the target
                with pytest.raises(RuntimeError) as ve:
                    with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=iqn_three) as sess:
                        TUR(sess)
                assert 'Unable to connect to' in str(ve), ve
            # Clear the restriction again
            set_target_initiator_id(config['target']['id'], None)
            assert_can_connect(iqn_one, iqn_two, iqn_three)
def test_28_portal_access(request):
    """
    Verify that an iSCSI client can access a target on the specified
    portal.

    For a HA ALUA target, check the constituent interfaces.
    """
    iqn = f'{basename}:{target_name}'

    def assert_capacity_at(ip):
        # Connect to the target at the given address and check its size.
        with iscsi_scsi_connection(ip, iqn) as sess:
            TUR(sess)
            assert MB_100 == read_capacity16(sess)

    with initiator() as initiator_config:
        with portal(listen=[{'ip': get_ip_addr(truenas_server.ip)}]) as portal_config:
            combined_config = {'initiator': initiator_config, 'portal': portal_config}
            with configured_target_to_zvol_extent(combined_config, target_name, zvol, volsize=MB_100):
                assert_capacity_at(truenas_server.ip)
                # On a HA configuration, enable ALUA and exercise each
                # controller's address individually.
                if ha:
                    with alua_enabled():
                        _ensure_alua_state(True)
                        # The original address no longer accepts logins
                        # once ALUA is enabled.
                        with pytest.raises(RuntimeError) as ve:
                            with iscsi_scsi_connection(truenas_server.ip, iqn) as sess:
                                TUR(sess)
                        assert 'Unable to connect to' in str(ve), ve
                        assert_capacity_at(truenas_server.nodea_ip)
                        assert_capacity_at(truenas_server.nodeb_ip)
def test_29_multiple_extents():
    """
    Verify that an iSCSI client can access multiple target LUNs
    when multiple extents are configured.
    Also validate that an extent serial number cannot be reused, and
    that supplying an empty string serial number means one gets
    generated.
    """
    iqn = f'{basename}:{target_name}'
    with initiator_portal() as config:
        portal_id = config['portal']['id']
        with target(target_name, [{'portal': portal_id}]) as target_config:
            target_id = target_config['id']
            with dataset(dataset_name):
                # Two file extents mapped to LUN 0 (100 MB) and LUN 1 (256 MB).
                with file_extent(pool_name, dataset_name, "target.extent1", filesize=MB_100, extent_name="extent1") as extent1_config:
                    with file_extent(pool_name, dataset_name, "target.extent2", filesize=MB_256, extent_name="extent2") as extent2_config:
                        with target_extent_associate(target_id, extent1_config['id'], 0):
                            with target_extent_associate(target_id, extent2_config['id'], 1):
                                # Each LUN reports the capacity of its backing extent.
                                with iscsi_scsi_connection(truenas_server.ip, iqn, 0) as s:
                                    TUR(s)
                                    assert MB_100 == read_capacity16(s)
                                with iscsi_scsi_connection(truenas_server.ip, iqn, 1) as s:
                                    TUR(s)
                                    assert MB_256 == read_capacity16(s)
                                # Now try to create another extent using the same serial number
                                # We expect this to fail.
                                with pytest.raises(ValidationErrors) as ve:
                                    with file_extent(pool_name, dataset_name, "target.extent3", filesize=MB_512,
                                                     extent_name="extent3", serial=extent1_config['serial']):
                                        pass
                                assert ve.value.errors == [
                                    ValidationError('iscsi_extent_create.serial', 'Serial number must be unique', errno.EINVAL)
                                ]
                                with file_extent(pool_name, dataset_name, "target.extent3", filesize=MB_512,
                                                 extent_name="extent3", serial='') as extent3_config:
                                    # We expect this to complete, but generate a serial number
                                    assert len(extent3_config['serial']) == 15, extent3_config['serial']
def check_inq_enabled_state(iqn, expected):
    """Check the current enabled state of the specified SCST IQN directly from /sys
    is as expected.

    :param iqn: SCST target IQN whose ``enabled`` sysfs attribute is read.
    :param expected: integer state (0 or 1) we expect to find.
    """
    results = SSH_TEST(f"cat /sys/kernel/scst_tgt/targets/iscsi/{iqn}/enabled", user, password)
    assert results['result'] is True, f'out: {results["output"]}, err: {results["stderr"]}'
    actual = None
    for line in results["output"].split('\n'):
        # ssh can emit host-key warnings before the real output; skip them.
        if line.startswith('Warning: Permanently added'):
            continue
        if line:
            actual = int(line)
            assert actual == expected, f'IQN {iqn} has an unexpected enabled state - was {actual}, expected {expected}'
    # Previously an empty/unparseable output passed vacuously because the
    # loop never executed an assertion; require that a value was read.
    assert actual is not None, f'IQN {iqn}: could not read enabled state, out: {results["output"]}'
def test_30_target_without_active_extent(request):
    """Validate that a target will not be enabled if it does not have
    any enabled associated extents"""
    depends(request, ["iscsi_cmd_00"], scope="session")
    name1 = f"{target_name}x1"
    name2 = f"{target_name}x2"
    iqn1 = f'{basename}:{name1}'
    iqn2 = f'{basename}:{name2}'
    with initiator_portal() as config:
        with configured_target(config, name1, 'VOLUME') as target1_config:
            with configured_target(config, name2, 'VOLUME') as target2_config:
                # OK, we've configured two separate targets, ensure all looks good
                check_inq_enabled_state(iqn1, 1)
                check_inq_enabled_state(iqn2, 1)
                with iscsi_scsi_connection(truenas_server.ip, iqn1) as s1:
                    TUR(s1)
                with iscsi_scsi_connection(truenas_server.ip, iqn2) as s2:
                    TUR(s2)
                # Disable an extent and ensure things are as expected
                extent_disable(target2_config['extent']['id'])
                check_inq_enabled_state(iqn1, 1)
                check_inq_enabled_state(iqn2, 0)
                with iscsi_scsi_connection(truenas_server.ip, iqn1) as s1:
                    TUR(s1)
                # The now-disabled target must refuse logins.
                with pytest.raises(RuntimeError) as ve:
                    with iscsi_scsi_connection(truenas_server.ip, iqn2) as s2:
                        TUR(s2)
                assert 'Unable to connect to' in str(ve), ve
                # Reenable the extent
                extent_enable(target2_config['extent']['id'])
                check_inq_enabled_state(iqn1, 1)
                check_inq_enabled_state(iqn2, 1)
                with iscsi_scsi_connection(truenas_server.ip, iqn1) as s1:
                    TUR(s1)
                with iscsi_scsi_connection(truenas_server.ip, iqn2) as s2:
                    TUR(s2)
                # Move the extent from target2 to target1
                #
                # Doing this by updating the existing association rather
                # than deleting the old association and creating a new one,
                # because want to avoid breakage wrt yield ... finally cleanup
                payload = {
                    'target': target1_config['target']['id'],
                    'lunid': 1,
                    'extent': target2_config['extent']['id']
                }
                call('iscsi.targetextent.update', target2_config['associate']['id'], payload)
                # target1 now holds both LUNs; target2 has none and is disabled.
                check_inq_enabled_state(iqn1, 1)
                check_inq_enabled_state(iqn2, 0)
                with iscsi_scsi_connection(truenas_server.ip, iqn1) as s1:
                    TUR(s1)
                # We should now have a LUN 1
                with iscsi_scsi_connection(truenas_server.ip, iqn1, 1) as s1b:
                    TUR(s1b)
                with pytest.raises(RuntimeError) as ve:
                    with iscsi_scsi_connection(truenas_server.ip, iqn2) as s2:
                        TUR(s2)
                assert 'Unable to connect to' in str(ve), ve
def test_31_iscsi_sessions(request):
    """Validate that we can get a list of currently running iSCSI sessions."""
    depends(request, ["iscsi_cmd_00"], scope="session")
    name1 = f"{target_name}x1"
    name2 = f"{target_name}x2"
    name3 = f"{target_name}x3"
    iqn1 = f'{basename}:{name1}'
    iqn2 = f'{basename}:{name2}'
    iqn3 = f'{basename}:{name3}'
    initiator_base = f"iqn.2018-01.org.pyscsi:{socket.gethostname()}"
    initiator_iqn1 = f"{initiator_base}:one"
    initiator_iqn2 = f"{initiator_base}:two"
    initiator_iqn3 = f"{initiator_base}:three"
    with initiator_portal() as config:
        with configured_target(config, name1, 'VOLUME'):
            with configured_target(config, name2, 'FILE'):
                with configured_target(config, name3, 'VOLUME'):
                    # Nobody is logged in yet.
                    verify_client_count(0)
                    with iscsi_scsi_connection(truenas_server.ip, iqn1, initiator_name=initiator_iqn1):
                        verify_client_count(1)
                        with iscsi_scsi_connection(truenas_server.ip, iqn2, initiator_name=initiator_iqn2):
                            # Client count checks the number of different IPs attached, not sessions
                            verify_client_count(1)
                            # Validate that the two sessions are reported correctly
                            data = get_iscsi_sessions(check_length=2)
                            for sess in data:
                                if sess['target'] == iqn1:
                                    assert sess['initiator'] == initiator_iqn1, data
                                elif sess['target'] == iqn2:
                                    assert sess['initiator'] == initiator_iqn2, data
                                else:
                                    # Unknown target!
                                    assert False, data
                            # Filter by target
                            data = get_iscsi_sessions([['target', '=', iqn1]], 1)
                            assert data[0]['initiator'] == initiator_iqn1, data
                            data = get_iscsi_sessions([['target', '=', iqn2]], 1)
                            assert data[0]['initiator'] == initiator_iqn2, data
                            data = get_iscsi_sessions([['target', '=', iqn3]], 0)
                            # Filter by initiator
                            data = get_iscsi_sessions([['initiator', '=', initiator_iqn1]], 1)
                            assert data[0]['target'] == iqn1, data
                            data = get_iscsi_sessions([['initiator', '=', initiator_iqn2]], 1)
                            assert data[0]['target'] == iqn2, data
                            data = get_iscsi_sessions([['initiator', '=', initiator_iqn3]], 0)
                            # Now login to target2 with initiator1
                            with iscsi_scsi_connection(truenas_server.ip, iqn2, initiator_name=initiator_iqn1):
                                verify_client_count(1)
                                get_iscsi_sessions(check_length=3)
                                # Filter by target
                                data = get_iscsi_sessions([['target', '=', iqn1]], 1)
                                assert data[0]['initiator'] == initiator_iqn1, data
                                # target2 now has sessions from both initiators.
                                data = get_iscsi_sessions([['target', '=', iqn2]], 2)
                                assert set([sess['initiator'] for sess in data]) == {initiator_iqn1, initiator_iqn2}, data
                                data = get_iscsi_sessions([['target', '=', iqn3]], 0)
                                # Filter by initiator
                                data = get_iscsi_sessions([['initiator', '=', initiator_iqn1]], 2)
                                assert set([sess['target'] for sess in data]) == {iqn1, iqn2}, data
                                data = get_iscsi_sessions([['initiator', '=', initiator_iqn2]], 1)
                                assert data[0]['target'] == iqn2, data
                                data = get_iscsi_sessions([['initiator', '=', initiator_iqn3]], 0)
                            # Logout of target, ensure sessions get updated.
                            verify_client_count(1)
                            data = get_iscsi_sessions(check_length=2)
                            for sess in data:
                                if sess['target'] == iqn1:
                                    assert sess['initiator'] == initiator_iqn1, data
                                elif sess['target'] == iqn2:
                                    assert sess['initiator'] == initiator_iqn2, data
                                else:
                                    # Unknown target!
                                    assert False, data
                        # Client count checks the number of different IPs attached, not sessions
                        verify_client_count(1)
                        get_iscsi_sessions(check_length=1)
                    verify_client_count(0)
                    get_iscsi_sessions(check_length=0)
def test_32_multi_lun_targets(request):
    """Validate that we can create and access multi-LUN targets."""
    depends(request, ["iscsi_cmd_00"], scope="session")
    name1 = f"{target_name}x1"
    name2 = f"{target_name}x2"
    iqn1 = f'{basename}:{name1}'
    iqn2 = f'{basename}:{name2}'

    def test_target_sizes(ipaddr):
        # Each target exposes two LUNs; check every LUN has the expected capacity.
        with iscsi_scsi_connection(ipaddr, iqn1, 0) as s:
            verify_capacity(s, MB_100)
        with iscsi_scsi_connection(ipaddr, iqn1, 1) as s:
            verify_capacity(s, MB_200)
        with iscsi_scsi_connection(ipaddr, iqn2, 0) as s:
            verify_capacity(s, MB_256)
        with iscsi_scsi_connection(ipaddr, iqn2, 1) as s:
            verify_capacity(s, MB_512)

    with initiator_portal() as config:
        # Use a distinct name per target config; previously both bindings
        # reused `config1`, with the second silently shadowing the first.
        with configured_target(config, name1, 'FILE', extent_size=MB_100) as target1_config:
            with add_file_extent_target_lun(target1_config, 1, MB_200):
                with configured_target(config, name2, 'VOLUME', extent_size=MB_256) as target2_config:
                    with add_zvol_extent_target_lun(target2_config, 1, volsize=MB_512):
                        # Check that we can connect to each LUN and that it has the expected capacity
                        test_target_sizes(truenas_server.ip)
                        if ha:
                            # Only perform this section on a HA system
                            with alua_enabled():
                                test_target_sizes(truenas_server.nodea_ip)
                                test_target_sizes(truenas_server.nodeb_ip)
def test_33_no_lun_zero():
    """
    Verify that an iSCSI client can login to a target that is missing LUN 0 (and LUN 1)
    and that report LUNs works as expected.
    """
    iqn = f'{basename}:{target_name}'
    expected_luns = {100, 101}
    with initiator_portal() as config:
        with target(target_name, [{'portal': config['portal']['id']}]) as target_config:
            tgt_id = target_config['id']
            with dataset(dataset_name):
                with file_extent(pool_name, dataset_name, "target.extent1", filesize=MB_100, extent_name="extent1") as ext1, \
                        file_extent(pool_name, dataset_name, "target.extent2", filesize=MB_256, extent_name="extent2") as ext2:
                    with target_extent_associate(tgt_id, ext1['id'], 100), target_extent_associate(tgt_id, ext2['id'], 101):
                        # libiscsi sends a TUR to the lun on connect, so cannot properly test using it.
                        # Let's actually login and check that the expected LUNs surface.
                        assert target_login_test(get_ip_addr(truenas_server.ip), iqn, expected_luns)
                        # With libiscsi attached to an existing LUN we can still
                        # enumerate the LUN list.
                        with iscsi_scsi_connection(truenas_server.ip, iqn, 100) as sess:
                            verify_luns(sess, [100, 101])
def test_34_zvol_extent_volthreading():
    """
    Ensure that volthreading is on for regular zvols and off while they are
    being used as an iSCSI extent.
    """
    test_zvol = f'{pool_name}/zvol_volthreading_test{digit}'
    with zvol_dataset(test_zvol, MB_100, True, True):
        # A plain zvol has volthreading enabled...
        assert get_volthreading(test_zvol) == 'on'
        with zvol_extent(test_zvol, extent_name='zvolextent1'):
            # ...it is switched off while an extent is backed by it...
            assert get_volthreading(test_zvol) == 'off'
        # ...and restored once the extent goes away.
        assert get_volthreading(test_zvol) == 'on'
@pytest.mark.parametrize('extent_type', ["FILE", "VOLUME"])
def test_35_delete_extent_no_dataset(extent_type):
    """
    Verify that even if a dataset that contains an extent has been deleted from
    the command line, we can still use the webui/API to delete the extent.
    """
    ds_name = f'iscsids_{extent_type}_{digit}'
    with dataset(ds_name) as dspath:
        destroy_cmd = f'zfs destroy -r {dspath}'
        if extent_type == 'FILE':
            with file_extent(pool_name, ds_name, 'testfile', extent_name='fileextent1'):
                # Pull the dataset out from under the extent.
                ssh(destroy_cmd)
        elif extent_type == 'VOLUME':
            backing_zvol = f'{dspath}/zvol{digit}'
            with zvol_dataset(backing_zvol, MB_100, True, True):
                with zvol_extent(backing_zvol, extent_name='zvolextent1'):
                    # Pull the zvol (and parent dataset) out from under the extent.
                    ssh(destroy_cmd)
def test_99_teardown(request):
    """Disable and stop the iSCSI service, then confirm it is stopped."""
    depends(request, ["iscsi_cmd_00"])
    call('service.update', 'iscsitarget', {'enable': False})
    call('service.stop', 'iscsitarget')
    # Give the service a moment to wind down before checking its state.
    sleep(1)
    service = _get_service()
    assert service['state'] == "STOPPED", service
| 120,732 | Python | .py | 2,296 | 37.917247 | 149 | 0.55724 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,138 | test_localhost_ws_auth.py | truenas_middleware/tests/api2/test_localhost_ws_auth.py | from middlewared.test.integration.utils import ssh
def test__authentication_required_localhost():
    """An unauthenticated midclt call over the localhost websocket must be rejected."""
    result = ssh(
        'midclt -u ws://localhost/websocket call user.query',
        check=False,
        complete_response=True,
    )
    assert not result['result']
    assert 'Not authenticated' in result['stderr']
| 302 | Python | .py | 6 | 45.833333 | 62 | 0.749141 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,139 | test_pool_dataset_unlock_lock_immutable_flags.py | truenas_middleware/tests/api2/test_pool_dataset_unlock_lock_immutable_flags.py | import pytest
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call, ssh
import sys
import os
apifolder = os.getcwd()
sys.path.append(apifolder)
PASSPHRASE = '12345678'


def encryption_props():
    """Return pool.dataset options for creating a passphrase-encrypted dataset."""
    passphrase_opts = {'generate_key': False, 'passphrase': PASSPHRASE}
    return {
        'encryption_options': passphrase_opts,
        'encryption': True,
        'inherit_encryption': False,
    }
def test_lock_sets_immutable_flag():
    """Locking an encrypted dataset must set the immutable flag on its mountpoint."""
    with dataset('parent', encryption_props()) as parent_ds:
        with dataset('parent/child', encryption_props()) as child_ds:
            child_mp = os.path.join('/mnt', child_ds)
            # Child mountpoint starts mutable; locking flips it.
            assert call('filesystem.is_immutable', child_mp) is False, child_mp
            call('pool.dataset.lock', child_ds, job=True)
            assert call('filesystem.is_immutable', child_mp) is True, child_mp
        parent_mp = os.path.join('/mnt', parent_ds)
        # Same behaviour for the parent dataset.
        assert call('filesystem.is_immutable', parent_mp) is False, parent_mp
        call('pool.dataset.lock', parent_ds, job=True)
        assert call('filesystem.is_immutable', parent_mp) is True, parent_mp
def test_unlock_unsets_immutable_flag():
    """Unlocking an encrypted dataset must clear the immutable flag on its mountpoint.

    The recursive unlock deliberately supplies a wrong passphrase for the child so
    we can verify a failed unlock leaves the child's immutable flag in place.
    """
    with dataset('parent', encryption_props()) as parent_ds:
        parent_mountpoint = os.path.join('/mnt', parent_ds)
        with dataset('parent/child', encryption_props()) as child_ds:
            child_ds_mountpoint = os.path.join('/mnt', child_ds)
            # Locking the parent locks the whole tree and marks it immutable.
            call('pool.dataset.lock', parent_ds, job=True)
            assert call('filesystem.is_immutable', parent_mountpoint) is True, parent_mountpoint
            # 'random' is intentionally the wrong passphrase for the child.
            call('pool.dataset.unlock', parent_ds, {
                'datasets': [{'name': parent_ds, 'passphrase': PASSPHRASE}, {'name': child_ds, 'passphrase': 'random'}],
                'recursive': True,
            }, job=True)
            # Parent unlocked successfully -> mutable; child stayed locked -> immutable.
            assert call('filesystem.is_immutable', parent_mountpoint) is False, parent_mountpoint
            assert call('filesystem.is_immutable', child_ds_mountpoint) is True, child_ds_mountpoint
            call('pool.dataset.unlock', child_ds, {
                'datasets': [{'name': child_ds, 'passphrase': PASSPHRASE}],
            }, job=True)
            assert call('filesystem.is_immutable', child_ds_mountpoint) is False, child_ds_mountpoint
| 2,365 | Python | .py | 42 | 47.595238 | 120 | 0.665945 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,140 | test_pool_dataset_processes.py | truenas_middleware/tests/api2/test_pool_dataset_processes.py | import pytest
from middlewared.test.integration.utils import call
from middlewared.test.integration.assets.pool import another_pool
import os
import sys
sys.path.append(os.getcwd())
def test_empty_for_locked_root_dataset():
    """pool.dataset.processes must return an empty list for a locked root dataset."""
    pool_opts = {"encryption": True, "encryption_options": {"passphrase": "passphrase"}}
    with another_pool(pool_opts):
        call("pool.dataset.lock", "test", job=True)
        assert call("pool.dataset.processes", "test") == []
| 437 | Python | .py | 10 | 40.3 | 96 | 0.742317 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,141 | test_428_smb_rpc.py | truenas_middleware/tests/api2/test_428_smb_rpc.py | #!/usr/bin/env python3
import pytest
import sys
import os
apifolder = os.getcwd()
sys.path.append(apifolder)
from functions import GET, POST
from middlewared.service_exception import ValidationErrors
from middlewared.test.integration.assets.account import user
from middlewared.test.integration.assets.smb import smb_share
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call
from protocols import MS_RPC
# Credentials for the throw-away SMB account created per-test by setup_smb_user.
SMB_USER = "smbrpcuser"
SMB_PWD = "smb1234#!@"
# Characters that are rejected in SMB share names; used to probe validation.
INVALID_SHARE_NAME_CHARACTERS = {'%', '<', '>', '*', '?', '|', '/', '\\', '+', '=', ';', ':', '"', ',', '[', ']'}
@pytest.fixture(scope="module")
def setup_smb_share(request):
    """Module-scoped fixture: SMB share (ABE enabled) on a fresh dataset."""
    with dataset('rpc_test', data={'share_type': 'SMB'}) as ds:
        share_opts = {"abe": True, "purpose": "NO_PRESET"}
        with smb_share(os.path.join('/mnt', ds), "RPC_TEST", share_opts) as s:
            yield {'dataset': ds, 'share': s}
@pytest.fixture(autouse=True, scope="function")
def setup_smb_user(request):
    """Create a throw-away SMB user for every test in this module."""
    account = {
        "username": SMB_USER,
        "full_name": SMB_USER,
        "group_create": True,
        "home": "/var/empty",
        "password": SMB_PWD,
    }
    with user(account) as u:
        yield u
def test_001_net_share_enum(setup_smb_user, setup_smb_share):
    """Share enumeration over MS-RPC must list IPC$ plus our configured share."""
    expected_path = setup_smb_share['share']['path']
    expected_name = setup_smb_share['share']['name']
    with MS_RPC(username=SMB_USER, password=SMB_PWD) as hdl:
        shares = hdl.shares()
        # The hidden IPC$ share is always exported alongside the real one.
        assert len(shares) == 2, str(shares)
        assert shares[0]['netname'] == 'IPC$'
        assert shares[0]['path'] == 'C:\\tmp'
        assert shares[1]['netname'] == expected_name
        # Server reports a Windows-style path; normalize before comparing.
        assert shares[1]['path'].replace('\\', '/')[2:] == expected_path
def test_002_enum_users(setup_smb_user, setup_smb_share):
    """RPC user enumeration must include our SMB user with the correct RID."""
    results = GET('/user', payload={
        'query-filters': [['username', '=', SMB_USER]],
        'query-options': {
            'get': True,
            'extra': {'additional_information': ['SMB']}
        }
    })
    assert results.status_code == 200, results.text
    user_info = results.json()
    with MS_RPC(username=SMB_USER, password=SMB_PWD) as hdl:
        users = hdl.users()
        entry = next((u for u in users if u['user'] == SMB_USER), None)
        assert entry is not None, str(users)
        # The RID is the last hyphen-separated component of the user's SID.
        rid = int(user_info['sid'].rsplit('-', 1)[1])
        assert rid == entry['rid'], str(entry)
def test_003_access_based_share_enum(setup_smb_user, setup_smb_share):
    """With ABE, a user not granted access by the share ACL must not see the share."""
    # Restrict the share ACL to BUILTIN\Administrators (S-1-5-32-544) only.
    setacl_payload = {
        'share_name': "RPC_TEST",
        'share_acl': [{
            'ae_who_sid': 'S-1-5-32-544',
            'ae_perm': 'FULL',
            'ae_type': 'ALLOWED'
        }]
    }
    results = POST("/sharing/smb/setacl", setacl_payload)
    assert results.status_code == 200, results.text
    results = GET("/sharing/smb")
    assert results.status_code == 200, results.text
    with MS_RPC(username=SMB_USER, password=SMB_PWD) as hdl:
        shares = hdl.shares()
        # Only IPC$ remains visible to the unprivileged user.
        assert len(shares) == 1, str({"enum": shares, "shares": results.json()})
def test_share_name_restricutions(setup_smb_share):
    """Share names with reserved characters or unicode controls must be rejected.

    NOTE(review): function name typo ("restricutions") kept to preserve the
    collected test id.
    """
    first_share = setup_smb_share['share']
    new_share_path = os.path.join('/mnt', setup_smb_share['dataset'])
    for bad_char in INVALID_SHARE_NAME_CHARACTERS:
        bad_name = f'CANARY{bad_char}'
        # Renaming an existing share to an invalid name must fail.
        with pytest.raises(ValidationErrors) as ve:
            call('sharing.smb.update', first_share['id'], {'name': bad_name})
        assert 'Share name contains the following invalid characters' in ve.value.errors[0].errmsg
        # Creating a new share with an invalid name must fail too.
        with pytest.raises(ValidationErrors) as ve:
            call('sharing.smb.create', {'path': new_share_path, 'name': bad_name})
        assert 'Share name contains the following invalid characters' in ve.value.errors[0].errmsg
    # \x85 (NEL) is a unicode control character and gets a distinct error.
    with pytest.raises(ValidationErrors) as ve:
        call('sharing.smb.update', first_share['id'], {'name': 'CANARY\x85'})
    assert 'Share name contains unicode control characters' in ve.value.errors[0].errmsg
    with pytest.raises(ValidationErrors) as ve:
        call('sharing.smb.create', {'path': new_share_path, 'name': 'CANARY\x85'})
    assert 'Share name contains unicode control characters' in ve.value.errors[0].errmsg
| 4,371 | Python | .py | 99 | 37.090909 | 113 | 0.621819 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,142 | test_pool_resilver.py | truenas_middleware/tests/api2/test_pool_resilver.py | from middlewared.test.integration.utils import call
def test_pool_resilver_update():
    """pool.resilver.update must return a config containing every submitted value."""
    settings = {
        "enabled": False,
        "begin": "18:00",
        "end": "09:00",
        "weekday": [1, 2, 3, 4, 5, 6, 7],
    }
    updated = call("pool.resilver.update", settings)
    # The returned config is a strict superset of what we sent.
    assert updated.items() > settings.items()
| 306 | Python | .py | 9 | 27.777778 | 76 | 0.591837 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,143 | test_smart_test_run.py | truenas_middleware/tests/api2/test_smart_test_run.py | import contextlib
import re
import time
import pytest
from middlewared.service_exception import ValidationErrors
from middlewared.test.integration.utils import call, client, mock
@pytest.fixture(scope="function")
def short_test():
    """Start a mocked SHORT S.M.A.R.T. test and yield the manual_test result.

    smartctl and the results poller are mocked so the "test" progresses
    0 -> 30 -> 60 -> 90 and then completes, without touching real disks.
    """
    disk = call("disk.query")[0]
    with mock("smart.test.disk_choices", return_value={disk["identifier"]: disk}):
        with mock("disk.smartctl", return_value="Self Test has begun"):
            # The string below is middleware-side mock source: each poll of
            # smart.test.results advances progress by 30 until it exceeds 100.
            with mock("smart.test.results", """\
i = 0
async def mock(self, *args):
    global i
    if i > 100:
        return {"current_test": None}
    else:
        result = {"current_test": {"progress": i}}
        i += 30
        return result
"""):
                result = call("smart.test.manual_test", [{"identifier": disk["identifier"], "type": "SHORT"}])
                yield result[0]
def test_smart_test_job_progress(short_test):
    """Poll the manual-test job and verify it reports every mocked progress step."""
    progresses = set()
    for i in range(30):
        job = call("core.get_jobs", [["id", "=", short_test["job"]]], {"get": True})
        if job["state"] == "RUNNING":
            progresses.add(job["progress"]["percent"])
            time.sleep(5)
        elif job["state"] == "SUCCESS":
            break
        else:
            # Any other state (e.g. FAILED) is an immediate test failure.
            assert False, job
    else:
        # Loop exhausted without the job reaching SUCCESS.
        assert False
    # Progress values come from the mocked smart.test.results sequence.
    assert progresses == {0, 30, 60, 90}
def test_smart_test_event_source(short_test):
    """Subscribe to the per-disk progress event source and verify its updates."""
    progresses = set()
    def callback(event_type, **kwargs):
        # Collect every reported progress value; None signals completion.
        progresses.add(kwargs['fields']['progress'])
    with client() as c:
        c.subscribe(f"smart.test.progress:{short_test['disk']}", callback, sync=True)
        for i in range(30):
            if None in progresses:
                # 0 may or may not be observed depending on subscription timing,
                # so it is excluded from the comparison.
                assert progresses - {0} == {30, 60, 90, None}
                break
            else:
                time.sleep(5)
        else:
            # Never saw the completion marker within the polling budget.
            assert False
25,144 | test_sharing_service_encrypted_dataset_info.py | truenas_middleware/tests/api2/test_sharing_service_encrypted_dataset_info.py | import contextlib
import pytest
from middlewared.test.integration.utils import call
from middlewared.test.integration.assets.pool import dataset
# Passphrase shared by every encrypted test dataset in this module.
PASSPHRASE = 'testing123'
# Common pool.dataset.create options for a passphrase-encrypted dataset.
ENCRYPTION_PARAMETERS = {
    'encryption': True,
    'encryption_options': {
        'algorithm': 'AES-256-GCM',
        'pbkdf2iters': 350000,
        'passphrase': PASSPHRASE,
    },
    'inherit_encryption': False,
}
@contextlib.contextmanager
def lock_dataset(dataset_name):
    """Lock *dataset_name* for the duration of the block, always unlocking on exit."""
    try:
        yield call('pool.dataset.lock', dataset_name, {'force_umount': True}, job=True)
    finally:
        unlock_args = {'datasets': [{'passphrase': PASSPHRASE, 'name': dataset_name}]}
        call('pool.dataset.unlock', dataset_name, unlock_args, job=True)
@pytest.mark.parametrize('namespace,dataset_creation_params,share_creation_params,path_field', [
    ('sharing.smb', {}, {'name': 'test_smb_share'}, 'path'),
    ('sharing.nfs', {}, {}, 'path'),
    ('iscsi.extent', {'type': 'VOLUME', 'volsize': 268451840, 'volblocksize': '16K'}, {'name': 'test-extend'}, 'disk'),
])
def test_service_encrypted_dataset_default_info(namespace, dataset_creation_params, share_creation_params, path_field):
    """A share's `locked` field must track the lock state of its backing dataset."""
    ds_config = {**ENCRYPTION_PARAMETERS, **dataset_creation_params}
    with dataset('test_sharing_locked_ds_info', data=ds_config) as ds:
        # iSCSI volume extents reference the zvol device; other shares use the mountpoint.
        if dataset_creation_params.get('type') == 'VOLUME':
            share_creation_params[path_field] = f'zvol/{ds}'
        else:
            share_creation_params[path_field] = f'/mnt/{ds}'
        share = call(f'{namespace}.create', share_creation_params)
        assert share['locked'] is False
        with lock_dataset(ds):
            assert call(f'{namespace}.get_instance', share['id'])['locked'] is True
        assert call(f'{namespace}.get_instance', share['id'])['locked'] is False
@pytest.mark.parametrize('namespace,dataset_creation_params,share_creation_params,path_field,selected_fields', [
    ('sharing.smb', {}, {'name': 'test_smb_share'}, 'path', [['path', 'name'], ['path', 'name', 'locked']]),
    ('sharing.nfs', {}, {}, 'path', [['path', 'hosts'], ['path', 'hosts', 'locked']]),
    (
        'iscsi.extent', {'type': 'VOLUME', 'volsize': 268451840, 'volblocksize': '16K'},
        {'name': 'test-extend'}, 'disk',
        [['name', 'type'], ['name', 'type', 'locked']]
    ),
])
def test_service_encrypted_dataset_selected_info(
    namespace, dataset_creation_params, share_creation_params, path_field, selected_fields,
):
    """Queries selecting a subset of fields must return exactly those fields, locked or not."""
    ds_config = {**ENCRYPTION_PARAMETERS, **dataset_creation_params}
    with dataset('test_sharing_locked_ds_info', data=ds_config) as ds:
        if dataset_creation_params.get('type') == 'VOLUME':
            share_creation_params[path_field] = f'zvol/{ds}'
        else:
            share_creation_params[path_field] = f'/mnt/{ds}'
        assert call(f'{namespace}.create', share_creation_params)['locked'] is False
        with lock_dataset(ds):
            # Whether or not 'locked' is selected, only the requested keys come back.
            for fields in selected_fields:
                for share in call(f'{namespace}.query', [], {'select': fields}):
                    assert set(share) == set(fields)
@pytest.mark.parametrize('namespace,dataset_creation_params,share_creation_params,path_field', [
    ('sharing.smb', {}, {'name': 'test_smb_share'}, 'path'),
    ('sharing.nfs', {}, {}, 'path'),
    ('iscsi.extent', {'type': 'VOLUME', 'volsize': 268451840, 'volblocksize': '16K'}, {'name': 'test-extend'}, 'disk'),
])
def test_service_encrypted_dataset_retrieve_info_with_cache(
    namespace, dataset_creation_params, share_creation_params, path_field
):
    """The cached and uncached locked-dataset lookups must agree for a locked share.

    Also verifies that `retrieve_locked_info: False` omits the `locked` field entirely.
    """
    with dataset('test_sharing_locked_ds_info', data={
        **ENCRYPTION_PARAMETERS,
        **dataset_creation_params,
    }) as ds:
        # zvol-backed extents use the device path; everything else the mountpoint.
        path = f'zvol/{ds}' if dataset_creation_params.get('type') == 'VOLUME' else f'/mnt/{ds}'
        share = call(f'{namespace}.create', {**share_creation_params, path_field: path})
        assert share['locked'] is False
        with lock_dataset(ds):
            # With retrieval disabled the 'locked' key is absent from the result.
            assert call(
                f'{namespace}.get_instance', share['id'], {'extra': {'retrieve_locked_info': False}}
            ).get('locked') is None
            cached_locked_value = call(
                f'{namespace}.get_instance', share['id'], {'extra': {'use_cached_locked_datasets': True}}
            )
            locked_value = call(
                f'{namespace}.get_instance', share['id'], {'extra': {'use_cached_locked_datasets': False}}
            )
            # Cache must not diverge from a fresh lookup.
            assert cached_locked_value == locked_value
25,145 | test_certificate_roles.py | truenas_middleware/tests/api2/test_certificate_roles.py | import pytest
from middlewared.test.integration.assets.roles import common_checks
@pytest.mark.parametrize('method, role, valid_role', (
    ('certificate.profiles', 'CERTIFICATE_READ', True),
    ('certificateauthority.profiles', 'CERTIFICATE_AUTHORITY_READ', True),
    ('certificate.profiles', 'CERTIFICATE_AUTHORITY_READ', False),
    ('certificateauthority.profiles', 'CERTIFICATE_READ', False),
))
def test_profiles_read_roles(unprivileged_user_fixture, method, role, valid_role):
    """Profile endpoints must be gated by their matching *_READ role."""
    common_checks(
        unprivileged_user_fixture, method, role, valid_role,
        valid_role_exception=False,
    )
@pytest.mark.parametrize('role, valid_role', (
    ('CERTIFICATE_AUTHORITY_WRITE', True),
    ('CERTIFICATE_AUTHORITY_READ', False),
))
def test_certificate_authority_create_role(unprivileged_user_fixture, role, valid_role):
    """Creating a CA requires the CERTIFICATE_AUTHORITY_WRITE role."""
    common_checks(
        unprivileged_user_fixture, 'certificateauthority.create', role, valid_role,
        method_args=[{}],
    )
@pytest.mark.parametrize('role, valid_role', (
    ('CERTIFICATE_WRITE', True),
    ('CERTIFICATE_READ', False),
))
def test_certificate_create_role(unprivileged_user_fixture, role, valid_role):
    """Creating a certificate (a job) requires the CERTIFICATE_WRITE role."""
    common_checks(
        unprivileged_user_fixture, 'certificate.create', role, valid_role,
        method_args=[], method_kwargs={'job': True},
    )
@pytest.mark.parametrize('role, valid_role', (
    ('CERTIFICATE_AUTHORITY_WRITE', True),
    ('CERTIFICATE_AUTHORITY_READ', False),
))
def test_signing_csr_role(unprivileged_user_fixture, role, valid_role):
    """Signing a CSR with a CA requires the CERTIFICATE_AUTHORITY_WRITE role."""
    sign_args = {
        'ca_id': 1,
        'csr_cert_id': 1,
        'name': 'test_csr_signing_role',
    }
    common_checks(
        unprivileged_user_fixture, 'certificateauthority.ca_sign_csr', role, valid_role,
        method_args=[sign_args],
    )
| 1,674 | Python | .py | 32 | 48.40625 | 129 | 0.733619 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,146 | test_430_smb_sharesec.py | truenas_middleware/tests/api2/test_430_smb_sharesec.py | import pytest
import sys
import os
apifolder = os.getcwd()
sys.path.append(apifolder)
from middlewared.test.integration.assets.account import user as create_user
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.assets.smb import smb_share
from middlewared.test.integration.utils import call, client, ssh
from functions import SSH_TEST
from auto_config import user, password
# Well-known BUILTIN principals (domain/name/sidtype descriptors) used when
# working with SMB share ACL entries in the tests below.
Guests = {
    "domain": "BUILTIN",
    "name": "Guests",
    "sidtype": "ALIAS"
}
Admins = {
    "domain": "BUILTIN",
    "name": "Administrators",
    "sidtype": "ALIAS"
}
Users = {
    "domain": "BUILTIN",
    "name": "Users",
    "sidtype": "ALIAS"
}
@pytest.fixture(scope="module")
def setup_smb_share():
    """Module-scoped SMB share named my_sharesec backed by a dedicated dataset."""
    with dataset("smb-sharesec", {'share_type': 'SMB'}) as ds:
        with smb_share(f'/mnt/{ds}', "my_sharesec") as share:
            yield share
@pytest.fixture(scope="module")
def sharesec_user():
    """Provide a local SMB-enabled user for the share-ACL tests."""
    account = {
        'username': 'sharesec_user',
        'full_name': 'sharesec_user',
        'smb': True,
        'group_create': True,
        'password': 'test1234',
    }
    with create_user(account) as u:
        yield u
def test_initialize_share(setup_smb_share):
    """A new share starts with a single Everyone/FULL/ALLOWED ACL entry."""
    acl = call('sharing.smb.getacl', {'share_name': setup_smb_share['name']})
    assert acl['share_name'].casefold() == setup_smb_share['name'].casefold()
    entries = acl['share_acl']
    assert len(entries) == 1
    # S-1-1-0 is the well-known Everyone SID.
    assert entries[0]['ae_who_sid'] == 'S-1-1-0'
    assert entries[0]['ae_perm'] == 'FULL'
    assert entries[0]['ae_type'] == 'ALLOWED'
def test_set_smb_acl_by_sid(setup_smb_share):
    """Set a share ACL entry by SID and verify it round-trips through the datastore.

    S-1-5-32-545 is BUILTIN\\Users; the resolved entry should therefore come
    back with a GROUP id_type, and the base64-encoded ACL persisted in the
    config database must be stable across smb.sharesec.synchronize_acls.
    """
    payload = {
        'share_name': setup_smb_share['name'],
        'share_acl': [
            {
                'ae_who_sid': 'S-1-5-32-545',
                'ae_perm': 'FULL',
                'ae_type': 'ALLOWED'
            }
        ]
    }
    acl_set = call('sharing.smb.setacl', payload)
    # The applied ACL must echo back exactly what was requested.
    assert payload['share_name'].casefold() == acl_set['share_name'].casefold()
    assert payload['share_acl'][0]['ae_who_sid'] == acl_set['share_acl'][0]['ae_who_sid']
    assert payload['share_acl'][0]['ae_perm'] == acl_set['share_acl'][0]['ae_perm']
    assert payload['share_acl'][0]['ae_type'] == acl_set['share_acl'][0]['ae_type']
    assert acl_set['share_acl'][0]['ae_who_id']['id_type'] == 'GROUP'
    # The ACL is persisted base64-encoded in the config database.
    b64acl = call(
        'datastore.query', 'sharing.cifs.share',
        [['cifs_name', '=', setup_smb_share['name']]],
        {'get': True}
    )['cifs_share_acl']
    assert b64acl != ""
    call('smb.sharesec.synchronize_acls')
    # Synchronizing must not alter the stored ACL.
    newb64acl = call(
        'datastore.query', 'sharing.cifs.share',
        [['cifs_name', '=', setup_smb_share['name']]],
        {'get': True}
    )['cifs_share_acl']
    assert newb64acl == b64acl
def test_set_smb_acl_by_unix_id(setup_smb_share, sharesec_user):
    """Setting a share ACL entry by unix uid must resolve to the matching SMB user."""
    acl_request = {
        'share_name': setup_smb_share['name'],
        'share_acl': [
            {
                'ae_who_id': {'id_type': 'USER', 'id': sharesec_user['uid']},
                'ae_perm': 'CHANGE',
                'ae_type': 'ALLOWED'
            }
        ]
    }
    acl_set = call('sharing.smb.setacl', acl_request)
    assert acl_request['share_name'].casefold() == acl_set['share_name'].casefold()
    requested = acl_request['share_acl'][0]
    applied = acl_set['share_acl'][0]
    assert requested['ae_perm'] == applied['ae_perm']
    assert requested['ae_type'] == applied['ae_type']
    # The uid must resolve back to the same user and its username.
    assert applied['ae_who_id']['id_type'] == 'USER'
    assert applied['ae_who_id']['id'] == sharesec_user['uid']
    assert applied['ae_who_str'] == sharesec_user['username']
def test_delete_share_info_tdb(setup_smb_share):
    """After share_info.tdb is removed, getacl must fall back to a default ACL."""
    results = SSH_TEST('rm /var/db/system/samba4/share_info.tdb', user, password)
    assert results['result'] is True, results['output']
    # Confirm the tdb really is gone.
    results = SSH_TEST('test -f /var/db/system/samba4/share_info.tdb', user, password)
    assert results['result'] is False, results['output']
    acl = call('sharing.smb.getacl', {'share_name': setup_smb_share['name']})
    assert acl['share_name'].casefold() == setup_smb_share['name'].casefold()
    # With no stored ACL the share defaults to Everyone (S-1-1-0).
    assert acl['share_acl'][0]['ae_who_sid'] == 'S-1-1-0'
def test_restore_sharesec_with_flush_share_info(setup_smb_share, sharesec_user):
    """flush_share_info must restore the ACL persisted in the config database."""
    with client() as c:
        c.call('smb.sharesec.flush_share_info')
    acl = call('sharing.smb.getacl', {'share_name': setup_smb_share['name']})
    assert acl['share_name'].casefold() == setup_smb_share['name'].casefold()
    assert acl['share_acl'][0]['ae_who_str'] == sharesec_user['username']
def test_verify_share_info_tdb_is_created(setup_smb_share, sharesec_user):
    """Verify share_info.tdb exists again and the ACL survives a share rename.

    NOTE: this test intentionally mutates the module fixture (renaming the
    share to 'my_sharesec2') so later tests see the new name.
    """
    cmd = 'test -f /var/db/system/samba4/share_info.tdb'
    results = SSH_TEST(cmd, user, password)
    assert results['result'] is True, results['output']
    # Get the initial ACL information
    acl = call('sharing.smb.getacl', {'share_name': setup_smb_share['name']})
    assert acl['share_name'].casefold() == setup_smb_share['name'].casefold()
    assert acl['share_acl'][0]['ae_who_str'] == sharesec_user['username']
    share = call('sharing.smb.query', [['id', '=', setup_smb_share['id']]], {'get': True})
    assert share['name'] == setup_smb_share['name']
    # Rename the share; the stored ACL must follow it.
    share = call('sharing.smb.update', setup_smb_share['id'], {'name': 'my_sharesec2'})
    assert share['name'] == 'my_sharesec2'
    acl = call('sharing.smb.getacl', {'share_name': 'my_sharesec2'})
    # Update the fixture dict so subsequent tests use the new share name.
    setup_smb_share['name'] = 'my_sharesec2'
    assert acl['share_name'].casefold() == setup_smb_share['name'].casefold()
    assert acl['share_acl'][0]['ae_who_str'] == sharesec_user['username'], ssh('tdbdump /var/db/system/samba4/share_info.tdb')
def test_toggle_share_and_verify_acl_preserved(setup_smb_share, sharesec_user):
    """Disabling/re-enabling a share must not lose its configured ACL."""
    call('sharing.smb.update', setup_smb_share['id'], {"enabled": False})
    call('sharing.smb.update', setup_smb_share['id'], {"enabled": True})
    acl = call('sharing.smb.getacl', {'share_name': 'my_sharesec2'})
    assert acl['share_name'].casefold() == setup_smb_share['name'].casefold()
    assert acl['share_acl'][0]['ae_who_str'] == sharesec_user['username']
    # Abusive test, bypass normal APIs for share and
    # verify that sync_registry call still preserves info.
    call('datastore.update', 'sharing.cifs.share', setup_smb_share['id'], {'cifs_enabled': False})
    call('sharing.smb.sync_registry', job=True)
    call('datastore.update', 'sharing.cifs.share', setup_smb_share['id'], {'cifs_enabled': True})
    call('sharing.smb.sync_registry', job=True)
    # ACL must survive the registry resync round-trip as well.
    acl = call('sharing.smb.getacl', {'share_name': 'my_sharesec2'})
    assert acl['share_name'].casefold() == setup_smb_share['name'].casefold()
    assert acl['share_acl'][0]['ae_who_str'] == sharesec_user['username']
| 6,793 | Python | .py | 146 | 40.636986 | 127 | 0.624432 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,147 | test_account_privilege.py | truenas_middleware/tests/api2/test_account_privilege.py | import errno
import os
import sys
import types
import pytest
from middlewared.service_exception import CallError, ValidationErrors
from middlewared.test.integration.assets.account import group, privilege, root_with_password_disabled
from middlewared.test.integration.utils import call, mock
from middlewared.test.integration.utils.audit import expect_audit_method_calls
sys.path.append(os.getcwd())
from functions import DELETE, POST, PUT
def test_change_local_administrator_groups_to_invalid():
    """The built-in Local Administrator privilege must reject arbitrary local groups."""
    operator_group = call("group.query", [["group", "=", "operator"]], {"get": True})
    with pytest.raises(ValidationErrors) as raised:
        call("privilege.update", 1, {"local_groups": [operator_group["id"]]})
    assert raised.value.errors[0].attribute == "privilege_update.local_groups"
def test_change_local_administrator_allowlist():
    """The built-in Local Administrator privilege must not accept an allowlist."""
    new_allowlist = [{"method": "CALL", "resource": "system.info"}]
    with pytest.raises(ValidationErrors) as raised:
        call("privilege.update", 1, {"allowlist": new_allowlist})
    assert raised.value.errors[0].attribute == "privilege_update.allowlist"
def test_change_local_administrator_roles():
    """The built-in Local Administrator privilege must not accept role changes."""
    with pytest.raises(ValidationErrors) as raised:
        call("privilege.update", 1, {"roles": ['READONLY_ADMIN']})
    assert raised.value.errors[0].attribute == "privilege_update.roles"
def test_delete_local_administrator():
    """Deleting the built-in Local Administrator privilege must fail with EPERM."""
    with pytest.raises(CallError) as raised:
        call("privilege.delete", 1)
    assert raised.value.errno == errno.EPERM
def test_invalid_local_group():
    """privilege.create must reject a local group ID that does not exist."""
    bogus_privilege = {
        "name": "Test",
        "local_groups": [1024],  # no local group has this database ID
        "ds_groups": [],
        "allowlist": [{"method": "CALL", "resource": "system.info"}],
        "web_shell": False,
    }
    with pytest.raises(ValidationErrors) as raised:
        call("privilege.create", bogus_privilege)
    assert raised.value.errors[0].attribute == "privilege_create.local_groups.0"
def test_delete_local_administrator_group():
    """A group referenced by a privilege must not be deletable."""
    with group({"name": "test_local_admins"}) as g:
        original_gids = [lg["gid"] for lg in call("privilege.get_instance", 1)["local_groups"]]
        # Attach the new group to the Local Administrator privilege.
        call("privilege.update", 1, {"local_groups": original_gids + [g["gid"]]})
        with pytest.raises(CallError) as raised:
            call("group.delete", g["id"])
        assert raised.value.errmsg.startswith("This group is used by privilege")
        # Detach again so the context manager can clean the group up.
        call("privilege.update", 1, {"local_groups": original_gids})
@pytest.fixture(scope="module")
def privilege_with_orphan_local_group():
    """Yield a privilege whose local group was deleted behind the API's back.

    The group row is removed directly via datastore.delete (bypassing
    group.delete) so the privilege keeps a dangling GID reference.
    NOTE(review): nesting of the yield relative to the group() context was
    reconstructed from a whitespace-mangled source — confirm against the repo.
    """
    with group({
        "name": "test_orphan",
        "smb": False,
    }) as g:
        gid = g["gid"]
        privilege = call("privilege.create", {
            "name": "Test orphan",
            "local_groups": [gid],
            "ds_groups": [],
            "allowlist": [{"method": "CALL", "resource": "system.info"}],
            "web_shell": False,
        })
        # Delete the group row directly, then regenerate /etc/group and flush
        # the idmap cache so the system forgets the group entirely.
        call("datastore.delete", "account.bsdgroups", g["id"])
        call("etc.generate", "user")
        call("idmap.gencache.flush")
        yield types.SimpleNamespace(gid=gid, privilege=privilege)
        call("privilege.delete", privilege["id"])
def test_create_group_with_orphan_privilege_gid(privilege_with_orphan_local_group):
    """A GID still referenced by a privilege must not be reusable for a new group."""
    orphan_gid = privilege_with_orphan_local_group.gid
    with pytest.raises(ValidationErrors) as raised:
        with group({"name": "test_orphan_duplicate", "gid": orphan_gid}):
            pass
    error = raised.value.errors[0]
    assert error.attribute == "group_create.gid"
    assert error.errmsg.startswith("A privilege 'Test orphan' already uses this group ID.")
def test_group_next_gid():
    """group.get_next_gid must skip GIDs claimed by privileges.

    The mock injects the would-be next GID into privilege.used_local_gids,
    so the allocator has to return the one after it.
    """
    next_gid = call("group.get_next_gid")
    # f-string interpolates next_gid into middleware-side mock source.
    with mock("privilege.used_local_gids", f"""
        async def mock(self):
            result = await self.used_local_gids()
            result[{next_gid}] = None
            return result
    """):
        assert call("group.get_next_gid") == next_gid + 1
def test_remove_only_local_administrator_password_enabled_user():
    """Disabling root's password must fail while no other local admin has one."""
    root = call("user.query", [["username", "=", "root"]], {"get": True})
    with pytest.raises(ValidationErrors) as raised:
        call("user.update", root["id"], {"password_disabled": True})
    error = raised.value.errors[0]
    assert error.attribute == "user_update.password_disabled"
    assert error.errmsg == (
        "After disabling password for this user no password-enabled local user will have built-in privilege "
        "'Local Administrator'."
    )
def test_password_disabled_root_is_a_local_administrator():
    """Root remains the sole local administrator even with its password disabled."""
    with root_with_password_disabled():
        admins = call("privilege.local_administrators")
        assert len(admins) == 1
        assert admins[0]["username"] == "root"
@pytest.mark.parametrize("api", ["ws", "rest"])
def test_create_privilege_audit(api):
    """privilege.create must emit an audit entry via both websocket and REST."""
    privilege = None
    try:
        with expect_audit_method_calls([{
            "method": "privilege.create",
            "params": [
                {
                    "name": "Test",
                    "web_shell": False,
                }
            ],
            "description": "Create privilege Test",
        }]):
            payload = {
                "name": "Test",
                "web_shell": False,
            }
            if api == "ws":
                privilege = call("privilege.create", payload)
            elif api == "rest":
                result = POST(f"/privilege/", payload)
                assert result.status_code == 200, result.text
                privilege = result.json()
            else:
                raise ValueError(api)
    finally:
        # Clean up the privilege even if the audit expectation failed.
        if privilege is not None:
            call("privilege.delete", privilege["id"])
@pytest.mark.parametrize("api", ["ws", "rest"])
def test_update_privilege_audit(api):
    """privilege.update must emit an audit entry via both websocket and REST."""
    with privilege({
        "name": "Test",
        "web_shell": False,
    }) as p:
        expected_calls = [{
            "method": "privilege.update",
            "params": [p["id"], {}],
            "description": "Update privilege Test",
        }]
        with expect_audit_method_calls(expected_calls):
            if api == "ws":
                call("privilege.update", p["id"], {})
            elif api == "rest":
                result = PUT(f"/privilege/id/{p['id']}", {})
                assert result.status_code == 200, result.text
            else:
                raise ValueError(api)
@pytest.mark.parametrize("api", ["ws", "rest"])
def test_delete_privilege_audit(api):
    """privilege.delete must emit an audit entry via both websocket and REST."""
    with privilege({
        "name": "Test",
        "web_shell": False,
    }) as p:
        expected_calls = [{
            "method": "privilege.delete",
            "params": [p["id"]],
            "description": "Delete privilege Test",
        }]
        with expect_audit_method_calls(expected_calls):
            if api == "ws":
                call("privilege.delete", p["id"])
            elif api == "rest":
                result = DELETE(f"/privilege/id/{p['id']}")
                assert result.status_code == 200, result.text
            else:
                raise ValueError(api)
| 6,939 | Python | .py | 164 | 33.402439 | 109 | 0.596731 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,148 | test_user_truenas_admin.py | truenas_middleware/tests/api2/test_user_truenas_admin.py | import io
import os
import subprocess
import tarfile
import tempfile
import pytest
import requests
from middlewared.test.integration.assets.account import root_with_password_disabled
from middlewared.test.integration.assets.keychain import ssh_keypair
from middlewared.test.integration.utils import call, client, host, mock, url
@pytest.fixture(scope="module")
def truenas_admin():
    """Create the installer-style truenas_admin (uid 950) account and yield it.

    setup_local_administrator only works when no password-enabled root exists,
    so root's hash is temporarily wiped via the datastore and restored right
    after. NOTE(review): statement nesting reconstructed from a
    whitespace-mangled source — confirm against the repo.
    """
    # Preconditions: neither the uid nor the username is taken.
    assert call("user.query", [["uid", "=", 950]]) == []
    assert call("user.query", [["username", "=", "truenas_admin"]]) == []
    with root_with_password_disabled() as context:
        # Wipe root's password hash so local-administrator setup is permitted.
        context.client.call("datastore.update", "account.bsdusers", context.root_id, {"bsdusr_unixhash": "*"})
        context.client.call("user.setup_local_administrator", "truenas_admin", "password")
        # Smoke-test that the new credentials actually authenticate.
        call("system.info", client_kwargs=dict(auth=("truenas_admin", "password")))
        # Quickly restore root password before anyone notices
        context.client.call("datastore.update", "account.bsdusers", context.root_id, context.root_backup)
        context.client.call("etc.generate", "user")
    truenas_admin = call("user.query", [["username", "=", "truenas_admin"]], {"get": True})
    try:
        yield truenas_admin
    finally:
        # Remove the account directly from the datastore and regenerate /etc files.
        call("datastore.delete", "account.bsdusers", truenas_admin["id"])
        call("etc.generate", "user")
def test_installer_admin_has_local_administrator_privilege(truenas_admin):
    """The installer-created admin account can call privileged methods."""
    with client(auth=("truenas_admin", "password")) as session:
        session.call("system.info")
def test_can_set_admin_authorized_key(truenas_admin):
    """Setting an SSH pubkey on truenas_admin must allow login and be saved in config.

    Verifies (a) key-based SSH login works, and (b) config.save with
    root_authorized_keys includes the key with correct uid/gid/ownership.
    """
    with ssh_keypair() as keypair:
        call("user.update", truenas_admin["id"], {
            "sshpubkey": keypair["attributes"]["public_key"],
        })
        try:
            # Write the private key to a 0600 temp file and attempt a login.
            with tempfile.NamedTemporaryFile("w") as f:
                os.chmod(f.name, 0o600)
                f.write(keypair["attributes"]["private_key"])
                f.flush()
                subprocess.run([
                    "ssh",
                    "-i", f.name,
                    "-o", "StrictHostKeyChecking=no",
                    "-o", "UserKnownHostsFile=/dev/null",
                    "-o", "VerifyHostKeyDNS=no",
                    f"truenas_admin@{host().ip}",
                    "uptime",
                ], capture_output=True, check=True, timeout=30)
            # Download the config backup including authorized keys.
            job_id, path = call("core.download", "config.save", [{"root_authorized_keys": True}], "config.tar")
            r = requests.get(f"{url()}{path}")
            r.raise_for_status()
            tar_io = io.BytesIO(r.content)
            with tarfile.TarFile(fileobj=tar_io) as tar:
                member = tar.getmember("truenas_admin_authorized_keys")
                # 950 is the fixed uid/gid of the installer admin account.
                assert member.uid == 950
                assert member.gid == 950
                assert member.uname == "truenas_admin"
                assert member.gname == "truenas_admin"
                assert tar.extractfile(member).read().decode() == keypair["attributes"]["public_key"]
        finally:
            # Always clear the key so later tests see a pristine account.
            call("user.update", truenas_admin["id"], {
                "sshpubkey": "",
            })
def test_admin_user_alert(truenas_admin):
    """An AdminUserIsOverridden alert fires when uid 950 resolves to another user."""
    overridden_user = {
        "pw_name": "root", "pw_uid": 0, "pw_gid": 0, "pw_gecos": "root",
        "pw_dir": "/root", "pw_shell": "/usr/bin/zsh",
    }
    with mock("user.get_user_obj", args=[{"uid": 950}], return_value=overridden_user):
        alerts = call("alert.run_source", "AdminUser")
        assert len(alerts) == 1
        assert alerts[0]["klass"] == "AdminUserIsOverridden"
def test_admin_user_no_alert(truenas_admin):
    """No AdminUser alert is raised when uid 950 is the expected admin account."""
    alerts = call("alert.run_source", "AdminUser")
    assert not alerts
| 3,643 | Python | .py | 73 | 39.150685 | 118 | 0.589477 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,149 | test_system_general.py | truenas_middleware/tests/api2/test_system_general.py | from middlewared.test.integration.utils import call
# Timezone used to exercise the system.general timezone set/get round-trip.
TIMEZONE = "America/New_York"
def test_check_system_set_time():
    """
    Intentionally slew the clock 300 seconds into the past and verify that
    system.set_time actually applied the change.
    """
    now = int(call("system.info")["datetime"].timestamp())
    # Hop 300 seconds into the past.
    target = now - 300
    call("system.set_time", int(target))
    after = int(call("system.info")["datetime"].timestamp())
    # NTP starts correcting the slew almost immediately, so allow a fudge factor.
    assert abs(target - after) < 60
def test_setting_timezone():
assert TIMEZONE in call("system.general.timezone_choices")
call("system.general.update", {"timezone": TIMEZONE})
assert call("system.general.config")["timezone"] == TIMEZONE
| 907 | Python | .py | 22 | 36.590909 | 64 | 0.708096 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,150 | test_port_delegates.py | truenas_middleware/tests/api2/test_port_delegates.py | #!/usr/bin/env python3
import os
import pytest
import sys
apifolder = os.getcwd()
sys.path.append(apifolder)
from middlewared.service_exception import ValidationErrors
from middlewared.test.integration.utils import call
PAYLOAD = (
('ftp.config', 'ftp.update', ['port'], {}),
)
@pytest.mark.parametrize('config_method,method,keys,payload', PAYLOAD)
def test_port_delegate_validation_with_invalid_ports(config_method, method, keys, payload):
in_use_ports = []
namespace = config_method.rsplit('.', 1)[0]
for entry in call('port.get_in_use'):
in_use_ports.extend(filter(lambda i: i[1] > 1024 and entry['namespace'] != namespace, entry['ports']))
assert in_use_ports != [], 'No in use ports retrieved'
for index, key in enumerate(keys):
payload[key] = in_use_ports[index][1] if len(in_use_ports) > index else in_use_ports[0]
with pytest.raises(ValidationErrors) as ve:
call(method, payload)
assert any('The port is being used by' in error.errmsg for error in ve.value.errors) is True, ve
@pytest.mark.parametrize('config_method,method,keys,payload', PAYLOAD)
def test_port_delegate_validation_with_valid_ports(config_method, method, keys, payload):
in_use_ports = []
for entry in call('port.get_in_use'):
in_use_ports.extend(entry['ports'])
assert in_use_ports != [], 'No in use ports retrieved'
validation_error = None
old_config = call(config_method)
to_restore_config = {}
used_ports = []
for key in keys:
port = next(i for i in range(20000, 60000) if i not in in_use_ports and i not in used_ports)
payload[key] = port
used_ports.append(port)
to_restore_config[key] = old_config[key]
try:
call(method, payload)
except ValidationErrors as ve:
validation_error = ve
else:
call(method, to_restore_config)
assert validation_error is None, f'No validation exception expected: {validation_error}'
| 1,976 | Python | .py | 45 | 38.8 | 110 | 0.694357 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,151 | test_snapshot_count_alert.py | truenas_middleware/tests/api2/test_snapshot_count_alert.py | import pytest
from pytest_dependency import depends
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.assets.smb import smb_share
from middlewared.test.integration.utils import call, mock
from time import sleep
DATASET_NAME = "snapshot_count"
NUM_SNAPSHOTS = 10
def test_snapshot_total_count_alert(request):
with dataset(DATASET_NAME) as ds:
base = call("zfs.snapshot.query", [], {"count": True})
with mock("pool.snapshottask.max_total_count", return_value=base + NUM_SNAPSHOTS):
for i in range(NUM_SNAPSHOTS):
call("zfs.snapshot.create", {"dataset": ds, "name": f"snap-{i}"})
assert call("alert.run_source", "SnapshotCount") == []
# snapshots_changed ZFS dataset property has 1 second resolution
sleep(1)
call("zfs.snapshot.create", {"dataset": ds, "name": f"snap-{NUM_SNAPSHOTS}"})
alert = call("alert.run_source", "SnapshotCount")[0]
assert alert["text"] % alert["args"] == (
f"Your system has more snapshots ({base + NUM_SNAPSHOTS + 1}) than recommended ({base + NUM_SNAPSHOTS}"
"). Performance or functionality might degrade."
)
def test_snapshot_count_alert(request):
with (
dataset(DATASET_NAME) as ds,
smb_share(f"/mnt/{ds}", DATASET_NAME),
mock("pool.snapshottask.max_count", return_value=NUM_SNAPSHOTS)
):
for i in range(NUM_SNAPSHOTS):
call("zfs.snapshot.create", {"dataset": ds, "name": f"snap-{i}"})
assert call("alert.run_source", "SnapshotCount") == []
# snapshots_changed ZFS dataset property has 1 second resolution
sleep(1)
call("zfs.snapshot.create", {"dataset": ds, "name": f"snap-{NUM_SNAPSHOTS}"})
alert = call("alert.run_source", "SnapshotCount")[0]
assert alert["text"] % alert["args"] == (
f"SMB share {ds} has more snapshots ({NUM_SNAPSHOTS + 1}) than recommended ({NUM_SNAPSHOTS}). File "
"Explorer may not display all snapshots in the Previous Versions tab."
)
| 2,186 | Python | .py | 40 | 44.85 | 119 | 0.626054 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,152 | test_audit_alerts.py | truenas_middleware/tests/api2/test_audit_alerts.py | import pytest
from middlewared.test.integration.utils import call, ssh, mock
from time import sleep
@pytest.fixture(scope='function')
def setup_state(request):
"""
Parametrize the test setup
The hope was that both 'backend' and 'setup' one-shot tests would be similar, however
the 'setup' test ended up requiring 'with mock'
"""
path = '/audit'
alert_key = request.param[0]
if alert_key is not None:
path += f"/{alert_key}.db"
alert_class = request.param[1]
restore_data = None
try:
# Remove any pre-existing alert cruft
call('alert.oneshot_delete', alert_class, alert_key if alert_key is None else {'service': alert_key})
alerts = call("alert.list")
class_alerts = [alert for alert in alerts if alert['klass'] == alert_class]
assert len(class_alerts) == 0, class_alerts
match alert_class:
case 'AuditBackendSetup':
# A file in the dataset: set it immutable
ssh(f'chattr +i {path}')
lsattr = ssh(f'lsattr {path}')
assert lsattr[4] == 'i', lsattr
restore_data = path
case 'AuditDatasetCleanup':
# Directly tweak the zfs settings
call(
"zfs.dataset.update",
"boot-pool/ROOT/24.10.0-MASTER-20240709-021413/audit",
{"properties": {"org.freenas:refquota_warning": {"parsed": "70"}}}
)
case _:
pass
yield request.param
finally:
match alert_class:
case 'AuditBackendSetup':
# Remove immutable flag from file
assert restore_data != ""
ssh(f'chattr -i {restore_data}')
lsattr = ssh(f'lsattr {restore_data}')
assert lsattr[4] == '-', lsattr
# Restore backend file descriptors and dismiss alerts
call('auditbackend.setup')
case 'AuditSetup':
# Dismiss alerts
call('audit.setup')
case _:
pass
# call('alert.oneshot_delete', alert_class, alert_key if alert_key is None else {'service': alert_key})
sleep(1)
alerts = call("alert.list")
class_alerts = [alert for alert in alerts if alert['klass'] == alert_class]
assert len(class_alerts) == 0, class_alerts
@pytest.mark.parametrize(
'setup_state', [
['SMB', 'AuditBackendSetup', 'auditbackend.setup'],
],
indirect=True
)
def test_audit_backend_alert(setup_state):
db_path, alert_class, audit_method = setup_state
call(audit_method)
sleep(1)
alerts = call("alert.list")
class_alerts = [alert for alert in alerts if alert['klass'] == alert_class]
assert len(class_alerts) > 0, class_alerts
assert class_alerts[0]['klass'] == 'AuditBackendSetup', class_alerts
assert class_alerts[0]['args']['service'] == db_path, class_alerts
assert class_alerts[0]['formatted'].startswith("Audit service failed backend setup"), class_alerts
@pytest.mark.parametrize(
'setup_state', [
[None, 'AuditSetup', 'audit.setup']
],
indirect=True
)
def test_audit_setup_alert(setup_state):
with mock("audit.update_audit_dataset", """
from middlewared.service import private
@private
async def mock(self, new):
raise Exception()
"""):
unused, alert_class, audit_method = setup_state
call(audit_method)
sleep(1)
alerts = call("alert.list")
class_alerts = [alert for alert in alerts if alert['klass'] == alert_class]
assert len(class_alerts) > 0, class_alerts
assert class_alerts[0]['klass'] == 'AuditSetup', class_alerts
assert class_alerts[0]['formatted'].startswith("Audit service failed to complete setup"), class_alerts
def test_audit_health_monitor_alert():
with mock("auditbackend.query", """
from middlewared.service import private
from middlewared.schema import accepts, List, Dict, Str
@private
@accepts(
Str('db_name', required=True),
List('query-filters'),
Dict('query-options', additional_attrs=True)
)
async def mock(self, db_name, filters, options):
raise CallError('TEST_SERVICE: connection to audit database is not initialized.')
"""):
alert = call("alert.run_source", "AuditServiceHealth")[0]
assert alert['source'] == 'AuditServiceHealth', f"Received source: {alert['source']}"
assert alert['text'].startswith("Failed to perform audit query"), f"Received text: {alert['text']}"
assert "connection to audit database is not initialized" in alert['args']['verrs'], f"Received args: {alert['args']}"
| 4,845 | Python | .py | 113 | 33.681416 | 125 | 0.607793 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,153 | test_015_services.py | truenas_middleware/tests/api2/test_015_services.py | import time
import sys
import os
apifolder = os.getcwd()
sys.path.append(apifolder)
import pytest
from middlewared.service_exception import CallError
from middlewared.test.integration.utils import call, ssh
def test_001_oom_check():
pid = call('core.get_pid')
assert call('core.get_oom_score_adj', pid) == -1000
@pytest.mark.flaky(reruns=5, reruns_delay=5) # Sometimes systemd unit state is erroneously reported as active
def test_non_silent_service_start_failure():
"""
This test for 2 conditions:
1. middleware raises CallError that isn't empty
2. each time a CallError is raised, the message
has a timestamp and that timestamp changes
with each failure
"""
with pytest.raises(CallError) as e:
call('service.start', 'ups', {'silent': False})
# Error looks like
"""
middlewared.service_exception.CallError: [EFAULT] Jan 10 08:49:14 systemd[1]: Starting Network UPS Tools - power device monitor and shutdown controller...
Jan 10 08:49:14 nut-monitor[3032658]: fopen /run/nut/upsmon.pid: No such file or directory
Jan 10 08:49:14 nut-monitor[3032658]: Unable to use old-style MONITOR line without a username
Jan 10 08:49:14 nut-monitor[3032658]: Convert it and add a username to upsd.users - see the documentation
Jan 10 08:49:14 nut-monitor[3032658]: Fatal error: unusable configuration
Jan 10 08:49:14 nut-monitor[3032658]: Network UPS Tools upsmon 2.7.4
Jan 10 08:49:14 systemd[1]: nut-monitor.service: Control process exited, code=exited, status=1/FAILURE
Jan 10 08:49:14 systemd[1]: nut-monitor.service: Failed with result 'exit-code'.
Jan 10 08:49:14 systemd[1]: Failed to start Network UPS Tools - power device monitor and shutdown controller.
"""
lines1 = e.value.errmsg.splitlines()
first_ts, len_lines1 = ' '.join(lines1.pop(0).split()[:3]), len(lines1)
assert any('nut-monitor[' in line for line in lines1), lines1
assert any('systemd[' in line for line in lines1), lines1
# make sure we don't trigger system StartLimitBurst threshold
# by removing this service from failed unit list (if it's there)
ssh('systemctl reset-failed nut-monitor')
# we have to sleep 1 second here or the timestamp will be the
# same as when we first tried to start the service which is
# what we're testing to make sure the message is up to date
# with reality
time.sleep(1)
with pytest.raises(CallError) as e:
call('service.start', 'ups', {'silent': False})
# Error looks like: (Notice timestamp change, which is what we verify
"""
middlewared.service_exception.CallError: [EFAULT] Jan 10 08:49:15 systemd[1]: Starting Network UPS Tools - power device monitor and shutdown controller...
Jan 10 08:49:15 nut-monitor[3032739]: fopen /run/nut/upsmon.pid: No such file or directory
Jan 10 08:49:15 nut-monitor[3032739]: Unable to use old-style MONITOR line without a username
Jan 10 08:49:15 nut-monitor[3032739]: Convert it and add a username to upsd.users - see the documentation
Jan 10 08:49:15 nut-monitor[3032739]: Fatal error: unusable configuration
Jan 10 08:49:15 nut-monitor[3032739]: Network UPS Tools upsmon 2.7.4
Jan 10 08:49:15 systemd[1]: nut-monitor.service: Control process exited, code=exited, status=1/FAILURE
Jan 10 08:49:15 systemd[1]: nut-monitor.service: Failed with result 'exit-code'.
Jan 10 08:49:15 systemd[1]: Failed to start Network UPS Tools - power device monitor and shutdown controller.
"""
lines2 = e.value.errmsg.splitlines()
second_ts, len_lines2 = ' '.join(lines2.pop(0).split()[:3]), len(lines2)
assert any('nut-monitor[' in line for line in lines2), lines2
assert any('systemd[' in line for line in lines2), lines2
# timestamp should change since we sleep(1)
assert first_ts != second_ts
# the error messages will differ slightly (different PID for upsmon) but the number
# of lines should be the same
assert len_lines1 == len_lines2
# Stop the service to avoid syslog spam
call('service.stop', 'ups')
| 4,110 | Python | .py | 71 | 52.887324 | 158 | 0.720636 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,154 | test_rate_limit.py | truenas_middleware/tests/api2/test_rate_limit.py | import errno
import pytest
from middlewared.test.integration.utils import call, client
NOAUTH_METHOD = 'system.boot_id'
SEP = '_##_'
def test_unauth_requests_are_rate_limited():
"""Test that the truenas server rate limits a caller
that is hammering an endpoint that requires no authentication."""
with client(auth=None) as c:
for i in range(1, 22):
if i <= 20:
# default is 20 calls within 60 second timeframe
assert c.call(NOAUTH_METHOD)
else:
with pytest.raises(Exception) as ve:
# on 21st call within 60 seconds, rate limit kicks in
c.call(NOAUTH_METHOD)
assert ve.value.errno == errno.EBUSY
"""Test that middleware's rate limit plugin for interacting
with the global cache behaves as intended."""
cache = call('rate.limit.cache_get')
# the mechanism by which the rate limit chooses a unique key
# for inserting into the dictionary is by using the api endpoint
# name as part of the string
assert any((NOAUTH_METHOD in i for i in cache)), cache
# now let's pop the last entry of the cache
len_cache_before_pop = len(cache)
popped_method, popped_ip = list(cache)[-1].split(SEP)
call('rate.limit.cache_pop', popped_method, popped_ip)
new_cache = call('rate.limit.cache_get')
assert len(new_cache) != len_cache_before_pop, new_cache
# finally, let's clear the cache
call('rate.limit.cache_clear')
new_new_cache = call('rate.limit.cache_get')
assert len(new_new_cache) == 0, new_new_cache
@pytest.mark.parametrize('method_name', [NOAUTH_METHOD, 'system.host_id'])
def test_authorized_requests_are_not_rate_limited(method_name):
"""Test that the truenas server does NOT rate limit a caller
that hammers an endpoint when said caller has been authenticated"""
for i in range(1, 22):
assert call(method_name)
| 1,950 | Python | .py | 41 | 40.560976 | 74 | 0.675092 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,155 | test_rest_api_upload.py | truenas_middleware/tests/api2/test_rest_api_upload.py | import io
import json
import pytest
from middlewared.test.integration.utils import client, session, url
@pytest.mark.parametrize("method", ["test_input_pipe", "test_input_unchecked_pipe"])
def test_upload(method):
with session() as s:
r = s.post(
f"{url()}/api/v2.0/resttest/{method}",
files={
"data": (None, io.StringIO('{"key": "value"}')),
"file": (None, io.StringIO("FILE")),
},
)
r.raise_for_status()
job_id = r.json()
with client() as c:
assert c.call("core.job_wait", job_id, job=True) == '{"key": "value"}FILE'
def test_no_upload_to_checked_pipe():
with session() as s:
r = s.post(
f"{url()}/api/v2.0/resttest/test_input_pipe",
headers={"Content-type": "application/json"},
data="{\"key\": \"value\"}",
)
assert r.status_code == 400
assert r.json()["message"] == "This method accepts only multipart requests."
def test_no_upload_to_unchecked_pipe():
with session() as s:
r = s.post(
f"{url()}/api/v2.0/resttest/test_input_unchecked_pipe",
headers={"Content-type": "application/json"},
data='{"key": "value"}',
)
r.raise_for_status()
job_id = r.json()
with client() as c:
assert c.call("core.job_wait", job_id, job=True) == '{"key": "value"}NONE'
def test_upload_to_upload_endpoint():
with session() as s:
r = s.post(
f"{url()}/_upload",
files={
"data": (None, io.StringIO(json.dumps({
"method": "resttest.test_input_pipe",
"params": [{"key": "value"}]
}))),
"file": (None, io.StringIO("FILE")),
},
)
r.raise_for_status()
job_id = r.json()["job_id"]
with client() as c:
assert c.call("core.job_wait", job_id, job=True) == '{"key": "value"}FILE'
| 2,010 | Python | .py | 54 | 27.740741 | 84 | 0.520597 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,156 | test_iscsi_initiator_crud_roles.py | truenas_middleware/tests/api2/test_iscsi_initiator_crud_roles.py | import pytest
from middlewared.test.integration.assets.roles import common_checks
@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_INITIATOR_READ"])
def test_read_role_can_read(unprivileged_user_fixture, role):
common_checks(unprivileged_user_fixture, "iscsi.initiator.query", role, True, valid_role_exception=False)
@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_INITIATOR_READ"])
def test_read_role_cant_write(unprivileged_user_fixture, role):
common_checks(unprivileged_user_fixture, "iscsi.initiator.create", role, False)
common_checks(unprivileged_user_fixture, "iscsi.initiator.update", role, False)
common_checks(unprivileged_user_fixture, "iscsi.initiator.delete", role, False)
@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_ISCSI_WRITE", "SHARING_ISCSI_INITIATOR_WRITE"])
def test_write_role_can_write(unprivileged_user_fixture, role):
common_checks(unprivileged_user_fixture, "iscsi.initiator.create", role, True)
common_checks(unprivileged_user_fixture, "iscsi.initiator.update", role, True)
common_checks(unprivileged_user_fixture, "iscsi.initiator.delete", role, True)
| 1,208 | Python | .py | 15 | 77.2 | 109 | 0.777403 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,157 | test_authenticator_assurance_level.py | truenas_middleware/tests/api2/test_authenticator_assurance_level.py | import errno
import pytest
from contextlib import contextmanager
from middlewared.service_exception import CallError
from middlewared.test.integration.assets.two_factor_auth import enabled_twofactor_auth, get_user_secret, get_2fa_totp_token
from middlewared.test.integration.assets.api_key import api_key
from middlewared.test.integration.utils import client, call
@contextmanager
def authenticator_assurance_level(level):
""" temporarily increase level """
with client() as c:
c.call('auth.set_authenticator_assurance_level', level)
try:
yield
finally:
c.call('auth.set_authenticator_assurance_level', 'LEVEL_1')
@pytest.fixture(scope='function')
def sharing_admin_user(unprivileged_user_fixture):
privilege = call('privilege.query', [['local_groups.0.group', '=', unprivileged_user_fixture.group_name]])
assert len(privilege) > 0, 'Privilege not found'
call('privilege.update', privilege[0]['id'], {'roles': ['SHARING_ADMIN']})
try:
yield unprivileged_user_fixture
finally:
call('privilege.update', privilege[0]['id'], {'roles': []})
@pytest.mark.parametrize('level,expected', [
('LEVEL_1', ['API_KEY_PLAIN', 'TOKEN_PLAIN', 'PASSWORD_PLAIN']),
('LEVEL_2', ['PASSWORD_PLAIN']),
])
def test_mechanism_choices(level, expected):
with authenticator_assurance_level(level):
assert call('auth.mechanism_choices') == expected
def test_level2_api_key_plain():
""" API_KEY_PLAIN lacks replay resistance
and so authentication attempts must fail with EOPNOTSUPP
"""
with authenticator_assurance_level('LEVEL_2'):
with api_key() as key:
with client(auth=None) as c:
with pytest.raises(CallError) as ce:
c.call('auth.login_ex', {
'mechanism': 'API_KEY_PLAIN',
'username': 'root',
'api_key': key
})
assert ce.value.errno == errno.EOPNOTSUPP
def test_level2_password_plain_no_twofactor():
""" PASSWORD_PLAIN lacks replay resistance
and so authentication attempts must fail with EOPNOTSUPP
"""
with authenticator_assurance_level('LEVEL_2'):
with pytest.raises(CallError) as ce:
with client():
pass
assert ce.value.errno == errno.EOPNOTSUPP
def test_level2_password_with_otp(sharing_admin_user):
""" PASSWORD_PLAIN with 2FA is sufficient to authenticate """
user_obj_id = call('user.query', [['username', '=', sharing_admin_user.username]], {'get': True})['id']
with enabled_twofactor_auth():
call('user.renew_2fa_secret', sharing_admin_user.username, {'interval': 60})
secret = get_user_secret(user_obj_id)
with authenticator_assurance_level('LEVEL_2'):
with client(auth=None) as c:
resp = c.call('auth.login_ex', {
'mechanism': 'PASSWORD_PLAIN',
'username': sharing_admin_user.username,
'password': sharing_admin_user.password
})
assert resp['response_type'] == 'OTP_REQUIRED'
assert resp['username'] == sharing_admin_user.username
resp = c.call('auth.login_ex', {
'mechanism': 'OTP_TOKEN',
'otp_token': get_2fa_totp_token(secret)
})
assert resp['response_type'] == 'SUCCESS'
| 3,494 | Python | .py | 75 | 37.16 | 123 | 0.626949 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,158 | test_pool_replace_disk.py | truenas_middleware/tests/api2/test_pool_replace_disk.py | from time import sleep
import pytest
from middlewared.test.integration.assets.pool import _2_disk_mirror_topology, _4_disk_raidz2_topology, another_pool
from middlewared.test.integration.utils import call
@pytest.mark.parametrize("topology", [_2_disk_mirror_topology, _4_disk_raidz2_topology])
def test_pool_replace_disk(topology):
"""This tests the following:
1. create a zpool based on the `topology`
2. flatten the newly created zpools topology
3. verify the zpool vdev size matches reality
4. choose 1st vdev from newly created zpool
5. choose 1st disk in vdev from step #4
6. choose 1st disk in disk.get_unused as replacement disk
7. call pool.replace using disk from step #5 with disk from step #6
8. validate that the disk being replaced still has zfs partitions
9. validate pool.get_instance topology info shows the replacement disk
10. validate disk.get_instance associates the replacement disk with the zpool
"""
with another_pool(topology=topology) as pool: # step 1
# step 2
flat_top = call("pool.flatten_topology", pool["topology"])
pool_top = [vdev for vdev in flat_top if vdev["type"] == "DISK"]
# step 3
assert len(pool_top) == topology[0]
# step 4
to_replace_vdev = pool_top[0]
# step 5
to_replace_disk = call(
"disk.query", [["devname", "=", to_replace_vdev["disk"]]], {"get": True, "extra": {"pools": True}}
)
assert to_replace_disk["pool"] == pool["name"]
# step 6
new_disk = call("disk.get_unused")[0]
# step 7
call("pool.replace", pool["id"], {"label": to_replace_vdev["guid"], "disk": new_disk["identifier"]}, job=True)
# step 8
assert call("disk.gptid_from_part_type", to_replace_disk["devname"], call("disk.get_zfs_part_type"))
# step 9
found = False
for _ in range(10):
if not found:
for i in call("pool.flatten_topology", call("pool.get_instance", pool["id"])["topology"]):
if i["type"] == "DISK" and i["disk"] == new_disk["devname"]:
found = True
break
else:
sleep(1)
assert found, f'Failed to detect replacement disk {new_disk["devname"]!r} in zpool {pool["name"]!r}'
# step 10 (NOTE: disk.sync_all takes awhile so we retry a few times here)
for _ in range(30):
cmd = ("disk.get_instance", new_disk["identifier"], {"extra": {"pools": True}})
if call(*cmd)["pool"] == pool["name"]:
break
else:
sleep(1)
else:
assert False, f"{' '.join(cmd)} failed to update with pool information"
| 2,829 | Python | .py | 57 | 39.526316 | 118 | 0.595799 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,159 | test_job_errno.py | truenas_middleware/tests/api2/test_job_errno.py | import pytest
from middlewared.test.integration.utils import call, mock
from truenas_api_client import ClientException
def test_job_errno():
with mock("test.test1", """
from middlewared.service import job
from middlewared.schema import returns, Password
from middlewared.service_exception import CallError
@job()
@returns(Password("my_password"))
def mock(self, job, *args):
raise CallError("canary", 13)
"""):
job_id = call("test.test1")
with pytest.raises(ClientException):
call("core.job_wait", job_id, job=True)
result = call("core.get_jobs", [["id", "=", job_id]], {"get": True})
assert "errno" in result["exc_info"]
assert result["exc_info"]["errno"] == 13
| 791 | Python | .py | 19 | 33.894737 | 76 | 0.633508 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,160 | test_cloud_sync_custom_s3.py | truenas_middleware/tests/api2/test_cloud_sync_custom_s3.py | import time
import pytest
from middlewared.test.integration.assets.cloud_sync import credential, task
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call
from middlewared.test.integration.utils.mock_rclone import mock_rclone
@pytest.mark.parametrize("credential_attributes,result", [
(
{
"endpoint": "s3.fr-par.scw.cloud",
"region": "fr-par",
"skip_region": False,
"signatures_v2": False,
},
{"region": "fr-par"},
)
])
def test_custom_s3(credential_attributes, result):
with dataset("test_cloudsync_custom_s3") as ds:
with credential({
"name": "S3",
"provider": "S3",
"attributes": {
"access_key_id": "test",
"secret_access_key": "test",
**credential_attributes,
},
}) as c:
with task({
"direction": "PUSH",
"transfer_mode": "COPY",
"path": f"/mnt/{ds}",
"credentials": c["id"],
"attributes": {
"bucket": "bucket",
"folder": "",
},
}) as t:
with mock_rclone() as mr:
call("cloudsync.sync", t["id"])
time.sleep(2.5)
assert mr.result["config"]["remote"]["region"] == "fr-par"
| 1,474 | Python | .py | 42 | 23.642857 | 78 | 0.506667 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,161 | test_audit_rest.py | truenas_middleware/tests/api2/test_audit_rest.py | # -*- coding=utf-8 -*-
import io
import json
import os
import sys
from unittest.mock import ANY
import requests
from middlewared.test.integration.assets.account import unprivileged_user
from middlewared.test.integration.utils import call, url
from middlewared.test.integration.utils.audit import expect_audit_log
apifolder = os.getcwd()
sys.path.append(apifolder)
from functions import POST
def test_unauthenticated_call():
with expect_audit_log([
{
"event": "AUTHENTICATION",
"event_data": {
"credentials": {
"credentials": "LOGIN_PASSWORD",
"credentials_data": {"username": "invalid"},
},
"error": "Bad username or password",
},
"success": False,
}
], include_logins=True):
r = requests.get(f"{url()}/api/v2.0/system/info", auth=("invalid", "password"))
assert r.status_code == 401
def test_unauthenticated_upload_call():
with expect_audit_log([
{
"event": "AUTHENTICATION",
"event_data": {
"credentials": {
"credentials": "LOGIN_PASSWORD",
"credentials_data": {"username": "invalid"},
},
"error": "Bad username or password",
},
"success": False,
}
], include_logins=True):
r = requests.post(
f"{url()}/api/v2.0/resttest/test_input_pipe",
auth=("invalid", "password"),
files={
"data": (None, io.StringIO('{"key": "value"}')),
"file": (None, io.StringIO("FILE")),
},
)
assert r.status_code == 401
def test_authenticated_call():
user_id = None
try:
with expect_audit_log([
{
"service_data": {
"vers": {
"major": 0,
"minor": 1,
},
"origin": ANY,
"protocol": "REST",
"credentials": {
"credentials": "LOGIN_PASSWORD",
"credentials_data": {"username": "root", "login_at": ANY},
},
},
"event": "AUTHENTICATION",
"event_data": {
"credentials": {
"credentials": "LOGIN_PASSWORD",
"credentials_data": {"username": "root"},
},
"error": None,
},
"success": True,
},
{
"service_data": {
"vers": {
"major": 0,
"minor": 1,
},
"origin": ANY,
"protocol": "REST",
"credentials": {
"credentials": "LOGIN_PASSWORD",
"credentials_data": {"username": "root", "login_at": ANY},
},
},
"event": "METHOD_CALL",
"event_data": {
"authenticated": True,
"authorized": True,
"method": "user.create",
"params": [
{
"username": "sergey",
"full_name": "Sergey",
"group_create": True,
"home": "/nonexistent",
"password": "********",
}
],
"description": "Create user sergey",
},
"success": True,
},
], include_logins=True):
r = POST("/user", {
"username": "sergey",
"full_name": "Sergey",
"group_create": True,
"home": "/nonexistent",
"password": "password",
})
assert r.status_code == 200
user_id = r.json()
finally:
if user_id is not None:
call("user.delete", user_id)
def test_unauthorized_call():
with unprivileged_user(
username="unprivileged",
group_name="unprivileged_users",
privilege_name="Unprivileged users",
allowlist=[],
roles=[],
web_shell=False,
) as u:
with expect_audit_log([
{
"service_data": {
"vers": {
"major": 0,
"minor": 1,
},
"origin": ANY,
"protocol": "REST",
"credentials": {
"credentials": "LOGIN_PASSWORD",
"credentials_data": {"username": ANY, "login_at": ANY},
},
},
"event": "METHOD_CALL",
"event_data": {
"authenticated": True,
"authorized": False,
"method": "user.create",
"params": [{"username": "sergey", "full_name": "Sergey"}],
"description": "Create user sergey",
},
"success": False,
}
]):
r = requests.post(
f"{url()}/api/v2.0/user",
auth=(u.username, u.password),
headers={"Content-type": "application/json"},
data=json.dumps({"username": "sergey", "full_name": "Sergey"}),
)
assert r.status_code == 403, r.text
def test_bogus_call():
with expect_audit_log([
{
"service_data": {
"vers": {
"major": 0,
"minor": 1,
},
"origin": ANY,
"protocol": "REST",
"credentials": {
"credentials": "LOGIN_PASSWORD",
"credentials_data": {"username": "root", "login_at": ANY},
},
},
"event": "METHOD_CALL",
"event_data": {
"authenticated": True,
"authorized": True,
"method": "user.create",
"params": [{}],
"description": "Create user",
},
"success": False,
}
]):
response = POST("/user", {})
assert response.status_code == 422
| 6,611 | Python | .py | 192 | 19.40625 | 87 | 0.400094 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,162 | test_012_directory_service_ssh.py | truenas_middleware/tests/api2/test_012_directory_service_ssh.py | import pytest
from functions import SSH_TEST
from middlewared.test.integration.assets.directory_service import active_directory, ldap
from middlewared.test.integration.utils import call
from middlewared.test.integration.utils.audit import expect_audit_method_calls
try:
from config import AD_DOMAIN, ADPASSWORD, ADUSERNAME
except ImportError:
Reason = 'ADNameServer AD_DOMAIN, ADPASSWORD, or/and ADUSERNAME are missing in config.py"'
pytestmark = pytest.mark.skip(reason=Reason)
try:
from config import (
LDAPUSER,
LDAPPASSWORD
)
except ImportError:
Reason = 'LDAP* variable are not setup in config.py'
pytestmark = pytest.mark.skipif(True, reason=Reason)
@pytest.fixture(scope="function")
def do_ad_connection(request):
with active_directory() as ad:
yield ad
@pytest.fixture(scope="function")
def do_ldap_connection(request):
with ldap() as ldap_conn:
yield ldap_conn
def test_08_test_ssh_ad(do_ad_connection):
userobj = do_ad_connection['user_obj']
groupobj = call('group.get_group_obj', {'gid': userobj['pw_gid']})
payload = {"password_login_groups": [groupobj['gr_name']]}
try:
with expect_audit_method_calls([{
'method': 'ssh.update',
'params': [payload],
'description': 'Update SSH configuration'
}]):
call('ssh.update', payload)
results = SSH_TEST('ls -la', f'{ADUSERNAME}@{AD_DOMAIN}', ADPASSWORD)
finally:
call('ssh.update', {"password_login_groups": []})
assert results['result'] is True, results
def test_09_test_ssh_ldap(do_ldap_connection):
userobj = call('user.get_user_obj', {'username': LDAPUSER})
groupobj = call('group.get_group_obj', {'gid': userobj['pw_gid']})
call('ssh.update', {"password_login_groups": [groupobj['gr_name']]})
cmd = 'ls -la'
results = SSH_TEST(cmd, LDAPUSER, LDAPPASSWORD)
call('ssh.update', {"password_login_groups": []})
assert results['result'] is True, results
| 2,021 | Python | .py | 49 | 35.857143 | 94 | 0.686765 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,163 | test_idmap.py | truenas_middleware/tests/api2/test_idmap.py | import pytest
from middlewared.test.integration.utils import call
# Skip the whole module when the LDAP test credentials are not supplied in
# config.py.
try:
    from config import (
        LDAPBASEDN,
        LDAPBINDDN,
        LDAPBINDPASSWORD,
        LDAPHOSTNAME,
    )
except ImportError:
    Reason = 'LDAP* variable are not setup in config.py'
    # comment out the pytestmark line below for development testing with --dev-test
    pytestmark = pytest.mark.skipif(True, reason=Reason)
def test_create_and_delete_idmap_certificate():
    """Create an RFC2307 idmap entry bound to a certificate, then remove it."""
    backend_options = {
        'ldap_server': 'STANDALONE',
        'bind_path_user': LDAPBASEDN,
        'bind_path_group': LDAPBASEDN,
        'ldap_url': LDAPHOSTNAME,
        'ldap_user_dn': LDAPBINDDN,
        'ldap_user_dn_password': LDAPBINDPASSWORD,
        'ssl': 'ON',
        'ldap_realm': False,
    }
    created = call('idmap.create', {
        'name': 'BOB.NB',
        'range_low': 1000,
        'range_high': 2000,
        'certificate': 1,
        'idmap_backend': 'RFC2307',
        'options': backend_options,
    })
    call('idmap.delete', created['id'])
    # The entry must be gone after deletion.
    assert call('idmap.query', [['id', '=', created['id']]]) == []
| 1,103 | Python | .py | 34 | 24.705882 | 64 | 0.589286 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,164 | test_attachment_querying.py | truenas_middleware/tests/api2/test_attachment_querying.py | #!/usr/bin/env python3
import os
import sys
from pytest_dependency import depends
sys.path.append(os.getcwd())
from middlewared.test.integration.assets.nfs import nfs_share
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call, client
# Dataset names (relative to the test pool) used by the attachment tests.
PARENT_DATASET = 'test_parent'
CHILD_DATASET = f'{PARENT_DATASET}/child_dataset'
def test_attachment_with_child_path(request):
    """An NFS share is reported as an attachment for the parent dataset, and
    for a child dataset's path only when parent lookup is requested."""
    with dataset(PARENT_DATASET) as parent_dataset:
        parent_path = f'/mnt/{parent_dataset}'
        # No attachments before the share exists.
        assert call('pool.dataset.attachments_with_path', parent_path) == []
        with nfs_share(parent_dataset):
            found = call('pool.dataset.attachments_with_path', parent_path)
            assert len(found) > 0, found
            assert found[0]['type'] == 'NFS Share', found
            with dataset(CHILD_DATASET) as child_dataset:
                child_path = f'/mnt/{child_dataset}'
                # The child path alone does not match the parent's share...
                found = call('pool.dataset.attachments_with_path', child_path)
                assert len(found) == 0, found
                # ...but it does once parent lookup is enabled.
                found = call('pool.dataset.attachments_with_path', child_path, True)
                assert len(found) == 1, found
                assert found[0]['type'] == 'NFS Share', found
| 1,329 | Python | .py | 25 | 44.88 | 90 | 0.68779 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,165 | test_262_iscsi_alua.py | truenas_middleware/tests/api2/test_262_iscsi_alua.py | import contextlib
import random
import string
from time import sleep
import pytest
from assets.websocket.iscsi import (alua_enabled, initiator_portal, target, target_extent_associate, verify_capacity,
verify_ha_inquiry, verify_luns)
from assets.websocket.service import ensure_service_enabled
from auto_config import ha, pool_name
from protocols import iscsi_scsi_connection
from middlewared.test.integration.assets.hostkvm import get_kvm_domain, poweroff_vm, reset_vm, start_vm
from middlewared.test.integration.utils import call
from middlewared.test.integration.utils.client import truenas_server
# These tests exercise ALUA failover and only make sense on an HA pair.
pytestmark = pytest.mark.skipif(not ha, reason='Tests applicable to HA only')
SERVICE_NAME = 'iscsitarget'
MB = 1024 * 1024
# IQN prefix shared by all targets created in this module.
basename = 'iqn.2005-10.org.freenas.ctl'
def other_domain(hadomain):
    """Given one HA node's KVM domain name ('<base>_c1' or '<base>_c2'),
    return the partner node's domain name.

    Raises ValueError for any name that does not end in '_c1' or '_c2'.
    """
    suffix_swap = {'_c1': '_c2', '_c2': '_c1'}
    for ours, partner in suffix_swap.items():
        if hadomain.endswith(ours):
            return hadomain[:-len(ours)] + partner
    raise ValueError(f'Invalid HA domain name: {hadomain}')
@contextlib.contextmanager
def zvol(name, volsizeMB):
    """Create a 16K-blocksize zvol of volsizeMB MiB on the test pool and
    yield its config; the zvol is destroyed on exit."""
    zvol_config = call('pool.dataset.create', {
        'name': f'{pool_name}/{name}',
        'type': 'VOLUME',
        'volsize': volsizeMB * MB,
        'volblocksize': '16K'
    })
    try:
        yield zvol_config
    finally:
        call('pool.dataset.delete', zvol_config['id'])
@contextlib.contextmanager
def zvol_extent(zvol, extent_name):
    """Create an iSCSI DISK extent backed by the given zvol and yield its
    config; the extent is deleted on exit."""
    extent_config = call('iscsi.extent.create', {
        'type': 'DISK',
        'disk': f'zvol/{zvol}',
        'name': extent_name,
    })
    try:
        yield extent_config
    finally:
        # Trailing positional flags force the deletion — presumably
        # remove/force; confirm against the iscsi.extent.delete API.
        call('iscsi.extent.delete', extent_config['id'], True, True)
class TestFixtureConfiguredALUA:
    """Tests that run with the iSCSI service enabled and ALUA configured."""
    # 512 zero bytes — one logical block's worth of zeros for compare/write.
    ZEROS = bytearray(512)
    # Number of blocks read back when checking that LUNs are zeroed.
    BLOCKS = 5
    # Set True for verbose progress printing during long HA operations.
    VERBOSE = False
    # Number of iSCSI targets created by fix_complex_alua_config.
    NUM_TARGETS = 10
    def wait_for_settle(self):
        """Poll (up to 12 x 5s) until iscsi.alua.settled reports True."""
        if self.VERBOSE:
            print('Checking ALUA status...')
        retries = 12
        while retries:
            if call('iscsi.alua.settled'):
                if self.VERBOSE:
                    print('ALUA is settled')
                break
            retries -= 1
            if self.VERBOSE:
                print('Waiting for ALUA to settle')
            sleep(5)
    def wait_for_master(self, timeout=120):
        """Poll until failover.status reports MASTER (RPC errors tolerated)."""
        for _ in range(timeout):
            try:
                if call('failover.status') == 'MASTER':
                    if self.VERBOSE:
                        print('Can communicate with new MASTER')
                    break
                if self.VERBOSE:
                    print('Waiting for new MASTER')
                sleep(1)
            except Exception:
                if self.VERBOSE:
                    print('Exception while waiting for new MASTER')
                sleep(1)
    def wait_for_ready(self, timeout=120):
        """Poll until system.ready reports True (RPC errors tolerated)."""
        for _ in range(timeout):
            try:
                if call('system.ready'):
                    if self.VERBOSE:
                        print('System is ready')
                    break
                if self.VERBOSE:
                    print('Waiting for ready')
                sleep(1)
            except Exception:
                if self.VERBOSE:
                    print('Exception while waiting for ready')
                sleep(1)
    def wait_for_backup(self, timeout=120):
        """Poll until failover.disabled.reasons is empty, i.e. both
        controllers are available."""
        for _ in range(timeout):
            try:
                if not call('failover.disabled.reasons'):
                    if self.VERBOSE:
                        print('Both controllers available')
                    break
                if self.VERBOSE:
                    print('Waiting for BACKUP')
                sleep(1)
            except Exception:
                if self.VERBOSE:
                    print('Exception while waiting for BACKUP')
                sleep(1)
    def wait_for_new_master(self, oldnode, timeout=60):
        """Poll until a node other than *oldnode* reports itself MASTER and
        return its name. Returns None if the timeout elapses."""
        for _ in range(timeout):
            try:
                newnode = call('failover.node')
                if oldnode != newnode:
                    if call('failover.status') == 'MASTER':
                        if self.VERBOSE:
                            print('Can communicate with new MASTER', newnode)
                        return newnode
                if self.VERBOSE:
                    print('Waiting for new MASTER')
                sleep(1)
            except Exception:
                if self.VERBOSE:
                    print('Exception while waiting for new MASTER')
                sleep(1)
    def wait_for_failover_in_progress(self, timeout=120):
        """Poll until failover.in_progress reports False."""
        for _ in range(timeout):
            try:
                if not call('failover.in_progress'):
                    if self.VERBOSE:
                        print('Failover event complete')
                    return
                if self.VERBOSE:
                    print('Waiting for failover event to complete')
                sleep(1)
            except Exception:
                if self.VERBOSE:
                    print('Exception while waiting for failover event to complete')
                sleep(1)
    @pytest.fixture(scope='class')
    def alua_configured(self):
        """Class-scoped fixture: iSCSI running, ALUA enabled and settled,
        and an initiator+portal created; yields the initiator/portal config."""
        with ensure_service_enabled(SERVICE_NAME):
            call('service.start', SERVICE_NAME)
            with alua_enabled():
                self.wait_for_settle()
                with initiator_portal() as config:
                    yield config
                if self.VERBOSE:
                    print('Tore down ALUA')
            if self.VERBOSE:
                print('Tore down iSCSI')
    @pytest.fixture(scope='class')
    def fix_complex_alua_config(self, alua_configured):
        """Fixture to create a non-trival ALUA iSCSI configuration"""
        # Will create 10 targets (0-9) with 0 to 9 LUNs
        config = alua_configured
        portal_id = config['portal']['id']
        # Random 4-digit suffix keeps target/extent names unique per run.
        digits = ''.join(random.choices(string.digits, k=4))
        # iqn = f'iqn.2005-10.org.freenas.ctl:{target_name}'
        targets = {}
        with contextlib.ExitStack() as es:
            for i in range(self.NUM_TARGETS):
                namebase = f'{digits}x{i}'
                if self.VERBOSE:
                    print(f'Creating target {i}...')
                target_config = es.enter_context(target(f'target{namebase}', [{'portal': portal_id}]))
                target_id = target_config['id']
                target_config['luns'] = {}
                luncount = self.lun_count(i)
                for j in range(luncount):
                    sizemb = 20 + (10 * (j + 1))
                    # Targets 8 and 9 use high LUN numbers (100+) on purpose.
                    if i > 7:
                        lun = 100 + j
                    else:
                        lun = j
                    if self.VERBOSE:
                        print(f'Creating extent (LUN {lun} {sizemb}MB)...')
                    target_config['luns'][lun] = es.enter_context(
                        self.target_lun(target_id, f'extent{namebase}l{lun}', sizemb, lun)
                    )
                targets[i] = target_config
            sleep(2)
            self.wait_for_settle()
            yield targets
            if self.VERBOSE:
                print(f'Tearing down {self.NUM_TARGETS} targets ...')
        if self.VERBOSE:
            print(f'Tore down {self.NUM_TARGETS} targets')
    @contextlib.contextmanager
    def target_lun(self, target_id, zvol_name, mb, lun):
        """Create a zvol, wrap it in an extent, and associate it with the
        given target at the given LUN; yields all three configs."""
        with zvol(zvol_name, mb) as zvol_config:
            with zvol_extent(zvol_config['id'], zvol_name) as extent_config:
                with target_extent_associate(target_id, extent_config['id'], lun) as associate_config:
                    yield {
                        'zvol': zvol_config,
                        'extent': extent_config,
                        'associate': associate_config
                    }
    def verify_luns(self, iqn, lun_size_list):
        """Ensure that the expected LUNs are visible from each controller."""
        lun_list = [lun for lun, _ in lun_size_list]
        for lun, mb in lun_size_list:
            # Node A
            with iscsi_scsi_connection(truenas_server.nodea_ip, iqn, lun) as s:
                verify_luns(s, lun_list)
                verify_capacity(s, mb * MB)
            # Node B
            with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn, lun) as s:
                verify_luns(s, lun_list)
                verify_capacity(s, mb * MB)
    def lun_count(self, targetnum):
        """Map a target index to how many LUNs it should carry (0, 1, 2 or 5)."""
        match targetnum:
            case 0:
                return 0
            case 1 | 2 | 3 | 4 | 5:
                return 1
            case 6 | 7 | 8:
                return 2
            case _:
                return 5
    def test_alua_luns(self, alua_configured):
        """Test whether an ALUA target reacts correctly to having a LUN added
        and removed again (in terms of REPORT LUNS response)"""
        config = alua_configured
        portal_id = config['portal']['id']
        digits = ''.join(random.choices(string.digits, k=4))
        target_name = f'target{digits}'
        iqn = f'iqn.2005-10.org.freenas.ctl:{target_name}'
        with target(target_name, [{'portal': portal_id}]) as target_config:
            target_id = target_config['id']
            # First configure a single extent at LUN 0 and ensure that we
            # can see it from both interfaces.
            with self.target_lun(target_id, f'extent0_{digits}', 100, 0):
                sleep(2)
                self.wait_for_settle()
                self.verify_luns(iqn, [(0, 100)])
                # Next add a 2nd extent at LUN 1 and ensure that we can see both LUNs
                # from both interfaces.
                with self.target_lun(target_id, f'extent1_{digits}', 200, 1):
                    sleep(2)
                    self.wait_for_settle()
                    self.verify_luns(iqn, [(0, 100), (1, 200)])
                # After the LUN 1 extent has been removed again, ensure that we cannot see it
                # any longer.
                sleep(2)
                self.wait_for_settle()
                self.verify_luns(iqn, [(0, 100)])
                # Next add back a 2nd extent at LUN 1 (with a different size) and ensure
                # that we can still see both LUNs from both interfaces.
                with self.target_lun(target_id, f'extent1_{digits}', 250, 1):
                    sleep(2)
                    self.wait_for_settle()
                    self.verify_luns(iqn, [(0, 100), (1, 250)])
                    # Add a third LUN
                    with self.target_lun(target_id, f'extent2_{digits}', 300, 2):
                        sleep(2)
                        self.wait_for_settle()
                        self.verify_luns(iqn, [(0, 100), (1, 250), (2, 300)])
                    sleep(2)
                    self.wait_for_settle()
                    self.verify_luns(iqn, [(0, 100), (1, 250)])
                sleep(2)
                self.wait_for_settle()
                self.verify_luns(iqn, [(0, 100)])
    def test_alua_lun_100(self, alua_configured):
        """Test that an ALUA target - without a LUN 0 - works correctly with only LUN 100."""
        config = alua_configured
        portal_id = config['portal']['id']
        digits = ''.join(random.choices(string.digits, k=4))
        target_name = f'target{digits}'
        iqn = f'iqn.2005-10.org.freenas.ctl:{target_name}'
        with target(target_name, [{'portal': portal_id}]) as target_config:
            target_id = target_config['id']
            # Configure a single extent at LUN 100 only and ensure that we
            # can see it from both interfaces.
            with self.target_lun(target_id, f'extent0_{digits}', 200, 100):
                sleep(2)
                self.wait_for_settle()
                self.verify_luns(iqn, [(100, 200)])
            sleep(2)
            self.wait_for_settle()
    def visit_luns(self, ip, config, callback):
        """Run the specified callback method for each LUN in the config"""
        for target_num, target_config in config.items():
            luns = target_config['luns']
            if not luns:
                # If no LUNs then we can't talk to the target.
                continue
            target_name = target_config['name']
            iqn = f'{basename}:{target_name}'
            for lun, lun_config in luns.items():
                with iscsi_scsi_connection(ip, iqn, lun) as s:
                    callback(s, target_num, lun, lun_config)
    def validate_shape(self, ip, config, tpgs=1):
        """Validate that each LUN in the config has the expected shape.
        For example, serial number, NAA, size.
        """
        def validate_lun(s, target_num, lun, lun_config):
            api_serial_number = lun_config['extent']['serial']
            api_naa = lun_config['extent']['naa']
            verify_ha_inquiry(s, api_serial_number, api_naa, tpgs)
            if 'zvol' in lun_config:
                verify_capacity(s, lun_config['zvol']['volsize']['parsed'])
            if self.VERBOSE:
                print(f'Target {target_num} LUN {lun} shape OK')
        self.visit_luns(ip, config, validate_lun)
    @pytest.fixture(scope='class')
    def fix_validate_shapes(self, fix_complex_alua_config):
        """Fixture that validates that the complex ALUA config has the right shape."""
        # Make sure that each controller is exporting the targets/LUNs we expect
        if self.VERBOSE:
            print('Validate shape seen by Node A...')
        self.validate_shape(truenas_server.nodea_ip, fix_complex_alua_config)
        if self.VERBOSE:
            print('Validate shape seen by Node B...')
        self.validate_shape(truenas_server.nodeb_ip, fix_complex_alua_config)
        if self.VERBOSE:
            print('Validated shape')
        yield fix_complex_alua_config
    def zero_luns(self, ip, config):
        """Zero the first BLOCKS blocks of every LUN in the config."""
        def zero_lun(s, target_num, lun, lun_config):
            # Write zeros using WRITE SAME (16)
            s.writesame16(0, self.BLOCKS, self.ZEROS)
        self.visit_luns(ip, config, zero_lun)
    def check_zero_luns(self, ip, config):
        """Assert that the first BLOCKS blocks of every LUN read back as zeros."""
        def check_zero_lun(s, target_num, lun, lun_config):
            r = s.read16(0, self.BLOCKS)
            assert r.datain == self.ZEROS * self.BLOCKS, r.datain
        self.visit_luns(ip, config, check_zero_lun)
    @pytest.fixture(scope='class')
    def fix_zero_luns(self, fix_validate_shapes):
        """Fixture that validates that the complex ALUA config has zeros written to LUNs."""
        # Zero the LUNs
        self.zero_luns(truenas_server.nodea_ip, fix_validate_shapes)
        # Check that the LUNs are zeroed
        self.check_zero_luns(truenas_server.nodea_ip, fix_validate_shapes)
        self.check_zero_luns(truenas_server.nodeb_ip, fix_validate_shapes)
        if self.VERBOSE:
            print('LUNs zeroed')
        return fix_validate_shapes
    def page_pattern(self, target_num, lun):
        """
        Return a 512 byte long bytearray unique to the target/lun.
        """
        basis = f'TARGET {target_num} LUN {lun} ------'
        b = bytearray()
        b.extend(basis[:16].encode())
        # 16 bytes repeated 32 times == one 512-byte block.
        pattern = b * 32
        assert len(pattern) == 512, pattern
        return pattern
    def write_patterns(self, ip, config):
        """Write each LUN's unique pattern to blocks 1-2 (block 0 stays zero)."""
        def write_pattern(s, target_num, lun, lun_config):
            s.writesame16(1, 2, self.page_pattern(target_num, lun))
        self.visit_luns(ip, config, write_pattern)
    def check_patterns(self, ip, config):
        """Assert each LUN reads zeros at blocks 0 and 3 and its unique
        pattern at blocks 1-2."""
        def check_pattern(s, target_num, lun, lun_config):
            pattern = self.page_pattern(target_num, lun)
            r = s.read16(0, 1)
            assert r.datain == self.ZEROS, r.datain
            r = s.read16(1, 2)
            assert r.datain == pattern * 2, r.datain
            r = s.read16(3, 1)
            assert r.datain == self.ZEROS, r.datain
            if self.VERBOSE:
                print(f'Target {target_num} LUN {lun} pattern OK:', pattern[:16])
        self.visit_luns(ip, config, check_pattern)
    @pytest.fixture(scope='class')
    def fix_write_patterns(self, fix_zero_luns):
        """Fixture that writes a data pattern to the complex ALUA config."""
        # Write the pattern
        self.write_patterns(truenas_server.nodea_ip, fix_zero_luns)
        if self.VERBOSE:
            print('Wrote LUN patterns')
        # Check that the LUNs have the correct patterns
        if self.VERBOSE:
            print('Validate data pattern seen by Node A...')
        self.check_patterns(truenas_server.nodea_ip, fix_zero_luns)
        if self.VERBOSE:
            print('Validate data pattern seen by Node B...')
        self.check_patterns(truenas_server.nodeb_ip, fix_zero_luns)
        if self.VERBOSE:
            print('LUNs have pattern written / checked')
        return fix_zero_luns
    @pytest.fixture(scope='class')
    def fix_orig_active_node(self):
        """Class-scoped record of which node was MASTER when tests started."""
        return call('failover.node')
    @pytest.mark.timeout(900)
    def test_complex_alua_setup(self, fix_validate_shapes, fix_orig_active_node):
        """
        Test that the complex ALUA configuration is setup, and has the correct shape.
        """
        orig_active_node = fix_orig_active_node
        assert orig_active_node in ['A', 'B']
    @pytest.mark.timeout(900)
    def test_complex_zero_luns(self, fix_zero_luns):
        """
        Test that the complex ALUA configuration is setup, and has zeros written
        to LUNs.
        """
        pass
    @pytest.mark.timeout(900)
    def test_complex_write_patterns(self, fix_write_patterns):
        """
        Test that the complex ALUA configuration is setup, and has a data pattern written
        to LUNs.
        """
        pass
    @pytest.fixture
    def fix_get_domain(self):
        """
        Fixture to get the KVM domain associated with the current
        MASTER node.
        Note: unlike most other fixtures in this class, the fixture does NOT
        have class scope.
        """
        # Do some sanity checks before we proceed.
        assert call('failover.status') == 'MASTER'
        node = call('failover.node')
        assert node in ['A', 'B']
        domain = get_kvm_domain()
        assert domain
        if node == 'A':
            assert domain.endswith('_c1')
        elif node == 'B':
            assert domain.endswith('_c2')
        return {'node': node, 'domain': domain}
    @pytest.mark.timeout(900)
    def test_failover_complex_alua_config(self, fix_write_patterns, fix_get_domain):
        """
        Power off the current MASTER and ensure that the previous BACKUP node serves
        the ALUA targets, as soon as failover is complete.
        """
        node = fix_get_domain['node']
        domain = fix_get_domain['domain']
        # Shutdown the current MASTER.
        if self.VERBOSE:
            print('Powering off VM', domain)
        poweroff_vm(domain)
        # Wait for the new MASTER to come up
        newnode = self.wait_for_new_master(node)
        # Wait for the failover event to complete
        self.wait_for_failover_in_progress()
        if newnode == 'A':
            new_ip = truenas_server.nodea_ip
        else:
            new_ip = truenas_server.nodeb_ip
        # tpgs=0: with the peer down the survivor no longer reports ALUA.
        if self.VERBOSE:
            print(f'Validate shape seen by Node {newnode}...')
        self.validate_shape(new_ip, fix_write_patterns, 0)
        if self.VERBOSE:
            print(f'Validate data pattern seen by Node {newnode}...')
        self.check_patterns(new_ip, fix_write_patterns)
        if self.VERBOSE:
            # NOTE(review): duplicate of the message above with no following
            # check — possibly a leftover; confirm intent.
            print(f'Validate data pattern seen by Node {newnode}...')
    @pytest.mark.timeout(900)
    def test_boot_complex_alua_config(self, fix_write_patterns, fix_get_domain, fix_orig_active_node):
        """
        Reset the current MASTER, and repower the previous MASTER and ensure that
        ALUA targets are served by both nodes.
        """
        domain = fix_get_domain['domain']
        orig_domain = other_domain(domain)
        # Reset the MASTER
        reset_vm(domain)
        if self.VERBOSE:
            print('Reset VM', domain)
        # Power the shutdown node back on.
        start_vm(orig_domain)
        if self.VERBOSE:
            print('Started VM', orig_domain)
        sleep(5)
        # Wait for the new MASTER to come up
        self.wait_for_master()
        self.wait_for_failover_in_progress()
        self.wait_for_ready()
        # Both nodes should have rebooted recently (uptime sanity check).
        assert call('system.info')['uptime_seconds'] < 600
        # Ensure that the BACKUP is also up
        self.wait_for_backup()
        self.wait_for_settle()
        assert call('failover.call_remote', 'system.info')['uptime_seconds'] < 600
        newnode = call('failover.node')
        assert newnode in ['A', 'B']
        if newnode == 'A':
            new_ip = truenas_server.nodea_ip
            other_ip = truenas_server.nodeb_ip
            othernode = 'B'
        else:
            new_ip = truenas_server.nodeb_ip
            other_ip = truenas_server.nodea_ip
            othernode = 'A'
        # Ensure that the targets look OK on MASTER
        if self.VERBOSE:
            print(f'Validate shape seen by Node {newnode}...')
        self.validate_shape(new_ip, fix_write_patterns, None)
        if self.VERBOSE:
            print(f'Validate data pattern seen by Node {newnode}...')
        self.check_patterns(new_ip, fix_write_patterns)
        # Ensure that the targets look OK on BACKUP
        if self.VERBOSE:
            print(f'Validate shape seen by Node {othernode}...')
        self.validate_shape(other_ip, fix_write_patterns, 1)
        if self.VERBOSE:
            print(f'Validate data pattern seen by Node {othernode}...')
        self.check_patterns(other_ip, fix_write_patterns)
        # Finally, we want to ensure that we have the same MASTER node as
        # when these tests started.
        if newnode != fix_orig_active_node:
            if self.VERBOSE:
                print(f'Restoring {fix_orig_active_node} as MASTER')
            call('system.reboot', 'iSCSI ALUA test')
            newnode2 = self.wait_for_new_master(newnode)
            assert newnode2 == fix_orig_active_node
            self.wait_for_backup()
            self.wait_for_settle()
| 22,247 | Python | .py | 514 | 31.396887 | 117 | 0.564272 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,166 | test_pool_dataset_encryption.py | truenas_middleware/tests/api2/test_pool_dataset_encryption.py | import contextlib
import secrets
import pytest
from middlewared.service_exception import ValidationErrors
from middlewared.test.integration.assets.pool import another_pool
from middlewared.test.integration.utils import call, ssh
from truenas_api_client.exc import ClientException
# Randomly generated 32-byte hex strings used as ZFS encryption keys for the
# test pool and test dataset.
pool_token_hex = secrets.token_hex(32)
dataset_token_hex = secrets.token_hex(32)
# Names of the pool / dataset hierarchy exercised by these tests.
encrypted_pool_name = 'test_encrypted'
dataset = f'{encrypted_pool_name}/encrypted'
child_dataset = f'{dataset}/child'
passphrase = 'my_passphrase'
def check_log_for(*phrases, should_find=False):
    """Grep the middleware log for any of *phrases* (joined into one extended
    regex) and assert whether a match was expected.

    With the default should_find=False this verifies that none of the phrases
    (e.g. secrets) leaked into the log.
    """
    pattern = '|'.join(phrases)
    results = ssh(
        f'grep -R -E "{pattern}" /var/log/middlewared.log',
        check=False,
        complete_response=True,
    )
    assert results['result'] is should_find, str(results['output'])
def verify_lock_status(ds, *, locked):
    """Assert, via an encryption-summary job, that dataset *ds* is (un)locked."""
    summary = call('pool.dataset.encryption_summary', ds, job=True)
    entry = next((item for item in summary if item['name'] == ds), None)
    if entry is None:
        pytest.fail(str(summary))
    assert entry['unlock_successful'] is not locked, str(summary)
    assert entry['locked'] is locked, str(summary)
@contextlib.contextmanager
def create_dataset(payload, **delete_args):
    """Create a dataset from *payload*, yield its config, and delete it on exit.

    The delete now runs in a ``finally`` block so the dataset is cleaned up
    even when the test body raises. If creation itself fails, the exception
    propagates before the ``try`` and no delete is attempted.
    """
    name = payload['name']
    config = call('pool.dataset.create', payload)
    try:
        yield config
    finally:
        assert call('pool.dataset.delete', name, delete_args)
@pytest.fixture(scope='class')
def normal_pool():
    # Class-scoped unencrypted pool; tests address it through the
    # module-level `encrypted_pool_name` / `dataset` names.
    with another_pool({'name': encrypted_pool_name}):
        yield
@pytest.fixture(scope='class')
def passphrase_pool():
    """Class-scoped pool whose root dataset is passphrase-encrypted."""
    pool_passphrase = 'my_pool_passphrase'
    with another_pool({
        'name': encrypted_pool_name,
        'encryption': True,
        'encryption_options': {
            'algorithm': 'AES-128-CCM',
            'passphrase': pool_passphrase,
        },
    }):
        # The passphrase must never appear in the middleware log.
        check_log_for(pool_passphrase)
        ds = call('pool.dataset.get_instance', encrypted_pool_name)
        assert ds['key_format']['value'] == 'PASSPHRASE', ds
        assert ds['encryption_algorithm']['value'] == 'AES-128-CCM', ds
        yield
@pytest.fixture(scope='class')
def key_pool():
    """Class-scoped pool whose root dataset is key (HEX) encrypted."""
    with another_pool({
        'name': encrypted_pool_name,
        'encryption': True,
        'encryption_options': {
            'algorithm': 'AES-128-CCM',
            'key': pool_token_hex,
        },
    }):
        # The raw key must never appear in the middleware log.
        check_log_for(pool_token_hex)
        ds = call('pool.dataset.get_instance', encrypted_pool_name)
        assert ds['key_format']['value'] == 'HEX', ds
        assert ds['encryption_algorithm']['value'] == 'AES-128-CCM', ds
        yield
@pytest.mark.usefixtures('normal_pool')
class TestNormalPool:
    """Encryption behavior of datasets created on an unencrypted pool."""
    def test_passphrase_encrypted_root(self):
        """Create a passphrase-encrypted root and convert it to key encryption."""
        payload = {
            'name': dataset,
            'encryption_options': {
                'generate_key': False,
                'pbkdf2iters': 100000,
                'algorithm': 'AES-128-CCM',
                'passphrase': passphrase,
            },
            'encryption': True,
            'inherit_encryption': False
        }
        with create_dataset(payload) as ds:
            assert ds['key_format']['value'] == 'PASSPHRASE'
            check_log_for(passphrase)
            # Add a comment
            call('pool.dataset.update', dataset, {'comments': 'testing encrypted dataset'})
            # Change to key encryption
            call('pool.dataset.change_key', dataset, {'key': dataset_token_hex}, job=True)
            ds = call('pool.dataset.get_instance', dataset)
            assert ds['key_format']['value'] == 'HEX'
    @pytest.mark.parametrize('payload', [
        {'encryption': False},
        {'inherit_encryption': True}
    ])
    def test_dataset_not_encrypted(self, payload: dict):
        """Datasets with encryption off (or inherited from an unencrypted
        pool) report no key format."""
        payload['name'] = dataset
        with create_dataset(payload) as ds:
            assert ds['key_format']['value'] is None
    @pytest.mark.parametrize('payload, message', [
        (
            {
                'encryption_options': {'pbkdf2iters': 0},
                'inherit_encryption': False
            },
            'Should be greater or equal than 100000'
        ),
        (
            {
                'encryption_options': {'passphrase': passphrase},
                'inherit_encryption': True
            },
            'Must be disabled when encryption is enabled'
        ),
        (
            {
                'encryption_options': {
                    'generate_key': True,
                    'passphrase': passphrase,
                },
                'inherit_encryption': False
            },
            'Must be disabled when dataset is to be encrypted with passphrase'
        )
    ])
    def test_try_to_create_invalid_encrypted_dataset(self, payload: dict, message: str):
        """Invalid encryption option combinations are rejected with a
        matching validation error."""
        payload.update({
            'name': dataset,
            'encryption': True,
        })
        with pytest.raises(ValidationErrors, match=message):
            with create_dataset(payload): pass
    def test_invalid_encrypted_dataset_does_not_leak_passphrase_into_middleware_log(self):
        check_log_for(passphrase)
    @pytest.mark.parametrize('payload', [
        {'encryption_options': {'generate_key': True}},
        {'encryption_options': {'key': dataset_token_hex}}
    ])
    def test_encrypted_root_with_key_cannot_lock(self, payload: dict):
        """Only passphrase-encrypted datasets may be locked."""
        payload.update({
            'name': dataset,
            'encryption': True,
            'inherit_encryption': False,
        })
        with create_dataset(payload) as ds:
            assert ds['key_format']['value'] == 'HEX'
            check_log_for(dataset_token_hex)
            with pytest.raises(ClientException, match='Only datasets which are encrypted with passphrase can be locked'):
                call('pool.dataset.lock', dataset, {'force_umount': True}, job=True)
    def test_encrypted_root_lock_unlock(self):
        """Full lock/unlock cycle, including a failed unlock with a bad
        passphrase."""
        # Start with key-encrypted dataset
        payload = {
            'name': dataset,
            'encryption': True,
            'inherit_encryption': False,
            'encryption_options': {'key': dataset_token_hex}
        }
        with create_dataset(payload):
            # Change to a passphrase-encrypted dataset
            call('pool.dataset.change_key', dataset, {'passphrase': passphrase}, job=True)
            ds = call('pool.dataset.get_instance', dataset)
            assert ds['key_format']['value'] == 'PASSPHRASE'
            check_log_for(passphrase)
            # Lock it
            assert call('pool.dataset.lock', dataset, {'force_umount': True}, job=True)
            verify_lock_status(dataset, locked=True)
            # Attempt to unlock with incorrect passphrase
            payload = {
                'recursive': True,
                'datasets': [{
                    'name': dataset,
                    'passphrase': 'bad_passphrase'
                }]
            }
            job_status = call('pool.dataset.unlock', dataset, payload, job=True)
            assert job_status['failed'][dataset]['error'] == 'Invalid Key', job_status
            verify_lock_status(dataset, locked=True)
            # Now actually unlock it
            payload['datasets'][0]['passphrase'] = passphrase
            job_status = call('pool.dataset.unlock', dataset, payload, job=True)
            assert job_status['unlocked'] == [dataset], job_status
            verify_lock_status(dataset, locked=False)
@pytest.mark.usefixtures('passphrase_pool')
class TestPassphraseEncryptedPool:
    """Encryption behavior of datasets on a passphrase-encrypted pool."""
    def test_passphrase_encrypted_root_cannot_change_key(self):
        """A child of a passphrase-encrypted parent cannot switch to key
        encryption."""
        payload = {
            'name': dataset,
            'encryption_options': {
                'generate_key': False,
                'pbkdf2iters': 100000,
                'algorithm': 'AES-128-CCM',
                'passphrase': passphrase,
            },
            'encryption': True,
            'inherit_encryption': False
        }
        with create_dataset(payload):
            check_log_for(passphrase)
            with pytest.raises(Exception, match=f'{dataset} has parent\\(s\\) which are encrypted with a passphrase'):
                call('pool.dataset.change_key', dataset, {'key': dataset_token_hex}, job=True)
    def test_passphrase_encrypted_root_cannot_change_key_does_not_leak_passphrase_into_middleware_log(self):
        check_log_for(passphrase)
    def test_create_dataset_to_inherit_encryption_from_passphrase_encrypted_pool(self):
        """Inheriting encryption yields a passphrase-format dataset."""
        payload = {
            'name': dataset,
            'inherit_encryption': True
        }
        with create_dataset(payload) as ds:
            assert ds['key_format']['value'] == 'PASSPHRASE', ds
    @pytest.mark.parametrize('payload', [
        {'encryption_options': {'generate_key': True}},
        {'encryption_options': {'key': dataset_token_hex}},
    ])
    def test_try_to_create_invalid_encrypted_dataset(self, payload: dict):
        """Key-encrypted children are rejected under a passphrase-encrypted
        parent."""
        payload.update({
            'name': dataset,
            'encryption': True,
            'inherit_encryption': False
        })
        with pytest.raises(ValidationErrors, match='Passphrase encrypted datasets cannot have children encrypted with a key'):
            with create_dataset(payload): pass
    def test_try_to_create_invalid_encrypted_dataset_does_not_leak_encryption_key_into_middleware_log(self):
        check_log_for(dataset_token_hex)
@pytest.mark.usefixtures('key_pool')
class TestKeyEncryptedPool:
    """Encryption behavior of datasets on a key-encrypted pool."""
    def test_key_encrypted_root(self):
        """Key -> passphrase conversion followed by a lock/unlock cycle."""
        # Start with key-encrypted dataset
        payload = {
            'name': dataset,
            'encryption_options': {'key': dataset_token_hex},
            'encryption': True,
            'inherit_encryption': False
        }
        with create_dataset(payload) as ds:
            assert ds['key_format']['value'] == 'HEX', ds
            check_log_for(dataset_token_hex)
            # Change to passphrase encryption
            call('pool.dataset.change_key', dataset, {'passphrase': passphrase}, job=True)
            check_log_for(passphrase)
            ds = call('pool.dataset.get_instance', dataset)
            assert ds['key_format']['value'] == 'PASSPHRASE', ds
            # Lock the dataset
            assert call('pool.dataset.lock', dataset, {'force_umount': True}, job=True)
            ds = call('pool.dataset.get_instance', dataset)
            assert ds['locked'] is True, ds
            verify_lock_status(dataset, locked=True)
            # Unlock the dataset
            payload = {
                'recursive': True,
                'datasets': [{
                    'name': dataset,
                    'passphrase': passphrase
                }]
            }
            job_status = call('pool.dataset.unlock', dataset, payload, job=True)
            assert job_status['unlocked'] == [dataset], job_status
            check_log_for(passphrase)
            verify_lock_status(dataset, locked=False)
    def test_dataset_with_inherit_encryption(self):
        """Inheriting encryption from the key-encrypted pool yields HEX format."""
        payload = {
            'name': dataset,
            'inherit_encryption': True
        }
        with create_dataset(payload) as ds:
            assert ds['key_format']['value'] == 'HEX', ds
    def test_encrypted_dataset_with_generate_key(self):
        """Dataset creation with a server-generated key succeeds."""
        payload = {
            'name': dataset,
            'encryption_options': {'generate_key': True},
            'encryption': True,
            'inherit_encryption': False
        }
        with create_dataset(payload): pass
    def test_passphrase_encrypted_dataset_parent_child_lock_unlock(self):
        """Lock/unlock semantics for a passphrase-encrypted parent and child."""
        payload = {
            'name': dataset,
            'encryption_options': {'passphrase': passphrase},
            'encryption': True,
            'inherit_encryption': False
        }
        with create_dataset(payload, recursive=True):  # Create parent dataset
            check_log_for(passphrase)
            # Create child dataset
            child_passphrase = 'my_passphrase2'
            payload.update({
                'name': child_dataset,
                'encryption_options': {'passphrase': child_passphrase},
            })
            call('pool.dataset.create', payload)
            check_log_for(child_passphrase)
            # Lock parent (and child)
            assert call('pool.dataset.lock', dataset, job=True)
            for ds_name in (dataset, child_dataset):
                ds = call('pool.dataset.get_instance', ds_name)
                assert ds['locked'] is True, ds
                verify_lock_status(ds_name, locked=True)
            # Try to unlock child
            payload = {
                'recursive': True,
                'datasets': [{
                    'name': child_dataset,
                    'passphrase': child_passphrase
                }]
            }
            with pytest.raises(ClientException, match=f'{child_dataset} has locked parents {dataset} which must be unlocked first'):
                call('pool.dataset.unlock', child_dataset, payload, job=True)
            check_log_for(child_passphrase)
            verify_lock_status(child_dataset, locked=True)
            # Unlock parent (and child)
            payload = {
                'recursive': True,
                'datasets': [
                    {
                        'name': dataset,
                        'passphrase': passphrase
                    },
                    {
                        'name': child_dataset,
                        'passphrase': child_passphrase
                    }
                ]
            }
            job_status = call('pool.dataset.unlock', dataset, payload, job=True)
            assert job_status['unlocked'] == [dataset, child_dataset], job_status
            check_log_for(passphrase, child_passphrase)
            for ds_name in (dataset, child_dataset):
                ds = call('pool.dataset.get_instance', ds_name)
                assert ds['locked'] is False, ds
                verify_lock_status(ds_name, locked=False)
    def test_key_encrypted_dataset(self):
        """A passphrase child of a key parent can inherit key encryption.

        NOTE(review): unlike the other tests, the datasets created here are
        not wrapped in create_dataset and no delete call is visible — they
        appear to be left behind. Confirm cleanup expectations.
        """
        # Create parent dataset
        payload = {
            'name': dataset,
            'encryption_options': {'key': dataset_token_hex},
            'encryption': True,
            'inherit_encryption': False
        }
        call('pool.dataset.create', payload)
        check_log_for(dataset_token_hex)
        # Create child dataset
        payload.update({
            'name': child_dataset,
            'encryption_options': {'passphrase': passphrase},
        })
        call('pool.dataset.create', payload)
        check_log_for(passphrase)
        ds = call('pool.dataset.get_instance', child_dataset)
        assert ds['key_format']['value'] == 'PASSPHRASE', ds
        # Inherit key encryption from parent
        call('pool.dataset.inherit_parent_encryption_properties', child_dataset)
        ds = call('pool.dataset.get_instance', child_dataset)
        assert ds['key_format']['value'] == 'HEX', ds
| 14,960 | Python | .py | 352 | 31.403409 | 132 | 0.578828 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,167 | test_zfs_snapshot_hold.py | truenas_middleware/tests/api2/test_zfs_snapshot_hold.py | from unittest.mock import ANY
from middlewared.test.integration.assets.pool import dataset, snapshot
from middlewared.test.integration.utils import call
def test_normal_snapshot():
    """A freshly created snapshot carries no holds."""
    with dataset("test_normal_hold") as ds_name:
        with snapshot(ds_name, "test") as snap_id:
            snap = call("zfs.snapshot.get_instance", snap_id, {"extra": {"holds": True}})
            assert snap["holds"] == {}
def test_held_snapshot():
    """Holding a snapshot records a 'truenas' hold tag.

    The release now runs in a ``finally`` block so the hold is removed even
    when the assertion fails; a lingering hold would prevent the test
    dataset tree from being deleted.
    """
    with dataset("test_held_snapshot") as ds:
        with snapshot(ds, "test") as id:
            call("zfs.snapshot.hold", id)
            try:
                assert call("zfs.snapshot.get_instance", id, {"extra": {"holds": True}})["holds"] == {"truenas": ANY}
            finally:
                call("zfs.snapshot.release", id)  # Otherwise the whole test tree won't be deleted
def test_held_snapshot_tree():
    """A recursive hold tags both the snapshot and its child's snapshot.

    The recursive release now runs in a ``finally`` block so the holds are
    removed even when an assertion fails; otherwise the test tree could not
    be deleted.
    """
    with dataset("test_snapshot_tree") as ds:
        with dataset("test_snapshot_tree/child") as ds2:
            with snapshot(ds, "test", recursive=True) as id:
                id2 = f"{ds2}@test"
                call("zfs.snapshot.hold", id, {"recursive": True})
                try:
                    assert call("zfs.snapshot.get_instance", id, {"extra": {"holds": True}})["holds"] == {"truenas": ANY}
                    assert call("zfs.snapshot.get_instance", id2, {"extra": {"holds": True}})["holds"] == {"truenas": ANY}
                finally:
                    call("zfs.snapshot.release", id, {"recursive": True})  # Otherwise the whole test tree won't be deleted
| 1,397 | Python | .py | 22 | 53.954545 | 119 | 0.621423 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,168 | test_cronjob.py | truenas_middleware/tests/api2/test_cronjob.py | from middlewared.test.integration.utils import call, ssh
TESTFILE = '/mnt/cronjob_testfile'
def test_cron_job():
try:
id = call(
'cronjob.create',
{
'user': 'root',
'enabled': True,
'command': f'echo "yeah" > "{TESTFILE}"',
'schedule': {'minute': '*/1'}
}
)['id']
assert call('cronjob.query', [['id', '=', id]], {"get": True})['enabled'] is True
except Exception as e:
assert False, f'Unexpected failure: {str(e)}'
call('cronjob.run', id, job=True)
assert call('filesystem.statfs', TESTFILE)['blocksize']
results = ssh(f'rm "{TESTFILE}"', complete_response=True)
assert results['result'] is True, results['output']
call('cronjob.delete', id)
assert call('cronjob.query', [['id', '=', id]]) == []
| 866 | Python | .py | 22 | 30.636364 | 89 | 0.545346 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,169 | test_pool_dataset_track_processes.py | truenas_middleware/tests/api2/test_pool_dataset_track_processes.py | import contextlib
import time
import pytest
from pytest_dependency import depends
from middlewared.test.integration.utils import call, ssh
from middlewared.test.integration.assets.pool import dataset, pool
import os
import sys
sys.path.append(os.getcwd())
@pytest.mark.parametrize("datasets,file_open_path,arg_path", [
# A file on a dataset
(
[('test', None)],
f'/mnt/{pool}/test/test_file',
lambda ssh: f'/mnt/{pool}/test',
),
# zvol
(
[('test', {'type': 'VOLUME', 'volsize': 1024 * 1024 * 100})],
f'/dev/zvol/{pool}/test',
lambda ssh: f'/dev/zvol/{pool}/test'
),
# zvol with /dev/zd* path
(
[('test', {'type': 'VOLUME', 'volsize': 1024 * 1024 * 100})],
f'/dev/zvol/{pool}/test',
lambda ssh: ssh(f'readlink -f /dev/zvol/{pool}/test').strip(),
),
# A dataset with nested zvol
(
[
('test', None),
('test/zvol', {'type': 'VOLUME', 'volsize': 1024 * 1024 * 100}),
],
f'/dev/zvol/{pool}/test/zvol',
lambda ssh: f'/dev/zvol/{pool}/test',
),
])
def test__open_path_and_check_proc(request, datasets, file_open_path, arg_path):
with contextlib.ExitStack() as stack:
for name, data in datasets:
stack.enter_context(dataset(name, data))
opened = False
try:
test_file = file_open_path
open_pid = ssh(f"""python -c 'import time; f = open("{test_file}", "w+"); time.sleep(10)' > /dev/null 2>&1 & echo $!""")
open_pid = open_pid.strip()
assert open_pid.isdigit(), f'{open_pid!r} is not a digit'
opened = True
# spinning up python interpreter could take some time on busy system so sleep
# for a couple seconds to give it time
time.sleep(2)
# what the cmdline output is formatted to
cmdline = f"""python -c import time; f = open(\"{test_file}\", \"w+\"); time.sleep(10)"""
# have to use websocket since the method being called is private
res = call('pool.dataset.processes_using_paths', [arg_path(ssh)])
assert len(res) == 1
result = res[0]
assert result['pid'] == open_pid, f'{result["pid"]!r} does not match {open_pid!r}'
assert result['cmdline'] == cmdline, f'{result["cmdline"]!r} does not match {cmdline!r}'
assert 'paths' not in result
res = call('pool.dataset.processes_using_paths', [arg_path(ssh)], True)
assert len(res) == 1
result = res[0]
assert result['pid'] == open_pid, f'{result["pid"]!r} does not match {open_pid!r}'
assert result['cmdline'] == cmdline, f'{result["cmdline"]!r} does not match {cmdline!r}'
assert 'paths' in result
assert len(result['paths']) == 1
assert result['paths'][0] == test_file if test_file.startswith('/mnt') else '/dev/zd0'
finally:
if opened:
ssh(f'kill -9 {open_pid}', check=False)
| 3,076 | Python | .py | 72 | 33.902778 | 132 | 0.569663 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,170 | test_cloud_backup.py | truenas_middleware/tests/api2/test_cloud_backup.py | import os
import types
import boto3
import pytest
from truenas_api_client import ClientException
from middlewared.service_exception import ValidationErrors
from middlewared.test.integration.assets.cloud_backup import task, run_task
from middlewared.test.integration.assets.cloud_sync import credential
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils.call import call
from middlewared.test.integration.utils.mock import mock
from middlewared.test.integration.utils.ssh import ssh
try:
from config import (
AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY,
AWS_BUCKET,
)
except ImportError:
pytestmark = pytest.mark.skip(reason="AWS credential are missing in config.py")
def clean():
    """Delete every leftover object under the cloud_backup/ prefix in the test bucket."""
    session = boto3.Session(
        aws_access_key_id=AWS_ACCESS_KEY_ID,
        aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
    )
    session.resource("s3").Bucket(AWS_BUCKET).objects.filter(Prefix="cloud_backup/").delete()
@pytest.fixture(scope="module")
def s3_credential():
    """Module-scoped S3 cloud credential built from the config.py secrets."""
    attrs = {
        "access_key_id": AWS_ACCESS_KEY_ID,
        "secret_access_key": AWS_SECRET_ACCESS_KEY,
    }
    with credential({"provider": "S3", "attributes": attrs}) as cred:
        yield cred
@pytest.fixture(scope="function")
def cloud_backup_task(s3_credential):
    """A fresh dataset plus a cloud backup task pointing at an emptied bucket prefix."""
    clean()
    with dataset("cloud_backup") as local_dataset:
        task_payload = {
            "path": f"/mnt/{local_dataset}",
            "credentials": s3_credential["id"],
            "attributes": {
                "bucket": AWS_BUCKET,
                "folder": "cloud_backup",
            },
            "password": "test",
            "keep_last": 100,
        }
        with task(task_payload) as t:
            yield types.SimpleNamespace(local_dataset=local_dataset, task=t)
def test_cloud_backup(cloud_backup_task):
    """End-to-end cloud backup scenario: back up twice, inspect snapshots and
    directory contents, prune via keep_last, then delete a snapshot explicitly."""
    # A brand-new task has no snapshots on the backend yet.
    assert call("cloud_backup.list_snapshots", cloud_backup_task.task["id"]) == []

    ssh(f"dd if=/dev/urandom of=/mnt/{cloud_backup_task.local_dataset}/blob1 bs=1M count=1")
    run_task(cloud_backup_task.task)

    logs = ssh("cat " + call("cloud_backup.get_instance", cloud_backup_task.task["id"])["job"]["logs_path"])
    assert "unable to open cache:" not in logs
    # First run: blob1 is the only file and it is new.
    assert "Files: 1 new, 0 changed, 0 unmodified" in logs

    snapshots = call("cloud_backup.list_snapshots", cloud_backup_task.task["id"])
    assert len(snapshots) == 1
    # Snapshot timestamp should be within 5 minutes of the NAS clock.
    assert (snapshots[0]["time"] - call("system.info")["datetime"]).total_seconds() < 300
    assert snapshots[0]["paths"] == [f"/mnt/{cloud_backup_task.local_dataset}"]
    first_snapshot = snapshots[0]

    ssh(f"mkdir /mnt/{cloud_backup_task.local_dataset}/dir1")
    ssh(f"dd if=/dev/urandom of=/mnt/{cloud_backup_task.local_dataset}/dir1/blob2 bs=1M count=1")
    run_task(cloud_backup_task.task)

    logs = ssh("cat " + call("cloud_backup.get_instance", cloud_backup_task.task["id"])["job"]["logs_path"])
    # Second run is incremental: dir1/blob2 is new, blob1 is unmodified.
    assert "Files: 1 new, 0 changed, 1 unmodified" in logs

    snapshots = call("cloud_backup.list_snapshots", cloud_backup_task.task["id"])
    assert len(snapshots) == 2

    contents = call(
        "cloud_backup.list_snapshot_directory",
        cloud_backup_task.task["id"],
        snapshots[-1]["id"],
        f"/mnt/{cloud_backup_task.local_dataset}",
    )
    assert len(contents) == 3
    assert contents[0]["name"] == "cloud_backup"
    assert contents[1]["name"] == "blob1"
    assert contents[2]["name"] == "dir1"

    # Lowering keep_last to 2 should prune the oldest snapshot on the next run.
    call("cloud_backup.update", cloud_backup_task.task["id"], {"keep_last": 2})
    run_task(cloud_backup_task.task)
    snapshots = call("cloud_backup.list_snapshots", cloud_backup_task.task["id"])
    assert all(snapshot["id"] != first_snapshot["id"] for snapshot in snapshots)

    # Explicit snapshot deletion removes exactly the requested snapshot.
    snapshot_to_delete = snapshots[0]
    call("cloud_backup.delete_snapshot", cloud_backup_task.task["id"], snapshot_to_delete["id"], job=True)
    snapshots = call("cloud_backup.list_snapshots", cloud_backup_task.task["id"])
    assert all(snapshot["id"] != snapshot_to_delete["id"] for snapshot in snapshots)
@pytest.fixture(scope="module")
def completed_cloud_backup_task(s3_credential):
    """A backup task over three directories that has already run once.

    Module-scoped so several restore tests can share one uploaded snapshot.
    """
    clean()
    with dataset("completed_cloud_backup") as local_dataset:
        # Three directories with one file each, exercised by include/exclude restores.
        ssh(f"mkdir /mnt/{local_dataset}/dir1")
        ssh(f"touch /mnt/{local_dataset}/dir1/file1")
        ssh(f"mkdir /mnt/{local_dataset}/dir2")
        ssh(f"touch /mnt/{local_dataset}/dir2/file2")
        ssh(f"mkdir /mnt/{local_dataset}/dir3")
        ssh(f"touch /mnt/{local_dataset}/dir3/file3")
        with task({
            "path": f"/mnt/{local_dataset}",
            "credentials": s3_credential["id"],
            "attributes": {
                "bucket": AWS_BUCKET,
                "folder": "cloud_backup",
            },
            "password": "test",
            "keep_last": 100,
        }) as t:
            run_task(t)
            snapshot = call("cloud_backup.list_snapshots", t["id"])[0]
            yield types.SimpleNamespace(
                local_dataset=local_dataset,
                task=t,
                snapshot=snapshot,
            )
@pytest.mark.parametrize("options,result", [
    ({}, ["dir1/file1", "dir2/file2", "dir3/file3"]),
    ({"include": ["dir1", "dir2"]}, ["dir1/file1", "dir2/file2"]),
    ({"exclude": ["dir2", "dir3"]}, ["dir1/file1"]),
])
def test_cloud_backup_restore(completed_cloud_backup_task, options, result):
    """Restoring with include/exclude options yields exactly the expected files."""
    with dataset("restore") as restore:
        call(
            "cloud_backup.restore",
            completed_cloud_backup_task.task["id"],
            completed_cloud_backup_task.snapshot["id"],
            f"/mnt/{completed_cloud_backup_task.local_dataset}",
            f"/mnt/{restore}",
            options,
            job=True,
        )

        restored_files = ssh(f"find /mnt/{restore} -type f").splitlines()
        relative_paths = [os.path.relpath(path, f"/mnt/{restore}") for path in restored_files]
        assert sorted(relative_paths) == result
@pytest.fixture(scope="module")
def zvol():
    """A 1 MiB zvol pre-filled with random data; yields its /dev/zvol device path."""
    with dataset("cloud_backup_zvol", {"type": "VOLUME", "volsize": 1024 * 1024}) as vol:
        device = f"/dev/zvol/{vol}"
        ssh(f"dd if=/dev/urandom of={device} bs=1M count=1")
        yield device
def test_zvol_cloud_backup(s3_credential, zvol):
    """With zvol validation mocked out, backing up a zvol device succeeds."""
    clean()
    payload = {
        "path": zvol,
        "credentials": s3_credential["id"],
        "attributes": {
            "bucket": AWS_BUCKET,
            "folder": "cloud_backup",
        },
        "password": "test",
        "keep_last": 100,
    }
    with mock("cloud_backup.validate_zvol", return_value=None):
        with task(payload) as t:
            run_task(t)
def test_zvol_cloud_backup_create_time_validation(s3_credential, zvol):
    """Without the validation mock, creating a task for a zvol path must fail."""
    clean()
    payload = {
        "path": zvol,
        "credentials": s3_credential["id"],
        "attributes": {
            "bucket": AWS_BUCKET,
            "folder": "cloud_backup",
        },
        "password": "test",
        "keep_last": 100,
    }
    with pytest.raises(ValidationErrors) as ve:
        with task(payload):
            pass

    assert "cloud_backup_create.path" in ve.value
def test_zvol_cloud_backup_runtime_validation(s3_credential, zvol):
    """A zvol task created while validation is mocked out must still fail at run time."""
    clean()
    # The mock is entered manually: it must be active while the task is
    # created, but removed again *before* the task is run.
    m = mock("cloud_backup.validate_zvol", return_value=None)
    m.__enter__()
    exited = False
    try:
        with task({
            "path": zvol,
            "credentials": s3_credential["id"],
            "attributes": {
                "bucket": AWS_BUCKET,
                "folder": "cloud_backup",
            },
            "password": "test",
            "keep_last": 100,
        }) as t:
            # Restore the real validator, then expect the run itself to fail.
            m.__exit__(None, None, None)
            exited = True

            with pytest.raises(ClientException):
                run_task(t)
    finally:
        # Guarantee the mock is removed even if task creation failed early.
        if not exited:
            m.__exit__(None, None, None)
def test_create_to_backend_with_a_different_password(cloud_backup_task):
    """Pointing a new task at an existing repo with the wrong password must fail."""
    existing = cloud_backup_task.task
    payload = {
        "path": existing["path"],
        "credentials": existing["credentials"]["id"],
        "attributes": existing["attributes"],
        "password": "test2",
        "keep_last": 100,
    }
    with pytest.raises(ValidationErrors) as ve:
        with task(payload):
            pass

    assert "cloud_backup_create.password" in ve.value
def test_update_with_incorrect_password(cloud_backup_task):
    """Changing a task's password to one the repo does not accept must fail validation."""
    with pytest.raises(ValidationErrors) as ve:
        call("cloud_backup.update", cloud_backup_task.task["id"], {"password": "test2"})

    assert "cloud_backup_update.password" in ve.value
def test_sync_initializes_repo(cloud_backup_task):
    """cloud_backup.sync against an emptied bucket prefix initializes the repository."""
    clean()
    call("cloud_backup.sync", cloud_backup_task.task["id"], job=True)
| 8,740 | Python | .py | 215 | 32.167442 | 108 | 0.602267 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,171 | test_auth_me.py | truenas_middleware/tests/api2/test_auth_me.py | import pytest
from middlewared.test.integration.assets.account import unprivileged_user_client
from middlewared.test.integration.assets.account import user
from middlewared.test.integration.utils import call, client
def test_works():
    """auth.me for root reports uid 0, name root, a 2FA config, and web UI access."""
    # Named `me` rather than `user` to avoid shadowing the imported asset helper.
    me = call("auth.me")
    assert me["pw_uid"] == 0
    assert me["pw_name"] == "root"
    assert me["two_factor_config"] is not None
    assert me["privilege"]["webui_access"]
def test_works_for_token():
    """A token-authenticated session reports the root account correctly."""
    token = call("auth.generate_token", 300)
    with client(auth=None) as c:
        assert c.call("auth.login_with_token", token)

        me = c.call("auth.me")
        assert me["pw_uid"] == 0
        assert me["pw_name"] == "root"
        assert me["two_factor_config"] is not None
        assert "SYS_ADMIN" in me["account_attributes"]
        assert "LOCAL" in me["account_attributes"]
def test_attributes():
    """auth.set_attribute creates and then updates a per-user attribute."""
    assert "test" not in call("auth.me")["attributes"]

    # First write creates the attribute, second write overwrites it.
    for expected in ("value", "new_value"):
        call("auth.set_attribute", "test", expected)
        assert call("auth.me")["attributes"]["test"] == expected
def test_distinguishes_attributes():
    """Attributes set by one user must not leak into another user's auth.me."""
    builtin_administrators_group_id = call(
        "datastore.query",
        "account.bsdgroups",
        [["group", "=", "builtin_administrators"]],
        {"get": True, "prefix": "bsdgrp_"},
    )["id"]

    with user({
        "username": "admin",
        "full_name": "Admin",
        "group_create": True,
        "groups": [builtin_administrators_group_id],
        "home": "/nonexistent",
        "password": "test1234",
    }) as admin:
        with client(auth=("admin", "test1234")) as c:
            me = c.call("auth.me")
            # Attributes set for root (e.g. by test_attributes) must not show here.
            assert "test" not in me["attributes"]

            c.call("auth.set_attribute", "test", "value")
            me = c.call("auth.me")
            assert me["attributes"]["test"] == "value"

            c.call("auth.set_attribute", "test", "new_value")
            me = c.call("auth.me")
            assert me["attributes"]["test"] == "new_value"

            assert me['two_factor_config'] is not None
            # Local, non-root administrator: no SYS_ADMIN flag, but web UI access.
            assert 'SYS_ADMIN' not in me['account_attributes']
            assert 'LOCAL' in me['account_attributes']
            assert me['privilege']['webui_access']

    # Deleting the user also removes its stored webui attribute rows.
    assert not call("datastore.query", "account.bsdusers_webui_attribute", [["uid", "=", admin["uid"]]])
@pytest.mark.parametrize("role,expected", [
    (["READONLY_ADMIN", "FILESYSTEM_ATTRS_WRITE"], True),
    (["READONLY_ADMIN"], True),
    (["SHARING_ADMIN"], True),
    (["FILESYSTEM_ATTRS_WRITE"], False),
])
def test_webui_access(role, expected):
    """Only administrative roles grant web UI access."""
    with unprivileged_user_client(roles=role) as c:
        assert c.call("auth.me")["privilege"]["webui_access"] == expected
| 2,876 | Python | .py | 68 | 34.985294 | 104 | 0.611211 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,172 | test_reporting_realtime.py | truenas_middleware/tests/api2/test_reporting_realtime.py | import time
from middlewared.test.integration.assets.account import unprivileged_user_client
def test_reporting_realtime():
with unprivileged_user_client(["REPORTING_READ"]) as c:
events = []
def callback(type, **message):
events.append((type, message))
c.subscribe("reporting.realtime", callback, sync=True)
time.sleep(5)
assert events
assert not events[0][1]["fields"]["failed_to_connect"]
| 464 | Python | .py | 11 | 34.636364 | 80 | 0.676404 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,173 | test_account_group.py | truenas_middleware/tests/api2/test_account_group.py | import pytest
from middlewared.service_exception import InstanceNotFound
from middlewared.test.integration.assets.account import user, group
from middlewared.test.integration.utils import call
def test_delete_group_delete_users():
    """group.delete with delete_users removes users whose *primary* group it is,
    but keeps users that merely list it as an auxiliary group."""
    with group({"name": "group1"}) as g:
        # user1/user2 have the group as primary; user3 only as auxiliary.
        with user({
            "username": "user1",
            "full_name": "user1",
            "group": g["id"],
            "password": "test1234",
        }) as u1, user({
            "username": "user2",
            "full_name": "user2",
            "group": g["id"],
            "password": "test1234",
        }) as u2, user({
            "username": "user3",
            "full_name": "user3",
            "group_create": True,
            "groups": [g["id"]],
            "password": "test1234",
        }) as u3:
            call("group.delete", g["id"], {"delete_users": True})

            for deleted in (u1, u2):
                with pytest.raises(InstanceNotFound):
                    call("user.get_instance", deleted["id"])
            # user3's primary group was auto-created, so the account survives.
            call("user.get_instance", u3["id"])
| 1,287 | Python | .py | 33 | 24.909091 | 73 | 0.4696 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,174 | test_vmware_snapshot_delete.py | truenas_middleware/tests/api2/test_vmware_snapshot_delete.py | import contextlib
from datetime import datetime
from unittest.mock import ANY
from middlewared.test.integration.utils import call, mock
@contextlib.contextmanager
def pending_snapshot_delete(d):
    """Insert a storage.vmwarependingsnapshotdelete row; delete it again on exit.

    The yielded dict carries the inserted row's "id".
    """
    row = {
        "vmware": {
            "hostname": "host",
            "username": "user",
            "password": "pass",
        },
        "vm_uuid": "abcdef",
        "snapshot_name": "snapshot",
        "datetime": d,
    }
    row["id"] = call("datastore.insert", "storage.vmwarependingsnapshotdelete", row)
    try:
        yield row
    finally:
        call("datastore.delete", "storage.vmwarependingsnapshotdelete", row["id"])
def test_success():
    """When every VMware call succeeds, the pending delete row is removed."""
    with contextlib.ExitStack() as stack:
        stack.enter_context(pending_snapshot_delete(datetime(2100, 1, 1)))
        stack.enter_context(mock("vmware.connect", return_value=None))
        stack.enter_context(mock("vmware.find_vms_by_uuid", return_value=[None]))
        stack.enter_context(mock("vmware.delete_snapshot", return_value=None))
        stack.enter_context(mock("vmware.disconnect", return_value=None))

        call("vmware.delete_pending_snapshots")

        assert call("datastore.query", "storage.vmwarependingsnapshotdelete") == []
def test_failure_1():
    """If vmware.connect raises, the pending delete row is kept for a later retry."""
    with pending_snapshot_delete(datetime(2100, 1, 1)):
        # Plain (non-f) string: the previous f-prefix had nothing to interpolate.
        with mock("vmware.connect", """
async def mock(self, *args):
    raise Exception('Unknown error')
"""):
            call("vmware.delete_pending_snapshots")

            assert call("datastore.query", "storage.vmwarependingsnapshotdelete") == [ANY]
def test_failure_2():
    """If find_vms_by_uuid raises after a successful connect, the row is kept."""
    with pending_snapshot_delete(datetime(2100, 1, 1)):
        with mock("vmware.connect", return_value=None):
            # Plain (non-f) string: the previous f-prefix had nothing to interpolate.
            with mock("vmware.find_vms_by_uuid", """
async def mock(self, *args):
    raise Exception('Unknown error')
"""):
                call("vmware.delete_pending_snapshots")

                assert call("datastore.query", "storage.vmwarependingsnapshotdelete") == [ANY]
def test_failure_and_expiry():
    """A failing delete whose record is old enough is dropped instead of retried."""
    with pending_snapshot_delete(datetime(2010, 1, 1)):
        # Plain (non-f) string: the previous f-prefix had nothing to interpolate.
        with mock("vmware.connect", """
async def mock(self, *args):
    raise Exception('Unknown error')
"""):
            call("vmware.delete_pending_snapshots")

            assert call("datastore.query", "storage.vmwarependingsnapshotdelete") == []
| 2,355 | Python | .py | 54 | 33.666667 | 99 | 0.604987 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,175 | test_api_key.py | truenas_middleware/tests/api2/test_api_key.py | import errno
import pytest
from datetime import datetime, UTC
from middlewared.service_exception import CallError, ValidationErrors
from middlewared.test.integration.assets.api_key import api_key
from middlewared.test.integration.utils import call, client
from time import sleep
LEGACY_ENTRY_KEY = 'rtpz6u16l42XJJGy5KMJOVfkiQH7CyitaoplXy7TqFTmY7zHqaPXuA1ob07B9bcB'
LEGACY_ENTRY_HASH = '$pbkdf2-sha256$29000$CyGktHYOwXgvBYDQOqc05g$nK1MMvVuPGHMvUENyR01qNsaZjgGmlt3k08CRuC4aTI'
@pytest.fixture(scope='function')
def sharing_admin_user(unprivileged_user_fixture):
privilege = call('privilege.query', [['local_groups.0.group', '=', unprivileged_user_fixture.group_name]])
assert len(privilege) > 0, 'Privilege not found'
call('privilege.update', privilege[0]['id'], {'roles': ['SHARING_ADMIN']})
try:
yield unprivileged_user_fixture
finally:
call('privilege.update', privilege[0]['id'], {'roles': []})
def check_revoked_alert():
# reset any revoked alert
call('api_key.check_status')
for a in call('alert.list'):
if a['klass'] == 'ApiKeyRevoked':
return a
return None
def test_user_unprivileged_api_key_failure(unprivileged_user_fixture):
"""We should be able to call a method with root API key using Websocket."""
with pytest.raises(ValidationErrors) as ve:
with api_key(unprivileged_user_fixture.username):
pass
assert 'User lacks privilege role membership' in ve.value.errors[0].errmsg
def test_api_key_nonexistent_username():
"""Non-existent user should raise a validation error."""
with pytest.raises(ValidationErrors) as ve:
with api_key('canary'):
pass
assert 'User does not exist' in ve.value.errors[0].errmsg
def test_print_expired_api_key_update_failure():
with pytest.raises(ValidationErrors) as ve:
with api_key():
key = call('api_key.query', [], {'get': True})
expiry = datetime.fromtimestamp(1, UTC)
call('api_key.update', key['id'], {'expires_at': expiry})
assert 'Expiration date is in the past' in ve.value.errors[0].errmsg
def test_api_key_info(sharing_admin_user):
with api_key(sharing_admin_user.username):
key_info = call('api_key.query', [['username', '=', sharing_admin_user.username]], {'get': True})
assert key_info['revoked'] is False
assert key_info['expires_at'] is None
assert key_info['local'] is True
user = call('user.query', [['username', '=', sharing_admin_user.username]], {'get': True})
assert user['api_keys'] == [key_info['id']]
@pytest.mark.parametrize('endpoint', ['LEGACY', 'CURRENT'])
def test_api_key_session(sharing_admin_user, endpoint):
with api_key(sharing_admin_user.username) as key:
with client(auth=None) as c:
match endpoint:
case 'LEGACY':
assert c.call('auth.login_with_api_key', key)
case 'CURRENT':
resp = c.call('auth.login_ex', {
'mechanism': 'API_KEY_PLAIN',
'username': sharing_admin_user.username,
'api_key': key
})
assert resp['response_type'] == 'SUCCESS'
case _:
raise ValueError(f'{endpoint}: unknown endpoint')
session = c.call('auth.sessions', [['current', '=', True]], {'get': True})
assert session['credentials'] == 'API_KEY'
assert session['credentials_data']['api_key']['name'] == 'Test API Key'
me = c.call('auth.me')
assert me['pw_name'] == sharing_admin_user.username
assert 'SHARING_ADMIN' in me['privilege']['roles']
assert 'API_KEY' in me['account_attributes']
call("auth.terminate_session", session['id'])
with pytest.raises(Exception):
c.call('system.info')
def test_legacy_api_key_upgrade():
"""We should automatically upgrade old hashes on successful login"""
with api_key():
key_id = call('api_key.query', [['username', '=', 'root']], {'get': True})['id']
call('datastore.update', 'account.api_key', key_id, {
'key': LEGACY_ENTRY_HASH,
'user_identifier': 'LEGACY_API_KEY'
})
call('etc.generate', 'pam_middleware')
with client(auth=None) as c:
resp = c.call('auth.login_ex', {
'mechanism': 'API_KEY_PLAIN',
'username': 'root',
'api_key': f'{key_id}-{LEGACY_ENTRY_KEY}'
})
assert resp['response_type'] == 'SUCCESS'
# We should have replaced hash on auth
updated = call('api_key.query', [['username', '=', 'root']], {'get': True})
assert updated['keyhash'] != LEGACY_ENTRY_HASH
assert updated['keyhash'].startswith('$pbkdf2-sha512')
# verify we still have access
with client(auth=None) as c:
resp = c.call('auth.login_ex', {
'mechanism': 'API_KEY_PLAIN',
'username': 'root',
'api_key': f'{key_id}-{LEGACY_ENTRY_KEY}'
})
assert resp['response_type'] == 'SUCCESS'
def test_legacy_api_key_reject_nonroot(sharing_admin_user):
"""Old hash style should be rejected for non-root user."""
with api_key(sharing_admin_user.username):
key_id = call('api_key.query', [['username', '=', sharing_admin_user.username]], {'get': True})['id']
call('datastore.update', 'account.api_key', key_id, {'key': LEGACY_ENTRY_HASH})
call('etc.generate', 'pam_middleware')
with client(auth=None) as c:
resp = c.call('auth.login_ex', {
'mechanism': 'API_KEY_PLAIN',
'username': sharing_admin_user.username,
'api_key': LEGACY_ENTRY_KEY
})
assert resp['response_type'] == 'AUTH_ERR'
def test_api_key_expired(sharing_admin_user):
"""Expired keys should fail with expected response type"""
with api_key(sharing_admin_user.username) as key:
key_id = call('api_key.query', [['username', '=', sharing_admin_user.username]], {'get': True})['id']
call('datastore.update', 'account.api_key', key_id, {'expiry': 1})
# update our pam_tdb file with new expiration
call('etc.generate', 'pam_middleware')
with client(auth=None) as c:
resp = c.call('auth.login_ex', {
'mechanism': 'API_KEY_PLAIN',
'username': sharing_admin_user.username,
'api_key': key
})
assert resp['response_type'] == 'EXPIRED'
def test_key_revoked(sharing_admin_user):
"""Revoked key should raise an AUTH_ERR"""
with api_key(sharing_admin_user.username) as key:
key_id = call('api_key.query', [['username', '=', sharing_admin_user.username]], {'get': True})['id']
call('datastore.update', 'account.api_key', key_id, {'expiry': -1})
# update our pam_tdb file with revocation
call('etc.generate', 'pam_middleware')
revoked = call('api_key.query', [['username', '=', sharing_admin_user.username]], {'get': True})['revoked']
assert revoked is True
with client(auth=None) as c:
resp = c.call('auth.login_ex', {
'mechanism': 'API_KEY_PLAIN',
'username': sharing_admin_user.username,
'api_key': key
})
assert resp['response_type'] == 'AUTH_ERR'
assert check_revoked_alert() is not None
call('datastore.update', 'account.api_key', key_id, {'expiry': 0})
sleep(1)
alert = check_revoked_alert()
assert alert is None, str(alert)
def test_api_key_reset(sharing_admin_user):
with api_key(sharing_admin_user.username) as key:
with client(auth=None) as c:
resp = c.call('auth.login_ex', {
'mechanism': 'API_KEY_PLAIN',
'username': sharing_admin_user.username,
'api_key': key
})
assert resp['response_type'] == 'SUCCESS'
key_id = call('api_key.query', [['username', '=', sharing_admin_user.username]], {'get': True})['id']
updated = call("api_key.update", key_id, {"reset": True})
with client(auth=None) as c:
resp = c.call('auth.login_ex', {
'mechanism': 'API_KEY_PLAIN',
'username': sharing_admin_user.username,
'api_key': key
})
assert resp['response_type'] == 'AUTH_ERR'
with client(auth=None) as c:
resp = c.call('auth.login_ex', {
'mechanism': 'API_KEY_PLAIN',
'username': sharing_admin_user.username,
'api_key': updated['key']
})
assert resp['response_type'] == 'SUCCESS'
def test_api_key_crud_restricted_admin_own_keys(sharing_admin_user):
with client(auth=(sharing_admin_user.username, sharing_admin_user.password)) as c:
key_info = c.call('api_key.create', {
'username': sharing_admin_user.username,
'name': 'test_restricted_admin_key',
})
try:
c.call('api_key.update', key_info['id'], {
'name': 'test_restricted_admin_key_new'
})
finally:
c.call('api_key.delete', key_info['id'])
def test_api_key_restrict_admin_other_keys_fail(sharing_admin_user):
with client(auth=(sharing_admin_user.username, sharing_admin_user.password)) as c:
with pytest.raises(CallError) as ce:
c.call('api_key.create', {
'username': 'root',
'name': 'test_restricted_admin_key',
})
assert ce.value.errno == errno.EACCES
| 9,886 | Python | .py | 199 | 39.150754 | 115 | 0.588516 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,176 | test_snmp_agent.py | truenas_middleware/tests/api2/test_snmp_agent.py | import re
import subprocess
import tempfile
import time
import pytest
from middlewared.test.integration.utils import call, host, ssh
@pytest.fixture()
def snmpd_running():
call("service.start", "snmp")
time.sleep(2)
yield
def test_truenas_mib_elements(snmpd_running):
mib_file = "/usr/local/share/snmp/mibs/TRUENAS-MIB.txt"
with tempfile.NamedTemporaryFile(mode='w') as f:
lines = ssh(f'cat {mib_file}')
assert lines
f.writelines(lines)
f.flush()
snmp = subprocess.run(
f"snmpwalk -v2c -c public -m {f.name} {host().ip} "
"1.3.6.1.4.1.50536",
shell=True,
capture_output=True,
text=True,
)
assert snmp.returncode == 0, snmp.stderr
assert "TRUENAS-MIB::zpoolName.1 = STRING: boot-pool\n" in snmp.stdout
assert re.search(
r"^TRUENAS-MIB::zfsArcSize\.0 = Gauge32: ([1-9][0-9]+)\n", snmp.stdout, re.MULTILINE
), snmp.stdout
| 999 | Python | .py | 30 | 26.3 | 96 | 0.623309 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,177 | test_110_certificate.py | truenas_middleware/tests/api2/test_110_certificate.py | import pytest
import re
from time import sleep
from middlewared.test.integration.utils import call
try:
from config import (
LDAPBASEDN,
LDAPBINDDN,
LDAPBINDPASSWORD,
LDAPHOSTNAME,
)
except ImportError:
Reason = "LDAP* variable are not setup in config.py"
# comment pytestmark for development testing with --dev-test
pytestmark = pytest.mark.skipif(True, reason=Reason)
def test_certificate():
# create certificate
payload = {
"name": "BOB",
"range_low": 1000,
"range_high": 2000,
"certificate": 1,
"idmap_backend": "RFC2307",
"options": {
"ldap_server": "STANDALONE",
"bind_path_user": LDAPBASEDN,
"bind_path_group": LDAPBASEDN,
"ldap_url": LDAPHOSTNAME,
"ldap_user_dn": LDAPBINDDN,
"ldap_user_dn_password": LDAPBINDPASSWORD,
"ssl": "ON",
"ldap_realm": False,
}
}
results = call("idmap.create", payload)
idmap_id = int(results["id"])
certificate_id = results["certificate"]["id"]
# successful delete
results = call("certificate.delete", certificate_id, True)
job_id = int(results)
# failed delete
while True:
get_job = call("core.get_jobs", [["id", "=", job_id]])
job_status = get_job[0]
if job_status["state"] in ("RUNNING", "WAITING"):
sleep(1)
else:
assert job_status["state"] == "FAILED", get_job
assert bool(re.search(
r"Certificate is being used by following service.*IDMAP", job_status["error"], flags=re.DOTALL
)) is True, job_status["error"]
break
# delete idmap
call("idmap.delete", idmap_id)
| 1,775 | Python | .py | 54 | 25 | 110 | 0.590432 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,178 | test_014_failover_related.py | truenas_middleware/tests/api2/test_014_failover_related.py | import errno
import pytest
from pytest_dependency import depends
from functions import SSH_TEST
from auto_config import ha, user, password
from middlewared.service_exception import CallError
from middlewared.test.integration.assets.account import unprivileged_user
from middlewared.test.integration.utils import call, client
@pytest.fixture(scope='module')
def readonly_admin():
# READONLY role implies FAILOVER_READ
with unprivileged_user(
username='failover_guy',
group_name='failover_admins',
privilege_name='FAILOVER_PRIV',
allowlist=[],
web_shell=False,
roles=['READONLY_ADMIN']
) as acct:
yield acct
@pytest.mark.dependency(name='hactl_install_dir')
def test_01_check_hactl_installed(request):
rv = SSH_TEST('which hactl', user, password)
assert rv['stdout'].strip() == '/usr/local/sbin/hactl', rv['output']
@pytest.mark.dependency(name='hactl_status')
def test_02_check_hactl_status(request):
depends(request, ['hactl_install_dir'])
rv = SSH_TEST('hactl', user, password)
output = rv['stdout'].strip()
if ha:
for i in ('Node status:', 'This node serial:', 'Other node serial:', 'Failover status:'):
assert i in output, output
else:
assert 'Not an HA node' in output, output
@pytest.mark.dependency(name='hactl_takeover')
def test_03_check_hactl_takeover(request):
# integration tests run against the master node (at least they should...)
depends(request, ['hactl_status'])
rv = SSH_TEST('hactl takeover', user, password)
output = rv['stdout'].strip()
if ha:
assert 'This command can only be run on the standby node.' in output, output
else:
assert 'Not an HA node' in output, output
@pytest.mark.dependency(name='hactl_enable')
def test_04_check_hactl_enable(request):
# integration tests run against the master node (at least they should...)
depends(request, ['hactl_takeover'])
rv = SSH_TEST('hactl enable', user, password)
output = rv['stdout'].strip()
if ha:
assert 'Failover already enabled.' in output, output
else:
assert 'Not an HA node' in output, output
def test_05_check_hactl_disable(request):
depends(request, ['hactl_enable'])
rv = SSH_TEST('hactl disable', user, password)
output = rv['stdout'].strip()
if ha:
assert 'Failover disabled.' in output, output
assert call('failover.config')['disabled'] is True
rv = SSH_TEST('hactl enable', user, password)
output = rv['stdout'].strip()
assert 'Failover enabled.' in output, output
assert call('failover.config')['disabled'] is False
else:
assert 'Not an HA node' in output, output
if ha:
def test_07_failover_replicate():
old_ns = call('network.configuration.config')['nameserver3']
new_ns = '1.1.1.1'
try:
call('network.configuration.update', {'nameserver3': new_ns})
remote = call('failover.call_remote', 'network.configuration.config')
assert remote['nameserver3'] == new_ns
assert remote['state']['nameserver3'] == new_ns
finally:
call('network.configuration.update', {'nameserver3': old_ns})
remote = call('failover.call_remote', 'network.configuration.config')
assert remote['nameserver3'] == old_ns
assert remote['state']['nameserver3'] == old_ns
def test_08_readonly_ops(request, readonly_admin):
with client(auth=(readonly_admin.username, readonly_admin.password)) as c:
c.call('failover.config')
c.call('failover.node')
with pytest.raises(CallError) as ce:
c.call('failover.call_remote', 'user.update')
assert ce.value.errno == errno.EACCES
| 3,827 | Python | .py | 88 | 36.727273 | 97 | 0.665591 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,179 | test_apps.py | truenas_middleware/tests/api2/test_apps.py | import pytest
from middlewared.test.integration.utils import call, client
from middlewared.test.integration.assets.apps import app
from middlewared.test.integration.assets.docker import docker
from middlewared.test.integration.assets.pool import another_pool
from truenas_api_client import ValidationErrors
CUSTOM_CONFIG = {
'services': {
'actual_budget': {
'user': '568:568',
'image': 'actualbudget/actual-server:24.10.1',
'restart': 'unless-stopped',
'deploy': {
'resources': {
'limits': {
'cpus': '2',
'memory': '4096M'
}
}
},
'devices': [],
'depends_on': {
'permissions': {
'condition': 'service_completed_successfully'
}
},
'cap_drop': ['ALL'],
'security_opt': ['no-new-privileges'],
'healthcheck': {
'interval': '10s',
'retries': 30,
'start_period': '10s',
'test': (
"/bin/bash -c 'exec {health_check_fd}< /dev/tcp/127.0.0.1/31012 "
"&& echo -e 'GET /health HTTP/1.1\\r\\nHost: 127.0.0.1\\r\\n"
"Connection: close\\r\\n\\r\\n' >&$$health_check_fd && "
"cat <&$$health_check_fd'"
),
'timeout': '5s'
},
'environment': {
'ACTUAL_HOSTNAME': '0.0.0.0',
'ACTUAL_PORT': '31012',
'ACTUAL_SERVER_FILES': '/data/server-files',
'ACTUAL_USER_FILES': '/data/user-files',
'GID': '568',
'GROUP_ID': '568',
'NODE_ENV': 'production',
'PGID': '568',
'PUID': '568',
'TZ': 'Etc/UTC',
'UID': '568',
'USER_ID': '568'
},
'ports': [
{
'host_ip': '0.0.0.0',
'mode': 'ingress',
'protocol': 'tcp',
'published': 31012,
'target': 31012
}
]
},
'permissions': {
'command': [
'''
function process_dir() {
local dir=$$1
local mode=$$2
local uid=$$3
local gid=$$4
local chmod=$$5
local is_temporary=$$6
# Process directory logic here...
}
process_dir /mnt/actual_budget/config check 568 568 false false
'''
],
'deploy': {
'resources': {
'limits': {
'cpus': '1.0',
'memory': '512m'
}
}
},
'entrypoint': ['bash', '-c'],
'image': 'bash',
'user': 'root'
}
},
'x-portals': [
{
'host': '0.0.0.0',
'name': 'Web UI',
'path': '/',
'port': 31012,
'scheme': 'http'
}
],
'x-notes': '''# Welcome to TrueNAS SCALE
Thank you for installing Actual Budget!
## Documentation
Documentation for Actual Budget can be found at https://www.truenas.com/docs.
## Bug reports
If you find a bug in this app, please file an issue at
https://ixsystems.atlassian.net or https://github.com/truenas/apps.
## Feature requests or improvements
If you find a feature request for this app, please file an issue at
https://ixsystems.atlassian.net or https://github.com/truenas/apps.
'''
}
INVALID_YAML = '''
services:
actual_budget
user: 568:568
image: actualbudget/actual-server:24.10.1
restart: unless-stopped
deploy:
resources: {'limits': {'cpus': '2', 'memory': '4096M'}}
devices: []
depends_on:
permissions:
condition: service_completed_successfully
cap_drop: ['ALL']
security_opt: ['no-new-privileges']
'''
@pytest.fixture(scope='module')
def docker_pool():
with another_pool() as pool:
with docker(pool) as docker_config:
yield docker_config
def test_create_catalog_app(docker_pool):
with app('actual-budget', {
'train': 'community',
'catalog_app': 'actual-budget',
}) as app_info:
assert app_info['name'] == 'actual-budget', app_info
assert app_info['state'] == 'DEPLOYING', app_info
volume_ds = call('app.get_app_volume_ds', 'actual-budget')
assert volume_ds is not None, volume_ds
def test_create_custom_app(docker_pool):
with app('custom-budget', {
'custom_app': True,
'custom_compose_config': CUSTOM_CONFIG,
}) as app_info:
assert app_info['name'] == 'custom-budget'
assert app_info['state'] == 'DEPLOYING'
def test_create_custom_app_validation_error(docker_pool):
with pytest.raises(ValidationErrors):
with app('custom-budget', {
'custom_app': False,
'custom_compose_config': CUSTOM_CONFIG,
}):
pass
def test_create_custom_app_invalid_yaml(docker_pool):
with pytest.raises(ValidationErrors):
with app('custom-budget', {
'custom_app': True,
'custom_compose_config': INVALID_YAML,
}):
pass
def test_delete_app_validation_error_for_non_existent_app(docker_pool):
with pytest.raises(ValidationErrors):
call('app.delete', 'actual-budget', {'remove_ix_volumes': True, 'remove_images': True}, job=True)
def test_delete_app_options(docker_pool):
with app(
'custom-budget',
{
'custom_app': True,
'custom_compose_config': CUSTOM_CONFIG,
},
{'remove_ix_volumes': True, 'remove_images': True}
) as app_info:
assert app_info['name'] == 'custom-budget'
assert app_info['state'] == 'DEPLOYING'
app_images = call('app.image.query', [['repo_tags', '=', ['actualbudget/actual-server:24.10.1']]])
assert len(app_images) == 0
volume_ds = call('app.get_app_volume_ds', 'custom-budget')
assert volume_ds is None
def test_update_app(docker_pool):
values = {
'values': {
'network': {
'web_port': 32000
},
'resources': {
'limits': {
'memory': 8192
}
}
}
}
with app('actual-budget', {
'train': 'community',
'catalog_app': 'actual-budget',
}) as app_info:
app_info = call('app.update', app_info['name'], values, job=True)
assert app_info['active_workloads']['used_ports'][0]['host_ports'][0]['host_port'] == 32000
def test_stop_start_app(docker_pool):
with app('actual-budget', {
'train': 'community',
'catalog_app': 'actual-budget'
}):
# stop running app
call('app.stop', 'actual-budget', job=True)
states = call('app.query', [], {'select': ['state']})[0]
assert states['state'] == 'STOPPED'
# start stopped app
call('app.start', 'actual-budget', job=True)
states = call('app.query', [], {'select': ['state']})[0]
assert states['state'] == 'DEPLOYING'
def test_event_subscribe(docker_pool):
with client(py_exceptions=False) as c:
expected_event_type_order = ['ADDED', 'CHANGED']
expected_event_order = ['STOPPING', 'STOPPED', 'DEPLOYING']
events = []
event_types = []
def callback(event_type, **message):
nonlocal events, event_types
if not events or events[-1] != message['fields']['state']:
events.append(message['fields']['state'])
if not event_types or event_types[-1] != event_type:
event_types.append(event_type)
c.subscribe('app.query', callback, sync=True)
with app('ipfs', {
'train': 'community',
'catalog_app': 'ipfs'
}):
events = []
call('app.stop', 'ipfs', job=True)
call('app.start', 'ipfs', job=True)
assert expected_event_order == events
assert expected_event_type_order == event_types
| 8,490 | Python | .py | 235 | 25.051064 | 105 | 0.506508 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,180 | test_pool_spare.py | truenas_middleware/tests/api2/test_pool_spare.py | import pytest
from truenas_api_client import ValidationErrors
from middlewared.test.integration.assets.disk import fake_disks
from middlewared.test.integration.assets.pool import another_pool
from middlewared.test.integration.utils import call
def test_pool_create_too_small_spare():
disk = call("disk.get_unused")[0]["name"]
with fake_disks({"sdz": {"size": 1024 * 1024 * 1024}}):
with pytest.raises(ValidationErrors) as ve:
pool = call("pool.create", {
"name": "test",
"encryption": False,
"allow_duplicate_serials": True,
"topology": {
"data": [
{"type": "STRIPE", "disks": [disk]},
],
"spares": ["sdz"],
},
}, job=True)
call("pool.export", pool["id"], job=True)
assert ve.value.errors[0].errmsg.startswith("Spare sdz (1 GiB) is smaller than the smallest data disk")
def test_pool_update_too_small_spare():
with another_pool() as pool:
with fake_disks({"sdz": {"size": 1024 * 1024 * 1024}}):
with pytest.raises(ValidationErrors) as ve:
call("pool.update", pool["id"], {
"topology": {
"spares": ["sdz"],
},
}, job=True)
assert ve.value.errors[0].errmsg.startswith("Spare sdz (1 GiB) is smaller than the smallest data disk")
| 1,490 | Python | .py | 32 | 34.1875 | 115 | 0.550345 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,181 | test_dataset_unlock_validation.py | truenas_middleware/tests/api2/test_dataset_unlock_validation.py | import os
import pytest
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call, ssh
from truenas_api_client import ValidationErrors
PASSPHRASE = '12345678'
def encryption_props():
return {
'encryption_options': {'generate_key': False, 'passphrase': PASSPHRASE},
'encryption': True,
'inherit_encryption': False
}
@pytest.mark.parametrize(
'nested_dir,lock_dataset', [('test_dir', True), ('parent/child', True), ('test_dir', False)]
)
def test_encrypted_dataset_unlock_mount_validation(nested_dir, lock_dataset):
with dataset('test_dataset', encryption_props()) as encrypted_ds:
mount_point = os.path.join('/mnt', encrypted_ds)
if lock_dataset:
call('pool.dataset.lock', encrypted_ds, job=True)
call('filesystem.set_immutable', False, mount_point)
ssh(f'mkdir -p {os.path.join(mount_point, nested_dir)}')
if lock_dataset:
with pytest.raises(ValidationErrors) as ve:
call(
'pool.dataset.unlock', encrypted_ds.split('/')[0],
{'datasets': [{'passphrase': PASSPHRASE, 'name': encrypted_ds}], 'recursive': True}, job=True
)
assert ve.value.errors[0].attribute == 'unlock_options.datasets.0.force'
assert ve.value.errors[0].errmsg == f'\'{mount_point}\' directory is not empty (please provide' \
' "force" flag to override this error and file/directory will be' \
' renamed once the dataset is unlocked)'
else:
call(
'pool.dataset.unlock', encrypted_ds.split('/')[0],
{'datasets': [{'passphrase': PASSPHRASE, 'name': encrypted_ds}], 'recursive': True}, job=True
)
ssh(f'rm -rf {mount_point}')
| 1,923 | Python | .py | 38 | 39.605263 | 115 | 0.60331 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,182 | test_truecommand_roles.py | truenas_middleware/tests/api2/test_truecommand_roles.py | import pytest
from middlewared.test.integration.assets.roles import common_checks
def test_truecommand_readonly_role(unprivileged_user_fixture):
common_checks(
unprivileged_user_fixture, 'truenas.managed_by_truecommand', 'READONLY_ADMIN', True, valid_role_exception=False
)
@pytest.mark.parametrize('endpoint,role,should_work,valid_role_exception', [
('truecommand.config', 'TRUECOMMAND_READ', True, False),
('truecommand.config', 'TRUECOMMAND_WRITE', True, False),
('truecommand.info', 'TRUECOMMAND_READ', True, False),
('truecommand.info', 'TRUECOMMAND_WRITE', True, False),
('truecommand.update', 'TRUECOMMAND_READ', False, True),
('truecommand.update', 'TRUECOMMAND_WRITE', True, True),
])
def test_truecommand_read_and_write_role(unprivileged_user_fixture, endpoint, role, should_work, valid_role_exception):
common_checks(
unprivileged_user_fixture, endpoint, role, should_work, valid_role_exception=valid_role_exception
)
| 990 | Python | .py | 18 | 50.611111 | 119 | 0.745605 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,183 | test_docker_setup.py | truenas_middleware/tests/api2/test_docker_setup.py | import pytest
from middlewared.test.integration.assets.docker import docker
from middlewared.test.integration.assets.pool import another_pool
from middlewared.test.integration.utils import call
from middlewared.test.integration.utils.docker import dataset_props, IX_APPS_MOUNT_PATH
ENC_POOL_PASSWORD = 'test1234'
@pytest.fixture(scope='module')
def docker_pool():
with another_pool() as pool:
with docker(pool) as docker_config:
yield docker_config
@pytest.fixture(scope='module')
def docker_encrypted_pool():
with another_pool({
'name': 'docker_enc_pool',
'encryption': True,
'encryption_options': {'passphrase': ENC_POOL_PASSWORD}
}) as pool:
with docker(pool) as docker_config:
yield docker_config
def test_docker_datasets_properties(docker_pool):
docker_config = call('docker.config')
datasets = {
ds['name']: ds['properties'] for ds in call('zfs.dataset.query', [['id', '^', docker_config['dataset']]])
}
for ds_name, current_props in datasets.items():
invalid_props = {}
for to_check_prop, to_check_prop_value in dataset_props(ds_name).items():
if current_props[to_check_prop]['value'] != to_check_prop_value:
invalid_props[to_check_prop] = current_props[to_check_prop]['value']
assert invalid_props == {}, f'{ds_name} has invalid properties: {invalid_props}'
def test_correct_docker_dataset_is_mounted(docker_pool):
docker_config = call('docker.config')
assert call('filesystem.statfs', IX_APPS_MOUNT_PATH)['source'] == docker_config['dataset']
def test_catalog_synced_properly(docker_pool):
assert call('catalog.synced') is True
def test_catalog_sync_location(docker_pool):
assert call('catalog.config')['location'] == '/mnt/.ix-apps/truenas_catalog'
def test_apps_being_reported(docker_pool):
assert call('app.available', [], {'count': True}) != 0
def test_apps_are_running(docker_pool):
assert call('docker.status')['status'] == 'RUNNING'
def test_apps_dataset_after_address_pool_update(docker_pool):
docker_config = call('docker.update', {'address_pools': [{'base': '172.17.0.0/12', 'size': 27}]}, job=True)
assert docker_config['address_pools'] == [{'base': '172.17.0.0/12', 'size': 27}]
assert call('filesystem.statfs', IX_APPS_MOUNT_PATH)['source'] == docker_config['dataset']
assert call('docker.status')['status'] == 'RUNNING'
def test_correct_docker_dataset_is_mounted_on_enc_pool(docker_encrypted_pool):
docker_config = call('docker.config')
assert call('filesystem.statfs', IX_APPS_MOUNT_PATH)['source'] == docker_config['dataset']
def test_docker_locked_dataset_mount(docker_encrypted_pool):
docker_config = call('docker.config')
call('pool.dataset.lock', docker_encrypted_pool['pool'], job=True)
assert call('filesystem.statfs', IX_APPS_MOUNT_PATH)['source'] != docker_config['dataset']
def test_docker_unlocked_dataset_mount(docker_encrypted_pool):
docker_config = call('docker.config')
call(
'pool.dataset.unlock', docker_encrypted_pool['pool'], {
'datasets': [{'passphrase': ENC_POOL_PASSWORD, 'name': docker_encrypted_pool['pool']}], 'recursive': True
}, job=True
)
assert call('filesystem.statfs', IX_APPS_MOUNT_PATH)['source'] == docker_config['dataset']
| 3,366 | Python | .py | 62 | 48.709677 | 117 | 0.692308 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,184 | test_audit_dataset.py | truenas_middleware/tests/api2/test_audit_dataset.py | from middlewared.test.integration.utils import call, pool
from middlewared.test.integration.utils.audit import expect_audit_method_calls
DS_NAME = f'{pool}/audit_dataset_insert_name_here'
def test_dataset_audit():
payload = {'name': DS_NAME}
try:
with expect_audit_method_calls([{
'method': 'pool.dataset.create',
'params': [payload],
'description': f'Pool dataset create {DS_NAME}',
}]):
call('pool.dataset.create', payload)
with expect_audit_method_calls([{
'method': 'pool.dataset.update',
'params': [DS_NAME, {'atime': 'OFF'}],
'description': f'Pool dataset update {DS_NAME}',
}]):
call('pool.dataset.update', DS_NAME, {'atime': 'OFF'})
finally:
with expect_audit_method_calls([{
'method': 'pool.dataset.delete',
'params': [DS_NAME],
'description': f'Pool dataset delete {DS_NAME}',
}]):
call('pool.dataset.delete', DS_NAME)
| 1,039 | Python | .py | 25 | 32.16 | 78 | 0.582341 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,185 | test_posix_acl.py | truenas_middleware/tests/api2/test_posix_acl.py | import enum
import pytest
from auto_config import pool_name
from middlewared.test.integration.utils import call, ssh
from middlewared.test.integration.assets.pool import dataset
ACLTEST_DATASET_NAME = "posixacltest"
ACLTEST_DATASET_ABS_PATH = f"/mnt/{pool_name}/{ACLTEST_DATASET_NAME}"
ACLTEST_SUBDATASET_NAME = "sub1"
ACLTEST_SUBDATASET_ABS_PATH = f"{ACLTEST_DATASET_ABS_PATH}/{ACLTEST_SUBDATASET_NAME}"
PERMSET_EMPTY = {"READ": False, "WRITE": False, "EXECUTE": False}
PERMSET_FULL = {"READ": True, "WRITE": True, "EXECUTE": True}
TAGS = {
"USER_OBJ": {"mask_required": False},
"GROUP_OBJ": {"mask_required": False},
"MASK": {"mask_required": False},
"USER": {"mask_required": True},
"GROUP": {"mask_required": True},
"OTHER": {"mask_required": False},
}
class ACLBrand(enum.Enum):
ACCESS = enum.auto()
DEFAULT = enum.auto()
def getacl(self, perms=None):
"""Default to 770 unless permissions explicitly specified."""
permfull = perms if perms else PERMSET_FULL.copy()
permempty = perms if perms else PERMSET_EMPTY.copy()
default = self.name == "DEFAULT"
return [
{
"tag": "USER_OBJ",
"id": -1,
"perms": permfull,
"default": default,
},
{
"tag": "GROUP_OBJ",
"id": -1,
"perms": permfull,
"default": default,
},
{
"tag": "OTHER",
"id": -1,
"perms": permempty,
"default": default,
},
]
@pytest.fixture(scope="module")
def temp_ds():
with dataset(
ACLTEST_DATASET_NAME, data={"acltype": "POSIX", "aclmode": "DISCARD"}
) as ds:
# Verify that our dataset was created successfully
# and that the acltype is POSIX1E, which should be
# default for a "generic" dataset.
info = call("filesystem.getacl", ACLTEST_DATASET_ABS_PATH)
assert info["acltype"] == "POSIX1E", info
# Verify that we can set a trivial POSIX1E ACL
call(
"filesystem.setacl",
{
"path": ACLTEST_DATASET_ABS_PATH,
"dacl": ACLBrand.ACCESS.getacl(),
"gid": 65534,
"uid": 65534,
"acltype": "POSIX1E",
},
job=True,
)
# Verify ACL is repoted as trivial
info = call("filesystem.getacl", ACLTEST_DATASET_ABS_PATH)
assert info["trivial"], info
# Verify UID/GID
assert info["uid"] == 65534, info
assert info["gid"] == 65534, info
# Verify ACL was applied correctly
default_acl = ACLBrand.ACCESS.getacl()
for idx, acl in enumerate(info["acl"]):
for key in ("tag", "perms"):
assert acl[key] == default_acl[idx][key], acl[key]
# create subdataset for inheritance related tests
call(
"pool.dataset.create",
{
"name": f"{ds}/{ACLTEST_SUBDATASET_NAME}",
"acltype": "POSIX",
"aclmode": "DISCARD",
},
)
rv = ssh(
"; ".join(
[
f"mkdir -p {ACLTEST_DATASET_ABS_PATH}/dir1/dir2",
f"touch {ACLTEST_DATASET_ABS_PATH}/dir1/testfile",
f"touch {ACLTEST_DATASET_ABS_PATH}/dir1/dir2/testfile",
]
),
complete_response=True,
)
assert rv["result"] is True, rv["output"]
yield
"""
At this point very basic functionality of API endpoint is verified.
Proceed to more rigorous testing of permissions.
"""
@pytest.mark.parametrize("perm", ["READ", "WRITE", "EXECUTE"])
def test_set_perms_for(temp_ds, perm):
"""
Validation that READ, WRITE, EXECUTE are set correctly via endpoint.
OTHER entry is used for this purpose.
"""
dacls = ACLBrand.ACCESS.getacl()
dacls[2]["perms"][perm] = True
call(
"filesystem.setacl",
{"path": ACLTEST_DATASET_ABS_PATH, "dacl": dacls, "acltype": "POSIX1E"},
job=True,
)
rv = call("filesystem.getacl", ACLTEST_DATASET_ABS_PATH)["acl"][2]["perms"]
assert rv[perm], rv
@pytest.mark.parametrize("tag", TAGS.keys())
def test_set_tag_(temp_ds, tag):
"""
Validation that entries for all tag types can be set correctly.
In case of USER_OBJ, GROUP_OBJ, and OTHER, the existing entry
is modified to match our test permset. USER and GROUP (named)
entries are set for id 1000 (user / group need not exist for
this to succeed). Named entries require an additional mask entry.
"""
test_permset = {"READ": True, "WRITE": False, "EXECUTE": True}
must_add = True
payload = {
"path": ACLTEST_DATASET_ABS_PATH,
"dacl": ACLBrand.ACCESS.getacl(),
"acltype": "POSIX1E",
}
for entry in payload["dacl"]:
if entry["tag"] == tag:
entry["perms"] = test_permset
must_add = False
break
if must_add:
new_entry = {
"tag": tag,
"perms": test_permset,
"id": 1000,
"default": False,
}
if tag == "MASK":
new_entry["id"] = -1
# POSIX ACLs are quite particular about
# ACE ordering. We do this on backend.
# MASK comes before OTHER.
payload["dacl"].insert(2, new_entry)
elif tag == "USER":
payload["dacl"].insert(1, new_entry)
elif tag == "GROUP":
payload["dacl"].insert(2, new_entry)
if TAGS[tag]["mask_required"]:
new_entry = {
"tag": "MASK",
"perms": test_permset,
"id": -1,
"default": False,
}
payload["dacl"].insert(3, new_entry)
call("filesystem.setacl", payload, job=True)
rv = call("filesystem.getacl", ACLTEST_DATASET_ABS_PATH)
assert payload["dacl"] == rv["acl"], rv
@pytest.mark.parametrize("tag", TAGS.keys())
def test_set_default_tag_(temp_ds, tag):
"""
Validation that entries for all tag types can be set correctly.
In case of USER_OBJ, GROUP_OBJ, and OTHER, the existing entry
is modified to match our test permset. USER and GROUP (named)
entries are set for id 1000 (user / group need not exist for
this to succeed). Named entries require an additional mask entry.
This particular test covers "default" entries in POSIX1E ACL.
"""
test_permset = {"READ": True, "WRITE": False, "EXECUTE": True}
must_add = True
payload = {
"path": ACLTEST_DATASET_ABS_PATH,
"dacl": ACLBrand.ACCESS.getacl(),
"acltype": "POSIX1E",
}
default = ACLBrand.DEFAULT.getacl()
for entry in default:
if entry["tag"] == tag:
entry["perms"] = test_permset
must_add = False
if must_add:
new_entry = {
"tag": tag,
"perms": test_permset,
"id": 1000,
"default": True,
}
if tag == "MASK":
new_entry["id"] = -1
# POSIX ACLs are quite particular about
# ACE ordering. We do this on backend.
# MASK comes before OTHER.
default.insert(2, new_entry)
elif tag == "USER":
default.insert(1, new_entry)
elif tag == "GROUP":
default.insert(2, new_entry)
if TAGS[tag]["mask_required"]:
new_entry = {
"tag": "MASK",
"perms": test_permset,
"id": -1,
"default": True,
}
default.insert(3, new_entry)
payload["dacl"].extend(default)
call("filesystem.setacl", payload, job=True)
rv = call("filesystem.getacl", ACLTEST_DATASET_ABS_PATH)
assert payload["dacl"] == rv["acl"], rv
assert rv["trivial"] is False, rv
def test_non_recursive_acl_strip(temp_ds):
"""
Verify that non-recursive ACL strip works correctly.
We do this by checking result of subsequent getacl
request on the path (it should report that it is "trivial").
"""
call(
"filesystem.setacl",
{
"path": ACLTEST_DATASET_ABS_PATH,
"dacl": [],
"acltype": "POSIX1E",
"options": {"stripacl": True},
},
job=True,
)
rv = call("filesystem.getacl", ACLTEST_DATASET_ABS_PATH)
assert rv["trivial"] is True, rv
"""
This next series of tests verifies that ACLs are being inherited correctly.
We first create a child dataset to verify that ACLs do not change unless
'traverse' is set.
"""
def test_recursive_no_traverse(temp_ds):
"""
Test that ACL is recursively applied correctly, but does
not affect mountpoint of child dataset.
In this case, access ACL will have 750 for dataset mountpoint,
and default ACL will have 777. Recusively applying will grant
777 for access and default.
"""
payload = {
"path": ACLTEST_DATASET_ABS_PATH,
"gid": 65534,
"uid": 65534,
"dacl": ACLBrand.ACCESS.getacl(),
"acltype": "POSIX1E",
"options": {"recursive": True},
}
new_perms = {"READ": True, "WRITE": True, "EXECUTE": True}
default = ACLBrand.DEFAULT.getacl(new_perms)
payload["dacl"].extend(default)
call("filesystem.setacl", payload, job=True)
# Verify that subdataset hasn't changed. Should still report as trivial.
rv = call("filesystem.getacl", ACLTEST_SUBDATASET_ABS_PATH)
assert rv["trivial"], rv
# Verify that user was changed on subdirectory
rv = call("filesystem.getacl", f"{ACLTEST_DATASET_ABS_PATH}/dir1")
assert rv["uid"] == 65534, rv
assert rv["trivial"] is False, rv
for entry in rv["acl"]:
assert entry["perms"] == new_perms, rv["acl"]
def test_recursive_with_traverse(temp_ds):
"""
This test verifies that setting `traverse = True`
will allow setacl operation to cross mountpoints.
"""
payload = {
"gid": 65534,
"uid": 65534,
"path": ACLTEST_DATASET_ABS_PATH,
"dacl": ACLBrand.ACCESS.getacl(),
"acltype": "POSIX1E",
"options": {"recursive": True, "traverse": True},
}
default = ACLBrand.DEFAULT.getacl({"READ": True, "WRITE": True, "EXECUTE": True})
payload["dacl"].extend(default)
call("filesystem.setacl", payload, job=True)
rv = call("filesystem.getacl", ACLTEST_SUBDATASET_ABS_PATH)
assert rv["trivial"] is False, rv
assert rv["uid"] == 65534, rv
def test_strip_acl_from_dataset(temp_ds):
"""
Strip ACL via filesystem.setperm endpoint.
This should work even for POSIX1E ACLs.
"""
call(
"filesystem.setperm",
{
"path": ACLTEST_DATASET_ABS_PATH,
"mode": "777",
"options": {"stripacl": True, "recursive": True},
},
job=True,
)
"""
The next four tests check that we've remotved the ACL from the
mountpoint, a subdirectory, and a file. These are all potentially
different cases for where we can fail to strip an ACL.
"""
def test_filesystem_acl_is_not_removed_child_dataset(temp_ds):
rv = call("filesystem.stat", ACLTEST_SUBDATASET_ABS_PATH)
assert rv["acl"] is True, rv
def test_filesystem_acl_is_removed_from_mountpoint(temp_ds):
rv = call("filesystem.stat", ACLTEST_DATASET_ABS_PATH)
assert rv["acl"] is False, rv
assert oct(rv["mode"]) == "0o40777", rv
def test_filesystem_acl_is_removed_from_subdir(temp_ds):
rv = call("filesystem.stat", f"{ACLTEST_DATASET_ABS_PATH}/dir1")
assert rv["acl"] is False, rv
assert oct(rv["mode"]) == "0o40777", rv
def test_filesystem_acl_is_removed_from_file(temp_ds):
rv = call("filesystem.stat", f"{ACLTEST_DATASET_ABS_PATH}/dir1/testfile")
assert rv["acl"] is False, rv
assert oct(rv["mode"]) == "0o100777", rv
| 11,969 | Python | .py | 326 | 28.687117 | 85 | 0.589803 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,186 | test_system_vendor.py | truenas_middleware/tests/api2/test_system_vendor.py | from middlewared.test.integration.utils import call, ssh
SENTINEL_FILE_PATH = "/data/.vendor"
def test_no_vendor_file():
file_exists = ssh(f"test -e {SENTINEL_FILE_PATH}", check=False, complete_response=True)["result"]
assert not file_exists
assert not call("system.vendor.is_vendored")
def test_name_is_none():
vendor_name = call("system.vendor.name")
assert vendor_name is None
| 406 | Python | .py | 9 | 41.222222 | 101 | 0.734015 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,187 | test_ui_caching.py | truenas_middleware/tests/api2/test_ui_caching.py | import re
import pytest
import requests
from middlewared.test.integration.utils import url
RE_MAIN_SCRIPT = re.compile(r'<script src="(main[.-].+\.js)" type="module">')
@pytest.mark.parametrize("path", ["/", "/ui", "/ui/", "/ui/index.html", "/ui/sessions/signin"])
def test_index_html(path):
r = requests.get(url() + path, timeout=10)
assert r.status_code == 200
assert "Strict-Transport-Security" in r.headers
# FIXME: There is no easy way to fix this for index.html, but since this path never appears anywhere,
# we can probably ignore this for now
if path != "/ui/index.html":
assert r.headers["Cache-Control"] == "no-store, no-cache, must-revalidate, max-age=0"
assert RE_MAIN_SCRIPT.search(r.text)
def test_assets():
r = requests.get(url(), timeout=10)
m = RE_MAIN_SCRIPT.search(r.text)
r = requests.get(url() + f"/ui/{m.group(1)}")
assert "Strict-Transport-Security" in r.headers
assert r.headers["Cache-Control"] == "must-revalidate"
| 1,011 | Python | .py | 21 | 43.809524 | 105 | 0.683402 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,188 | test_pool_dataset_info.py | truenas_middleware/tests/api2/test_pool_dataset_info.py | from middlewared.test.integration.utils import call
from middlewared.test.integration.assets.pool import pool
def test_recommended_zvol_blocksize():
assert call("pool.dataset.recommended_zvol_blocksize", pool) == "16K"
| 225 | Python | .py | 4 | 53.75 | 73 | 0.812785 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,189 | test_account_idmap.py | truenas_middleware/tests/api2/test_account_idmap.py | import os
import sys
import pytest
from middlewared.test.integration.assets.account import user
from middlewared.test.integration.utils import call, client
LOCAL_USER_SID_PREFIX = 'S-1-22-1-'
LOCAL_GROUP_SID_PREFIX = 'S-1-22-2-'
def test_uid_idmapping():
with user({
'username': 'idmap_user',
'full_name': 'idmap_user',
'smb': True,
'group_create': True,
'password': 'test1234',
}) as u:
UNIX_SID = LOCAL_USER_SID_PREFIX + str(u['uid'])
results = call('idmap.convert_sids', [UNIX_SID])
assert results['unmapped'] == {}
assert UNIX_SID in results['mapped']
entry = results['mapped'][UNIX_SID]
assert entry['id_type'] == 'USER'
assert entry['id'] == u['uid']
assert entry['name'] == 'Unix User\\idmap_user'
results = call('idmap.convert_unixids', [{
'id_type': 'USER',
'id': u['uid'],
}])
assert results['unmapped'] == {}
entry = results['mapped'][f'UID:{u["uid"]}']
assert entry['id_type'] == 'USER'
assert entry['id'] == u['uid']
pdb_sid = entry['sid']
user_obj = call('user.get_user_obj', {'uid': u['uid'], 'sid_info': True})
assert 'sid' in user_obj
assert user_obj['sid'] == pdb_sid
| 1,311 | Python | .py | 35 | 30.028571 | 81 | 0.568272 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,190 | test_account_privilege_role_forbidden_fields.py | truenas_middleware/tests/api2/test_account_privilege_role_forbidden_fields.py | import pytest
from middlewared.service_exception import ValidationErrors
from middlewared.test.integration.assets.account import unprivileged_user_client
from middlewared.test.integration.assets.cloud_sync import local_ftp_credential
from middlewared.test.integration.assets.pool import dataset
@pytest.fixture(scope="module")
def unprivileged_client():
with unprivileged_user_client(["CLOUD_BACKUP_WRITE", "CLOUD_SYNC_WRITE"]) as c:
yield c
@pytest.fixture(scope="module")
def cloudsync_template():
with local_ftp_credential() as credential:
with dataset("cloud_backup") as local_dataset:
yield {
"path": f"/mnt/{local_dataset}",
"credentials": credential["id"],
"attributes": {
"folder": "",
},
}
@pytest.mark.parametrize("param,value", [
("pre_script", "rm -rf /"),
("post_script", "rm -rf /"),
])
def test_cloud_backup(unprivileged_client, cloudsync_template, param, value):
with pytest.raises(ValidationErrors) as ve:
unprivileged_client.call("cloud_backup.create", {
**cloudsync_template,
"password": "test",
"keep_last": 10,
param: value,
})
assert any(error.attribute == f"cloud_backup_create.{param}" for error in ve.value.errors), ve
@pytest.mark.parametrize("param,value", [
("pre_script", "rm -rf /"),
("post_script", "rm -rf /"),
])
def test_cloud_sync(unprivileged_client, cloudsync_template, param, value):
with pytest.raises(ValidationErrors) as ve:
unprivileged_client.call("cloudsync.create", {
**cloudsync_template,
"direction": "PUSH",
"transfer_mode": "COPY",
param: value,
})
assert any(error.attribute == f"cloud_sync_create.{param}" for error in ve.value.errors), ve
| 1,894 | Python | .py | 46 | 33.586957 | 98 | 0.641807 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,191 | test_427_smb_acl.py | truenas_middleware/tests/api2/test_427_smb_acl.py | #!/usr/bin/env python3
import errno
import pytest
import sys
import os
import secrets
import string
import subprocess
apifolder = os.getcwd()
sys.path.append(apifolder)
from auto_config import (
pool_name,
)
from middlewared.service_exception import ValidationError, ValidationErrors
from middlewared.test.integration.assets.account import user
from middlewared.test.integration.assets.smb import smb_share
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call
from middlewared.test.integration.utils.client import truenas_server
from middlewared.test.integration.utils.smb import security, smb_connection
from middlewared.test.integration.utils.unittest import RegexString
from pytest_dependency import depends
from time import sleep
from utils import create_dataset
SMB_USER = "smbacluser"
SMB_PWD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
TEST_DATA = {}
OWNER_RIGHTS_SID = 'S-1-3-4'
permset = {
"READ_DATA": False,
"WRITE_DATA": False,
"APPEND_DATA": False,
"READ_NAMED_ATTRS": False,
"WRITE_NAMED_ATTRS": False,
"EXECUTE": False,
"DELETE_CHILD": False,
"READ_ATTRIBUTES": False,
"WRITE_ATTRIBUTES": False,
"DELETE": False,
"READ_ACL": False,
"WRITE_ACL": False,
"WRITE_OWNER": False,
"SYNCHRONIZE": True
}
flagset = {
"FILE_INHERIT": False,
"DIRECTORY_INHERIT": False,
"INHERIT_ONLY": False,
"NO_PROPAGATE_INHERIT": False,
"INHERITED": False
}
def get_windows_sd(share, format="LOCAL"):
return call("smb.get_remote_acl", {
"server": "127.0.0.1",
"share": share,
"username": SMB_USER,
"password": SMB_PWD,
"options": {"output_format": format}
})['acl_data']
def iter_permset(path, share, local_acl):
    """Enable each NFSv4 permission bit in turn and verify it over SMB.

    The test ACE (id 666) in `local_acl` references the module-level
    `permset` dict, so flipping a key here changes the ACL that
    filesystem.setacl writes.  Bits are enabled cumulatively: each pass
    re-checks all previously enabled bits as well.
    """
    smbacl = get_windows_sd(share)
    # Canonicalized ACL places our test ACE first; it must start all-off.
    assert smbacl['acl'][0]['perms'] == permset
    for perm in permset.keys():
        permset[perm] = True
        call('filesystem.setacl', {'path': path, "dacl": local_acl}, job=True)
        smbacl = get_windows_sd(share)
        for ace in smbacl["acl"]:
            if ace["id"] != 666:
                continue
            assert ace["perms"] == permset, f'{perm}: {str(ace)}'


def iter_flagset(path, share, local_acl):
    """Enable each NFSv4 inheritance flag in turn and verify it over SMB.

    Same cumulative strategy as iter_permset, driven by the shared
    module-level `flagset` dict referenced by the test ACE (id 666).
    """
    smbacl = get_windows_sd(share)
    assert smbacl['acl'][0]['flags'] == flagset
    for flag in flagset.keys():
        # we automatically canonicalize entries and so INHERITED shifts to end of list
        flagset[flag] = True
        call('filesystem.setacl', {'path': path, "dacl": local_acl}, job=True)
        smbacl = get_windows_sd(share)
        for ace in smbacl["acl"]:
            if ace["id"] != 666:
                continue
            assert ace["flags"] == flagset, f'{flag}: {str(ace)}'
@pytest.fixture(scope='module')
def initialize_for_smb_tests(request):
    """Create the SMB test user (in builtin_administrators) and keep the
    SMB service running for the duration of the module."""
    ba = call(
        'group.query',
        [['name', '=', 'builtin_administrators']],
        {'get': True}
    )
    with user({
        'username': SMB_USER,
        'full_name': SMB_USER,
        'group_create': True,
        'smb': True,
        'groups': [ba['id']],
        'password': SMB_PWD
    }) as u:
        try:
            call('service.start', 'cifs')
            yield {'user': u}
        finally:
            call('service.stop', 'cifs')


@pytest.mark.dependency(name="SMB_SERVICE_STARTED")
def test_001_initialize_for_tests(initialize_for_smb_tests):
    # Stash fixture state in module-level TEST_DATA for dependent tests.
    TEST_DATA.update(initialize_for_smb_tests)
def test_003_test_perms(request):
    """
    This test creates a temporary dataset / SMB share,
    then iterates through all the possible permissions bits
    setting local FS ace for each of them and verifies that
    correct NT ACL bit gets toggled when viewed through SMB
    protocol.
    """
    depends(request, ["SMB_SERVICE_STARTED"], scope="session")

    with dataset('nfs4acl_perms_smb', {'share_type': 'SMB'}) as ds:
        path = os.path.join('/mnt', ds)
        with smb_share(path, "PERMS"):
            the_acl = call('filesystem.getacl', path, False)['acl']
            # The sentinel ACE (id 666) references the module-level permset /
            # flagset dicts so that iter_permset can mutate them in place.
            the_acl.insert(0, {
                'perms': permset,
                'flags': flagset,
                'id': 666,
                'type': 'ALLOW',
                'tag': 'USER'
            })
            call('filesystem.setacl', {'path': path, "dacl": the_acl}, job=True)
            iter_permset(path, "PERMS", the_acl)
def test_004_test_flags(request):
    """
    This test creates a temporary dataset / SMB share,
    then iterates through all the possible inheritance flags
    setting local FS ace for each of them and verifies that
    correct NT ACL bit gets toggled when viewed through SMB
    protocol.
    """
    depends(request, ["SMB_SERVICE_STARTED"], scope="session")

    with dataset('nfs4acl_flags_smb', {'share_type': 'SMB'}) as ds:
        path = os.path.join('/mnt', ds)
        with smb_share(path, "FLAGS"):
            the_acl = call('filesystem.getacl', path, False)['acl']
            # Sentinel ACE shares the module-level dicts (see test_003).
            the_acl.insert(0, {
                'perms': permset,
                'flags': flagset,
                'id': 666,
                'type': 'ALLOW',
                'tag': 'USER'
            })
            call('filesystem.setacl', {'path': path, "dacl": the_acl}, job=True)
            iter_flagset(path, "FLAGS", the_acl)
def test_005_test_map_modify(request):
    """
    This test validates that we are generating an appropriate SD when user has
    'stripped' an ACL from an SMB share. Appropriate in this case means one that
    grants an access mask equivalent to MODIFY or FULL depending on whether it's
    the file owner or group / other.
    """
    depends(request, ["SMB_SERVICE_STARTED"], scope="session")
    ds = 'nfs4acl_map_modify'
    path = f'/mnt/{pool_name}/{ds}'
    # mode 777 with a trivial POSIX-style ACL; NFSV4/PASSTHROUGH preserves it.
    with create_dataset(f'{pool_name}/{ds}', {'acltype': 'NFSV4', 'aclmode': 'PASSTHROUGH'}, None, 777):
        with smb_share(path, "MAP_MODIFY"):
            sd = get_windows_sd("MAP_MODIFY", "SMB")
            dacl = sd['dacl']
            # owner gets FULL; group and other get MODIFY-equivalent masks
            assert dacl[0]['access_mask']['standard'] == 'FULL', str(dacl[0])
            assert dacl[1]['access_mask']['special']['WRITE_ATTRIBUTES'], str(dacl[1])
            assert dacl[1]['access_mask']['special']['WRITE_EA'], str(dacl[1])
            assert dacl[2]['access_mask']['special']['WRITE_ATTRIBUTES'], str(dacl[2])
            assert dacl[2]['access_mask']['special']['WRITE_EA'], str(dacl[2])
def test_006_test_preserve_dynamic_id_mapping(request):
    """Verify a dynamically-allocated idmap entry ("Owner Rights") survives a
    re-sync of the group mapping database."""
    depends(request, ["SMB_SERVICE_STARTED"], scope="session")

    def _find_owner_rights(acl, owner_rights_id):
        # True if any ACL entry carries the unix id mapped for S-1-3-4.
        for entry in acl:
            if entry['id'] == owner_rights_id:
                return True
        return False

    ds = 'nfs4acl_dynmamic_user'
    path = f'/mnt/{pool_name}/{ds}'
    with create_dataset(f'{pool_name}/{ds}', {'share_type': 'SMB'}):
        with smb_share(path, "DYNAMIC"):
            # add an ACL entry that forces generation
            # of a dynamic idmap entry
            sleep(5)
            cmd = [
                'smbcacls',
                f'//{truenas_server.ip}/DYNAMIC',
                '\\',
                '-a', r'ACL:S-1-3-4:ALLOWED/0x0/FULL',
                '-d', '0',
                '-U', f'{SMB_USER}%{SMB_PWD}',
            ]
            res = subprocess.run(cmd, capture_output=True)
            assert res.returncode == 0, res.stderr.decode() or res.stdout.decode()
            # Since winbindd is by default not in nsswitch when we're standalone
            # the GID won't resolve to name
            res = call('idmap.convert_sids', [OWNER_RIGHTS_SID])
            assert OWNER_RIGHTS_SID in res['mapped']
            assert res['mapped'][OWNER_RIGHTS_SID]['id_type'] == 'GROUP'
            assert res['mapped'][OWNER_RIGHTS_SID]['name'].endswith('Owner Rights')
            owner_rights_id = res['mapped'][OWNER_RIGHTS_SID]['id']
            # verify "owner rights" entry is present
            the_acl = call('filesystem.getacl', path, False, True)['acl']
            has_owner_rights = _find_owner_rights(the_acl, owner_rights_id)
            assert has_owner_rights is True, str(the_acl)
            # force re-sync of group mapping database (and winbindd_idmap.tdb)
            call('smb.synchronize_group_mappings', job=True)
            # verify "owner rights" entry is still present
            the_acl = call('filesystem.getacl', path, False, True)['acl']
            has_owner_rights = _find_owner_rights(the_acl, owner_rights_id)
            assert has_owner_rights is True, str(the_acl)
def test_007_test_disable_autoinherit(request):
    """Verify that breaking ACL inheritance over SMB ("COPY") sets the
    SEC_DESC_DACL_PROTECTED bit on the directory's security descriptor."""
    depends(request, ["SMB_SERVICE_STARTED"], scope="session")
    ds = 'nfs4acl_disable_inherit'
    path = f'/mnt/{pool_name}/{ds}'
    with create_dataset(f'{pool_name}/{ds}', {'share_type': 'SMB'}):
        with smb_share(path, 'NFS4_INHERIT'):
            with smb_connection(
                share='NFS4_INHERIT',
                username=SMB_USER,
                password=SMB_PWD
            ) as c:
                c.mkdir('foo')
                fh = c.create_file('foo', 'r')
                sd = c.get_sd(fh, security.SECINFO_DACL)
                c.close(fh)
                # freshly-created dir inherits: PROTECTED must be clear
                assert sd.type & security.SEC_DESC_DACL_PROTECTED == 0, sd.as_sddl()
                c.inherit_acl('foo', 'COPY')
                fh = c.create_file('foo', 'r')
                sd = c.get_sd(fh, security.SECINFO_DACL)
                # after COPY, inheritance is broken: PROTECTED must be set
                assert sd.type & security.SEC_DESC_DACL_PROTECTED, sd.as_sddl()
def test_008_test_prevent_smb_dataset_update(request):
    """
    Prevent changing acltype and xattr on dataset hosting SMB shares
    """
    ds_name = 'prevent_changes'
    path = f'/mnt/{pool_name}/{ds_name}'
    with create_dataset(f'{pool_name}/{ds_name}') as ds:
        with smb_share(path, 'SMB_SHARE_1'):
            # Create a second share for testing purposes
            with smb_share(path, 'SMB_SHARE_2'):
                # Confirm we ignore requests that don't involve changes
                for setting in [{"acltype": "POSIX"}]:
                    call('pool.dataset.update', ds, setting)

                # Confirm we block requests that involve changes
                for setting in [{"acltype": "OFF"}]:
                    attrib = list(setting.keys())[0]
                    with pytest.raises(ValidationErrors) as ve:
                        call('pool.dataset.update', ds, setting)
                    assert ve.value.errors == [
                        ValidationError(
                            f"pool_dataset_update.{attrib}",
                            RegexString("This dataset is hosting SMB shares. .*"),
                            errno.EINVAL,
                        )
                    ]
                    # error message should name the shares on the dataset
                    assert "SMB_SHARE_2" in str(ve.value.errors[0]), ve.value.errors[0]
| 10,899 | Python | .py | 256 | 33.332031 | 104 | 0.591171 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,192 | test_pool_dataset_acl.py | truenas_middleware/tests/api2/test_pool_dataset_acl.py | import dataclasses
import errno
import pytest
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call, ssh
from truenas_api_client import ClientException
@dataclasses.dataclass
class AclIds:
    """Arbitrary unix ids used as ACL targets (need not exist as accounts)."""
    user_to_add: int = 8765309
    user2_to_add: int = 8765310
    group_to_add: int = 1138
def check_for_entry(acl, id_type, xid, perms, is_posix=False):
    """Return True if `acl` contains the expected entry.

    An entry matches when its id, tag and perms equal `xid`, `id_type` and
    `perms`.  For NFSv4 (`is_posix=False`) exactly one match must exist.
    For POSIX ACLs a matching ACCESS entry *and* a matching DEFAULT entry
    are both required (and at most one of each may exist).
    """
    found = found_access = found_default = False
    for entry in acl:
        matches = (
            entry['id'] == xid
            and entry['tag'] == id_type
            and entry['perms'] == perms
        )
        if not matches:
            continue
        if not is_posix:
            assert found is False
            found = True
        elif entry['default']:
            assert found_default is False
            found_default = True
        else:
            assert found_access is False
            found_access = True

    return found or (found_access and found_default)
def test_simplified_apps_api_posix_acl():
    """filesystem.add_to_acl on a POSIX-ACL dataset creates both ACCESS and
    DEFAULT entries for each requested id."""
    posix_acl = [
        {'id_type': 'USER', 'id': AclIds.user_to_add, 'access': 'MODIFY'},
        {'id_type': 'GROUP', 'id': AclIds.group_to_add, 'access': 'READ'},
        # NOTE(review): same user added twice (MODIFY then FULL_CONTROL);
        # for POSIX both presumably collapse to rwx — confirm intent.
        {'id_type': 'USER', 'id': AclIds.user_to_add, 'access': 'FULL_CONTROL'},
    ]
    with dataset('APPS_POSIX') as ds:
        ds_path = f'/mnt/{ds}'
        call('filesystem.add_to_acl', {'path': ds_path, 'entries': posix_acl}, job=True)
        acl = call('filesystem.getacl', ds_path)['acl']
        # MODIFY maps to rwx for the user...
        assert check_for_entry(
            acl,
            'USER',
            AclIds.user_to_add,
            {'READ': True, 'WRITE': True, 'EXECUTE': True}, True
        ), acl
        # ...and READ maps to r-x for the group.
        assert check_for_entry(
            acl,
            'GROUP',
            AclIds.group_to_add,
            {'READ': True, 'WRITE': False, 'EXECUTE': True}, True
        ), acl
def test_simplified_apps_api_nfs4_acl(request):
    """filesystem.add_to_acl on an NFSv4-ACL dataset: entries are added,
    re-adding is a no-op, and changing entries on a non-empty path requires
    the `force` option."""
    nfs4_acl = [
        {'id_type': 'USER', 'id': AclIds.user_to_add, 'access': 'MODIFY'},
        {'id_type': 'GROUP', 'id': AclIds.group_to_add, 'access': 'READ'},
        {'id_type': 'USER', 'id': AclIds.user2_to_add, 'access': 'FULL_CONTROL'},
    ]
    with dataset('APPS_NFS4', {'share_type': 'APPS'}) as ds:
        ds_path = f'/mnt/{ds}'
        call('filesystem.add_to_acl', {'path': ds_path, 'entries': nfs4_acl}, job=True)
        acl = call('filesystem.getacl', ds_path)['acl']
        assert check_for_entry(acl, 'USER', AclIds.user_to_add, {'BASIC': 'MODIFY'}), acl
        assert check_for_entry(acl, 'GROUP', AclIds.group_to_add, {'BASIC': 'READ'}), acl
        assert check_for_entry(acl, 'USER', AclIds.user2_to_add, {'BASIC': 'FULL_CONTROL'}), acl

        # check behavior of using force option.
        # presence of file in path should trigger failure if force is not set
        results = ssh(f'touch {ds_path}/canary', complete_response=True)
        assert results['result'] is True, results

        # re-adding the identical entries is a no-op and does not require force
        acl_changed = call('filesystem.add_to_acl', {'path': ds_path, 'entries': nfs4_acl}, job=True)
        assert acl_changed is False

        # adding a *new* entry to a non-empty path without force must fail
        with pytest.raises(ClientException):
            call('filesystem.add_to_acl', {'path': ds_path, 'entries': nfs4_acl + [
                {'id_type': 'GROUP', 'id': AclIds.group_to_add, 'access': 'MODIFY'},
            ]}, job=True)

        # check behavior of using force option.
        # second call with `force` specified should succeed
        acl_changed = call('filesystem.add_to_acl', {
            'path': ds_path,
            'entries': nfs4_acl + [{'id_type': 'GROUP', 'id': AclIds.group_to_add, 'access': 'MODIFY'}],
            'options': {'force': True}
        }, job=True)
        assert acl_changed is True

        # we already added the entry earlier.
        # this check makes sure we're not adding duplicate entries.
        acl = call('filesystem.getacl', ds_path)['acl']
        assert check_for_entry(acl, 'USER', AclIds.user_to_add, {'BASIC': 'MODIFY'}), acl
        assert check_for_entry(acl, 'GROUP', AclIds.group_to_add, {'BASIC': 'READ'}), acl
        assert check_for_entry(acl, 'USER', AclIds.user2_to_add, {'BASIC': 'FULL_CONTROL'}), acl
| 4,123 | Python | .py | 85 | 39.211765 | 104 | 0.584971 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,193 | test_disk_temperature.py | truenas_middleware/tests/api2/test_disk_temperature.py | import time
from unittest.mock import ANY
import pytest
from middlewared.test.integration.utils import call, mock
@pytest.fixture(autouse=True, scope="function")
def reset_temperature_cache():
    # Start each test with an empty middleware disk-temperature cache.
    call("disk.reset_temperature_cache")


def test_disk_temperature():
    """An uncached read goes straight to disk.temperature_uncached."""
    with mock("disk.temperature_uncached", return_value=50):
        assert call("disk.temperature", "sda") == 50


def test_disk_temperature_cache():
    """A cached value within its TTL is served without re-reading the disk
    (the uncached mock would raise if it were called)."""
    with mock("disk.temperature_uncached", return_value=50):
        call("disk.temperature", "sda")
    with mock("disk.temperature_uncached", exception=True):
        assert call("disk.temperature", "sda", {"cache": 300}) == 50


def test_disk_temperature_cache_expires():
    """A stale cached value (older than the requested TTL) is refreshed."""
    with mock("disk.temperature_uncached", return_value=50):
        call("disk.temperature", "sda")
    time.sleep(3)
    with mock("disk.temperature_uncached", return_value=60):
        assert call("disk.temperature", "sda", {"cache": 2}) == 60


def test_disk_temperatures_only_cached():
    """With only_cached, values come solely from the cache; nothing is read."""
    with mock("disk.temperature_uncached", return_value=50):
        call("disk.temperature", "sda")
    with mock("disk.temperature_uncached", exception=True):
        assert call("disk.temperatures", ["sda"], {"only_cached": True}) == {"sda": 50}
def test_disk_temperature_alerts():
    """disk.temperature_alerts returns only the SMART alerts for the
    requested disks.

    alert.list is mocked with SMART temperature alerts for sda and sdb plus
    an unrelated ScrubStarted alert; querying for "sda" must return exactly
    the sda alert.  The datetime / last_occurrence fields are compared with
    ANY since their exact representation is not under test.
    """
    sda_temperature_alert = {
        "uuid": "a11a16a9-a28b-4005-b11a-bce6af008d86",
        "source": "",
        "klass": "SMART",
        "args": {
            "device": "/dev/sda",
            "message": "Device: /dev/sda, Temperature 60 Celsius reached critical limit of 50 Celsius (Min/Max 25/63)"
        },
        "node": "Controller A",
        "key": "{\"device\": \"/dev/sda\", \"message\": \"Device: /dev/sda, Temperature 60 Celsius reached critical limit of 50 Celsius (Min/Max 25/63)\"}",
        "datetime": {
            "$date": 1657098825510
        },
        "last_occurrence": {
            "$date": 1657185226656
        },
        "dismissed": False,
        "mail": None,
        "text": "%(message)s.",
        "id": "a11a16a9-a28b-4005-b11a-bce6af008d86",
        "level": "CRITICAL",
        "formatted": "Device: /dev/sda, Temperature 60 Celsius reached critical limit of 50 Celsius (Min/Max 25/63).",
        "one_shot": True,
    }
    sdb_temperature_alert = {
        "uuid": "66e29e1c-2948-4473-928a-3ccf0c0aefa9",
        "source": "",
        "klass": "SMART",
        "args": {
            "device": "/dev/sdb",
            "message": "Device: /dev/sdb, Temperature 60 Celsius reached critical limit of 50 Celsius (Min/Max 25/63)"
        },
        "node": "Controller A",
        "key": "{\"device\": \"/dev/sdb\", \"message\": \"Device: /dev/sdb, Temperature 60 Celsius reached critical limit of 50 Celsius (Min/Max 25/63)\"}",
        "datetime": {
            "$date": 1657098825510
        },
        "last_occurrence": {
            "$date": 1657185226656
        },
        "dismissed": False,
        "mail": None,
        "text": "%(message)s.",
        # FIX: "id" previously duplicated the sda alert's id via copy-paste;
        # it now matches this alert's own uuid.
        "id": "66e29e1c-2948-4473-928a-3ccf0c0aefa9",
        "level": "CRITICAL",
        "formatted": "Device: /dev/sdb, Temperature 60 Celsius reached critical limit of 50 Celsius (Min/Max 25/63).",
        "one_shot": True,
    }
    unrelated_alert = {
        "uuid": "c371834a-5168-474d-a6d0-9eac02ad29a7",
        "source": "",
        "klass": "ScrubStarted",
        "args": "temp",
        "node": "Controller A",
        "key": "\"temp\"",
        "datetime": {
            "$date": 1657713495028
        },
        "last_occurrence": {
            "$date": 1657713495028
        },
        "dismissed": False,
        "mail": None,
        "text": "Scrub of pool %r started.",
        "id": "c371834a-5168-474d-a6d0-9eac02ad29a7",
        "level": "INFO",
        "formatted": "Scrub of pool 'temp' started.",
        "one_shot": True,
    }
    with mock("alert.list", return_value=[sda_temperature_alert, sdb_temperature_alert, unrelated_alert]):
        assert call("disk.temperature_alerts", ["sda"]) == [dict(sda_temperature_alert,
                                                                 datetime=ANY,
                                                                 last_occurrence=ANY)]
| 4,189 | Python | .py | 100 | 32.67 | 156 | 0.573605 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,194 | test_system_general_ui_rollback.py | truenas_middleware/tests/api2/test_system_general_ui_rollback.py | import time
from contextlib import contextmanager
from middlewared.test.integration.utils import call, client, ssh
from middlewared.test.integration.utils.client import truenas_server
ROLLBACK = 20   # seconds before un-checked-in UI settings are rolled back
UI_DELAY = 3    # delay (seconds) before the UI restarts with new settings
ORIG_PORT = 80  # default UI port
NEW_PORT = 81   # temporary UI port used by these tests


def fallback_ui_fix():
    """Fix the UI port settings using SSH in case an
    unexpected failure is met or we just want to reset
    our changes"""
    ssh(f"midclt call system.general.update '{{\"ui_port\": {ORIG_PORT}}}'")
    ssh("midclt call system.general.ui_restart 0")
    # give nginx a moment to come back up on the original port
    time.sleep(5)
@contextmanager
def client_with_timeout(host_ip=None, tries=30):
    """Yield a connected API client, retrying up to `tries` times.

    Each attempt that fails with ConnectionRefusedError (e.g. while the UI
    is restarting on a new port) is retried after a one-second pause.

    Raises:
        AssertionError: if no attempt succeeds within `tries` tries.
    """
    for _ in range(tries):
        try:
            with client(host_ip=host_ip) as c:
                assert c.call("core.ping") == "pong"
                yield c
            break
        except ConnectionRefusedError:
            time.sleep(1)
    else:
        # `assert False` would be silently stripped under `python -O`;
        # raise explicitly so a connection failure is always reported.
        raise AssertionError("Could not connect to client.")
def test_system_general_ui_rollback():
    """This tests the following:
    1. change the port the nginx service binds to (our UI)
    2. ensure communication with the API on the original port fails
    3. ensure communication with the API on the new port succeeds
    4. check the time left before the changes are rolled back
    5. sleep that amount of time (plus a few seconds for a buffer)
    6. ensure communication with the API on the original port succeeds
    7. if any above steps fail, revert the UI port settings via ssh"""
    try:
        # Step 1
        call(
            "system.general.update",
            {"ui_port": NEW_PORT, "rollback_timeout": ROLLBACK, "ui_restart_delay": UI_DELAY}
        )
        # Step 2
        try:
            assert call("core.ping") != "pong"
        except Exception:
            # any connection error here is the expected outcome
            pass
        # Step 3
        with client_with_timeout(host_ip=f"{truenas_server.ip}:{NEW_PORT}") as c:
            rollback_left = c.call("system.general.checkin_waiting")
            # Step 4
            assert rollback_left < ROLLBACK
        # Step 5
        time.sleep(rollback_left + 5)
        # Step 6
        assert call("core.ping") == "pong"
    except Exception:
        # Step 7
        fallback_ui_fix()
        raise
def test_system_general_ui_checkin():
    """This tests the following:
    1. change the port the nginx service binds to (our UI)
    2. immediately checkin the UI port changes
    3. ensure we don't have a checkin pending
    4. revert any UI port settings via ssh"""
    try:
        # Step 1
        call(
            "system.general.update",
            {"ui_port": NEW_PORT, "rollback_timeout": ROLLBACK, "ui_restart_delay": UI_DELAY}
        )
        with client_with_timeout(host_ip=f"{truenas_server.ip}:{NEW_PORT}") as c:
            # Step 2
            c.call("system.general.checkin")
            # Step 3
            assert c.call("system.general.checkin_waiting") is None
    finally:
        # Step 4
        fallback_ui_fix()
| 2,995 | Python | .py | 80 | 29.2125 | 93 | 0.618407 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,195 | test_030_activedirectory.py | truenas_middleware/tests/api2/test_030_activedirectory.py | import ipaddress
import os
from time import sleep
import dns.resolver
import pytest
from truenas_api_client import \
ValidationErrors as ClientValidationErrors
from middlewared.service_exception import ValidationErrors
from middlewared.test.integration.assets.directory_service import (
active_directory, override_nameservers)
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.assets.privilege import privilege
from middlewared.test.integration.assets.product import product_type
from middlewared.test.integration.utils import call, client, ssh
from middlewared.test.integration.utils.client import truenas_server
from middlewared.test.integration.utils.system import reset_systemd_svcs
from auto_config import ha
from protocols import smb_connection, smb_share
from truenas_api_client import ClientException
# On HA systems the tests target the virtual hostname when the environment
# provides one; otherwise fall back to the configured test hostname.
if ha and "hostname_virtual" in os.environ:
    hostname = os.environ["hostname_virtual"]
else:
    from auto_config import hostname

try:
    from config import AD_DOMAIN, ADPASSWORD, ADUSERNAME
    AD_USER = fr"AD02\{ADUSERNAME.lower()}"
except ImportError:
    # Skip the whole module when AD credentials are not configured.
    Reason = 'ADNameServer AD_DOMAIN, ADPASSWORD, or/and ADUSERNAME are missing in config.py"'
    pytestmark = pytest.mark.skip(reason=Reason)

SMB_NAME = "TestADShare"
def remove_dns_entries(payload):
    """Delete DNS records via nsupdate; `payload` is a list of nsupdate ops."""
    call('dns.nsupdate', {'ops': payload})
def cleanup_forward_zone():
    """Remove stale A/AAAA records for this host from the AD forward zone.

    A forward lookup raising NXDOMAIN means the zone has no records for
    our hostname and there is nothing to clean up.
    """
    try:
        result = call('dnsclient.forward_lookup', {'names': [f'{hostname}.{AD_DOMAIN}']})
    except dns.resolver.NXDOMAIN:
        # No entry, nothing to do
        return

    # Build one DELETE op per address currently registered for our name.
    addresses = [ipaddress.ip_address(rdata['address']) for rdata in result]
    payload = [
        {
            'command': 'DELETE',
            'name': f'{hostname}.{AD_DOMAIN}.',
            'address': str(addr),
            'type': 'A' if addr.version == 4 else 'AAAA',
        }
        for addr in addresses
    ]
    remove_dns_entries(payload)
def check_ad_started():
    """Return True if a directory service is active.

    Whenever a service is active it must be Active Directory and report a
    HEALTHY status; anything else fails the calling test via assert.
    """
    status = call('directoryservices.status')
    if status['type'] is not None:
        assert status['type'] == 'ACTIVEDIRECTORY'
        assert status['status'] == 'HEALTHY'
        return True
    return False
def cleanup_reverse_zone():
    """Remove stale PTR records for this host's addresses from the AD
    reverse zone."""
    result = call('activedirectory.ipaddresses_to_register', {'hostname': f'{hostname}.{AD_DOMAIN}.', 'bindip': []}, False)
    # Map "<reversed-addr>.in-addr.arpa." (or ip6.arpa.) back to the address.
    ptr_table = {f'{ipaddress.ip_address(i).reverse_pointer}.': i for i in result}
    try:
        result = call('dnsclient.reverse_lookup', {'addresses': list(ptr_table.values())})
    except dns.resolver.NXDOMAIN:
        # No entry, nothing to do
        return

    payload = []
    for host in result:
        reverse_pointer = host["name"]
        assert reverse_pointer in ptr_table, str(ptr_table)
        addr = ipaddress.ip_address(ptr_table[reverse_pointer])
        payload.append({
            'command': 'DELETE',
            'name': host['target'],
            'address': str(addr),
            'type': 'A' if addr.version == 4 else 'AAAA'
        })

    remove_dns_entries(payload)
@pytest.fixture(scope="function")
def set_product_type(request):
    """Ensure an enterprise product type so ds_auth can be enabled."""
    if ha:
        # HA product is already enterprise-licensed
        yield
    else:
        with product_type():
            yield


@pytest.fixture(scope="function")
def set_ad_nameserver(request):
    """Point the NAS nameservers at the AD DNS server for this test."""
    with override_nameservers() as ns:
        yield (request, ns)
def test_cleanup_nameserver(set_ad_nameserver):
    """Obtain a kerberos ticket as domain admin (with an explicit KDC
    override) and purge any stale forward / reverse DNS records left over
    from previous test runs."""
    domain_info = call('activedirectory.domain_info', AD_DOMAIN)
    cred = call('kerberos.get_cred', {'dstype': 'ACTIVEDIRECTORY',
                                      'conf': {'bindname': ADUSERNAME,
                                               'bindpw': ADPASSWORD,
                                               'domainname': AD_DOMAIN
                                               }
                                      })
    call('kerberos.do_kinit', {'krb5_cred': cred,
                               'kinit-options': {'kdc_override': {'domain': AD_DOMAIN.upper(),
                                                                  'kdc': domain_info['KDC server']
                                                                  },
                                                 }
                               })
    # Now that we have proper kinit as domain admin
    # we can nuke stale DNS entries from orbit.
    #
    cleanup_forward_zone()
    cleanup_reverse_zone()
def test_enable_leave_activedirectory():
    """Join then leave AD, verifying secrets backup, update validation,
    idmapping, DNS registration, auto-generated privilege, and that leaving
    cleans everything up again."""
    reset_systemd_svcs('winbind')
    assert check_ad_started() is False

    if not ha:
        with pytest.raises(ValidationErrors):
            # At this point we are not enterprise licensed
            call("system.general.update", {"ds_auth": True})

    short_name = None
    with active_directory(dns_timeout=15) as ad:
        short_name = ad['dc_info']['Pre-Win2k Domain']
        # Make sure we can read our secrets.tdb file
        secrets_has_domain = call('directoryservices.secrets.has_domain', short_name)
        assert secrets_has_domain is True

        # Check that our database has backup of this info written to it.
        db_secrets = call('directoryservices.secrets.get_db_secrets')[f'{hostname.upper()}$']
        assert f'SECRETS/MACHINE_PASSWORD/{short_name}' in db_secrets

        # Last password change should be populated
        passwd_change = call('directoryservices.get_last_password_change')
        assert passwd_change['dbconfig'] is not None
        assert passwd_change['secrets'] is not None

        # We should be able to change some parameters when joined to AD
        call('activedirectory.update', {'domainname': AD_DOMAIN, 'verbose_logging': True}, job=True)

        # Changing kerberos realm should raise ValidationError
        with pytest.raises(ClientValidationErrors) as ve:
            call('activedirectory.update', {'domainname': AD_DOMAIN, 'kerberos_realm': None}, job=True)
        assert ve.value.errors[0].errmsg.startswith('Kerberos realm may not be altered')

        # This should be caught by our catchall
        with pytest.raises(ClientValidationErrors) as ve:
            call('activedirectory.update', {'domainname': AD_DOMAIN, 'createcomputer': ''}, job=True)
        assert ve.value.errors[0].errmsg.startswith('Parameter may not be changed')

        assert check_ad_started() is True

        # Verify that idmapping is working
        pw = ad['user_obj']

        # Verify winbindd information
        assert pw['sid'] is not None, str(ad)
        # a S-1-22-1- prefix would mean a synthesized (non-AD) unix SID
        assert not pw['sid'].startswith('S-1-22-1-'), str(ad)
        assert pw['local'] is False
        assert pw['source'] == 'ACTIVEDIRECTORY'

        # Our hostname should have been registered in AD DNS
        result = call('dnsclient.forward_lookup', {'names': [f'{hostname}.{AD_DOMAIN}']})
        assert len(result) != 0

        addresses = [x['address'] for x in result]
        assert truenas_server.ip in addresses

        # Joining generates a privilege for the domain admins group
        res = call('privilege.query', [['name', 'C=', AD_DOMAIN]], {'get': True})
        assert res['ds_groups'][0]['name'].endswith('domain admins')
        assert res['ds_groups'][0]['sid'].endswith('512')
        assert res['allowlist'][0] == {'method': '*', 'resource': '*'}

    # After leaving: service stopped, secrets and privilege removed
    assert check_ad_started() is False

    secrets_has_domain = call('directoryservices.secrets.has_domain', short_name)
    assert secrets_has_domain is False

    with pytest.raises(KeyError):
        call('user.get_user_obj', {'username': AD_USER})

    result = call('privilege.query', [['name', 'C=', AD_DOMAIN]])
    assert len(result) == 0, str(result)
def test_activedirectory_smb_ops():
    """Exercise basic SMB operations as an AD user: plain share I/O,
    zfs auto-created per-user datasets, and home shares."""
    reset_systemd_svcs('winbind')
    with active_directory(dns_timeout=15) as ad:
        short_name = ad['dc_info']['Pre-Win2k Domain']
        machine_password_key = f'SECRETS/MACHINE_PASSWORD/{short_name}'
        running_pwd = call('directoryservices.secrets.dump')[machine_password_key]
        db_pwd = call('directoryservices.secrets.get_db_secrets')[f'{hostname.upper()}$'][machine_password_key]
        # We've joined and left AD already. Verify secrets still getting backed up correctly.
        assert running_pwd == db_pwd

        with dataset(
            "ad_smb",
            {'share_type': 'SMB'},
            acl=[{
                'tag': 'GROUP',
                'id': ad['user_obj']['pw_uid'],
                'perms': {'BASIC': 'FULL_CONTROL'},
                'flags': {'BASIC': 'INHERIT'},
                'type': 'ALLOW'
            }]
        ) as ds:
            call('service.restart', 'cifs')

            with smb_share(f'/mnt/{ds}', {'name': SMB_NAME}):
                with smb_connection(
                    host=truenas_server.ip,
                    share=SMB_NAME,
                    username=ADUSERNAME,
                    domain='AD02',
                    password=ADPASSWORD
                ) as c:
                    # simple create / write / read-back / delete-on-close
                    fd = c.create_file('testfile.txt', 'w')
                    c.write(fd, b'foo')
                    val = c.read(fd, 0, 3)
                    c.close(fd, True)
                    assert val == b'foo'

                    # same round-trip inside a subdirectory
                    c.mkdir('testdir')
                    fd = c.create_file('testdir/testfile2.txt', 'w')
                    c.write(fd, b'foo2')
                    val = c.read(fd, 0, 4)
                    c.close(fd, True)
                    assert val == b'foo2'

                    c.rmdir('testdir')

        with dataset(
            "ad_datasets",
            {'share_type': 'SMB'},
            acl=[{
                'tag': 'GROUP',
                'id': ad['user_obj']['pw_uid'],
                'perms': {'BASIC': 'FULL_CONTROL'},
                'flags': {'BASIC': 'INHERIT'},
                'type': 'ALLOW'
            }]
        ) as ds:
            # zfs_auto_create + %D/%U suffix creates a per-user dataset on connect
            with smb_share(f'/mnt/{ds}', {
                'name': 'DATASETS',
                'purpose': 'NO_PRESET',
                'auxsmbconf': 'zfs_core:zfs_auto_create = true',
                'path_suffix': '%D/%U'
            }):
                with smb_connection(
                    host=truenas_server.ip,
                    share='DATASETS',
                    username=ADUSERNAME,
                    domain='AD02',
                    password=ADPASSWORD
                ) as c:
                    fd = c.create_file('nested_test_file', "w")
                    c.write(fd, b'EXTERNAL_TEST')
                    c.close(fd)

            # auto-created dataset should carry a non-trivial ACL
            acl = call('filesystem.getacl', os.path.join(f'/mnt/{ds}', 'AD02', ADUSERNAME), True)
            assert acl['trivial'] is False, str(acl)

        with dataset(
            "ad_home",
            {'share_type': 'SMB'},
            acl=[{
                'tag': 'GROUP',
                'id': ad['user_obj']['pw_uid'],
                'perms': {'BASIC': 'FULL_CONTROL'},
                'flags': {'BASIC': 'INHERIT'},
                'type': 'ALLOW'
            }]
        ) as ds:
            with smb_share(f'/mnt/{ds}', {
                'name': 'TEST_HOME',
                'purpose': 'NO_PRESET',
                'home': True,
            }):
                # must refresh idmap cache to get new homedir from NSS
                # this means we may need a few seconds for winbindd
                # service to settle down on slow systems (like our CI VMs)
                sleep(10 if ha else 5)

                with smb_connection(
                    host=truenas_server.ip,
                    share='HOMES',
                    username=ADUSERNAME,
                    domain='AD02',
                    password=ADPASSWORD
                ) as c:
                    fd = c.create_file('homes_test_file', "w")
                    c.write(fd, b'EXTERNAL_TEST')
                    c.close(fd)

            file_local_path = os.path.join(f'/mnt/{ds}', 'AD02', ADUSERNAME, 'homes_test_file')
            acl = call('filesystem.getacl', file_local_path, True)
            assert acl['trivial'] is False, str(acl)
def test_account_privilege_authentication(set_product_type):
    """With ds_auth enabled, AD accounts can authenticate to the API and
    receive exactly the privileges granted to their groups."""
    reset_systemd_svcs('winbind smbd')
    with active_directory(dns_timeout=15):
        call("system.general.update", {"ds_auth": True})
        # Directory-service users and groups are visible to the API
        nusers = call("user.query", [["local", "=", False]], {"count": True})
        assert nusers > 0
        ngroups = call("group.query", [["local", "=", False]], {"count": True})
        assert ngroups > 0
        try:
            # RID 513 is constant for "Domain Users"
            domain_sid = call("idmap.domain_info", AD_DOMAIN.split(".")[0])['sid']
            with privilege({
                "name": "AD privilege",
                "local_groups": [],
                "ds_groups": [f"{domain_sid}-513"],
                "allowlist": [
                    {"method": "CALL", "resource": "system.info"},
                    {"method": "CALL", "resource": "user.query"},
                    {"method": "CALL", "resource": "group.query"},
                ],
                "web_shell": False,
            }):
                with client(auth=(f"limiteduser@{AD_DOMAIN}", ADPASSWORD)) as c:
                    methods = c.call("core.get_methods")
                    me = c.call("auth.me")

                    assert 'DIRECTORY_SERVICE' in me['account_attributes']
                    assert 'ACTIVE_DIRECTORY' in me['account_attributes']

                    # limited user sees the same non-local accounts as root
                    assert len(c.call("user.query", [["local", "=", False]])) == nusers
                    assert len(c.call("group.query", [["local", "=", False]])) == ngroups

                    # only allowlisted methods are available
                    assert "system.info" in methods
                    assert "pool.create" not in methods

                # ADUSERNAME is member of domain admins and will have
                # all privileges
                with client(auth=(f"{ADUSERNAME}@{AD_DOMAIN}", ADPASSWORD)) as c:
                    methods = c.call("core.get_methods")
                    assert "pool.create" in methods

                # Alternative formatting for user name <DOMAIN>\<username>.
                # this should also work for auth
                with client(auth=(AD_USER, ADPASSWORD)) as c:
                    methods = c.call("core.get_methods")
                    assert "pool.create" in methods
        finally:
            call("system.general.update", {"ds_auth": False})
def test_secrets_restore():
    """A deleted secrets.tdb fails the health check but is restored from
    the database backup by directoryservices.health.recover."""
    with active_directory():
        reset_systemd_svcs('winbind smbd')
        assert check_ad_started() is True

        ssh('rm /var/db/system/samba4/private/secrets.tdb')

        with pytest.raises(ClientException):
            call('directoryservices.health.check')

        call('directoryservices.health.recover')
        assert check_ad_started() is True


def test_keytab_restore():
    """A deleted AD machine-account keytab row is recreated by
    directoryservices.health.recover."""
    with active_directory():
        reset_systemd_svcs('winbind smbd')
        assert check_ad_started() is True

        kt_id = call('kerberos.keytab.query', [['name', '=', 'AD_MACHINE_ACCOUNT']], {'get': True})['id']
        # delete our keytab from datastore
        call('datastore.delete', 'directoryservice.kerberoskeytab', kt_id)

        call('directoryservices.health.recover')
        # verify that it was recreated during health check
        call('kerberos.keytab.query', [['name', '=', 'AD_MACHINE_ACCOUNT']], {'get': True})
| 15,103 | Python | .py | 325 | 34.236923 | 123 | 0.559815 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,196 | test_audit_ftp.py | truenas_middleware/tests/api2/test_audit_ftp.py | from middlewared.test.integration.utils import call
from middlewared.test.integration.utils.audit import expect_audit_method_calls
def test_ftp_config_audit():
    '''
    Test the auditing of FTP configuration changes
    '''
    initial_ftp_config = call('ftp.config')
    try:
        # UPDATE: the ftp.update call must produce a matching audit entry
        payload = {
            'clients': 1000,
            'banner': "Hello, from New York"
        }
        with expect_audit_method_calls([{
            'method': 'ftp.update',
            'params': [payload],
            'description': 'Update FTP configuration',
        }]):
            call('ftp.update', payload)
    finally:
        # Restore initial state
        restore_payload = {
            'clients': initial_ftp_config['clients'],
            'banner': initial_ftp_config['banner']
        }
        call('ftp.update', restore_payload)
| 854 | Python | .py | 26 | 24.384615 | 78 | 0.585956 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,197 | test_425_smb_protocol.py | truenas_middleware/tests/api2/test_425_smb_protocol.py | #!/usr/bin/env python3
import pytest
import sys
import os
import enum
import secrets
import string
from base64 import b64decode, b64encode
apifolder = os.getcwd()
sys.path.append(apifolder)
from functions import PUT, GET, SSH_TEST
from auto_config import (
user,
password,
)
from middlewared.test.integration.assets.account import user as create_user
from middlewared.test.integration.assets.smb import copy_stream, get_stream, smb_share, smb_mount
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call, ssh
from middlewared.test.integration.utils.client import truenas_server
from pytest_dependency import depends
from protocols import SMB, smb_connection
from samba import ntstatus
from samba import NTSTATUSError
SMB_NAME = "SMBPROTO"
SMB_USER = "smbuser"
# Random 10-character alphanumeric password, regenerated every run.
SMB_PWD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
# Populated by test_001 with the module-scoped fixture's state.
TEST_DATA = {}


class DOSmode(enum.Enum):
    # Classic DOS attribute bit values as exposed over SMB.
    READONLY = 1
    HIDDEN = 2
    SYSTEM = 4
    ARCHIVE = 32
# Base64-encoded netatalk AppleDouble metadata xattr as written by AFP.
netatalk_metadata = """
AAUWBwACAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAEAAAAmgAAAAAAAAAIAAABYgAAABAAAAAJAAAA
egAAACAAAAAOAAABcgAAAASAREVWAAABdgAAAAiASU5PAAABfgAAAAiAU1lOAAABhgAAAAiAU1Z+
AAABjgAAAARQTEFQbHRhcAQQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAIbmGsyG5hrOAAAAAKEvSOAAAAAAAAAAAAAAAAAcBAAAAAAAA9xS5YAAAAAAZ
AAAA
"""

# Expected AFP_AfpInfo value after the server parses the metadata above.
parsed_meta = """
QUZQAAAAAQAAAAAAgAAAAFBMQVBsdGFwBBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAA
"""

# Base64-encoded macOS kMDLabel xattr payload.
apple_kmdlabel = """
8oBNzAaTG04NeBVAT078KCEjrzPrwPTUuZ4MXK1qVRDlBqLATmFSDFO2hXrS5VWsrg1DoZqeX6kF
zDEInIzw2XrZkI9lY3jvMAGXu76QvwrpRGv1G3Ehj+0=
"""

# Base64-encoded macOS kMDItemUserTags xattr payload (binary plist).
apple_kmditemusertags = """
YnBsaXN0MDCgCAAAAAAAAAEBAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAJ
"""

# Local xattr name -> its SMB-visible stream name plus raw/decoded payloads
# (and, for netatalk metadata, the value expected when read back over SMB).
AFPXattr = {
    "org.netatalk.Metadata": {
        "smbname": "AFP_AfpInfo",
        "text": netatalk_metadata,
        "bytes": b64decode(netatalk_metadata),
        "smb_text": parsed_meta,
        "smb_bytes": b64decode(parsed_meta)
    },
    "com.apple.metadata:_kMDItemUserTags": {
        "smbname": "com.apple.metadata_kMDItemUserTags",
        "text": apple_kmditemusertags,
        "bytes": b64decode(apple_kmditemusertags)
    },
    "com.apple.metadata:kMDLabel_anemgxoe73iplck2hfuumqxdbu": {
        "smbname": "com.apple.metadatakMDLabel_anemgxoe73iplck2hfuumqxdbu",
        "text": apple_kmdlabel,
        "bytes": b64decode(apple_kmdlabel)
    },
}
@pytest.fixture(scope='module')
def initialize_for_smb_tests(request):
    """
    Module-scoped setup: create the SMB dataset, the test user and the test
    share, then start the cifs service for the duration of the module.
    Yields a dict with the created 'dataset', 'share' and 'user' state.
    """
    with dataset('smb-proto', data={'share_type': 'SMB'}) as ds:
        with create_user({
            'username': SMB_USER,
            'full_name': SMB_USER,
            'group_create': True,
            'password': SMB_PWD
        }) as u:
            try:
                # zfs_core:base_user_quota causes a 1 GiB ZFS user quota to be
                # applied on tree connect — exercised later by test_090.
                with smb_share(os.path.join('/mnt', ds), SMB_NAME, {
                    'auxsmbconf': 'zfs_core:base_user_quota = 1G'
                }) as s:
                    try:
                        call('service.start', 'cifs')
                        yield {'dataset': ds, 'share': s, 'user': u}
                    finally:
                        call('service.stop', 'cifs')
            finally:
                # In test_140_enable_aapl we turned afp on for the share, so wait until
                # it has been destroyed before turning off aapl_extensions.
                call('smb.update', {
                    'enable_smb1': False,
                    'aapl_extensions': False
                })
@pytest.fixture(scope='module')
def mount_share():
    """Mount the test share locally and expose its mountpoint to the tests."""
    share_name = TEST_DATA['share']['name']
    with smb_mount(share_name, SMB_USER, SMB_PWD) as mountpoint:
        yield {'mountpoint': mountpoint}
@pytest.mark.dependency(name="SMB_SHARE_CREATED")
def test_001_initialize_smb_servce(initialize_for_smb_tests):
    # Stash the fixture state in module-level TEST_DATA so tests that do not
    # take the fixture directly can still reference dataset/share/user.
    TEST_DATA.update(initialize_for_smb_tests)
def test_002_check_client_count(request):
    """A single established SMB session should be reflected by smb.client_count."""
    depends(request, ["SMB_SHARE_CREATED"])
    session = smb_connection(
        share=SMB_NAME,
        username=SMB_USER,
        password=SMB_PWD,
        smb1=False
    )
    with session:
        assert call("smb.client_count") == 1
@pytest.mark.dependency(name="SHARE_IS_WRITABLE")
def test_009_share_is_writable(request):
    """
    Create an empty file, set the "delete on close" flag, then close it.
    NTStatusError should be raised containing failure details if we are for
    some reason unable to access the share.
    This test will fail if smb.conf / smb4.conf does not exist on client / server running test.
    """
    depends(request, ["SMB_SHARE_CREATED"])
    conn = SMB()
    conn.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
    handle = conn.create_file("testfile", "w")
    conn.close(handle, True)
    conn.disconnect()
@pytest.mark.parametrize('dm', DOSmode)
def test_010_check_dosmode_create(request, dm):
    """
    Test setting of different DOS attributes through SMB2 Create, then verify
    the attribute is visible in the directory listing afterwards.
    """
    depends(request, ["SHARE_IS_WRITABLE"])
    if dm.value > DOSmode.SYSTEM.value:
        # ARCHIVE is set automatically by the kernel and is not settable here.
        return
    # Character flag understood by create_file() for each settable DOS mode.
    attr_chars = {DOSmode.READONLY: "r", DOSmode.HIDDEN: "h", DOSmode.SYSTEM: "s"}
    c = SMB()
    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
    try:
        c.create_file(dm.name, "w", attr_chars[dm])
        # Locate the new file in the listing. Previously a missing entry left
        # locals unbound and raised NameError instead of a clear assertion
        # failure; the connection also leaked on any exception.
        entry = next((f for f in c.ls("/") if f['name'] == dm.name), None)
    finally:
        c.disconnect()
    assert entry is not None, f'{dm.name}: file not found in directory listing'
    # Archive is automatically set by kernel; mask it out before checking.
    to_check = entry['attrib'] & ~DOSmode.ARCHIVE.value
    assert (to_check & dm.value) != 0, entry
def test_011_check_dos_ro_cred_handling(request):
    """
    Create a file with the readonly attribute set, then use the still-open
    handle to write data to it.
    """
    depends(request, ["SHARE_IS_WRITABLE"])
    conn = SMB()
    conn.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
    handle = conn.create_file("RO_TEST", "w", "r")
    conn.write(handle, b"TESTING123\n")
    conn.disconnect()
@pytest.mark.dependency(name="SMB1_ENABLED")
def test_050_enable_smb1(request):
    """
    Enable the SMB1 protocol so the SMB1 tests below can connect.
    Uses the websocket `call` helper for consistency with the rest of this
    module (the module fixture already calls `smb.update` the same way);
    previously this used the legacy REST `PUT` helper.
    """
    depends(request, ["SMB_SHARE_CREATED"])
    call('smb.update', {'enable_smb1': True})
@pytest.mark.dependency(name="SHARE_IS_WRITABLE_SMB1")
def test_051_share_is_writable_smb1(request):
    """
    Create an empty file over SMB1, set the "delete on close" flag, then
    close it. NTStatusError should be raised containing failure details
    if we are for some reason unable to access the share.
    This test will fail if client min protocol != NT1 in smb.conf of SMB client.
    Sample smb.conf entry:
    [global]
    client min protocol = nt1
    """
    depends(request, ["SMB_SHARE_CREATED"])
    conn = SMB()
    conn.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True)
    handle = conn.create_file("testfile", "w")
    conn.close(handle, True)
    conn.disconnect()
@pytest.mark.parametrize('dm', DOSmode)
def test_052_check_dosmode_create_smb1(request, dm):
    """
    Test setting of different DOS attributes through SMB1 create, then verify
    the attribute is visible in the directory listing afterwards.
    """
    # Depend on the SMB1 writability marker (previously depended on the SMB2
    # one, which does not guarantee SMB1 was enabled and functional; the
    # SHARE_IS_WRITABLE_SMB1 marker defined by test_051 was never used).
    depends(request, ["SHARE_IS_WRITABLE_SMB1"])
    if dm.value > DOSmode.SYSTEM.value:
        # ARCHIVE is set automatically by the kernel and is not settable here.
        return
    fname = f'{dm.name}_smb1'
    # Character flag understood by create_file() for each settable DOS mode.
    attr_chars = {DOSmode.READONLY: "r", DOSmode.HIDDEN: "h", DOSmode.SYSTEM: "s"}
    c = SMB()
    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True)
    try:
        c.create_file(fname, "w", attr_chars[dm])
        # Previously a missing listing entry left locals unbound and raised
        # NameError instead of a clear assertion failure.
        entry = next((f for f in c.ls("/") if f['name'] == fname), None)
    finally:
        c.disconnect()
    assert entry is not None, f'{fname}: file not found in directory listing'
    # Archive is automatically set by kernel; mask it out before checking.
    to_check = entry['attrib'] & ~DOSmode.ARCHIVE.value
    assert (to_check & dm.value) != 0, entry
@pytest.mark.dependency(name="STREAM_TESTFILE_CREATED")
def test_060_create_base_file_for_streams_tests(request):
    """Create the base file and directory used by the stream tests below."""
    depends(request, ["SMB_SHARE_CREATED"])
    conn = SMB()
    conn.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True)
    handle = conn.create_file("streamstestfile", "w")
    conn.close(handle)
    conn.mkdir("streamstestdir")
    conn.disconnect()
@pytest.mark.dependency(name="STREAM_WRITTEN_SMB2")
def test_061_create_and_write_stream_smb2(request, mount_share):
    """
    Create our initial stream and write to it over SMB2/3 protocol.
    Start with offset 0.
    """
    depends(request, ["STREAM_TESTFILE_CREATED"])
    c = SMB()
    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
    # Write a named stream on both a regular file and a directory.
    fd = c.create_file("streamstestfile:smb2_stream", "w")
    c.write(fd, b'test1', 0)
    c.close(fd)
    fd2 = c.create_file("streamstestdir:smb2_stream", "w")
    c.write(fd2, b'test2', 0)
    c.close(fd2)
    # Read each stream back over SMB and also via get_stream() — presumably
    # through the local mount provided by the mount_share fixture (confirm).
    fd3 = c.create_file("streamstestfile:smb2_stream", "w")
    contents = c.read(fd3, 0, 5)
    c.close(fd3)
    kcontent1 = get_stream('streamstestfile', 'smb2_stream')
    fd4 = c.create_file("streamstestdir:smb2_stream", "w")
    contents2 = c.read(fd4, 0, 5)
    c.close(fd4)
    kcontent2 = get_stream('streamstestdir', 'smb2_stream')
    c.rmdir("streamstestdir")
    c.disconnect()
    assert (contents.decode() == "test1")
    assert (contents2.decode() == "test2")
    # Remove samba compatibility NULL byte
    assert kcontent1[:-1].decode() == 'test1'
    assert kcontent2[:-1].decode() == 'test2'
@pytest.mark.dependency(name="LARGE_STREAM_WRITTEN_SMB2")
def test_062_write_stream_large_offset_smb2(request, mount_share):
    """
    Append to our existing stream over SMB2/3 protocol. Specify an offset that will
    cause resulting xattr to exceed 64KiB default xattr size limit in Linux.
    """
    depends(request, ["STREAM_TESTFILE_CREATED"])
    with smb_connection(
        share=SMB_NAME,
        username=SMB_USER,
        password=SMB_PWD,
        smb1=False
    ) as c:
        # Write past the 64 KiB boundary (128 KiB offset), then read it back.
        fd = c.create_file("streamstestfile:smb2_stream", "w")
        try:
            c.write(fd, b'test2', 131072)
        finally:
            c.close(fd)
        fd2 = c.create_file("streamstestfile:smb2_stream", "w")
        try:
            contents = c.read(fd2, 131072, 5)
        finally:
            c.close(fd2)
        kcontent = get_stream('streamstestfile', 'smb2_stream')
        assert (contents.decode() == "test2")
        # Verify that reading a large stream functions correctly
        assert len(kcontent) == 131072 + 5 + 1
        # Remove samba compatibility NULL byte
        assert kcontent[131072:-1].decode() == 'test2'
        # Verify that SMB server rejects too-large stream write
        fd = c.create_file("streamstestfile:smb2_stream", "w")
        try:
            with pytest.raises(NTSTATUSError) as e:
                c.write(fd, b'test2', 2097152)
            assert e.value.args[0] == ntstatus.NT_STATUS_FILE_SYSTEM_LIMITATION
        finally:
            c.close(fd)
        # Verify that SMB server allows _very_ large write
        fd = c.create_file("streamstestfile:smb2_stream", "w")
        try:
            # We have to leave an extra byte for that nul at end of xattr
            offset = 2097152 - (len(b"test2") + 1)
            c.write(fd, b"test2", offset)
            contents = c.read(fd, offset, 5)
            assert contents.decode() == "test2"
        finally:
            c.close(fd)
        # Server-side stream copy, then verify both streams are identical.
        copy_stream('streamstestfile', 'smb2_stream', 'smb2_stream2')
        fd = c.create_file("streamstestfile:smb2_stream", "r")
        try:
            contents_stream1 = c.read(fd, 0, 2097152)
        finally:
            c.close(fd)
        fd = c.create_file("streamstestfile:smb2_stream2", "r")
        try:
            contents_stream2 = c.read(fd, 0, 2097152)
        finally:
            c.close(fd)
        assert contents_stream1 == contents_stream2
def test_063_stream_delete_on_close_smb2(request):
    """
    Open the alternate data stream over SMB2/3, set delete_on_close, then close.
    TODO: I have open MR to expand samba python bindings to support stream enumeration.
    Verification of stream deletion will have to be added once this is merged.
    """
    depends(request, ["STREAM_WRITTEN_SMB2", "LARGE_STREAM_WRITTEN_SMB2"])
    conn = SMB()
    conn.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
    handle = conn.create_file("streamstestfile:smb2_stream", "w")
    conn.close(handle, True)
    conn.disconnect()
@pytest.mark.dependency(name="STREAM_WRITTEN_SMB1")
def test_065_create_and_write_stream_smb1(request):
    """
    Create the initial stream over SMB1, write to it at offset 0, then read
    it back and verify the data round-trips.
    """
    depends(request, ["STREAM_TESTFILE_CREATED"])
    conn = SMB()
    conn.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True)
    write_handle = conn.create_file("streamstestfile:smb1_stream", "w")
    conn.write(write_handle, b'test1', 0)
    conn.close(write_handle)
    read_handle = conn.create_file("streamstestfile:smb1_stream", "w")
    data = conn.read(read_handle, 0, 5)
    conn.close(read_handle)
    conn.disconnect()
    assert data.decode() == "test1"
@pytest.mark.dependency(name="LARGE_STREAM_WRITTEN_SMB1")
def test_066_write_stream_large_offset_smb1(request):
    """
    Append to the existing stream over SMB1 at an offset that pushes the
    resulting xattr past the 64KiB default xattr size limit in Linux.
    """
    depends(request, ["STREAM_WRITTEN_SMB1"])
    conn = SMB()
    conn.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True)
    write_handle = conn.create_file("streamstestfile:smb1_stream", "w")
    conn.write(write_handle, b'test2', 131072)
    conn.close(write_handle)
    read_handle = conn.create_file("streamstestfile:smb1_stream", "w")
    data = conn.read(read_handle, 131072, 5)
    conn.close(read_handle)
    conn.disconnect()
    assert data.decode() == "test2"
def test_067_stream_delete_on_close_smb1(request):
    """
    Open the alternate data stream over SMB1, set delete_on_close, then close.
    TODO: I have open MR to expand samba python bindings to support stream enumeration.
    Verification of stream deletion will have to be added once this is merged.
    """
    depends(request, ["STREAM_WRITTEN_SMB1", "LARGE_STREAM_WRITTEN_SMB1"])
    conn = SMB()
    conn.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True)
    handle = conn.create_file("streamstestfile:smb1_stream", "w")
    conn.close(handle, True)
    conn.disconnect()
def test_068_case_insensitive_rename(request):
    """
    ZFS is case sensitive, but case preserving when casesensitivity == insensitive
    rename of to_rename -> To_rename should succeed and new file appear
    correctly in directory listing.
    Will fail with NT_STATUS_OBJECT_NAME_COLLISION if we have regression and
    samba identifies files as same.
    """
    depends(request, ["SHARE_IS_WRITABLE"])
    with smb_connection(
        share=SMB_NAME,
        username=SMB_USER,
        password=SMB_PWD,
        smb1=False
    ) as c:
        fd = c.create_file("to_rename", "w")
        c.close(fd)
        c.rename("to_rename", "To_rename")
        files = [x['name'] for x in c.ls('\\')]
        assert "To_rename" in files
        assert "to_rename" not in files
        # MacOS Sonoma currently (Aug 2024) gets SMB handle on file to be renamed
        # potentially via the target of the rename which potentially hits optimization
        # in samba. This validates that rename in this way also works on case-insensitve
        # filesystems.
        # On the case-insensitive share "to_rename" resolves to the existing
        # "To_rename", so this rename changes only the on-disk case.
        c.rename("to_rename", "to_rename")
        files = [x['name'] for x in c.ls('\\')]
        assert "to_rename" in files
        assert "To_rename" not in files
def test_069_normal_rename(request):
    """Verify that an ordinary rename completes successfully."""
    depends(request, ["SHARE_IS_WRITABLE"])
    conn = SMB()
    conn.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True)
    handle = conn.create_file("old_file_to_rename", "w")
    conn.close(handle)
    conn.rename("old_file_to_rename", "renamed_new_file")
    listing = [entry['name'] for entry in conn.ls('\\')]
    conn.disconnect()
    assert "renamed_new_file" in listing
"""
At this point we grant SMB_USER SeDiskOperatorPrivilege by making it a member
of the local group builtin_administrators. This privilege is required to manipulate
SMB quotas.
"""
@pytest.mark.dependency(name="BA_ADDED_TO_USER")
def test_089_add_to_builtin_admins(request):
depends(request, ["SHARE_IS_WRITABLE"])
smbuser_id = TEST_DATA['user']['id']
ba = GET('/group?group=builtin_administrators').json()
assert len(ba) != 0
userinfo = GET(f'/user/id/{smbuser_id}').json()
groups = userinfo['groups']
groups.append(ba[0]['id'])
payload = {'groups': groups}
results = PUT(f"/user/id/{smbuser_id}/", payload)
assert results.status_code == 200, f"res: {results.text}, payload: {payload}"
@pytest.mark.parametrize('proto', ["SMB2"])
def test_090_test_auto_smb_quota(request, proto):
    """
    Since the share is configured with the zfs_core:base_user_quota parameter,
    the first SMB tree connect should have set a ZFS user quota on the
    underlying dataset. Test querying through the SMB protocol.
    Currently SMB1 protocol is disabled because of hard-coded check in
    source3/smbd/nttrans.c to only allow root to get/set quotas.
    """
    depends(request, ["BA_ADDED_TO_USER"])
    c = SMB()
    qt = c.get_quota(
        share=SMB_NAME,
        username=SMB_USER,
        password=SMB_PWD,
        smb1=(proto == "SMB1")
    )
    # There should only be one quota entry
    assert len(qt) == 1, qt
    # username is prefixed with server netbios name "SERVER\user"
    assert qt[0]['user'].endswith(SMB_USER), qt
    # Hard and Soft limits should be set to value above (1GiB)
    assert qt[0]['soft_limit'] == (2 ** 30), qt
    assert qt[0]['hard_limit'] == (2 ** 30), qt
def test_091_remove_auto_quota_param(request):
    """Strip the base_user_quota auxiliary parameter from the test share."""
    depends(request, ["SMB_SHARE_CREATED"])
    share_id = TEST_DATA['share']['id']
    call('sharing.smb.update', share_id, {'auxsmbconf': ''})
def _assert_user_quota_entry(qt, limit):
    """Assert `qt` holds exactly one entry for SMB_USER with the given limits.

    `limit` of None means "no quota set" (both limits are None); any other
    value is expected verbatim in both the soft and hard limit fields.
    """
    assert len(qt) == 1, qt
    # username is prefixed with server netbios name "SERVER\user"
    assert qt[0]['user'].endswith(SMB_USER), qt
    if limit is None:
        assert qt[0]['soft_limit'] is None, qt
        assert qt[0]['hard_limit'] is None, qt
    else:
        assert qt[0]['soft_limit'] == limit, qt
        assert qt[0]['hard_limit'] == limit, qt


@pytest.mark.parametrize('proto', ["SMB2"])
def test_092_set_smb_quota(request, proto):
    """
    This test checks our ability to set a ZFS quota
    through the SMB protocol by first setting a 2 GiB
    quota, then reading it through the SMB protocol, then
    resetting to zero.
    """
    depends(request, ["BA_ADDED_TO_USER"])
    new_quota = 2 * (2 ** 30)
    smb1 = (proto == "SMB1")
    c = SMB()
    # Set a 2 GiB hard limit; per the assertions below the server reports it
    # in both the soft and hard limit fields.
    qt = c.set_quota(
        share=SMB_NAME,
        username=SMB_USER,
        password=SMB_PWD,
        hardlimit=new_quota,
        target=SMB_USER,
        smb1=smb1
    )
    _assert_user_quota_entry(qt, new_quota)
    # A fresh query must return the same values.
    qt = c.get_quota(
        share=SMB_NAME,
        username=SMB_USER,
        password=SMB_PWD,
        smb1=smb1
    )
    _assert_user_quota_entry(qt, new_quota)
    # A hard limit of -1 removes the quota entirely.
    qt = c.set_quota(
        share=SMB_NAME,
        username=SMB_USER,
        password=SMB_PWD,
        hardlimit=-1,
        target=SMB_USER,
        smb1=smb1
    )
    _assert_user_quota_entry(qt, None)
    qt = c.get_quota(
        share=SMB_NAME,
        username=SMB_USER,
        password=SMB_PWD,
        smb1=smb1
    )
    _assert_user_quota_entry(qt, None)
def test_95_strip_quota(request):
    """Remove any ZFS user quota previously set for the test SMB user."""
    depends(request, ["BA_ADDED_TO_USER"])
    quota_entry = {
        'quota_type': 'USER',
        'id': SMB_USER,
        'quota_value': 0
    }
    call('pool.dataset.set_quota', TEST_DATA['dataset'], [quota_entry])
@pytest.mark.dependency(name="AFP_ENABLED")
def test_140_enable_aapl(request):
    """Enable AAPL extensions globally and mark the test share as an AFP share."""
    depends(request, ["SMB_SHARE_CREATED"])
    call('smb.update', {'aapl_extensions': True})
    share_id = TEST_DATA['share']['id']
    call('sharing.smb.update', share_id, {'afp': True})
@pytest.mark.dependency(name="SSH_XATTR_SET")
@pytest.mark.parametrize('xat', AFPXattr.keys())
def test_151_set_xattr_via_ssh(request, xat):
    """
    Iterate through AFP xattrs and set them on testfile
    via SSH.
    """
    depends(request, ["AFP_ENABLED"], scope="session")
    smb_path = TEST_DATA['share']['path']
    afptestfile = f'{smb_path}/afp_xattr_testfile'
    # Decode the stored base64 text server-side and write it as a native
    # xattr with attr(1); the file is chowned so SMB_USER can access it later.
    cmd = f'touch {afptestfile} && chown {SMB_USER} {afptestfile} && '
    cmd += f'echo -n \"{AFPXattr[xat]["text"]}\" | base64 -d | '
    cmd += f'attr -q -s {xat} {afptestfile}'
    # NOTE(review): `user` and `password` are globals defined outside this
    # chunk — presumably the root SSH credentials; confirm.
    results = SSH_TEST(cmd, user, password)
    assert results['result'] is True, {"cmd": cmd, "res": results['output']}
@pytest.mark.dependency(name="XATTR_CHECK_SMB_READ")
@pytest.mark.parametrize('xat', AFPXattr.keys())
def test_152_check_xattr_via_smb(request, mount_share, xat):
    """
    Read xattr that was written via SSH and verify that
    data is same when viewed over SMB.
    """
    depends(request, ["SSH_XATTR_SET"])
    afptestfile = f'afp_xattr_testfile:{AFPXattr[xat]["smbname"]}'
    # The netatalk metadata xattr is presented differently over SMB, so its
    # expected bytes come from the parsed "smb_bytes" form.
    bytes_to_read = AFPXattr[xat]["smb_bytes"] if xat == "org.netatalk.Metadata" else AFPXattr[xat]["bytes"]
    c = SMB()
    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
    fd = c.create_file(afptestfile, "w")
    # Read one byte extra to detect unexpected trailing data.
    xat_bytes = c.read(fd, 0, len(bytes_to_read) + 1)
    c.close(fd)
    c.disconnect()
    err = {
        "name": xat,
        "b64data": b64encode(xat_bytes)
    }
    # Python base64 library appends a `\t` to end of byte string
    assert xat_bytes == bytes_to_read, str(err)
    # Check via kernel client.
    kcontent = get_stream('afp_xattr_testfile', AFPXattr[xat]['smbname'])
    err = {
        "name": xat,
        "b64data": b64encode(kcontent[:-1])
    }
    # [:-1] drops the samba compatibility NULL byte at end of the xattr.
    assert kcontent[:-1] == bytes_to_read, str(err)
@pytest.mark.dependency(name="XATTR_CHECK_SMB_UNLINK")
@pytest.mark.parametrize('xat', AFPXattr.keys())
def test_153_unlink_xattr_via_smb(request, xat):
    """Open the AFP xattr stream, set "delete on close", then close it."""
    depends(request, ["XATTR_CHECK_SMB_READ"])
    stream_path = f'afp_xattr_testfile:{AFPXattr[xat]["smbname"]}'
    conn = SMB()
    conn.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
    handle = conn.create_file(stream_path, "w")
    conn.close(handle, True)
    conn.disconnect()
@pytest.mark.dependency(name="XATTR_CHECK_SMB_WRITE")
@pytest.mark.parametrize('xat', AFPXattr.keys())
def test_154_write_afp_xattr_via_smb(request, xat):
    """Write the AFP xattr payload back over SMB after the previous unlink."""
    depends(request, ["XATTR_CHECK_SMB_UNLINK"])
    stream_path = f'afp_xattr_testfile:{AFPXattr[xat]["smbname"]}'
    if xat == "org.netatalk.Metadata":
        payload = AFPXattr[xat]["smb_bytes"]
    else:
        payload = AFPXattr[xat]["bytes"]
    conn = SMB()
    conn.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
    handle = conn.create_file(stream_path, "w")
    conn.write(handle, payload)
    conn.close(handle)
    conn.disconnect()
@pytest.mark.parametrize('xat', AFPXattr.keys())
def test_155_ssh_read_afp_xattr(request, xat):
    """
    Read xattr that was set via SMB protocol directly via
    SSH and verify that data is the same.
    """
    depends(request, ["XATTR_CHECK_SMB_WRITE"], scope="session")
    # Netatalk-compatible xattr gets additional
    # metadata written to it, which makes comparison
    # of all bytes problematic.
    if xat == "org.netatalk.Metadata":
        return
    smb_path = TEST_DATA['share']['path']
    afptestfile = f'{smb_path}/afp_xattr_testfile'
    # Fetch the raw xattr bytes over SSH, base64-encoded for safe transport.
    cmd = f'attr -q -g {xat} {afptestfile} | base64'
    results = SSH_TEST(cmd, user, password)
    assert results['result'] is True, results['output']
    xat_data = b64decode(results['stdout'])
    assert AFPXattr[xat]['bytes'] == xat_data, results['output']
def test_175_check_external_path(request):
    # Create an "external" share whose path points back at this server's own
    # SMB share, write a file through it, then fetch the file with smbclient
    # on the server and verify the contents round-trip.
    with smb_share(f'EXTERNAL:{truenas_server.ip}\\{SMB_NAME}', 'EXTERNAL'):
        with smb_connection(
            share=SMB_NAME,
            username=SMB_USER,
            password=SMB_PWD,
            smb1=False
        ) as c:
            fd = c.create_file('external_test_file', "w")
            c.write(fd, b'EXTERNAL_TEST')
            c.close(fd)
        # Pull the file through the EXTERNAL (proxied) share via smbclient;
        # `get` writes it into the server-side cwd for the SSH session.
        cmd = f'smbclient //127.0.0.1/EXTERNAL -U {SMB_USER}%{SMB_PWD} '
        cmd += '-c "get external_test_file"'
        ssh(cmd)
        results = SSH_TEST('cat external_test_file', user, password)
        assert results['result'] is True, results['output']
        assert results['stdout'] == 'EXTERNAL_TEST'
def test_176_check_dataset_auto_create(request):
    # With purpose PRIVATE_DATASETS the first connection by a user should
    # auto-create a per-user dataset under the share path with a non-trivial
    # (SMB-style) ACL.
    with dataset('smb_proto_nested_datasets', data={'share_type': 'SMB'}) as ds:
        ds_mp = os.path.join('/mnt', ds)
        with smb_share(ds_mp, 'DATASETS', {'purpose': 'PRIVATE_DATASETS'}):
            with smb_connection(
                share='DATASETS',
                username=SMB_USER,
                password=SMB_PWD,
                smb1=False
            ) as c:
                fd = c.create_file('nested_test_file', "w")
                c.write(fd, b'EXTERNAL_TEST')
                c.close(fd)
        # The per-user path must exist and carry a non-trivial ACL.
        acl = call('filesystem.getacl', os.path.join(ds_mp, SMB_USER), True)
        assert acl['trivial'] is False, str(acl)
def test_180_create_share_multiple_dirs_deep(request):
    """Share a directory several levels below the dataset mountpoint and write to it."""
    with dataset('nested_dirs', data={'share_type': 'SMB'}) as ds:
        dirs_path = os.path.join('/mnt', ds, 'd1/d2/d3')
        ssh(f'mkdir -p {dirs_path}')
        with smb_share(dirs_path, 'DIRS'):
            with smb_connection(
                share='DIRS',
                username=SMB_USER,
                password=SMB_PWD,
                smb1=False
            ) as conn:
                handle = conn.create_file('nested_dirs_file', "w")
                conn.write(handle, b'DIRS_TEST')
                conn.close(handle)
            # stat raises if the file did not land on the expected path.
            call('filesystem.stat', os.path.join(dirs_path, 'nested_dirs_file'))
def test_181_create_and_disable_share(request):
    # Disable a share while a client session is connected, then attempt an
    # operation on the now-disabled share.
    with dataset('smb_disabled', data={'share_type': 'SMB'}) as ds:
        with smb_share(os.path.join('/mnt', ds), 'TO_DISABLE') as tmp_share:
            with smb_connection(
                share='TO_DISABLE',
                username=SMB_USER,
                password=SMB_PWD,
                smb1=False
            ) as c:
                call('sharing.smb.update', tmp_share['id'], {'enabled': False})
                try:
                    c.create_file('canary', "w")
                except NTSTATUSError as status:
                    # The server tore down the tree connect for the disabled share.
                    assert status.args[0] == ntstatus.NT_STATUS_NETWORK_NAME_DELETED, str(status)
                else:
                    # If the operation was allowed to proceed, the session must
                    # at least still be alive.
                    assert c.connected is True
| 27,173 | Python | .py | 689 | 32.88389 | 108 | 0.645717 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,198 | test_pool_remove_disk.py | truenas_middleware/tests/api2/test_pool_remove_disk.py | from middlewared.test.integration.assets.pool import another_pool
from middlewared.test.integration.utils import call, ssh
def test_waits_for_device_removal():
    """Remove one mirror vdev from a two-mirror pool after writing data to it."""
    def build_topology(disks):
        return {
            "data": [
                {"type": "MIRROR", "disks": disks[0:2]},
                {"type": "MIRROR", "disks": disks[2:4]},
            ],
        }

    with another_pool(topology=(4, build_topology)) as pool:
        # Put ~1 GiB of data on the pool so the removal has work to do.
        ssh(f"dd if=/dev/urandom of=/mnt/{pool['name']}/blob bs=1M count=1000")
        removed_vdev = pool["topology"]["data"][0]["guid"]
        call("pool.remove", pool["id"], {"label": removed_vdev}, job=True)
| 542 | Python | .py | 11 | 42.272727 | 97 | 0.610586 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,199 | test_mock_remote.py | truenas_middleware/tests/api2/test_mock_remote.py | import pytest
from auto_config import ha
from middlewared.test.integration.utils import call, mock
# Every test in this module requires an HA (two-controller) system.
pytestmark = pytest.mark.skipif(not ha, reason='Tests applicable to HA only')
# Node identifiers that failover.node may legitimately report.
VALID_NODES = ['A', 'B']
def test__mock_remote_node():
    """
    Test that we can mock on the remote node, using direct calls to verify.
    """
    this_node = call('failover.node')
    assert this_node in VALID_NODES
    other_node = call('failover.call_remote', 'failover.node')
    assert other_node in VALID_NODES
    assert this_node != other_node
    # Local-only mock: the remote node must be unaffected.
    with mock('failover.node', return_value='BOGUS1'):
        assert call('failover.node') == 'BOGUS1'
        assert call('failover.call_remote', 'failover.node') == other_node
        # Nested remote mock: applies only to the other controller.
        with mock('failover.node', return_value='BOGUS2', remote=True):
            assert call('failover.node') == 'BOGUS1'
            assert call('failover.call_remote', 'failover.node') == 'BOGUS2'
        assert call('failover.node') == 'BOGUS1'
        assert call('failover.call_remote', 'failover.node') == other_node
    # Both mocks are gone; real values are restored.
    assert call('failover.node') == this_node
    assert call('failover.call_remote', 'failover.node') == other_node
def test__mock_remote_indirect():
    """
    Test that we can mock on the remote node, using indirect calls to verify.
    """
    mmd = call('failover.mismatch_disks')
    assert mmd['missing_local'] == []
    assert mmd['missing_remote'] == []
    disks = call('failover.get_disks_local')
    # Hide the first disk on the remote node; the comparison should then
    # report it as missing remotely.
    with mock('failover.get_disks_local', return_value=disks[1:], remote=True):
        mmd = call('failover.mismatch_disks')
        assert mmd['missing_local'] == []
        assert mmd['missing_remote'] == [disks[0]]
    # Mock removed; the disk sets should match again.
    mmd = call('failover.mismatch_disks')
    assert mmd['missing_local'] == []
    assert mmd['missing_remote'] == []
| 1,791 | Python | .py | 39 | 40.25641 | 79 | 0.660939 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |