id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
23,700 | client.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/client.py | import contextlib
import errno
import os
import logging
import socket
import requests
from middlewared.service_exception import CallError
from truenas_api_client import Client
from truenas_api_client.utils import undefined
from .pytest import fail
__all__ = ["client", "host", "host_websocket_uri", "password", "session", "url", "websocket_url"]
logger = logging.getLogger(__name__)
"""
truenas_server object is used by both websocket client and REST client for determining which
server to access for API calls. For HA, the `ip` attribute should be set to the virtual IP
of the truenas server.
"""
class TrueNAS_Server:
    """
    Cached connection state for the TrueNAS server under test.

    Holds the target IP(s), the server type and a lazily created,
    liveness-checked websocket client. A single module-level instance
    (``truenas_server``) is shared by the websocket and REST helpers.
    """
    __slots__ = (
        '_ip',
        '_nodea_ip',
        '_nodeb_ip',
        '_server_type',
        '_client',
    )

    def __init__(self):
        self._ip = None           # default target IP (VIP on HA when USE_VIP=yes)
        self._nodea_ip = None     # controller A address (HA only)
        self._nodeb_ip = None     # controller B address (HA only)
        self._server_type = None  # 'ENTERPRISE_HA' or 'STANDARD' once initialized
        self._client = None       # cached websocket client, created on demand

    @property
    def ip(self) -> str | None:
        """
        default target IP address for TrueNAS server

        Will be virtual IP on TrueNAS HA but otherwise set through the
        `MIDDLEWARE_TEST_IP` environmental variable in non-HA case.
        """
        return self._ip

    @ip.setter
    def ip(self, new_ip: str):
        """ set new IP and clear client connection """
        self._ip = new_ip
        if self._client:
            # Cached client points at the old address; drop it so the next
            # `client` access reconnects to the new IP.
            self._client.close()
            self._client = None

    @property
    def nodea_ip(self) -> str | None:
        """ IP address of first storage controller on HA. Will be `None` if not HA """
        return self._nodea_ip

    @nodea_ip.setter
    def nodea_ip(self, ip: str):
        self._nodea_ip = ip

    @property
    def nodeb_ip(self) -> str | None:
        """ IP address of second storage controller on HA. Will be `None` if not HA """
        return self._nodeb_ip

    @nodeb_ip.setter
    def nodeb_ip(self, ip: str):
        self._nodeb_ip = ip

    @property
    def server_type(self) -> str | None:
        """
        Server type of target TrueNAS server

        Returns
            str - 'ENTERPRISE_HA' or 'STANDARD'
            None - not configured
        """
        return self._server_type

    @server_type.setter
    def server_type(self, server_type: str):
        # Reject anything but the two known deployment types up front.
        if server_type not in ('ENTERPRISE_HA', 'STANDARD'):
            raise ValueError(f'{server_type}: unknown server type')

        self._server_type = server_type

    @property
    def client(self) -> Client:
        """ websocket client connection to target TrueNAS server """
        if self._client is not None:
            try:
                # Liveness check on the cached connection.
                self._client.ping()
                return self._client
            except Exception as e:
                logger.warning('Re-connecting test client due to %r', e)
                # failed liveness check, perhaps server rebooted
                # if target is truly broken we'll pick up error
                # when trying to establish a new client connection
                self._client.close()
                self._client = None

        # Has to be called in order for `truenas_server` global variable to be correctly initialized when
        # running `runtest.py` with a single test name
        host()

        if (addr := self.ip) is None:
            raise RuntimeError('IP is not set')

        uri = host_websocket_uri(addr)
        cl = Client(uri, py_exceptions=True, log_py_exceptions=True)
        try:
            resp = cl.call('auth.login_ex', {
                'mechanism': 'PASSWORD_PLAIN',
                'username': 'root',
                'password': password()
            })
            assert resp['response_type'] == 'SUCCESS'
        except Exception:
            # Don't leak a half-initialized connection on login failure.
            cl.close()
            raise

        self._client = cl
        return self._client

    def ha_ips(self) -> dict:
        """
        Return ``{'active': <ip>, 'standby': <ip>}`` for the two HA
        controllers, based on which node currently reports as master.

        Raises ValueError on non-HA servers and RuntimeError when this
        object has not been initialized yet.
        """
        if self.server_type == 'STANDARD':
            raise ValueError('Not an HA server')
        elif self.server_type is None:
            raise RuntimeError('TrueNAS server object not initialized')

        failover_node = self.client.call('failover.node')
        if failover_node not in ('A', 'B'):
            raise RuntimeError(f'{failover_node}: unexpected failover node')

        if failover_node == 'A':
            active_controller = self.nodea_ip
            standby_controller = self.nodeb_ip
        else:
            active_controller = self.nodeb_ip
            standby_controller = self.nodea_ip

        assert all((active_controller, standby_controller)), 'Unable to determine both HA controller IP addresses'
        return {
            'active': active_controller,
            'standby': standby_controller
        }
# Shared singleton used by every helper in this module (and by the REST client).
truenas_server = TrueNAS_Server()
@contextlib.contextmanager
def client(*, auth=undefined, auth_required=True, py_exceptions=True, log_py_exceptions=True, host_ip=None):
    """
    Context manager yielding an authenticated websocket ``Client``.

    :param auth: ``(username, password)`` tuple, ``None`` to skip
        authentication, or the default sentinel ``undefined`` to log in as
        root using ``password()``.
    :param auth_required: assert that the login reported SUCCESS.
    :param host_ip: override the target address (defaults to ``host().ip``).
    """
    if auth is undefined:
        auth = ("root", password())

    uri = host_websocket_uri(host_ip)
    try:
        with Client(uri, py_exceptions=py_exceptions, log_py_exceptions=log_py_exceptions) as c:
            if auth is not None:
                auth_req = {
                    "mechanism": "PASSWORD_PLAIN",
                    "username": auth[0],
                    "password": auth[1]
                }
                try:
                    resp = c.call("auth.login_ex", auth_req)
                except CallError as e:
                    if e.errno == errno.EBUSY and e.errmsg == 'Rate Limit Exceeded':
                        # our "roles" tests (specifically common_checks() function)
                        # isn't designed very well since it's generating random users
                        # for every unique test_* function in every test file....
                        # TODO: we should probably fix that issue at some point but
                        # this is easiest path forward to not cause a bunch of roles
                        # related tests to trip on our rate limiting functionality
                        truenas_server.client.call("rate.limit.cache_clear")
                        resp = c.call("auth.login_ex", auth_req)
                    else:
                        raise
                if auth_required:
                    assert resp['response_type'] == 'SUCCESS'
            yield c
    except socket.timeout:
        # Surface connection timeouts as a hard test failure with context.
        fail(f'socket timeout on URI: {uri!r} HOST_IP: {host_ip!r}')
def host():
    """
    Return the module-level ``truenas_server``, initializing it from the
    environment on first use.

    On HA systems the default IP is either the VIP (when ``USE_VIP=yes``)
    or controller 1's address; otherwise ``MIDDLEWARE_TEST_IP`` is used.
    """
    if truenas_server.ip:
        # Already initialized on a previous call.
        return truenas_server

    # Initialize our settings. At this point on HA servers, the VIP is not available
    truenas_server.server_type = os.environ['SERVER_TYPE']

    # Some older test runners have old python
    if truenas_server.server_type == 'ENTERPRISE_HA':
        if "USE_VIP" in os.environ and os.environ["USE_VIP"] == "yes":
            truenas_server.ip = os.environ["virtual_ip"]
        else:
            truenas_server.ip = os.environ["controller1_ip"]
        truenas_server.nodea_ip = os.environ["controller1_ip"]
        truenas_server.nodeb_ip = os.environ["controller2_ip"]
    else:
        truenas_server.ip = os.environ["MIDDLEWARE_TEST_IP"]

    return truenas_server
def host_websocket_uri(host_ip=None):
    """Return the websocket API endpoint URI for *host_ip* (default: the test host)."""
    target = host_ip if host_ip else host().ip
    return f"ws://{target}/api/current"
def password():
    """Return the API password: APIPASS on HA runners (NODE_A_IP set), else MIDDLEWARE_TEST_PASSWORD."""
    key = "APIPASS" if "NODE_A_IP" in os.environ else "MIDDLEWARE_TEST_PASSWORD"
    return os.environ[key]
@contextlib.contextmanager
def session():
    """Yield a ``requests.Session`` pre-authenticated as root for REST calls."""
    sess = requests.Session()
    sess.auth = ("root", os.environ["MIDDLEWARE_TEST_PASSWORD"])
    with sess:
        yield sess
def url():
    """Return the HTTP base URL of the target TrueNAS server."""
    address = host().ip
    return f"http://{address}"
def websocket_url():
    """Return the websocket base URL of the target TrueNAS server."""
    address = host().ip
    return f"ws://{address}"
| 7,634 | Python | .py | 191 | 30.287958 | 114 | 0.591911 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,701 | filesystem.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/filesystem.py | import contextlib
import typing
from .call import call
__all__ = ['file_exists_and_perms_check']
def file_exists_and_perms_check(file_path: str, options: typing.Optional[dict] = None):
    """
    Return True when `file_path` exists and every key in `options` matches the
    corresponding field of `filesystem.stat` output.

    `options` defaults to requiring a regular file ({'type': 'FILE'}). Any stat
    failure (e.g. a missing file) yields False.
    """
    # Fix: copy before setdefault() so the caller's dict is never mutated.
    opts = dict(options) if options else {}
    opts.setdefault('type', 'FILE')
    with contextlib.suppress(Exception):
        file_info = call('filesystem.stat', file_path)
        return all(file_info.get(key) == value for key, value in opts.items())
    return False
| 439 | Python | .py | 11 | 35.454545 | 87 | 0.703791 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,702 | time_utils.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/time_utils.py | """This is a copy of `middlewared.utils.time_utils`."""
from datetime import datetime, UTC
def utc_now(naive=True):
"""Wrapper for `datetime.now(UTC)`. Exclude timezone if `naive=True`."""
dt = datetime.now(UTC)
return dt.replace(tzinfo=None) if naive else dt
| 275 | Python | .py | 6 | 42.333333 | 76 | 0.710526 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,703 | failover.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/failover.py | import contextlib
import os
import sys
from time import sleep
try:
apifolder = os.getcwd()
sys.path.append(apifolder)
from auto_config import ha, hostname
except ImportError:
ha = False
hostname = None
from .call import call
__all__ = ["disable_failover"]
@contextlib.contextmanager
def disable_failover():
    """Temporarily disable failover on HA systems; no-op on single-controller ones."""
    base = {"master": True}
    if ha:
        call("failover.update", dict(base, disabled=True))
    try:
        yield
    finally:
        if ha:
            call("failover.update", dict(base, disabled=False))
def wait_for_standby():
    '''
    NOTE:
        1) This routine is for dual-controller (ha) only
        2) This is nearly identical to 'wait_for_standby' in test_006_pool_and_sysds

    This routine will wait for the standby controller to return from a reboot.
    '''
    # Fix: removed dead `pass` statements and the redundant `else: pass`
    # branch, and deduplicated the two hand-rolled polling loops.
    if not ha:
        # Nothing to wait for on single-controller systems.
        return

    sleep(5)  # give the standby controller a head start on its reboot
    sleep_time = 1
    max_wait_time = 300

    def _poll(condition, tolerate_errors):
        # Poll `condition` once per `sleep_time` seconds, for at most
        # `max_wait_time` seconds; return True as soon as it holds.
        waited_time = 0
        while waited_time < max_wait_time:
            try:
                if condition():
                    return True
            except Exception:
                if not tolerate_errors:
                    raise
            waited_time += sleep_time
            sleep(sleep_time)
        return False

    # First wait for the remote controller to re-establish its connection...
    rebooted = _poll(lambda: call('failover.remote_connected'), tolerate_errors=False)
    assert rebooted, f'Standby did not connect after {max_wait_time} seconds'

    # ...then wait for it to report BACKUP status. Remote calls may fail
    # transiently while middleware finishes starting, so tolerate errors here.
    is_backup = _poll(
        lambda: call('failover.call_remote', 'failover.status') == 'BACKUP',
        tolerate_errors=True,
    )
    assert is_backup, f'Standby node did not become BACKUP after {max_wait_time} seconds'
| 1,785 | Python | .py | 56 | 23.785714 | 93 | 0.600933 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,704 | pool.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/pool.py | # -*- coding=utf-8 -*-
import logging
import os
import sys
try:
apifolder = os.getcwd()
sys.path.append(apifolder)
from auto_config import pool_name
except ImportError:
pool_name = os.environ.get("ZPOOL")
logger = logging.getLogger(__name__)
__all__ = ["pool"]
pool = pool_name
| 298 | Python | .py | 13 | 20.384615 | 39 | 0.701068 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,705 | ssh.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/ssh.py | import os
import sys
try:
apifolder = os.getcwd()
sys.path.append(apifolder)
from auto_config import user as default_user, password as default_password
from functions import SSH_TEST
except ImportError:
default_user = None
default_password = None
__all__ = ["ssh"]
def ssh(command, check=True, complete_response=False, *, user=default_user, password=default_password, ip=None, timeout=120):
    """
    Run *command* over SSH on the target host.

    When ``check`` is true, assert the command succeeded. Returns the full
    result dict when ``complete_response`` is true, otherwise just stdout.
    """
    outcome = SSH_TEST(command, user, password, ip, timeout=timeout)
    if check:
        assert outcome["result"], outcome["output"]
    if complete_response:
        return outcome
    return outcome["stdout"]
23,706 | run.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/run.py | import os
import subprocess
class RunOnRunnerException(Exception):
    """Raised by run_on_runner() when a command exits non-zero and check=True."""
    pass
def run_on_runner(*args, **kwargs) -> subprocess.CompletedProcess:
    """
    Run a command locally on the test runner and return a CompletedProcess.

    Accepts either a single list argument or the command parts as positionals.
    Recognized kwargs: `check` (raise on non-zero exit, default True),
    `exception_msg` (override the error detail), `shell`, `env`,
    `stdout`/`stderr` (default PIPE).

    Raises RunOnRunnerException when `check` is true and the command fails.
    """
    # Fix: use subprocess.run instead of hand-rolling Popen + communicate,
    # and drop the dead `isinstance(args, list)` branch in the error message
    # (args is always a tuple at that point).
    if args and isinstance(args[0], list):
        args = tuple(args[0])
    kwargs.setdefault('stdout', subprocess.PIPE)
    kwargs.setdefault('stderr', subprocess.PIPE)
    exception_message = kwargs.pop('exception_msg', None)
    check = kwargs.pop('check', True)
    shell = kwargs.pop('shell', False)
    env = kwargs.pop('env', None) or os.environ
    cp = subprocess.run(
        args, stdout=kwargs['stdout'], stderr=kwargs['stderr'], shell=shell, env=env,
        encoding='utf8', errors='ignore', check=False,
    )
    if check and cp.returncode:
        error_str = exception_message or cp.stderr or ''
        raise RunOnRunnerException(
            f'Command {args!r} returned exit code '
            f'{cp.returncode}' + (f' ({error_str})' if error_str else '')
        )
    return cp
| 1,120 | Python | .py | 26 | 36.307692 | 118 | 0.659926 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,707 | smb.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/smb.py | import contextlib
import os
import sys
try:
apifolder = os.getcwd()
sys.path.append(apifolder)
from protocols.smb_proto import SMB, security
except ImportError:
SMB = None
security = None
from .client import truenas_server
__all__ = ["smb_connection"]
@contextlib.contextmanager
def smb_connection(
    host=None,
    share=None,
    encryption='DEFAULT',
    username=None,
    domain=None,
    password=None,
    smb1=False
):
    """
    Yield a connected SMB session to `share`, disconnecting on exit.

    `host` defaults to the current test target (`truenas_server.ip`); set
    `smb1=True` to force the legacy SMB1 protocol.
    NOTE(review): relies on the test-runner `protocols` package — `SMB` is
    None when that import failed, so this raises TypeError in that case.
    """
    s = SMB()
    s.connect(
        host=host or truenas_server.ip,
        share=share,
        encryption=encryption,
        username=username,
        domain=domain,
        password=password,
        smb1=smb1
    )

    try:
        yield s
    finally:
        s.disconnect()
| 738 | Python | .py | 36 | 15.444444 | 49 | 0.656609 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,708 | mock_binary.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/mock_binary.py | # -*- coding=utf-8 -*-
import base64
import contextlib
import json
import textwrap
from .call import call
from .ssh import ssh
RESULT_PATH = "/tmp/mocked_binary_launch"
class BinaryMock:
    """Accessor for the JSON result file written by a binary mocked via mock_binary()."""

    def _load(self):
        # None when the result file is absent or does not contain valid JSON yet.
        raw = ssh(f"cat {RESULT_PATH}", check=False)
        try:
            return json.loads(raw.strip())
        except ValueError:
            return None

    @property
    def launched(self):
        """Whether the mocked binary has been executed at least once."""
        return self._load() is not None

    @property
    def result(self):
        """The JSON payload recorded by the mocked binary; raises if never launched."""
        payload = self._load()
        if payload is None:
            raise AttributeError("mocked binary was not launched")
        return payload
def set_usr_readonly(value):
    """
    Toggle the ZFS ``readonly`` property on the dataset mounted at /usr.

    Implemented as an inline python3/libzfs one-liner executed over ssh; the
    triple-backslash quoting must survive both the local f-string and the
    remote shell, so do not "simplify" it. ``value`` is passed straight
    through as the property value (e.g. 'on' / 'off').
    """
    cmd = 'python3 -c "import libzfs;'
    cmd += 'hdl = libzfs.ZFS().get_dataset_by_path(\\\"/usr\\\");'
    cmd += 'hdl.update_properties({\\\"readonly\\\": {\\\"value\\\": '
    cmd += f'\\\"{value}\\\"' + '}});"'
    ssh(cmd)
@contextlib.contextmanager
def mock_binary(path, code="", exitcode=1):
    """
    Temporarily replace the binary at ``path`` with a python script that
    records its argv (plus whatever ``code`` adds to the ``result`` dict) to
    RESULT_PATH and exits with ``exitcode``. Yields a BinaryMock for
    inspecting the recorded result; the original binary is restored on exit.
    """
    # /usr is normally a read-only ZFS dataset; unlock it while swapping.
    set_usr_readonly("off")
    ssh(f"rm -f {RESULT_PATH}")
    ssh(f"mv {path} {path}.bak")
    try:
        # Upload the generated replacement script, executable by anyone.
        call(
            "filesystem.file_receive",
            path,
            base64.b64encode(textwrap.dedent("""\
                #!/usr/bin/python3
                import json
                import sys

                exitcode = """ + repr(exitcode) + """

                result = {
                    "argv": sys.argv,
                }
                %code%

                with open(""" + repr(RESULT_PATH) + """, "w") as f:
                    json.dump(result, f)

                sys.exit(exitcode)
            """).replace("%code%", code).encode("utf-8")).decode("ascii"),
            {"mode": 0o755},
        )
        yield BinaryMock()
    finally:
        set_usr_readonly("off")  # In case something like `truenas-initrd.py` was launched by `yield`
        ssh(f"mv {path}.bak {path}")
        set_usr_readonly("on")
| 1,910 | Python | .py | 58 | 23.913793 | 101 | 0.533991 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,709 | __init__.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/__init__.py | from .call import * # noqa
from .client import * # noqa
from .failover import * # noqa
from .filesystem import * # noqa
from .job import * # noqa
from .mock import * # noqa
from .pool import * # noqa
from .pytest import * # noqa
from .run import * # noqa
from .ssh import * # noqa
| 287 | Python | .py | 10 | 27.7 | 32 | 0.689531 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,710 | mock.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/mock.py | # -*- coding=utf-8 -*-
import contextlib
import textwrap
from .client import client
__all__ = ["mock"]
@contextlib.contextmanager
def mock(method, declaration="", **kwargs):
    """
    Context manager that temporarily replaces specified middleware `method` with a mock.

    This only works for method calls dispatched with `self.middleware.call` or `self.middleware.call_sync`. Direct class
    method calls (e.g. `self.get_disk_from_partition(...)`) will not be affected.

    :param method: Method name to replace
    :params args: Only use this mock when the method is called with the specified arguments.
    :param return_value: The value returned when the mock is called.
    :param declaration: A string, containing python function declaration for mock. Function should be named `mock`,
        can be normal function or `async` and must accept `self` argument and all other arguments the function being
        replaced accepts. No `@accepts`, `@job` or other decorators are required, but if a method being replaced is a
        job, then mock signature must also accept `job` argument.
    :param remote: Optional boolean to allow the mock to be sent to other node in HA-pair.
    """
    args = kwargs.pop("args", None)
    remote = kwargs.pop("remote", False)

    if declaration:
        if kwargs:
            raise ValueError("Mock `declaration` is not allowed with kwargs")
        description = textwrap.dedent(declaration)
    else:
        description = kwargs

    def _dispatch(c, name, params):
        # Route the call through the remote node when requested.
        if remote:
            c.call("failover.call_remote", name, params)
        else:
            c.call(name, *params)

    with client() as c:
        _dispatch(c, "test.set_mock", [method, args, description])
    try:
        yield
    finally:
        with client() as c:
            _dispatch(c, "test.remove_mock", [method, args])
| 1,955 | Python | .py | 41 | 40.707317 | 120 | 0.679642 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,711 | mock_db.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/mock_db.py | # -*- coding=utf-8 -*-
import contextlib
from .call import call
@contextlib.contextmanager
def mock_table_contents(name, rows):
    """Temporarily replace the contents of datastore table *name* with *rows*, restoring on exit."""
    saved = call("datastore.query", name, [], {"relationships": False})
    call("datastore.delete", name, [])
    try:
        for entry in rows:
            call("datastore.insert", name, entry)
        yield
    finally:
        # Restore the original rows regardless of what the block did.
        call("datastore.delete", name, [])
        for entry in saved:
            call("datastore.insert", name, entry)
| 475 | Python | .py | 15 | 25.6 | 74 | 0.620614 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,712 | shell.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/shell.py | import json
import logging
import re
import time
import websocket
from middlewared.test.integration.utils import websocket_url
logger = logging.getLogger(__name__)
ansi_escape_8bit = re.compile(br"(?:\x1B[<-Z\\-_]|[\x80-\x9A\x9C-\x9F]|(?:\x1B\[|\x9B)[0-?]*[ -/]*[<-~])")
def assert_shell_works(token, username="root"):
    """
    Open the middleware web shell with `token` and verify an interactive
    session works by waiting for a prompt and running `whoami`.

    The expected prompt differs by account: '# ' for root, '% ' otherwise.
    """
    if username == "root":
        prompt = "# "
    else:
        prompt = "% "

    ws = websocket.create_connection(websocket_url() + "/websocket/shell")
    try:
        ws.send(json.dumps({"token": token}))
        resp_opcode, msg = ws.recv_data()
        assert json.loads(msg.decode())["msg"] == "connected", msg

        # Wait (up to 60 reads) for the shell prompt, stripping ANSI escapes.
        for i in range(60):
            resp_opcode, msg = ws.recv_data()
            msg = ansi_escape_8bit.sub(b"", msg).decode("ascii")
            logger.debug("Received 1 %r", msg)
            if "You are seeing this message because you have no zsh startup files" in msg:
                # Dismiss the first-run zsh configuration wizard.
                ws.send_binary(b"q\n")
            if msg.endswith(prompt):
                break

        ws.send_binary(b"whoami\n")
        # Wait for `whoami` output to echo the expected username.
        for i in range(60):
            resp_opcode, msg = ws.recv_data()
            msg = ansi_escape_8bit.sub(b"", msg).decode("ascii")
            logger.debug("Received 2 %r", msg)
            if username in msg.split():
                break
    finally:
        ws.close()

    # Give middleware time to kill user's zsh on connection close (otherwise, it will prevent user's home directory
    # dataset from being destroyed)
    time.sleep(5)
| 1,522 | Python | .py | 38 | 31.631579 | 119 | 0.586839 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,713 | system.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/system.py | from .ssh import ssh
def reset_systemd_svcs(svcs_to_reset):
    """
    Clear systemd's failure state for the given services.

    Systemd disables units that restart too often or too quickly, which can
    happen during testing. Pass a space-delimited string of unit names, e.g.::

        reset_systemd_svcs("nfs-idmapd nfs-mountd nfs-server rpcbind rpc-statd")
    """
    ssh("systemctl reset-failed " + svcs_to_reset)
| 417 | Python | .py | 10 | 36.9 | 80 | 0.723457 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,714 | job.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/job.py | # -*- coding=utf-8 -*-
import contextlib
import logging
from types import SimpleNamespace
from .call import call
logger = logging.getLogger(__name__)
__all__ = ["assert_creates_job"]
@contextlib.contextmanager
def assert_creates_job(method):
    """
    Assert that the wrapped block starts a new `method` job; the yielded
    namespace's `id` attribute is populated with the job id afterwards.
    """
    existing = call("core.get_jobs")
    latest_id = existing[-1]["id"] if existing else 0

    job = SimpleNamespace(id=None)
    yield job

    matches = call("core.get_jobs", [["method", "=", method]])
    if not matches or matches[-1]["id"] <= latest_id:
        raise RuntimeError(f"{method} was not started")
    job.id = matches[-1]["id"]
| 594 | Python | .py | 18 | 29.111111 | 59 | 0.65669 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,715 | unittest.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/unittest.py | import re
__all__ = ["RegexString"]
class RegexString:
    """
    Comparison helper that equals any string fully matching the given regex.

    Constructor arguments are forwarded to ``re.compile``. Intended for use in
    equality assertions against structures containing variable strings.
    """

    def __init__(self, *args):
        self.re = re.compile(*args)

    def __eq__(self, other):
        # Fix: fullmatch() returns a Match object or None; coerce to a real
        # bool so `==` never leaks a Match object to callers.
        return isinstance(other, str) and self.re.fullmatch(other) is not None

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Fix: defining __eq__ implicitly set __hash__ to None, making the
        # class unhashable; hash on the pattern text instead.
        return hash(self.re.pattern)

    def __repr__(self):
        return f"<RegexString {self.re}>"
| 357 | Python | .py | 11 | 26.545455 | 66 | 0.594118 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,716 | mock_rclone.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/mock_rclone.py | # -*- coding=utf-8 -*-
import contextlib
import textwrap
from .mock_binary import mock_binary
@contextlib.contextmanager
def mock_rclone():
    """
    Mock /usr/bin/rclone so cloud-sync tests can inspect the rclone config
    that middleware generated. The recorded result gains a "config" key of
    the form {section: {option: value}}, parsed from the file the caller
    passed via `--config`.
    """
    with mock_binary(
        "/usr/bin/rclone",
        textwrap.dedent("""\
            import configparser
            config = configparser.ConfigParser()
            config.read(sys.argv[sys.argv.index("--config") + 1])
            result["config"] = {s: dict(config.items(s)) for s in config.sections()}
        """),
    ) as mb:
        yield mb
| 497 | Python | .py | 16 | 24.375 | 84 | 0.608787 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,717 | call.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/call.py | # -*- coding=utf-8 -*-
from .client import client, truenas_server
__all__ = ["call"]
def call(*args, **kwargs):
    """
    Invoke a middleware API method.

    Reuses the persistent `truenas_server` client when no per-call client
    options are given and a target IP is configured; otherwise opens a
    throwaway client with `client_kwargs`.
    """
    client_kwargs = kwargs.pop("client_kwargs", {})
    if not client_kwargs and truenas_server.ip:
        return truenas_server.client.call(*args, **kwargs)
    with client(**client_kwargs) as c:
        return c.call(*args, **kwargs)
| 338 | Python | .py | 8 | 37.75 | 84 | 0.644172 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,718 | audit.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/audit.py | # -*- coding=utf-8 -*-
import contextlib
import pprint
import time
from middlewared.test.integration.utils import client
@contextlib.contextmanager
def expect_audit_log(entries, *, include_logins=False):
    """
    Assert that the wrapped block appends exactly `entries` to the MIDDLEWARE
    audit log. Each expected entry need only be a subset of the actual
    entry's fields. AUTHENTICATION events are ignored unless
    `include_logins=True`.
    """
    with client() as c:
        time.sleep(5)  # FIXME: proper audit log flush
        existing = c.call("audit.query", {"services": ["MIDDLEWARE"]})

        yield

        time.sleep(5)
        new = c.call("audit.query", {"services": ["MIDDLEWARE"]})

    # Everything seen before the block must still be there, unchanged...
    assert new[:len(existing)] == existing

    # ...and the tail is what the block produced.
    new = new[len(existing):]
    if not include_logins:
        new = [entry for entry in new if entry["event"] != "AUTHENTICATION"]

    assert len(new) == len(entries), \
        f"Expected:\n{pprint.pformat(entries, indent=2)}\nGot:\n{pprint.pformat(new, indent=2)}"
    for new_entry, expected_entry in zip(new, entries):
        # Subset comparison: the expected entry's items must all appear.
        assert expected_entry.items() < new_entry.items(), \
            f"Expected:\n{pprint.pformat(expected_entry, indent=2)}\nGot:\n{pprint.pformat(new_entry, indent=2)}"
@contextlib.contextmanager
def expect_audit_method_calls(calls):
    """Assert the block produces exactly these authenticated/authorized METHOD_CALL audit entries."""
    expected = []
    for params in calls:
        event_data = {"authenticated": True, "authorized": True}
        event_data.update(params)
        expected.append({"event": "METHOD_CALL", "event_data": event_data})
    with expect_audit_log(expected):
        yield
def get_audit_entry(service, index=-1):
    """
    Return a single audit entry for the given service.

    service: one of the audited services: 'MIDDLEWARE', 'SMB', 'SUDO'
        (see plugins/audit/utils.py)
    index: which entry to return; negative values index from the end
        (the default -1 is the last entry). Returns {} when no such
        entry exists.
    """
    svc = str(service).upper()
    assert svc in ['MIDDLEWARE', 'SMB', 'SUDO']
    assert isinstance(index, int)

    with client() as c:
        if index < 0:
            # Fix: the previous code used `max_count - 1` for every negative
            # index, so only -1 worked; `count + index` supports all of them.
            count = c.call("audit.query", {"services": [svc], "query-options": {"count": True}})
            offset = count + index
            if offset < 0:
                # Not enough entries (covers the empty-log case too).
                return {}
        else:
            offset = index

        entry_list = c.call('audit.query', {"services": [svc], "query-options": {"offset": offset}})
        return entry_list[0] if entry_list else {}
| 2,199 | Python | .py | 59 | 29.966102 | 113 | 0.6 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,719 | docker.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/docker.py | import os
# Well-known locations of the apps dataset and its catalog.
IX_APPS_DIR_NAME = '.ix-apps'
IX_APPS_MOUNT_PATH: str = os.path.join('/mnt', IX_APPS_DIR_NAME)
IX_APPS_CATALOG_PATH: str = os.path.join(IX_APPS_MOUNT_PATH, 'truenas_catalog')
# Baseline ZFS properties applied to every docker/apps dataset.
DOCKER_DATASET_PROPS = {
    'aclmode': 'discard',
    'acltype': 'posix',
    'atime': 'off',
    'casesensitivity': 'sensitive',
    'canmount': 'noauto',
    'dedup': 'off',
    'encryption': 'off',
    'exec': 'on',
    'normalization': 'none',
    'overlay': 'on',
    'setuid': 'on',
    'snapdir': 'hidden',
    'xattr': 'on',
}


def dataset_props(ds_name: str) -> dict:
    """Return the expected ZFS properties (including mountpoint) for apps dataset *ds_name*."""
    if ds_name.endswith('/ix-apps'):
        # The root apps dataset mounts at the well-known location.
        mountpoint = IX_APPS_MOUNT_PATH
    else:
        mountpoint = os.path.join(IX_APPS_MOUNT_PATH, ds_name.split('/', 2)[-1])
    return {**DOCKER_DATASET_PROPS, 'mountpoint': mountpoint}
| 770 | Python | .py | 25 | 26.08 | 91 | 0.597297 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,720 | filesystem.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/filesystem.py | import contextlib
from middlewared.test.integration.utils import call, ssh
@contextlib.contextmanager
def directory(path, options=None):
    """Create *path* via the filesystem plugin, removing it recursively on exit."""
    payload = {'path': path}
    if options:
        payload.update(options)
    call('filesystem.mkdir', payload)
    try:
        yield path
    finally:
        ssh(f'rm -rf {path}')
@contextlib.contextmanager
def mkfile(path, size=None):
    """
    Create a simple file for the duration of the context.

    * path is the full pathname, e.g. /mnt/tank/dataset/filename
    * With size=None an empty file is touched; otherwise a random-filled
      file of `size` bytes is written. Creation is faster when size is a
      power of 2 (e.g. 1024 or 1048576).

    TODO: sparse files, owner, permissions
    """
    try:
        if size is None:
            ssh(f"touch {path}")
        else:
            # Pick the largest power-of-two block size that evenly divides `size`.
            block = 1048576
            while block > 1 and size % block:
                block //= 2
            ssh(f"dd if=/dev/urandom of={path} bs={block} count={size // block}")
        yield path
    finally:
        ssh(f"rm -f {path}")
| 962 | Python | .py | 30 | 25.266667 | 75 | 0.601512 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,721 | pool.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/pool.py | import contextlib
import errno
import time
from truenas_api_client import ValidationErrors
from middlewared.service_exception import InstanceNotFound
from middlewared.test.integration.utils import call, fail, pool
# Each topology is a (min_disks, factory) pair: the factory maps a list of
# unused disk names to a `pool.create` topology payload.
_1_disk_stripe_topology = (1, lambda disks: {
    "data": [{"type": "STRIPE", "disks": disks[0:1]}],
})
_2_disk_mirror_topology = (2, lambda disks: {
    "data": [{"type": "MIRROR", "disks": disks[0:2]}],
})
_4_disk_raidz2_topology = (4, lambda disks: {
    "data": [{"type": "RAIDZ2", "disks": disks[0:4]}],
})
# Topologies usable with the `another_pool` fixture; the first (smallest)
# entry is its default.
another_pool_topologies = [
    _1_disk_stripe_topology,
    _2_disk_mirror_topology,
    _4_disk_raidz2_topology,
]
@contextlib.contextmanager
def another_pool(data=None, topology=None):
    """
    Create a second pool named "test" out of unused disks, destroying it on
    exit.

    data: extra payload merged into `pool.create`.
    topology: a (min_disks, factory) pair from `another_pool_topologies`
        (defaults to the single-disk stripe).
    """
    data = data or {}

    if topology is None:
        topology = another_pool_topologies[0]

    unused = call("disk.get_unused")
    if len(unused) < topology[0]:
        raise RuntimeError(f"At least {topology[0]} unused disks required to test this pool topology")

    try:
        pool = call("pool.create", {
            "name": "test",
            "encryption": False,
            "allow_duplicate_serials": True,
            "topology": topology[1]([d["devname"] for d in unused]),
            **data,
        }, job=True)
    except ValidationErrors as e:
        # A name collision means the previous run never exported its pool;
        # abort the whole session rather than cascade confusing failures.
        if any(error.attribute == "pool_create.name" and error.errcode == errno.EEXIST for error in e.errors):
            fail("Previous `another_pool` fixture failed to teardown. Aborting tests.")
        raise

    try:
        yield pool
    finally:
        try:
            call("pool.export", pool["id"], {"destroy": True}, job=True)
        except ValidationErrors as e:
            # Ignore "already gone" errors; re-raise anything else.
            if not any(error.errcode == errno.ENOENT for error in e.errors):
                raise
        except InstanceNotFound:
            pass
@contextlib.contextmanager
def dataset(name, data=None, pool=pool, **kwargs):
    """
    Create dataset `pool/name` for the duration of the context.

    data: extra payload for `pool.dataset.create`.
    Supported kwargs: `acl` or `mode` (applied to the mountpoint after
    creation; `acl` wins when both given) and `delete_delay` (seconds to
    wait before deletion).
    NOTE: the `pool=pool` default binds the module-level imported pool name.
    """
    data = data or {}

    dataset = f"{pool}/{name}"

    call("pool.dataset.create", {"name": dataset, **data})
    try:
        if "acl" in kwargs:
            call("filesystem.setacl", {'path': f"/mnt/{dataset}", "dacl": kwargs['acl']})
        elif "mode" in kwargs:
            # A falsy mode (e.g. None) falls back to 777.
            call("filesystem.setperm", {'path': f"/mnt/{dataset}", "mode": kwargs['mode'] or "777"})
        yield dataset
    finally:
        if 'delete_delay' in kwargs:
            time.sleep(kwargs['delete_delay'])

        try:
            call("pool.dataset.delete", dataset, {"recursive": True})
        except InstanceNotFound:
            # Already deleted by the test body; nothing to clean up.
            pass
@contextlib.contextmanager
def snapshot(dataset, name, **kwargs):
    """
    Create snapshot `dataset@name` for the duration of the context.

    Pass get=True to yield the full create result instead of the snapshot id;
    remaining kwargs are forwarded to `zfs.snapshot.create`.
    """
    want_full = kwargs.pop("get", False)

    created = call("zfs.snapshot.create", {"dataset": dataset, "name": name, **kwargs})
    snap_id = f"{dataset}@{name}"
    try:
        yield created if want_full else snap_id
    finally:
        try:
            call("zfs.snapshot.delete", snap_id, {"recursive": True})
        except InstanceNotFound:
            pass
| 2,972 | Python | .py | 83 | 28.674699 | 110 | 0.609484 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,722 | iscsi.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/iscsi.py | import contextlib
import json
import os
import platform
import time
from pathlib import Path
from middlewared.test.integration.utils import call, run_on_runner, RunOnRunnerException
# We could be running these tests on a Linux or FreeBSD test-runner, so the commands
# used by a client can be different depending on the platform of the test runner
# (i.e. NOT related to CORE vs SCALE).
SYSTEM = platform.system().upper()
IS_LINUX = SYSTEM == "LINUX"
@contextlib.contextmanager
def iscsi_auth(data):
    """Create an iSCSI auth entry from *data*, deleting it on exit."""
    created = call("iscsi.auth.create", data)
    try:
        yield created
    finally:
        call("iscsi.auth.delete", created["id"])
@contextlib.contextmanager
def iscsi_extent(data):
    """Create an iSCSI extent from *data*, deleting it on exit."""
    created = call("iscsi.extent.create", data)
    try:
        yield created
    finally:
        call("iscsi.extent.delete", created["id"])
@contextlib.contextmanager
def iscsi_host(data):
    """Create an iSCSI host entry from *data*, deleting it on exit."""
    created = call("iscsi.host.create", data)
    try:
        yield created
    finally:
        call("iscsi.host.delete", created["id"])
@contextlib.contextmanager
def iscsi_initiator(data):
    """Create an iSCSI initiator from *data*, deleting it on exit."""
    created = call("iscsi.initiator.create", data)
    try:
        yield created
    finally:
        call("iscsi.initiator.delete", created["id"])
@contextlib.contextmanager
def iscsi_portal(data):
    """Create an iSCSI portal from *data*, deleting it on exit."""
    created = call("iscsi.portal.create", data)
    try:
        yield created
    finally:
        call("iscsi.portal.delete", created["id"])
@contextlib.contextmanager
def iscsi_target(data):
    """Create an iSCSI target from *data*, deleting it on exit."""
    created = call("iscsi.target.create", data)
    try:
        yield created
    finally:
        call("iscsi.target.delete", created["id"])
def target_login_test(portal_ip, target_name, check_surfaced_luns=None):
    """Dispatch the iSCSI login test to the implementation matching the test runner's OS."""
    impl = target_login_test_linux if IS_LINUX else target_login_test_freebsd
    return impl(portal_ip, target_name, check_surfaced_luns)
def target_login_test_linux(portal_ip, target_name, check_surfaced_luns=None):
    """
    Log in to `target_name` at `portal_ip` with open-iscsi and report success.

    When `check_surfaced_luns` (a set of LUN ids) is given, also poll
    /dev/disk/by-path for up to ~20s until exactly those LUNs have surfaced.
    Always logs out again. Returns True on success, False otherwise.
    """
    logged_in = False
    try:
        if os.geteuid():
            # Non-root requires sudo
            iscsiadm = ['sudo', 'iscsiadm']
        else:
            iscsiadm = ['iscsiadm']
        run_on_runner(iscsiadm + ['-m', 'discovery', '-t', 'sendtargets', '--portal', portal_ip])
        run_on_runner(iscsiadm + ['-m', 'node', '--targetname', target_name, '--portal', portal_ip, '--login'])
        logged_in = True
        if check_surfaced_luns is not None:
            retries = 20
            # by-path links look like ip-<ip>:3260-iscsi-<iqn>-lun-<n>
            pattern = f'ip-{portal_ip}:3260-iscsi-{target_name}-lun-*'
            by_path = Path('/dev/disk/by-path')
            while retries:
                luns = set(int(p.name.split('-')[-1]) for p in by_path.glob(pattern))
                if luns == check_surfaced_luns:
                    break
                time.sleep(1)
                retries -= 1
            assert check_surfaced_luns == luns, luns
    except RunOnRunnerException:
        return False
    except AssertionError:
        return False
    else:
        return True
    finally:
        # Only log out if the login above actually succeeded.
        if logged_in:
            run_on_runner(iscsiadm + ['-m', 'node', '--targetname', target_name, '--portal', portal_ip, '--logout'])
@contextlib.contextmanager
def iscsi_client_freebsd():
    """Ensure the FreeBSD iscsid service is running for the duration of the context.

    If the service was already running it is left untouched; otherwise it is
    started on entry and stopped again on exit.
    """
    already_running = run_on_runner(['service', 'iscsid', 'onestatus'], check=False).returncode == 0
    if already_running:
        yield
        return
    run_on_runner(['service', 'iscsid', 'onestart'])
    try:
        yield
    finally:
        run_on_runner(['service', 'iscsid', 'onestop'])
def target_login_impl_freebsd(portal_ip, target_name, check_surfaced_luns=None):
    """Attach to `target_name` via `portal_ip` with iscsictl and assert the session connects.

    Raises AssertionError when the session never reaches the 'Connected' state
    within the retry window or, when `check_surfaced_luns` is supplied, when the
    set of surfaced LUN numbers does not match it.
    """
    run_on_runner(['iscsictl', '-A', '-p', portal_ip, '-t', target_name], check=False)
    retries = 5
    connected = False
    connected_clients = None
    # Unfortunately iscsictl can take some time to show the client as actually connected so adding a few retries here
    # to handle that case
    while retries > 0 and not connected:
        time.sleep(3)
        cp = run_on_runner(['iscsictl', '-L', '--libxo', 'json'])
        connected_clients = json.loads(cp.stdout)
        connected = any(
            session.get('state') == 'Connected' for session in connected_clients.get('iscsictl', {}).get('session', [])
            if session.get('name') == target_name
        )
        retries -= 1
    assert connected is True, connected_clients
    if check_surfaced_luns is not None:
        luns = set()
        for session in connected_clients.get('iscsictl', {}).get('session', []):
            if session.get('name') == target_name and session.get('state') == 'Connected':
                # `or []` guards against a session whose 'devices' has no 'lun'
                # list (previously iterating None raised TypeError).
                for lun in session.get('devices', {}).get('lun') or []:
                    # Compare against None explicitly so LUN 0 is not dropped
                    # (the old truthiness test on the walrus value skipped it).
                    if (lun_val := lun.get('lun')) is not None:
                        luns.add(lun_val)
        assert check_surfaced_luns == luns, luns
def target_login_test_freebsd(portal_ip, target_name, check_surfaced_luns=None):
    """Run the FreeBSD iSCSI login test, returning True on success and False on assertion failure."""
    with iscsi_client_freebsd():
        try:
            target_login_impl_freebsd(portal_ip, target_name, check_surfaced_luns)
            succeeded = True
        except AssertionError:
            succeeded = False
        finally:
            # Detach regardless of outcome; teardown errors are ignored.
            run_on_runner(['iscsictl', '-R', '-t', target_name], check=False)
        return succeeded
| 5,189 | Python | .py | 136 | 30.713235 | 119 | 0.626568 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,723 | roles.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/roles.py | import collections
import contextlib
import errno
import random
import pytest
import string
from middlewared.service_exception import CallError
from middlewared.test.integration.assets.account import unprivileged_user
from middlewared.test.integration.utils import call, client
USER_FIXTURE_TUPLE = collections.namedtuple('UserFixture', 'username password group_name')
@pytest.fixture(scope='module')
def unprivileged_user_fixture(request):
    """Module-scoped fixture yielding an unprivileged account with no roles or allowlist."""
    alphabet = string.ascii_lowercase + string.digits
    suffix = ''.join(random.choice(alphabet) for _ in range(8))
    group_name = f'unprivileged_users_fixture_{suffix}'
    with unprivileged_user(
        username=f'unprivileged_fixture_{suffix}',
        group_name=group_name,
        privilege_name=f'Unprivileged users fixture ({suffix})',
        allowlist=[],
        roles=[],
        web_shell=False,
    ) as acct:
        yield USER_FIXTURE_TUPLE(acct.username, acct.password, group_name)
@contextlib.contextmanager
def unprivileged_custom_user_client(user_client_context):
    """Yield an API client authenticated as the fixture's unprivileged user."""
    creds = (user_client_context.username, user_client_context.password)
    with client(auth=creds) as conn:
        conn.username = user_client_context.username
        yield conn
def common_checks(
    user_client_context, method, role, valid_role, valid_role_exception=True, method_args=None,
    method_kwargs=None, is_return_type_none=False,
):
    """Grant `role` to the fixture user's privilege and verify authorization of `method`.

    Behavior matrix:
    - valid_role and valid_role_exception: the call is expected to raise, but NOT
      an EACCES 'Not authorized' CallError (i.e. it fails past the auth check).
    - valid_role without exception: the call must succeed; its return value is
      checked against None per `is_return_type_none`.
    - not valid_role: the call must fail with an EACCES 'Not authorized' CallError.
    """
    method_args = method_args or []
    method_kwargs = method_kwargs or {}
    privilege = call('privilege.query', [['local_groups.0.group', '=', user_client_context.group_name]])
    assert len(privilege) > 0, 'Privilege not found'
    call('privilege.update', privilege[0]['id'], {'roles': [role]})
    # NOTE: `client` below shadows the imported client() factory within this scope.
    with unprivileged_custom_user_client(user_client_context) as client:
        if valid_role:
            if valid_role_exception:
                with pytest.raises(Exception) as exc_info:
                    client.call(method, *method_args, **method_kwargs)
                # The raised error must not be the authorization failure itself.
                assert not (
                    isinstance(exc_info.value, CallError) and
                    exc_info.value.errno == errno.EACCES and
                    exc_info.value.errmsg == 'Not authorized'
                )
            elif is_return_type_none:
                assert client.call(method, *method_args, **method_kwargs) is None
            else:
                assert client.call(method, *method_args, **method_kwargs) is not None
        else:
            with pytest.raises(CallError) as ve:
                client.call(method, *method_args, **method_kwargs)
            assert ve.value.errno == errno.EACCES
            assert ve.value.errmsg == 'Not authorized'
| 2,592 | Python | .py | 56 | 38.125 | 104 | 0.67063 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,724 | cloud_backup.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/cloud_backup.py | import contextlib
from middlewared.test.integration.utils import call
@contextlib.contextmanager
def task(data):
    """Create a cloud backup task (default description/schedule/keep_last), yield it, delete on exit."""
    payload = {
        "description": "Test",
        "schedule": {
            "minute": "00",
            "hour": "00",
            "dom": "1",
            "month": "1",
            "dow": "1",
        },
        "keep_last": 10,
    }
    # Caller-supplied fields override the defaults above.
    payload.update(data)
    created = call("cloud_backup.create", payload)
    try:
        yield created
    finally:
        call("cloud_backup.delete", created["id"])
def run_task(task, timeout=120):
    """Run the given cloud backup task synchronously, waiting up to `timeout` seconds."""
    call("cloud_backup.sync", task["id"], job=True, timeout=timeout)
| 608 | Python | .py | 23 | 19.043478 | 68 | 0.536332 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,725 | two_factor_auth.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/two_factor_auth.py | import contextlib
import pyotp
import typing
from middlewared.test.integration.utils import call
@contextlib.contextmanager
def enabled_twofactor_auth():
    """Enable two-factor auth (window=3) for the context, disabling it again afterwards."""
    enable_payload = {'enabled': True, 'window': 3}
    disable_payload = {'enabled': False, 'window': 0}
    try:
        yield call('auth.twofactor.update', enable_payload)
    finally:
        call('auth.twofactor.update', disable_payload)
def get_user_secret(user_id: int, get: typing.Optional[bool] = True) -> typing.Union[dict, list]:
    """Fetch the two-factor auth datastore row(s) for `user_id` (a single dict when `get` is True)."""
    return call('datastore.query', 'account.twofactor_user_auth', [['user_id', '=', user_id]], {'get': get})
def get_2fa_totp_token(users_config: dict) -> str:
    """Compute the current TOTP token from a user's two-factor config.

    `users_config` must provide 'secret', 'interval' and 'otp_digits' keys
    (the shape returned by get_user_secret).
    """
    return pyotp.TOTP(
        users_config['secret'],
        interval=users_config['interval'],
        digits=users_config['otp_digits'],
    ).now()
| 741 | Python | .py | 18 | 36.444444 | 108 | 0.681564 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,726 | hostkvm.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/hostkvm.py | import os
import re
import shlex
from middlewared.test.integration.utils import call, run_on_runner, RunOnRunnerException, ssh
try:
from config import KVM_HOST, KVM_PASSWORD, KVM_USERNAME
have_kvm_host_cfg = True
except ImportError:
have_kvm_host_cfg = False
TM_NODE_RE = re.compile('^tm[0-9]{3}$')
HA_NODE_RE = re.compile('^ha[0-9]{3}_c[1|2]$')
WHOLE_HA_NODE_RE = re.compile('^ha[0-9]{3}$')
def get_kvm_domain():
    """Return the KVM domain name stored in the DMI system serial, or None if absent/invalid."""
    # By convention we have written it into DMI system serial number
    serial = call('system.dmidecode_info').get('system-serial-number')
    # Verify that the string looks reasonable before trusting it
    if serial and (TM_NODE_RE.match(serial) or HA_NODE_RE.match(serial)):
        return serial
    return None
def _virsh(command):
    """
    Execute the virsh command sequence, either over SSH on the configured
    KVM host or locally on the test runner; returns the command's stdout.
    """
    if have_kvm_host_cfg:
        virsh = ['sudo', 'virsh']
        ssh_command = shlex.join(virsh + command)
        return ssh(ssh_command, user=KVM_USERNAME, password=KVM_PASSWORD, ip=KVM_HOST)
    # Non-root requires sudo. The previous `except X: raise` clauses were
    # no-ops (immediate re-raise) and have been removed.
    virsh = ['sudo', 'virsh'] if os.geteuid() else ['virsh']
    return run_on_runner(virsh + command).stdout
def poweroff_vm(vmname):
    """
    Issue a virsh destroy <domain>. This is similar to pulling the power
    cable. The VM can be restarted later.
    """
    command = ['destroy', vmname]
    return _virsh(command)
def reset_vm(vmname):
    """Issue `virsh reset <domain>` for the named VM."""
    command = ['reset', vmname]
    return _virsh(command)
def shutdown_vm(vmname, mode='acpi'):
    """Issue `virsh shutdown <domain> --mode <mode>` (default mode: acpi)."""
    command = ['shutdown', vmname, '--mode', mode]
    return _virsh(command)
def start_vm(vmname, force_boot=False):
    """Start the named VM; when `force_boot` is True, pass --force-boot to virsh."""
    command = ['start', vmname] + (['--force-boot'] if force_boot else [])
    return _virsh(command)
| 1,950 | Python | .py | 57 | 27.684211 | 93 | 0.627263 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,727 | smb.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/smb.py | # -*- coding=utf-8 -*-
import contextlib
import logging
import os
import shlex
import sys
from base64 import b64encode, b64decode
from middlewared.test.integration.utils import call, ssh
from middlewared.test.integration.utils.client import truenas_server
logger = logging.getLogger(__name__)
__all__ = [
"copy_stream",
"list_stream",
"get_stream",
"set_stream",
"set_xattr_compat",
"smb_share",
"smb_mount"
]
STREAM_PREFIX = 'user.DosStream.'
STREAM_SUFFIX = ':$DATA'
STREAM_SUFFIX_ESC = ':\\$DATA'
SAMBA_COMPAT = '/proc/fs/cifs/stream_samba_compat'
@contextlib.contextmanager
def smb_share(path, name, options=None):
    """Create an SMB share at `path`, start the cifs service, and clean both up on exit."""
    payload = {"path": path, "name": name}
    payload.update(options or {})
    created = call("sharing.smb.create", payload)
    assert call("service.start", "cifs")
    try:
        yield created
    finally:
        call("sharing.smb.delete", created["id"])
        call("service.stop", "cifs")
@contextlib.contextmanager
def smb_mount(share, username, password, local_path='/mnt/cifs', options=None, ip=None):
    """Mount //ip/share at `local_path` on the remote TrueNAS box, unmounting on exit.

    `options` is an optional list of extra mount.cifs `-o` options; `ip`
    defaults to the target TrueNAS server's address.
    """
    ip = ip or truenas_server.ip
    mount_options = [f'username={username}', f'password={password}'] + (options or [])
    escaped_path = shlex.quote(local_path)
    mount_cmd = [
        'mount.cifs', f'//{ip}/{share}', escaped_path,
        '-o', ','.join(mount_options)
    ]
    mount_string = ' '.join(mount_cmd)
    ssh(f'mkdir {escaped_path}; {mount_string}')
    try:
        yield local_path
    finally:
        # Use the quoted path for rmdir as well; the raw path was used before,
        # which breaks for paths containing spaces or shell metacharacters.
        ssh(f'umount {escaped_path}; rmdir {escaped_path}')
def set_xattr_compat(enable_status: bool) -> None:
    """
    Enable / disable samba compatibility byte for SMB client. See
    SMB client implementation notes.
    NOTE: this requires that at least one SMB share be mounted.
    """
    assert isinstance(enable_status, bool)
    flag = int(enable_status)
    ssh(f'echo {flag} > {SAMBA_COMPAT}')
    # Read the value back to confirm the toggle took effect.
    current = ssh(f'cat {SAMBA_COMPAT}')
    assert int(current.strip()) == flag
def copy_stream(
    filename: str,
    xat_name_from: str,
    xat_name_to: str,
    mountpoint='/mnt/cifs'
) -> None:
    """
    Duplicate one stream to another stream on the same file.
    This is used to validate that xattr handler works properly
    for large xattrs.
    NOTE: requires existing SMB client mount at `mountpoint`.
    """
    # Sanity check: mountpoint must actually be a mounted SMB (cifs) filesystem.
    assert call('filesystem.statfs', mountpoint)['fstype'] == 'cifs'
    local_path = os.path.join(mountpoint, filename)
    xat_name_from = f'{STREAM_PREFIX}{xat_name_from}{STREAM_SUFFIX_ESC}'
    xat_name_to = f'{STREAM_PREFIX}{xat_name_to}{STREAM_SUFFIX_ESC}'
    # Remote python3 one-liner; quotes are triple-escaped so they survive both
    # this f-string and the remote shell invocation.
    cmd = 'python3 -c "from samba.xattr_native import wrap_getxattr, wrap_setxattr;'
    cmd += f'wrap_setxattr(\\\"{local_path}\\\", \\\"{xat_name_to}\\\", '
    cmd += f'wrap_getxattr(\\\"{local_path}\\\", \\\"{xat_name_from}\\\"))"'
    results = ssh(cmd, complete_response=True, check=False)
    assert results['result'], f'cmd: {cmd}, result: {results["stderr"]}'
def del_stream(
    filename: str,
    xat_name: str,
    mountpoint='/mnt/cifs'
) -> None:
    """
    Delete the alternate data stream with name `xat_name` from
    the specified file.
    NOTE: requires existing SMB client mount at `mountpoint`.
    """
    # Sanity check: mountpoint must actually be a mounted SMB (cifs) filesystem.
    assert call('filesystem.statfs', mountpoint)['fstype'] == 'cifs'
    local_path = os.path.join(mountpoint, filename)
    xat_name = f'{STREAM_PREFIX}{xat_name}{STREAM_SUFFIX_ESC}'
    # Remote python3 one-liner with triple-escaped quotes (f-string + remote shell).
    cmd = 'python3 -c "import os;'
    cmd += f'os.removexattr(\\\"{local_path}\\\", \\\"{xat_name}\\\")"'
    results = ssh(cmd, complete_response=True, check=False)
    assert results['result'], f'cmd: {cmd}, result: {results["stderr"]}'
def list_stream(
    filename: str,
    mountpoint='/mnt/cifs'
) -> list:
    """
    Return list of alternate data streams contained by the specified
    file. Stream prefix and suffix will be stripped from return.
    NOTE: requires existing SMB client mount at `mountpoint`.
    """
    # Sanity check: mountpoint must actually be a mounted SMB (cifs) filesystem.
    assert call('filesystem.statfs', mountpoint)['fstype'] == 'cifs'
    local_path = os.path.join(mountpoint, filename)
    # Vertical bar is used as separator because it is a reserved character
    # over SMB and will never be present in stream name
    cmd = 'python3 -c "import os;'
    cmd += f'print(\\\"\\|\\\".join(os.listxattr(\\\"{local_path}\\\")))"'
    results = ssh(cmd, complete_response=True, check=False)
    assert results['result'], f'cmd: {cmd}, result: {results["stderr"]}'
    streams = []
    for entry in results['stdout'].strip().split('|'):
        # Skip xattrs that are not alternate data streams.
        if not entry.startswith(STREAM_PREFIX):
            continue
        entry = entry.split(STREAM_PREFIX)[1]
        assert entry.endswith(STREAM_SUFFIX)
        # slice off the suffix
        streams.append(entry[:-len(STREAM_SUFFIX)])
    return streams
def get_stream(
    filename: str,
    xat_name: str,
    mountpoint='/mnt/cifs'
) -> bytes:
    """
    Retrieve binary data for an alternate data stream via the xattr handler on
    a SMB client mount via the remote TrueNAS server. The python script below uses
    the samba wrapper around getxattr due to limitations in os.getxattr regarding
    maximum xattr size.
    NOTE: requires existing SMB client mount at `mountpoint`.
    """
    # Sanity check: mountpoint must actually be a mounted SMB (cifs) filesystem.
    assert call('filesystem.statfs', mountpoint)['fstype'] == 'cifs'
    local_path = os.path.join(mountpoint, filename)
    xat_name = f'{STREAM_PREFIX}{xat_name}{STREAM_SUFFIX_ESC}'
    # Result is base64-encoded on the remote side so arbitrary bytes survive
    # the ssh stdout round-trip; decoded below before returning.
    cmd = 'python3 -c "from samba.xattr_native import wrap_getxattr; import base64;'
    cmd += f'print(base64.b64encode(wrap_getxattr(\\\"{local_path}\\\", \\\"{xat_name}\\\")).decode())"'
    results = ssh(cmd, complete_response=True, check=False)
    assert results['result'], f'cmd: {cmd}, result: {results["stderr"]}'
    return b64decode(results['stdout'])
def set_stream(
    filename: str,
    xat_name: str,
    data: bytes,
    mountpoint='/mnt/cifs'
) -> None:
    """
    Write binary data for an alternate data stream via the xattr handler on
    a SMB client mount via the remote TrueNAS server. The python script below uses
    the samba wrapper around setxattr due to limitations in os.setxattr regarding
    maximum xattr size.
    NOTE: requires existing SMB client mount at `mountpoint`.
    """
    # Sanity check: mountpoint must actually be a mounted SMB (cifs) filesystem.
    assert call('filesystem.statfs', mountpoint)['fstype'] == 'cifs'
    # base64-encode locally so arbitrary bytes survive the remote shell command.
    b64data = b64encode(data).decode()
    local_path = os.path.join(mountpoint, filename)
    xat_name = f'{STREAM_PREFIX}{xat_name}{STREAM_SUFFIX_ESC}'
    cmd = 'python3 -c "from samba.xattr_native import wrap_setxattr; import base64;'
    cmd += f'wrap_setxattr(\\\"{local_path}\\\", \\\"{xat_name}\\\", base64.b64decode(\\\"{b64data}\\\"))"'
    results = ssh(cmd, complete_response=True, check=False)
    assert results['result'], f'cmd: {cmd}, result: {results["stderr"]}'
| 6,761 | Python | .py | 167 | 35.538922 | 107 | 0.664276 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,728 | apps.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/apps.py | import contextlib
from middlewared.test.integration.utils import call
@contextlib.contextmanager
def app(app_name: str, create_payload: dict, delete_payload: dict | None = None):
    """Create app `app_name` from `create_payload`, yield its info, and delete it on exit."""
    app_info = call('app.create', create_payload | {'app_name': app_name}, job=True)
    try:
        yield app_info
    finally:
        call('app.delete', app_name, delete_payload or {}, job=True)
| 417 | Python | .py | 10 | 37.2 | 81 | 0.710396 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,729 | cloud_sync.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/cloud_sync.py | import contextlib
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.assets.ftp import anonymous_ftp_server
from middlewared.test.integration.utils import call
@contextlib.contextmanager
def credential(data):
    """Create a cloud sync credential (default name 'Test'), yield it, delete on exit."""
    payload = {"name": "Test", **data}
    created = call("cloudsync.credentials.create", payload)
    try:
        yield created
    finally:
        call("cloudsync.credentials.delete", created["id"])
@contextlib.contextmanager
def task(data):
    """Create a cloud sync task with a default description and schedule; delete it on exit."""
    payload = {
        "description": "Test",
        "schedule": {
            "minute": "00",
            "hour": "00",
            "dom": "1",
            "month": "1",
            "dow": "1",
        },
    }
    # Caller-supplied fields override the defaults above.
    payload.update(data)
    created = call("cloudsync.create", payload)
    try:
        yield created
    finally:
        call("cloudsync.delete", created["id"])
@contextlib.contextmanager
def local_ftp_credential_data():
    """Spin up an anonymous FTP server and yield FTP credential attributes pointing at it."""
    with anonymous_ftp_server(dataset_name="cloudsync_remote") as ftp:
        attributes = {
            "host": "localhost",
            "port": 21,
            "user": ftp.username,
            "pass": ftp.password,
        }
        yield {"provider": "FTP", "attributes": attributes}
@contextlib.contextmanager
def local_ftp_credential():
    """Yield a created cloud sync credential backed by a local anonymous FTP server."""
    with local_ftp_credential_data() as data, credential(data) as created:
        yield created
@contextlib.contextmanager
def local_ftp_task(params=None):
    """Yield a PUSH/COPY cloud sync task from a fresh local dataset to a local FTP server.

    `params` may override any field of the task payload.
    """
    overrides = params or {}
    with dataset("cloudsync_local") as local_dataset, local_ftp_credential() as cred:
        payload = {
            "direction": "PUSH",
            "transfer_mode": "COPY",
            "path": f"/mnt/{local_dataset}",
            "credentials": cred["id"],
            "attributes": {
                "folder": "",
            },
            **overrides,
        }
        with task(payload) as t:
            yield t
def run_task(task, timeout=120):
    """Run the given cloud sync task synchronously, waiting up to `timeout` seconds."""
    call("cloudsync.sync", task["id"], job=True, timeout=timeout)
| 2,054 | Python | .py | 68 | 21.352941 | 72 | 0.552846 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,730 | account.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/account.py | import contextlib
import random
import string
import types
from middlewared.service_exception import InstanceNotFound
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.assets.privilege import privilege
from middlewared.test.integration.utils import call, client, ssh
@contextlib.contextmanager
def user(data, *, get_instance=True):
    """Create a user from `data`, yield its full record (None when get_instance=False),
    and delete the user on exit if it still exists."""
    data.setdefault('home_create', True)  # create user homedir by default
    user_id = call("user.create", data)
    try:
        yield call("user.get_instance", user_id) if get_instance else None
    finally:
        # The test body may already have removed the user.
        with contextlib.suppress(InstanceNotFound):
            call("user.delete", user_id)
@contextlib.contextmanager
def group(data):
    """Create a group from `data`, yield its full record, delete it on exit if still present."""
    group_id = call("group.create", data)
    try:
        yield call("group.get_instance", group_id)
    finally:
        # The test body may already have removed the group.
        with contextlib.suppress(InstanceNotFound):
            call("group.delete", group_id)
@contextlib.contextmanager
def unprivileged_user(*, username, group_name, privilege_name, allowlist, web_shell, roles=None):
    """Create a full unprivileged account: group + privilege + home dataset + user.

    Yields a namespace with `username` and `password` attributes; all created
    resources are torn down in reverse order when the context exits.
    """
    with group({
        "name": group_name,
    }) as g:
        with privilege({
            "name": privilege_name,
            "local_groups": [g["gid"]],
            "ds_groups": [],
            "allowlist": allowlist,
            "roles": roles or [],
            "web_shell": web_shell,
        }):
            with dataset(f"{username}_homedir") as homedir:
                if web_shell:
                    # To prevent `zsh-newuser-install` interactive prompt
                    ssh(f"touch /mnt/{homedir}/.zshrc")
                password = "test1234"
                with user({
                    "username": username,
                    "full_name": "Unprivileged user",
                    "group_create": True,
                    "groups": [g["id"]],
                    "home": f"/mnt/{homedir}",
                    "password": password,
                }):
                    yield types.SimpleNamespace(username=username, password=password)
@contextlib.contextmanager
def unprivileged_user_client(roles=None, allowlist=None):
    """Yield an authenticated API client for a freshly created unprivileged user."""
    alphabet = string.ascii_lowercase + string.digits
    suffix = "".join(random.choice(alphabet) for _ in range(8))
    with unprivileged_user(
        username=f"unprivileged_{suffix}",
        group_name=f"unprivileged_users_{suffix}",
        privilege_name=f"Unprivileged users ({suffix})",
        allowlist=allowlist or [],
        roles=roles or [],
        web_shell=False,
    ) as acct:
        with client(auth=(acct.username, acct.password)) as conn:
            conn.username = acct.username
            yield conn
@contextlib.contextmanager
def root_with_password_disabled():
    """Temporarily disable the root password at the datastore level.

    Yields a namespace with an already-connected client plus the root row id and
    its backup, so the test can restore/alter state; the original row is always
    written back on exit.
    """
    root_backup = call("datastore.query", "account.bsdusers", [["bsdusr_username", "=", "root"]], {"get": True})
    # Flatten foreign-key fields to plain ids so the row can be written back as-is.
    root_backup["bsdusr_group"] = root_backup["bsdusr_group"]["id"]
    root_backup["bsdusr_groups"] = [g["id"] for g in root_backup["bsdusr_groups"]]
    root_id = root_backup.pop("id")
    # Connect before removing root password
    with client() as c:
        try:
            c.call("datastore.update", "account.bsdusers", root_id, {"bsdusr_password_disabled": True})
            yield types.SimpleNamespace(client=c, root_id=root_id, root_backup=root_backup)
        finally:
            # Restore root access on test failure
            c.call("datastore.update", "account.bsdusers", root_id, root_backup)
            c.call("etc.generate", "user")
| 3,496 | Python | .py | 88 | 30.534091 | 112 | 0.60749 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,731 | keychain.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/keychain.py | import contextlib
import uuid
from middlewared.service_exception import InstanceNotFound
from middlewared.test.integration.utils import call
@contextlib.contextmanager
def ssh_keypair():
    """Create a keychain SSH keypair with a random name; delete it on exit if still present."""
    created = call("keychaincredential.create", {
        "name": str(uuid.uuid4()),
        "type": "SSH_KEY_PAIR",
        "attributes": call("keychaincredential.generate_ssh_key_pair"),
    })
    try:
        yield created
    finally:
        with contextlib.suppress(InstanceNotFound):
            call("keychaincredential.delete", created["id"])
@contextlib.contextmanager
def localhost_ssh_credentials(**data):
    """Yield a dict with an SSH keypair plus semi-automatic SSH credentials for localhost.

    Keyword arguments override the setup payload; `url` defaults to http://localhost.
    """
    url = data.pop("url", "http://localhost")
    with ssh_keypair() as keypair:
        creds = call("keychaincredential.remote_ssh_semiautomatic_setup", {
            "name": str(uuid.uuid4()),
            "url": url,
            "token": call("auth.generate_token"),
            "private_key": keypair["id"],
            **data,
        })
        try:
            yield {"keypair": keypair, "credentials": creds}
        finally:
            with contextlib.suppress(InstanceNotFound):
                call("keychaincredential.delete", creds["id"])
| 1,232 | Python | .py | 35 | 26.771429 | 81 | 0.619849 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,732 | replication.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/replication.py | import contextlib
from middlewared.test.integration.utils import call
@contextlib.contextmanager
def replication_task(data):
    """Create a replication task from `data`, yield it, delete it on exit."""
    created = call("replication.create", data)
    try:
        yield created
    finally:
        call("replication.delete", created["id"])
| 261 | Python | .py | 9 | 24.444444 | 51 | 0.733871 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,733 | api_key.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/api_key.py | # -*- coding=utf-8 -*-
import contextlib
from middlewared.test.integration.utils import call
__all__ = ["api_key"]
@contextlib.contextmanager
def api_key(username="root"):
    """Create a test API key for `username`, yield the key string, delete the key on exit."""
    record = call("api_key.create", {"name": "Test API Key", "username": username})
    try:
        yield record["key"]
    finally:
        call("api_key.delete", record["id"])
| 346 | Python | .py | 11 | 27.545455 | 80 | 0.652568 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,734 | product.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/product.py | import contextlib
from middlewared.test.integration.utils import mock
@contextlib.contextmanager
def product_type(product_type='SCALE_ENTERPRISE'):
    """Mock system.product_type to return `product_type` for the duration of the context."""
    with mock('system.product_type', return_value=product_type):
        yield
| 230 | Python | .py | 6 | 34.833333 | 64 | 0.800905 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,735 | ftp.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/ftp.py | import contextlib
from types import SimpleNamespace
from middlewared.test.integration.assets.account import user
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call, ssh
@contextlib.contextmanager
def ftp_server(config=None):
    """Start the FTP service for the duration of the context, stopping it on exit.

    When `config` is given (even an empty dict), it is applied via ftp.update first.
    """
    if config is not None:
        call("ftp.update", config)
    call("service.start", "ftp")
    try:
        yield
    finally:
        call("service.stop", "ftp")
@contextlib.contextmanager
def anonymous_ftp_server(config=None, dataset_name="anonftp"):
    """Yield an anonymous-only FTP server rooted at a fresh world-writable dataset."""
    with dataset(dataset_name) as ds:
        anon_path = f"/mnt/{ds}"
        ssh(f"chmod 777 {anon_path}")
        ftp_config = {
            "onlyanonymous": True,
            "anonpath": anon_path,
            "onlylocal": False,
            **(config or {}),
        }
        with ftp_server(ftp_config):
            yield SimpleNamespace(dataset=ds, username="anonymous", password="")
@contextlib.contextmanager
def ftp_server_with_user_account(config=None):
    """Yield a local-accounts-only FTP server with a dedicated 'ftptest' user.

    The user is created in the local 'ftp' group with its home on a fresh dataset.
    """
    config = config or {}
    ftp_id = call("group.query", [["name", "=", "ftp"], ['local', '=', True]], {"get": True})["id"]
    with dataset("ftptest") as ds:
        with user({
            "username": "ftptest",
            "group_create": True,
            "home": f"/mnt/{ds}",
            "full_name": "FTP Test",
            "password": "pass",
            "home_create": False,
            "groups": [ftp_id],
        }):
            with ftp_server({
                "onlyanonymous": False,
                "anonpath": None,
                "onlylocal": True,
                **config,
            }):
                yield SimpleNamespace(dataset=ds, username="ftptest", password="pass")
| 1,690 | Python | .py | 48 | 26.583333 | 99 | 0.581495 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,736 | privilege.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/privilege.py | import contextlib
from middlewared.service_exception import InstanceNotFound
from middlewared.test.integration.utils import call
@contextlib.contextmanager
def privilege(data):
    """Create a privilege from `data`, yield it, delete it on exit if still present."""
    created = call("privilege.create", data)
    try:
        yield created
    finally:
        # The test body may already have removed the privilege.
        with contextlib.suppress(InstanceNotFound):
            call("privilege.delete", created["id"])
| 391 | Python | .py | 13 | 24.153846 | 58 | 0.719251 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,737 | datastore.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/datastore.py | import contextlib
from middlewared.test.integration.utils import call
@contextlib.contextmanager
def row(datastore, data, options=None):
    """Insert a row into `datastore`, yield its id, and delete the row on exit."""
    row_id = call("datastore.insert", datastore, data, options or {})
    try:
        yield row_id
    finally:
        call("datastore.delete", datastore, row_id)
| 319 | Python | .py | 10 | 27.3 | 60 | 0.718033 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,738 | snapshot_task.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/snapshot_task.py | import contextlib
from middlewared.test.integration.utils import call
@contextlib.contextmanager
def snapshot_task(data):
    """Create a periodic snapshot task from `data`, yield it, delete it on exit."""
    created = call("pool.snapshottask.create", data)
    try:
        yield created
    finally:
        call("pool.snapshottask.delete", created["id"])
| 270 | Python | .py | 9 | 25.444444 | 52 | 0.735409 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,739 | disk.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/disk.py | import contextlib
from middlewared.test.integration.utils import call, mock
@contextlib.contextmanager
def fake_disks(disks):
    """Mock disk.query so it additionally reports the given fake disks.

    `disks` maps devname -> field overrides; each fake entry is cloned from the
    last real disk with a unique identifier/serial suffix.
    """
    existing = call("disk.query")
    template = existing[-1]
    for index, (devname, overrides) in enumerate(disks.items(), start=1):
        entry = dict(template)
        tag = f"_fake{index}"
        entry["identifier"] += tag
        entry["serial"] += tag
        entry["name"] = devname
        entry["devname"] = devname
        entry.update(**overrides)
        existing.append(entry)
    with mock("disk.query", return_value=existing):
        yield
| 571 | Python | .py | 17 | 27.117647 | 57 | 0.630237 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,740 | directory_service.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/directory_service.py | # -*- coding=utf-8 -*-
import contextlib
import logging
import os
import sys
from middlewared.test.integration.utils import call, fail
try:
apifolder = os.getcwd()
sys.path.append(apifolder)
from auto_config import ha, hostname
except ImportError:
ha = False
hostname = None
try:
from config import AD_DOMAIN, ADPASSWORD, ADUSERNAME, ADNameServer, AD_COMPUTER_OU
except ImportError:
AD_DOMAIN = None
ADPASSWORD = None
ADUSERNAME = None
ADNameServer = None
AD_COMPUTER_OU = None
try:
from config import (
LDAPBASEDN,
LDAPBINDDN,
LDAPBINDPASSWORD,
LDAPHOSTNAME,
LDAPUSER,
LDAPPASSWORD
)
except ImportError:
LDAPBASEDN = None
LDAPBINDDN = None
LDAPBINDPASSWORD = None
LDAPHOSTNAME = None
LDAPUSER = None
LDAPPASSWORD = None
try:
from config import (
FREEIPA_IP,
FREEIPA_BASEDN,
FREEIPA_BINDDN,
FREEIPA_BINDPW,
FREEIPA_ADMIN_BINDDN,
FREEIPA_ADMIN_BINDPW,
FREEIPA_HOSTNAME,
)
except ImportError:
FREEIPA_IP = None
FREEIPA_BASEDN = None
FREEIPA_BINDDN = None
FREEIPA_BINDPW = None
FREEIPA_ADMIN_BINDDN = None
FREEIPA_ADMIN_BINDPW = None
FREEIPA_HOSTNAME = None
logger = logging.getLogger(__name__)
__all__ = ['active_directory', 'ldap', 'override_nameservers', 'ipa']
if ha and "hostname_virtual" in os.environ:
hostname = os.environ["hostname_virtual"]
@contextlib.contextmanager
def override_nameservers(_nameserver1=ADNameServer, _nameserver2='', _nameserver3=''):
    """Temporarily replace the global nameservers, restoring the originals on exit."""
    saved = call('network.configuration.config')
    try:
        yield call('network.configuration.update', {
            'nameserver1': _nameserver1,
            'nameserver2': _nameserver2,
            'nameserver3': _nameserver3,
        })
    finally:
        # Put back whatever was configured before we entered.
        call('network.configuration.update', {
            'nameserver1': saved['nameserver1'],
            'nameserver2': saved['nameserver2'],
            'nameserver3': saved['nameserver3'],
        })
def clear_ad_info():
    """Reset the Active Directory configuration to its disabled defaults."""
    call('activedirectory.update', {
        "domainname": "",
        "bindname": "",
        "verbose_logging": False,
        "allow_trusted_doms": False,
        "use_default_domain": False,
        "allow_dns_updates": True,
        "disable_freenas_cache": False,
        "restrict_pam": False,
        "site": None,
        "timeout": 60,
        "dns_timeout": 10,
        "nss_info": None,
        "enable": False,
        "kerberos_principal": "",
        "createcomputer": "",
        "kerberos_realm": None,
    }, job=True)
@contextlib.contextmanager
def active_directory(
    domain=AD_DOMAIN,
    username=ADUSERNAME,
    password=ADPASSWORD,
    hostname=hostname,
    nameserver=ADNameServer,
    computerou=AD_COMPUTER_OU,
    **kwargs
):
    """Join Active Directory for the duration of the context, then leave and reset.

    Yields a dict with the AD config, domain info, DC lookup results and the
    resolved bind user. Nameservers are overridden to `nameserver` while joined.
    """
    payload = {
        'domainname': domain,
        'bindname': username,
        'bindpw': password,
        'netbiosname': hostname,
        'createcomputer': computerou,
        'kerberos_principal': '',
        'use_default_domain': False,
        'enable': True,
        **kwargs
    }
    with override_nameservers(nameserver):
        try:
            config = call('activedirectory.update', payload, job=True)
        except Exception:
            clear_ad_info()
            # we may be testing ValidationErrors
            raise
        try:
            domain_info = call('activedirectory.domain_info')
        except Exception:
            # This is definitely unexpected and not recoverable
            fail('Failed to retrieve domain information')
        dc_info = call('activedirectory.lookup_dc', domain)
        # DOMAIN\username form for the pre-Windows-2000 domain name.
        u = f'{dc_info["Pre-Win2k Domain"]}\\{ADUSERNAME.lower()}'
        try:
            user_obj = call('user.get_user_obj', {'username': u, 'sid_info': True})
        except Exception:
            # This is definitely unexpected and not recoverable
            fail(f'{username}: failed to retrieve information about user')
        try:
            yield {
                'config': config,
                'domain_info': domain_info,
                'dc_info': dc_info,
                'user_obj': user_obj,
            }
        finally:
            # Leave the domain, then reset the stored AD configuration.
            call('activedirectory.leave', {'username': username, 'password': password}, job=True)
            clear_ad_info()
def clear_ldap_info():
    """Reset the LDAP configuration to its disabled defaults."""
    call('ldap.update', {
        "hostname": [],
        "basedn": "",
        "binddn": "",
        "bindpw": "",
        "ssl": "ON",
        "enable": False,
        "kerberos_principal": "",
        "kerberos_realm": None,
        "anonbind": False,
        "validate_certificates": True,
        "disable_freenas_cache": False,
        "certificate": None,
        "auxiliary_parameters": ""
    }, job=True)
@contextlib.contextmanager
def ldap(
    basedn=LDAPBASEDN,
    binddn=LDAPBINDDN,
    bindpw=LDAPBINDPASSWORD,
    hostname=LDAPHOSTNAME,
    **kwargs
):
    """Enable LDAP with the given bind settings for the duration of the context.

    Yields {'config': <ldap config>} with the bind password scrubbed; the LDAP
    configuration is reset to defaults on exit.
    """
    config = call('ldap.update', {
        "basedn": basedn,
        "binddn": binddn,
        "bindpw": bindpw,
        "hostname": [hostname],
        "ssl": "ON",
        "auxiliary_parameters": "",
        "validate_certificates": True,
        "enable": True,
        **kwargs
    }, job=True)
    try:
        # Scrub the password before handing the config to the test body.
        config['bindpw'] = None
        yield {
            'config': config,
        }
    finally:
        clear_ldap_info()
@contextlib.contextmanager
def ipa(
    basedn=FREEIPA_BASEDN,
    binddn=FREEIPA_ADMIN_BINDDN,
    bindpw=FREEIPA_ADMIN_BINDPW,
    hostname=FREEIPA_HOSTNAME,
    nameserver=FREEIPA_IP,
    **kwargs
):
    """Join a FreeIPA domain via LDAP for the duration of the context.

    Yields the LDAP config (bind password scrubbed) merged with the resolved
    IPA config. On exit the IPA domain is left and LDAP config is reset.
    """
    with override_nameservers(nameserver):
        try:
            config = call('ldap.update', {
                "basedn": basedn,
                "binddn": binddn,
                "bindpw": bindpw,
                "hostname": [hostname],
                "ssl": "ON",
                "auxiliary_parameters": "",
                "validate_certificates": False,
                "enable": True,
                **kwargs
            }, job=True)
            ipa_config = call('ldap.ipa_config', config)
            # Scrub the password before handing the config to the test body.
            config['bindpw'] = None
            try:
                yield config | {'ipa_config': ipa_config}
            finally:
                # Leave the IPA domain before clearing the LDAP configuration.
                call('directoryservices.connection.leave_domain', 'IPA', ipa_config['domain'], job=True)
        finally:
            clear_ldap_info()
| 6,468 | Python | .py | 218 | 21.784404 | 104 | 0.588491 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,741 | vmware.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/vmware.py | import contextlib
from middlewared.test.integration.utils import call
@contextlib.contextmanager
def vmware(data):
    """Create a VMware integration record from `data`, yield it, delete it on exit."""
    created = call("vmware.create", data)
    try:
        yield created
    finally:
        call("vmware.delete", created["id"])
| 247 | Python | .py | 9 | 22.888889 | 51 | 0.722222 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,742 | nfs.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/nfs.py | # -*- coding=utf-8 -*-
import contextlib
import logging
from middlewared.test.integration.utils import call
from time import sleep
logger = logging.getLogger(__name__)
__all__ = ["nfs_share", "nfs_server"]
@contextlib.contextmanager
def nfs_server():
    """Run the NFS service for the duration of the context.

    Yields the `service.start` result and stops the service on exit.
    """
    options = {'silent': False}
    try:
        started = call('service.start', 'nfs', options)
        # Brief pause so the service is actually up before the test proceeds.
        sleep(1)
        yield started
    finally:
        call('service.stop', 'nfs', options)
@contextlib.contextmanager
def nfs_share(dataset):
share = call("sharing.nfs.create", {
"path": f"/mnt/{dataset}",
})
assert call("service.start", "nfs")
try:
yield share
finally:
call("sharing.nfs.delete", share["id"])
call("service.stop", "nfs")
| 733 | Python | .py | 26 | 23.346154 | 61 | 0.633763 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,743 | crypto.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/crypto.py | import contextlib
from middlewared.test.integration.utils import call
def get_cert_params():
    """Return a fresh baseline parameter dict for certificate/CA creation.

    Describes a 4096-bit RSA certificate for the iX development organisation;
    callers typically merge these defaults with request-specific keys.
    """
    return dict(
        key_type='RSA',
        key_length=4096,
        san=['domain1', '8.8.8.8'],
        common='dev',
        country='US',
        state='TN',
        city='Knoxville',
        organization='iX',
        organizational_unit='dev',
        email='dev@ix.com',
        digest_algorithm='SHA256',
        lifetime=397,
        serial=12931,
        cert_extensions={},
    )
@contextlib.contextmanager
def root_certificate_authority(name):
    """Yield a freshly created internal (root) CA named *name*; the CA is
    deleted again when the context exits."""
    payload = get_cert_params()
    payload['name'] = name
    payload['create_type'] = 'CA_CREATE_INTERNAL'
    ca = call('certificateauthority.create', payload)
    try:
        yield ca
    finally:
        call('certificateauthority.delete', ca['id'])
@contextlib.contextmanager
def intermediate_certificate_authority(root_ca_name, intermediate_ca_name):
    """Yield ``(root_ca, intermediate_ca)`` where the intermediate CA is
    signed by the root CA; both are deleted when the context exits."""
    with root_certificate_authority(root_ca_name) as root_ca:
        payload = get_cert_params()
        payload['signedby'] = root_ca['id']
        payload['name'] = intermediate_ca_name
        payload['create_type'] = 'CA_CREATE_INTERMEDIATE'
        intermediate_ca = call('certificateauthority.create', payload)
        try:
            yield root_ca, intermediate_ca
        finally:
            call('certificateauthority.delete', intermediate_ca['id'])
@contextlib.contextmanager
def certificate_signing_request(csr_name):
    """Yield a newly created CSR named *csr_name*; it is deleted on exit."""
    # A CSR has no lifetime, so drop that key from the baseline parameters.
    params = {k: v for k, v in get_cert_params().items() if k != 'lifetime'}
    csr = call('certificate.create', {
        'name': csr_name,
        'create_type': 'CERTIFICATE_CREATE_CSR',
        **params,
    }, job=True)
    try:
        yield csr
    finally:
        call('certificate.delete', csr['id'], job=True)
| 1,785 | Python | .py | 56 | 24.446429 | 75 | 0.595807 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,744 | docker.py | truenas_middleware/src/middlewared/middlewared/test/integration/assets/docker.py | import contextlib
from middlewared.test.integration.utils import call
@contextlib.contextmanager
def docker(pool: dict):
    """Point the Docker service at *pool* for the duration of the context,
    restoring the no-pool default configuration (with the stock address pool)
    on exit."""
    config = call('docker.update', {'pool': pool['name']}, job=True)
    assert config['pool'] == pool['name'], config
    try:
        yield config
    finally:
        config = call(
            'docker.update', {'pool': None, 'address_pools': [{'base': '172.17.0.0/12', 'size': 24}]}, job=True
        )
        assert config['pool'] is None, config
| 526 | Python | .py | 13 | 34.615385 | 111 | 0.65098 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,745 | script.py.mako | truenas_middleware/src/middlewared/middlewared/alembic/script.py.mako | """${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}
def upgrade():
${upgrades if upgrades else "pass"}
def downgrade():
${downgrades if downgrades else "pass"}
| 494 | Python | .py | 17 | 27.176471 | 43 | 0.725532 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,746 | env.py | truenas_middleware/src/middlewared/middlewared/alembic/env.py | import contextlib
from logging.config import fileConfig
import os
from alembic import context
from alembic.operations import ops
from alembic.operations.base import BatchOperations, Operations
from alembic.operations.batch import ApplyBatchImpl, BatchOperationsImpl
from sqlalchemy import engine_from_config, ForeignKeyConstraint, pool
from middlewared.plugins.config import FREENAS_DATABASE
from middlewared.sqlalchemy import JSON, Model
from middlewared.utils.plugins import load_modules
from middlewared.utils.python import get_middlewared_dir
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = Model.metadata
# Import every plugin module so all of their table models get registered on
# Model.metadata before autogenerate compares it against the database.
list(load_modules(os.path.join(get_middlewared_dir(), "plugins"), depth=1))
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
# The database path may be overridden via the environment (e.g. for tests).
DATABASE_URL = f"sqlite:///{os.environ.get('FREENAS_DATABASE', FREENAS_DATABASE)}"
# Keep a reference to the unpatched method; batch_alter_table_impl (below)
# wraps it and is installed in its place.
original_batch_alter_table = Operations.batch_alter_table
@contextlib.contextmanager
def batch_alter_table_impl(self, *args, **kwargs):
    """Monkeypatched replacement for ``Operations.batch_alter_table`` that
    forces ``sqlite_autoincrement=True`` on the table rebuilt during a batch
    operation, while still honoring any caller-supplied ``table_kwargs``.
    """
    # https://github.com/sqlalchemy/alembic/issues/380
    kwargs["table_kwargs"] = {"sqlite_autoincrement": True, **kwargs.get("table_kwargs", {})}
    with original_batch_alter_table(self, *args, **kwargs) as result:
        yield result
@Operations.register_operation("drop_references")
@BatchOperations.register_operation("drop_references", "batch_drop_references")
class DropReferencesOp(ops.MigrateOperation):
    """Custom Alembic operation that removes the foreign-key constraint
    referencing a given column during a batch table rebuild.

    Exposed to migrations as ``batch_op.drop_references(field_name)``.
    """
    def __init__(
        self,
        field_name,
        table_name,
    ):
        self.field_name = field_name
        self.table_name = table_name
    @classmethod
    def drop_references(cls, operations):
        # Only meaningful inside a batch_alter_table block; invoking it as a
        # plain (non-batch) operation is a programming error.
        raise RuntimeError()
    @classmethod
    def batch_drop_references(cls, operations, field_name):
        # Build the op with the batch context's table name and dispatch it
        # through Alembic's normal invoke machinery.
        op = cls(
            field_name,
            operations.impl.table_name,
        )
        return operations.invoke(op)
@Operations.implementation_for(DropReferencesOp)
def drop_references(operations, operation):
    # Forward the custom op to the impl's drop_references hook (patched onto
    # BatchOperationsImpl/ApplyBatchImpl elsewhere in this module).
    operations.impl.drop_references(
        operation.field_name,
    )
def drop_references_impl(self, column_name):
    """Remove the first unnamed single-column foreign-key constraint whose
    column is *column_name* from the batch-rebuild constraint list.

    A no-op when no such constraint exists.
    """
    for candidate in self.unnamed_constraints:
        if not isinstance(candidate, ForeignKeyConstraint):
            continue
        columns = list(candidate.columns)
        if len(columns) == 1 and columns[0].name == column_name:
            self.unnamed_constraints.remove(candidate)
            break
# Install the monkeypatches defined above: force sqlite_autoincrement on
# batch rebuilds, and teach Alembic's batch machinery how to record and apply
# the custom "drop_references" operation.
Operations.batch_alter_table = batch_alter_table_impl
BatchOperationsImpl.drop_references = lambda self, column: self.batch.append(("drop_references", (column,), {}))
ApplyBatchImpl.drop_references = drop_references_impl
def include_object(object_, name, type_, reflected, compare_to):
    """Autogenerate filter: exclude SQLite's internal bookkeeping tables."""
    # `sqlite_sequence` is maintained by SQLite itself for AUTOINCREMENT
    # bookkeeping and must never appear in a migration.
    excluded_tables = {"sqlite_sequence"}
    return not (type_ == "table" and name in excluded_tables)
def render_item(type_, obj, autogen_context):
    """Autogenerate renderer: emit our custom JSON column type as plain
    ``sa.TEXT()``; return False to use the default rendering otherwise."""
    return "sa.TEXT()" if isinstance(obj, JSON) else False
def run_migrations_offline():
    """Run migrations in 'offline' mode.
    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.
    Calls to context.execute() here emit the given string to the
    script output.
    """
    # render_as_batch is needed for SQLite's limited ALTER TABLE support;
    # literal_binds inlines parameters so the emitted SQL is self-contained.
    context.configure(
        url=DATABASE_URL,
        target_metadata=target_metadata,
        render_as_batch=True,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
        include_object=include_object,
        render_item=render_item,
    )
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.
    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    # Point the engine at the middleware database, overriding the .ini URL.
    ini_config = config.get_section(config.config_ini_section)
    ini_config["sqlalchemy.url"] = DATABASE_URL
    connectable = engine_from_config(
        ini_config,
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )
    with connectable.connect() as connection:
        # render_as_batch is needed for SQLite's limited ALTER TABLE support.
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
            render_as_batch=True,
            include_object=include_object,
            render_item=render_item,
        )
        with context.begin_transaction():
            context.run_migrations()
# Entry point: Alembic imports this module with the migration context
# already established and we dispatch on the requested mode.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
| 5,005 | Python | .py | 127 | 33.905512 | 112 | 0.718685 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,747 | 2024-09-04_19-40_add_to_trust_store_field_for_cert.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/24.10/2024-09-04_19-40_add_to_trust_store_field_for_cert.py | """
Add add_to_trust_store field for certifacates
Revision ID: c31881e67797
Revises: 98c1ebde0079
Create Date: 2024-09-04 19:40:16.801832+00:00
"""
from alembic import op
import sqlalchemy as sa
revision = 'c31881e67797'
down_revision = '98c1ebde0079'
branch_labels = None
depends_on = None
def upgrade():
    # Add the "add to trusted store" flag; server_default='0' backfills
    # existing certificate rows with False.
    with op.batch_alter_table('system_certificate', schema=None) as batch_op:
        batch_op.add_column(sa.Column('cert_add_to_trusted_store', sa.Boolean(), nullable=False, server_default='0'))
def downgrade():
    # Rollback intentionally unsupported for this revision.
    pass
| 535 | Python | .py | 17 | 29.117647 | 117 | 0.761252 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,748 | 2024-02-12_19-47_add_enable_learning_flag.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/24.10/2024-02-12_19-47_add_enable_learning_flag.py | """Add enable learning flag
Revision ID: 7836261b2f64
Revises: 968d515e63e7
Create Date: 2024-02-12 19:47:35.379137+00:00
"""
from alembic import op
import sqlalchemy as sa
revision = '7836261b2f64'
down_revision = '968d515e63e7'
branch_labels = None
depends_on = None
def upgrade():
    # Add the bridge MAC-learning toggle; server_default='1' keeps existing
    # bridges with learning enabled.
    with op.batch_alter_table('network_bridge', schema=None) as batch_op:
        batch_op.add_column(sa.Column('enable_learning', sa.Boolean(), nullable=False, server_default='1'))
def downgrade():
    # Rollback intentionally unsupported for this revision.
    pass
| 500 | Python | .py | 16 | 28.75 | 107 | 0.758403 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,749 | 2024-06-26_19-44_add_smb_encryption.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/24.10/2024-06-26_19-44_add_smb_encryption.py | """add smb encryption parameter
Revision ID: d8bfbf4e277e
Revises: 91724c382023
Create Date: 2024-06-26 19:44:55.116098+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd8bfbf4e277e'
down_revision = '91724c382023'
branch_labels = None
depends_on = None
def upgrade():
    # Add the SMB transport-encryption setting; nullable so existing
    # configurations are left without a value (no server_default needed).
    with op.batch_alter_table('services_cifs', schema=None) as batch_op:
        batch_op.add_column(sa.Column('cifs_srv_encryption', sa.String(length=120), nullable=True))
| 508 | Python | .py | 15 | 31.666667 | 99 | 0.772074 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,750 | 2024-05-02_12-00_merge_migration.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/24.10/2024-05-02_12-00_merge_migration.py | """Merge migration for changes in SMB-related fields from 24.04
Revision ID: 135a7e02cbec
Revises: 4f11cc19bb9c, f38c2bbe776a
Create Date: 2024-05-02 12:00:35.086514+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '135a7e02cbec'
down_revision = ('4f11cc19bb9c', 'f38c2bbe776a')
branch_labels = None
depends_on = None
def upgrade():
    # Merge-point revision: joins two migration branches, no schema changes.
    pass
def downgrade():
    pass
| 436 | Python | .py | 16 | 25.25 | 63 | 0.783981 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,751 | 2024-08-30_22-13_cpu_topology_extension.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/24.10/2024-08-30_22-13_cpu_topology_extension.py | """
Providing cpu topology extension to VMs
Revision ID: d24d6760fda4
Revises: 7b13df980355
Create Date: 2024-08-30 22:13:09.525439+00:00
"""
from alembic import op
import sqlalchemy as sa
revision = 'd24d6760fda4'
down_revision = '7b13df980355'
branch_labels = None
depends_on = None
def upgrade():
    # Add the per-VM CPU topology extension flag, defaulting existing VMs
    # to disabled via server_default='0'.
    with op.batch_alter_table('vm_vm', schema=None) as batch_op:
        batch_op.add_column(
            sa.Column('enable_cpu_topology_extension', sa.Boolean(), nullable=False, server_default='0')
        )
def downgrade():
    # Rollback intentionally unsupported for this revision.
    pass
| 542 | Python | .py | 19 | 25.263158 | 104 | 0.73062 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,752 | 2024-09-03_20-33_docker_addr_pool.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/24.10/2024-09-03_20-33_docker_addr_pool.py | """
Add address_pools column to services_docker
Revision ID: 98c1ebde0079
Revises: d24d6760fda4
Create Date: 2024-09-03 20:33:47.996994+00:00
"""
from alembic import op
import sqlalchemy as sa
revision = '98c1ebde0079'
down_revision = 'd24d6760fda4'
branch_labels = None
depends_on = None
def upgrade():
    # Add the docker address_pools column, stored as a JSON string; existing
    # rows are backfilled with the stock 172.17.0.0/12 pool.
    with op.batch_alter_table('services_docker', schema=None) as batch_op:
        batch_op.add_column(
            sa.Column(
                'address_pools',
                sa.TEXT(),
                nullable=False,
                server_default='[{"base": "172.17.0.0/12", "size": 24}]'
            )
        )
def downgrade():
    # Rollback intentionally unsupported for this revision.
    pass
| 653 | Python | .py | 24 | 21.25 | 74 | 0.628617 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,753 | 2024-05-15_13-29_remove_swap_entry.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/24.10/2024-05-15_13-29_remove_swap_entry.py | """
Remove swap configuration from system advanced
Revision ID: 0dc9c3f51393
Revises: 135a7e02cbec
Create Date: 2024-05-13 13:29:06.007342+00:00
"""
from alembic import op
revision = '0dc9c3f51393'
down_revision = '135a7e02cbec'
branch_labels = None
depends_on = None
def upgrade():
    # Swap configuration is no longer user-settable; drop the column.
    with op.batch_alter_table('system_advanced', schema=None) as batch_op:
        batch_op.drop_column('adv_swapondrive')
def downgrade():
    # Rollback intentionally unsupported for this revision.
    pass
| 440 | Python | .py | 16 | 25 | 74 | 0.766827 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,754 | 2024-04-01_13-59_cloud_backup_keep_last.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/24.10/2024-04-01_13-59_cloud_backup_keep_last.py | """Cloud backup keep last
Revision ID: 1a6fc6735dc2
Revises: 7836261b2f64
Create Date: 2024-04-01 13:59:15.352191+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1a6fc6735dc2'
down_revision = '7836261b2f64'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): NOT NULL column added without a server_default — this
    # only succeeds if tasks_cloud_backup has no rows at upgrade time
    # (cloud backup tasks were new in this release); confirm if reusing.
    with op.batch_alter_table('tasks_cloud_backup', schema=None) as batch_op:
        batch_op.add_column(sa.Column('keep_last', sa.Integer(), nullable=False))
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('tasks_cloud_backup', schema=None) as batch_op:
        batch_op.drop_column('keep_last')
    # ### end Alembic commands ###
| 832 | Python | .py | 22 | 34.545455 | 81 | 0.71375 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,755 | 2024-10-03_20-46_docker_address_pool.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/24.10/2024-10-03_20-46_docker_address_pool.py | """
Docker address pool default updated
Revision ID: 92b98613c498
Revises: c31881e67797
Create Date: 2024-10-03 20:46:17.935672+00:00
"""
import json
from alembic import op
revision = '92b98613c498'
down_revision = 'c31881e67797'
branch_labels = None
depends_on = None
def upgrade():
    # Replace the previous two-pool default (172.30/172.31) with the stock
    # single 172.17.0.0/12 pool — but only when the user has not customized
    # address_pools (i.e. it still exactly equals the old default).
    conn = op.get_bind()
    if docker_config := list(map(
        dict, conn.execute('SELECT * FROM services_docker').fetchall()
    )):
        # services_docker is a single-row config table.
        docker_config = docker_config[0]
        address_pool_config = json.loads(docker_config['address_pools'])
        if address_pool_config == [{'base': '172.30.0.0/16', 'size': 27}, {'base': '172.31.0.0/16', 'size': 27}]:
            conn.execute("UPDATE services_docker SET address_pools = ? WHERE id = ?", [json.dumps(
                [{"base": "172.17.0.0/12", "size": 24}]
            ), docker_config['id']])
def downgrade():
    # Rollback intentionally unsupported for this revision.
    pass
| 878 | Python | .py | 25 | 30.2 | 113 | 0.642942 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,756 | 2024-04-05_18-48_merge.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/24.10/2024-04-05_18-48_merge.py | """Merge
Revision ID: 3cb1ccef4ade
Revises: d7243bc7bab9, 423e4c21c28d
Create Date: 2024-04-05 18:48:47.198813+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3cb1ccef4ade'
down_revision = ('d7243bc7bab9', '423e4c21c28d')
branch_labels = None
depends_on = None
def upgrade():
    # Merge-point revision: joins two migration branches, no schema changes.
    pass
def downgrade():
    pass
| 381 | Python | .py | 16 | 21.8125 | 48 | 0.778711 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,757 | 2024-08-12_08-09_nvidia.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/24.10/2024-08-12_08-09_nvidia.py | """NVIDIA
Revision ID: 5654da8713d1
Revises: 4b0b7ba46e63
Create Date: 2024-08-12 08:09:19.248327+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5654da8713d1'
down_revision = '4b0b7ba46e63'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Add the NVIDIA-support toggle for Docker; existing rows default to off.
    with op.batch_alter_table('services_docker', schema=None) as batch_op:
        batch_op.add_column(sa.Column('nvidia', sa.Boolean(), nullable=False, server_default='0'))
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('services_docker', schema=None) as batch_op:
        batch_op.drop_column('nvidia')
    # ### end Alembic commands ###
| 824 | Python | .py | 22 | 34.181818 | 98 | 0.712121 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,758 | 2024-07-22_18-50_remove_ftp_rootlogin.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/24.10/2024-07-22_18-50_remove_ftp_rootlogin.py | """ Remove FTP rootlogin
Revision ID: 81b8bae8fb11
Revises: 1307a8e6a8b6
Create Date: 2024-07-22 18:50:09.235185+00:00
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '81b8bae8fb11'
down_revision = '1307a8e6a8b6'
branch_labels = None
depends_on = None
def upgrade():
    # The FTP root-login option was removed from the product; drop its column.
    with op.batch_alter_table('services_ftp', schema=None) as batch_op:
        batch_op.drop_column('ftp_rootlogin')
    # ### end Alembic commands ###
| 457 | Python | .py | 15 | 28 | 71 | 0.745413 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,759 | 2024-08-16_10-01_lagg_on_delete_cascade.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/24.10/2024-08-16_10-01_lagg_on_delete_cascade.py | """Cascade delete LAGG interface configuration when network interface configuration is deleted
Revision ID: 7b13df980355
Revises: 5654da8713d1
Create Date: 2024-08-16 10:01:54.594886+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7b13df980355'
down_revision = '5654da8713d1'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Recreate the lagg -> network_interfaces FK with ON DELETE CASCADE so
    # deleting an interface row also removes its lagg configuration.
    with op.batch_alter_table('network_lagginterface', schema=None) as batch_op:
        batch_op.drop_constraint('fk_network_lagginterface_lagg_interface_id_network_interfaces', type_='foreignkey')
        batch_op.create_foreign_key(batch_op.f('fk_network_lagginterface_lagg_interface_id_network_interfaces'), 'network_interfaces', ['lagg_interface_id'], ['id'], ondelete='CASCADE')
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Restore the original FK without the cascade behaviour.
    with op.batch_alter_table('network_lagginterface', schema=None) as batch_op:
        batch_op.drop_constraint(batch_op.f('fk_network_lagginterface_lagg_interface_id_network_interfaces'), type_='foreignkey')
        batch_op.create_foreign_key('fk_network_lagginterface_lagg_interface_id_network_interfaces', 'network_interfaces', ['lagg_interface_id'], ['id'])
    # ### end Alembic commands ###
| 1,371 | Python | .py | 24 | 53.375 | 185 | 0.742708 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,760 | 2024-08-09_14-35_image_update.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/24.10/2024-08-09_14-35_image_update.py | """
Add flag to see if image update is requried
Revision ID: 4b0b7ba46e63
Revises: 81b8bae8fb11
Create Date: 2024-08-09 14:35:35.379137+00:00
"""
from alembic import op
import sqlalchemy as sa
revision = '4b0b7ba46e63'
down_revision = '81b8bae8fb11'
branch_labels = None
depends_on = None
def upgrade():
    # Add the app image auto-update toggle; existing rows default to enabled.
    with op.batch_alter_table('services_docker', schema=None) as batch_op:
        batch_op.add_column(sa.Column('enable_image_updates', sa.Boolean(), nullable=False, server_default='1'))
def downgrade():
    # Rollback intentionally unsupported for this revision.
    pass
| 526 | Python | .py | 17 | 28.529412 | 112 | 0.758483 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,761 | 2024-04-09_23-56_merge.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/24.10/2024-04-09_23-56_merge.py | """Merge
Revision ID: 4f11cc19bb9c
Revises: 3cb1ccef4ade, d774066c6c0c
Create Date: 2024-04-09 23:56:47.198813+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4f11cc19bb9c'
down_revision = ('3cb1ccef4ade', 'd774066c6c0c')
branch_labels = None
depends_on = None
def upgrade():
    # Merge-point revision: joins two migration branches, no schema changes.
    pass
def downgrade():
    pass
| 381 | Python | .py | 16 | 21.8125 | 48 | 0.778711 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,762 | 2024-06-28_07-35_add_login_banner.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/24.10/2024-06-28_07-35_add_login_banner.py | """Add login_banner column
Revision ID: 1307a8e6a8b6
Revises: d8bfbf4e277e
Create Date: 2024-06-24 12:57:36.048308+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1307a8e6a8b6'
down_revision = 'd8bfbf4e277e'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Add the login banner text; existing rows default to an empty banner.
    with op.batch_alter_table('system_advanced', schema=None) as batch_op:
        batch_op.add_column(sa.Column('adv_login_banner', sa.Text(), nullable=False, server_default=''))
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('system_advanced', schema=None) as batch_op:
        batch_op.drop_column('adv_login_banner')
| 786 | Python | .py | 20 | 36.25 | 104 | 0.731836 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,763 | 2024-01-31_10-05_merge.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/24.10/2024-01-31_10-05_merge.py | """merge migration
Revision ID: 968d515e63e7
Revises: 6a7c2281f48e
Create Date: 2024-01-31 11:30:59.733983+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '968d515e63e7'
down_revision = '6a7c2281f48e'
branch_labels = None
depends_on = None
def upgrade():
    # Merge-point revision: no schema changes.
    pass
def downgrade():
    pass
| 359 | Python | .py | 16 | 20.4375 | 45 | 0.785075 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,764 | 2024-04-01_20-15_add_global_system_id.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/24.10/2024-04-01_20-15_add_global_system_id.py | """Add global system ID
Revision ID: 14974a858948
Revises: 1a6fc6735dc2
Create Date: 2024-04-01 20:15:50.886820+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '14974a858948'
down_revision = '1a6fc6735dc2'
branch_labels = None
depends_on = None
def upgrade():
    # New single-row table holding the system-wide UUID.
    op.create_table(
        'system_globalid',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('system_uuid', sa.String(length=32), nullable=False),
        sa.PrimaryKeyConstraint('id', name=op.f('pk_system_globalid')),
    )
def downgrade():
    # Rollback intentionally unsupported for this revision.
    pass
| 607 | Python | .py | 21 | 25.47619 | 71 | 0.716753 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,765 | 2024-05-19_18-25_apps_catalog.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/24.10/2024-05-19_18-25_apps_catalog.py | """
Apps catalog integration
Revision ID: 91724c382023
Revises: 0dc9c3f51393
Create Date: 2024-05-19 16:25:17.935672+00:00
"""
import json
import sqlalchemy as sa
from alembic import op
revision = '91724c382023'
down_revision = '0dc9c3f51393'
branch_labels = None
depends_on = None
def upgrade():
    # Catalogs are rebuilt from scratch for the new apps backend.
    conn = op.get_bind()
    # We will drop all old catalogs
    conn.execute('DELETE FROM services_catalog')
    # The per-catalog git metadata is no longer stored in the database.
    with op.batch_alter_table('services_catalog', schema=None) as batch_op:
        batch_op.drop_column('repository')
        batch_op.drop_column('branch')
        batch_op.drop_column('builtin')
    # Now we will add our catalog
    conn.execute(
        "INSERT INTO services_catalog (label, preferred_trains) VALUES ('TRUENAS', ?)", (json.dumps(['stable']),)
    )
    # We will add the model which will be used for docker
    op.create_table(
        'services_docker',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('pool', sa.String(length=255), nullable=True),
        sa.PrimaryKeyConstraint('id', name=op.f('pk_services_docker')),
    )
def downgrade():
    # Rollback intentionally unsupported for this revision.
    pass
| 1,119 | Python | .py | 34 | 28.382353 | 113 | 0.690587 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,766 | 2024-04-03_15-15_merge.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/24.10/2024-04-03_15-15_merge.py | """Merge
Revision ID: d7243bc7bab9
Revises: 14974a858948, ea024b5dff95
Create Date: 2024-04-03 15:15:28.723303+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd7243bc7bab9'
down_revision = ('14974a858948', 'ea024b5dff95')
branch_labels = None
depends_on = None
def upgrade():
    # Merge-point revision: joins two migration branches, no schema changes.
    pass
def downgrade():
    pass
| 381 | Python | .py | 16 | 21.8125 | 48 | 0.778711 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,767 | 2020-05-29_16-11_remove_alerts_unique_index.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-05-29_16-11_remove_alerts_unique_index.py | """Remove alerts unique index
Revision ID: ffcd02f6af9f
Revises: 1432b666093e
Create Date: 2020-05-29 16:11:37.779011+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ffcd02f6af9f'
down_revision = '1432b666093e'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Best-effort drop: the index may already have been removed by an earlier
    # out-of-band migration, in which case the drop raises and is ignored.
    try:
        with op.batch_alter_table('system_alert', schema=None) as batch_op:
            batch_op.drop_index('system_alert_node_f77e0d77_uniq')
    except Exception:
        # Might be already done by migrate113
        pass
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('system_alert', schema=None) as batch_op:
        batch_op.create_index('system_alert_node_f77e0d77_uniq', ['node', 'klass', 'key'], unique=1)
    # ### end Alembic commands ###
| 962 | Python | .py | 26 | 32.846154 | 100 | 0.696544 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,768 | 2021-05-10_13-44_unique_iscsi_target_extent_name.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2021-05-10_13-44_unique_iscsi_target_extent_name.py | """Unique iscsi_target_extent_name
Revision ID: 2e2c8b0e787b
Revises: 50c8360d9616
Create Date: 2021-05-10 13:44:29.872200+00:00
"""
import itertools
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2e2c8b0e787b'
down_revision = '50c8360d9616'
branch_labels = None
depends_on = None
def ensure_unique_string(conn, table, column):
    """Make every non-NULL value of ``table``.``column`` unique.

    Rows are scanned in SELECT order; when a value repeats, an increasing
    numeric suffix (1, 2, ...) is appended to the original value until it no
    longer collides with anything seen so far, and the row is updated in
    place. NULL values are left untouched.
    """
    seen = set()
    for row in conn.execute(f"SELECT * FROM {table}").fetchall():
        value = row[column]
        if value is None:
            continue
        if value in seen:
            suffix = 1
            while value + str(suffix) in seen:
                suffix += 1
            value = value + str(suffix)
            conn.execute(f"UPDATE {table} SET {column} = ? WHERE id = ?", [value, row["id"]])
        seen.add(value)
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # De-duplicate existing extent names first, otherwise creating the unique
    # constraint below would fail on legacy data.
    conn = op.get_bind()
    ensure_unique_string(conn, 'services_iscsitargetextent', 'iscsi_target_extent_name')
    with op.batch_alter_table('services_iscsitargetextent', schema=None) as batch_op:
        batch_op.create_unique_constraint(batch_op.f('uq_services_iscsitargetextent_iscsi_target_extent_name'), ['iscsi_target_extent_name'])
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('services_iscsitargetextent', schema=None) as batch_op:
        batch_op.drop_constraint(batch_op.f('uq_services_iscsitargetextent_iscsi_target_extent_name'), type_='unique')
    # ### end Alembic commands ###
| 1,773 | Python | .py | 41 | 35.609756 | 141 | 0.64627 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,769 | 2020-01-07_11-27_add_fsrvp_to_smb.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-01-07_11-27_add_fsrvp_to_smb.py | """cifs_fsrvp
Revision ID: 133f2d9049d2
Revises: c0f121844b00
Create Date: 2020-01-07 11:27:47.818373+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '133f2d9049d2'
down_revision = 'c0f121844b00'
branch_labels = None
depends_on = None
def upgrade():
    # Add cifs_fsrvp as nullable first so the batch rebuild succeeds with
    # existing rows, then backfill and tighten to NOT NULL in a second pass.
    with op.batch_alter_table('sharing_cifs_share', schema=None) as batch_op:
        batch_op.add_column(sa.Column('cifs_fsrvp', sa.Boolean(), nullable=True))
        batch_op.drop_index('ix_sharing_cifs_share_cifs_storage_task_id')
        batch_op.drop_column('cifs_storage_task_id')
    op.execute("UPDATE sharing_cifs_share SET cifs_fsrvp = 0")
    with op.batch_alter_table('sharing_cifs_share', schema=None) as batch_op:
        batch_op.alter_column('cifs_fsrvp', nullable=False)
| 807 | Python | .py | 20 | 36.75 | 81 | 0.735558 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,770 | 2020-06-11_17-30_storage_disk_primary_key.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-06-11_17-30_storage_disk_primary_key.py | """storage_disk primary key
Revision ID: 8ac8158773c4
Revises: 5a365c7248da
Create Date: 2020-06-11 17:30:51.913706+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8ac8158773c4'
down_revision = '5a365c7248da'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Delete all but one row for every duplicated disk_identifier so the
    # primary key below can be created; which duplicates are removed depends
    # on ROWID order.
    conn = op.get_bind()
    for disk_identifier, count in conn.execute("""
        SELECT disk_identifier, COUNT(*)
        FROM storage_disk
        GROUP BY disk_identifier
        HAVING COUNT(*) > 1
    """).fetchall():
        conn.execute("""
            DELETE FROM storage_disk
            WHERE ROWID IN (
                SELECT ROWID
                FROM storage_disk
                WHERE disk_identifier = ?
                LIMIT ?
            )
        """, [disk_identifier, count - 1])
    with op.batch_alter_table('storage_disk', schema=None) as batch_op:
        batch_op.create_primary_key('pk_storage_disk', ['disk_identifier'])
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| 1,212 | Python | .py | 37 | 26.324324 | 75 | 0.626072 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,771 | 2020-03-12_21-32_mail_oauth.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-03-12_21-32_mail_oauth.py | """E-Mail OAuth
Revision ID: a06b1946f6a3
Revises: 32f55c715352
Create Date: 2020-03-12 21:32:11.938578+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a06b1946f6a3'
down_revision = '32f55c715352'
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable ``em_oauth`` TEXT column to ``system_email``."""
    oauth_column = sa.Column('em_oauth', sa.TEXT(), nullable=True)
    with op.batch_alter_table('system_email', schema=None) as table:
        table.add_column(oauth_column)
def downgrade():
    """Remove the ``em_oauth`` column from ``system_email``."""
    with op.batch_alter_table('system_email', schema=None) as table:
        table.drop_column('em_oauth')
| 804 | Python | .py | 22 | 33.272727 | 76 | 0.707254 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,772 | 2019-10-30_16-17_dataset_model.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2019-10-30_16-17_dataset_model.py | """Encrypted dataset model
Revision ID: 7e8f7f07153e
Revises: 7f8be1364037
Create Date: 2019-10-30 16:17:53.964201+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7e8f7f07153e'
down_revision = '7f8be1364037'
branch_labels = None
depends_on = None
def upgrade():
    """Create ``storage_encrypteddataset`` for per-dataset encryption keys."""
    columns = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=False),
        sa.Column('encryption_key', sa.Text(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('storage_encrypteddataset', *columns)
def downgrade():
    """Drop the table created by :func:`upgrade`."""
    op.drop_table('storage_encrypteddataset')
| 680 | Python | .py | 22 | 27.181818 | 65 | 0.721538 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,773 | 2021-09-30_18-42_syslog_ca.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2021-09-30_18-42_syslog_ca.py | """
Allow configuring CA for remote syslog tls connection
Revision ID: 45d6f6f07b0f
Revises: 26de83f45a9d
Create Date: 2021-09-30 18:42:42.818433+00:00
"""
from alembic import op
import sqlalchemy as sa
revision = '45d6f6f07b0f'
down_revision = '26de83f45a9d'
branch_labels = None
depends_on = None
def upgrade():
    """Link ``system_advanced`` to a CA for the remote-syslog TLS connection."""
    fk_columns = ['adv_syslog_tls_certificate_authority_id']
    with op.batch_alter_table('system_advanced', schema=None) as table:
        table.add_column(sa.Column('adv_syslog_tls_certificate_authority_id', sa.Integer(), nullable=True))
        table.create_index(
            table.f('ix_system_advanced_adv_syslog_tls_certificate_authority_id'),
            fk_columns, unique=False
        )
        table.create_foreign_key(
            table.f('fk_system_advanced_adv_syslog_tls_certificate_authority_id_system_certificateauthority'),
            'system_certificateauthority', fk_columns, ['id']
        )
def downgrade():
    # Intentionally empty: the column, index and FK added by upgrade()
    # are not removed on downgrade.
    pass
| 986 | Python | .py | 25 | 34.32 | 113 | 0.714885 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,774 | 2020-03-25_23-12_api_key_created_at.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-03-25_23-12_api_key_created_at.py | """API Key created_at
Revision ID: 38773ca83199
Revises: 6602857581db
Create Date: 2020-03-25 23:12:18.867794+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '38773ca83199'
down_revision = '6602857581db'
branch_labels = None
depends_on = None
def upgrade():
    """Add a NOT NULL ``created_at`` timestamp to API keys.

    Existing keys carry no creation time, so all rows are deleted rather
    than backfilled; the column can then be tightened to NOT NULL.
    """
    op.execute("DELETE FROM account_api_key")
    with op.batch_alter_table('account_api_key', schema=None) as table:
        table.add_column(sa.Column('created_at', sa.DateTime(), nullable=True))
    with op.batch_alter_table('account_api_key', schema=None) as table:
        table.alter_column('created_at', nullable=False)
def downgrade():
    """Drop the ``created_at`` column again."""
    with op.batch_alter_table('account_api_key', schema=None) as table:
        table.drop_column('created_at')
| 1,006 | Python | .py | 25 | 36.56 | 82 | 0.709278 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,775 | 2019-09-27_07-44_drop_nfs_share_path_mtm.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2019-09-27_07-44_drop_nfs_share_path_mtm.py | """Drop nfs_share_path MtM
Revision ID: a3423860aea0
Revises: 0d545b21e189
Create Date: 2019-09-27 07:44:47.071979+00:00
"""
import json
import operator
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a3423860aea0'
down_revision = '0d545b21e189'
branch_labels = None
depends_on = None
def upgrade():
    """Fold the ``sharing_nfs_share_path`` MtM table into a JSON column."""
    op.add_column('sharing_nfs_share', sa.Column('nfs_paths', sa.TEXT(), nullable=True))
    conn = op.get_bind()
    for (share_id,) in conn.execute("SELECT id FROM sharing_nfs_share").fetchall():
        rows = conn.execute("SELECT path FROM sharing_nfs_share_path WHERE share_id = ?", [share_id])
        paths = [row[0] for row in rows]
        conn.execute("UPDATE sharing_nfs_share SET nfs_paths = ? WHERE id = ?", [json.dumps(paths), share_id])
    # Every share now has a JSON value, so the column can be NOT NULL.
    with op.batch_alter_table('sharing_nfs_share', schema=None) as table:
        table.alter_column('nfs_paths',
                           existing_type=sa.TEXT(),
                           nullable=False)
    op.drop_table('sharing_nfs_share_path')
def downgrade():
    """Recreate the empty MtM table and drop the JSON column (paths are lost)."""
    op.drop_column('sharing_nfs_share', 'nfs_paths')
    op.create_table(
        'sharing_nfs_share_path',
        sa.Column('id', sa.INTEGER(), nullable=False),
        sa.Column('share_id', sa.INTEGER(), nullable=False),
        sa.Column('path', sa.VARCHAR(length=255), nullable=False),
        sa.PrimaryKeyConstraint('id'),
    )
| 1,598 | Python | .py | 38 | 36.973684 | 113 | 0.675065 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,776 | 2019-09-27_08-20_fix_lacking_foreign_keys.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2019-09-27_08-20_fix_lacking_foreign_keys.py | """Fix lacking foreign keys
Revision ID: d38e9cc6174c
Revises: a3423860aea0
Create Date: 2019-09-27 08:20:13.391318+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd38e9cc6174c'
down_revision = 'a3423860aea0'
branch_labels = None
depends_on = None
def create_foreign_key(*args, **kwargs):
    # Pre-FK cleanup helper whose signature mirrors
    # batch_op.create_foreign_key: args = (name, source_table,
    # referent_table, local_cols, remote_cols); name and kwargs are ignored.
    # Deletes rows whose FK value has no match in the referent table so
    # the real constraint created afterwards in upgrade() can be applied.
    op.execute(f"DELETE FROM {args[1]} WHERE {args[3][0]} NOT IN (SELECT {args[4][0]} FROM {args[2]})")
def create_foreign_key_nullable(*args, **kwargs):
    # Same cleanup as create_foreign_key(), but for nullable FK columns:
    # orphaned references are set to NULL instead of deleting the row.
    op.execute(f"UPDATE {args[1]} SET {args[3][0]} = NULL WHERE {args[3][0]} IS NOT NULL AND {args[3][0]} NOT IN (SELECT {args[4][0]} FROM {args[2]})")
def upgrade():
    """Add the foreign keys that were missing from many tables.

    For each table: first scrub orphaned rows/values with the
    create_foreign_key()/create_foreign_key_nullable() helpers above,
    then create the real constraint inside a batch_alter_table block
    (SQLite requires a table rebuild to add FKs).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    create_foreign_key(None, 'account_bsdgroupmembership', 'account_bsdgroups', ['bsdgrpmember_group_id'], ['id'])
    create_foreign_key(None, 'account_bsdgroupmembership', 'account_bsdusers', ['bsdgrpmember_user_id'], ['id'])
    with op.batch_alter_table('account_bsdgroupmembership', schema=None) as batch_op:
        batch_op.create_foreign_key(batch_op.f('fk_account_bsdgroupmembership_bsdgrpmember_group_id_account_bsdgroups'), 'account_bsdgroups', ['bsdgrpmember_group_id'], ['id'], ondelete='CASCADE')
        batch_op.create_foreign_key(batch_op.f('fk_account_bsdgroupmembership_bsdgrpmember_user_id_account_bsdusers'), 'account_bsdusers', ['bsdgrpmember_user_id'], ['id'], ondelete='CASCADE')
    create_foreign_key_nullable(None, 'directoryservice_activedirectory', 'directoryservice_kerberosrealm', ['ad_kerberos_realm_id'], ['id'])
    with op.batch_alter_table('directoryservice_activedirectory', schema=None) as batch_op:
        batch_op.drop_references('ad_kerberos_realm_id')
        batch_op.create_foreign_key(batch_op.f('fk_directoryservice_activedirectory_ad_kerberos_realm_id_directoryservice_kerberosrealm'), 'directoryservice_kerberosrealm', ['ad_kerberos_realm_id'], ['id'], ondelete='SET NULL')
    create_foreign_key(None, 'network_alias', 'network_interfaces', ['alias_interface_id'], ['id'])
    with op.batch_alter_table('network_alias', schema=None) as batch_op:
        batch_op.create_foreign_key(batch_op.f('fk_network_alias_alias_interface_id_network_interfaces'), 'network_interfaces', ['alias_interface_id'], ['id'], ondelete='CASCADE')
    create_foreign_key(None, 'network_bridge', 'network_interfaces', ['interface_id'], ['id'])
    with op.batch_alter_table('network_bridge', schema=None) as batch_op:
        batch_op.drop_references('interface_id')
        batch_op.create_foreign_key(batch_op.f('fk_network_bridge_interface_id_network_interfaces'), 'network_interfaces', ['interface_id'], ['id'], ondelete='CASCADE')
    create_foreign_key(None, 'network_lagginterface', 'network_interfaces', ['lagg_interface_id'], ['id'])
    with op.batch_alter_table('network_lagginterface', schema=None) as batch_op:
        batch_op.create_foreign_key(batch_op.f('fk_network_lagginterface_lagg_interface_id_network_interfaces'), 'network_interfaces', ['lagg_interface_id'], ['id'])
    create_foreign_key(None, 'network_lagginterfacemembers', 'network_lagginterface', ['lagg_interfacegroup_id'], ['id'])
    with op.batch_alter_table('network_lagginterfacemembers', schema=None) as batch_op:
        batch_op.drop_references('lagg_interfacegroup_id')
        batch_op.create_foreign_key(batch_op.f('fk_network_lagginterfacemembers_lagg_interfacegroup_id_network_lagginterface'), 'network_lagginterface', ['lagg_interfacegroup_id'], ['id'], ondelete='CASCADE')
    create_foreign_key_nullable(None, 'services_fibrechanneltotarget', 'services_iscsitarget', ['fc_target_id'], ['id'])
    with op.batch_alter_table('services_fibrechanneltotarget', schema=None) as batch_op:
        batch_op.create_foreign_key(batch_op.f('fk_services_fibrechanneltotarget_fc_target_id_services_iscsitarget'), 'services_iscsitarget', ['fc_target_id'], ['id'])
    create_foreign_key_nullable(None, 'services_iscsitargetgroups', 'services_iscsitargetauthorizedinitiator', ['iscsi_target_initiatorgroup_id'], ['id'])
    create_foreign_key(None, 'services_iscsitargetgroups', 'services_iscsitarget', ['iscsi_target_id'], ['id'])
    create_foreign_key(None, 'services_iscsitargetgroups', 'services_iscsitargetportal', ['iscsi_target_portalgroup_id'], ['id'])
    with op.batch_alter_table('services_iscsitargetgroups', schema=None) as batch_op:
        batch_op.create_foreign_key(batch_op.f('fk_services_iscsitargetgroups_iscsi_target_initiatorgroup_id_services_iscsitargetauthorizedinitiator'), 'services_iscsitargetauthorizedinitiator', ['iscsi_target_initiatorgroup_id'], ['id'], ondelete='SET NULL')
        batch_op.create_foreign_key(batch_op.f('fk_services_iscsitargetgroups_iscsi_target_id_services_iscsitarget'), 'services_iscsitarget', ['iscsi_target_id'], ['id'])
        batch_op.create_foreign_key(batch_op.f('fk_services_iscsitargetgroups_iscsi_target_portalgroup_id_services_iscsitargetportal'), 'services_iscsitargetportal', ['iscsi_target_portalgroup_id'], ['id'])
    create_foreign_key(None, 'services_iscsitargetportalip', 'services_iscsitargetportal', ['iscsi_target_portalip_portal_id'], ['id'])
    with op.batch_alter_table('services_iscsitargetportalip', schema=None) as batch_op:
        batch_op.create_foreign_key(batch_op.f('fk_services_iscsitargetportalip_iscsi_target_portalip_portal_id_services_iscsitargetportal'), 'services_iscsitargetportal', ['iscsi_target_portalip_portal_id'], ['id'])
    create_foreign_key_nullable(None, 'services_webdav', 'system_certificate', ['webdav_certssl_id'], ['id'])
    with op.batch_alter_table('services_webdav', schema=None) as batch_op:
        batch_op.create_foreign_key(batch_op.f('fk_services_webdav_webdav_certssl_id_system_certificate'), 'system_certificate', ['webdav_certssl_id'], ['id'])
    create_foreign_key(None, 'storage_encrypteddisk', 'storage_volume', ['encrypted_volume_id'], ['id'])
    create_foreign_key_nullable(None, 'storage_encrypteddisk', 'storage_disk', ['encrypted_disk_id'], ['disk_identifier'])
    with op.batch_alter_table('storage_encrypteddisk', schema=None) as batch_op:
        batch_op.create_foreign_key(batch_op.f('fk_storage_encrypteddisk_encrypted_volume_id_storage_volume'), 'storage_volume', ['encrypted_volume_id'], ['id'])
        batch_op.create_foreign_key(batch_op.f('fk_storage_encrypteddisk_encrypted_disk_id_storage_disk'), 'storage_disk', ['encrypted_disk_id'], ['disk_identifier'], ondelete='SET NULL')
    create_foreign_key(None, 'storage_scrub', 'storage_volume', ['scrub_volume_id'], ['id'])
    with op.batch_alter_table('storage_scrub', schema=None) as batch_op:
        batch_op.create_foreign_key(batch_op.f('fk_storage_scrub_scrub_volume_id_storage_volume'), 'storage_volume', ['scrub_volume_id'], ['id'])
    create_foreign_key(None, 'tasks_smarttest_smarttest_disks', 'storage_disk', ['disk_id'], ['disk_identifier'], ondelete='CASCADE')
    create_foreign_key(None, 'tasks_smarttest_smarttest_disks', 'tasks_smarttest', ['smarttest_id'], ['id'], ondelete='CASCADE')
    with op.batch_alter_table('tasks_smarttest_smarttest_disks', schema=None) as batch_op:
        batch_op.create_foreign_key(batch_op.f('fk_tasks_smarttest_smarttest_disks_disk_id_storage_disk'), 'storage_disk', ['disk_id'], ['disk_identifier'], ondelete='CASCADE')
        batch_op.create_foreign_key(batch_op.f('fk_tasks_smarttest_smarttest_disks_smarttest_id_tasks_smarttest'), 'tasks_smarttest', ['smarttest_id'], ['id'], ondelete='CASCADE')
    # ### end Alembic commands ###
def downgrade():
    """Drop every foreign key added by upgrade(), in reverse table order."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('tasks_smarttest_smarttest_disks', schema=None) as batch_op:
        batch_op.drop_constraint(batch_op.f('fk_tasks_smarttest_smarttest_disks_smarttest_id_tasks_smarttest'), type_='foreignkey')
        batch_op.drop_constraint(batch_op.f('fk_tasks_smarttest_smarttest_disks_disk_id_storage_disk'), type_='foreignkey')
    with op.batch_alter_table('storage_scrub', schema=None) as batch_op:
        batch_op.drop_constraint(batch_op.f('fk_storage_scrub_scrub_volume_id_storage_volume'), type_='foreignkey')
    with op.batch_alter_table('storage_encrypteddisk', schema=None) as batch_op:
        batch_op.drop_constraint(batch_op.f('fk_storage_encrypteddisk_encrypted_disk_id_storage_disk'), type_='foreignkey')
        batch_op.drop_constraint(batch_op.f('fk_storage_encrypteddisk_encrypted_volume_id_storage_volume'), type_='foreignkey')
    with op.batch_alter_table('services_webdav', schema=None) as batch_op:
        batch_op.drop_constraint(batch_op.f('fk_services_webdav_webdav_certssl_id_system_certificate'), type_='foreignkey')
    with op.batch_alter_table('services_iscsitargetportalip', schema=None) as batch_op:
        batch_op.drop_constraint(batch_op.f('fk_services_iscsitargetportalip_iscsi_target_portalip_portal_id_services_iscsitargetportal'), type_='foreignkey')
    with op.batch_alter_table('services_iscsitargetgroups', schema=None) as batch_op:
        batch_op.drop_constraint(batch_op.f('fk_services_iscsitargetgroups_iscsi_target_portalgroup_id_services_iscsitargetportal'), type_='foreignkey')
        batch_op.drop_constraint(batch_op.f('fk_services_iscsitargetgroups_iscsi_target_id_services_iscsitarget'), type_='foreignkey')
        batch_op.drop_constraint(batch_op.f('fk_services_iscsitargetgroups_iscsi_target_initiatorgroup_id_services_iscsitargetauthorizedinitiator'), type_='foreignkey')
    with op.batch_alter_table('services_fibrechanneltotarget', schema=None) as batch_op:
        batch_op.drop_constraint(batch_op.f('fk_services_fibrechanneltotarget_fc_target_id_services_iscsitarget'), type_='foreignkey')
    with op.batch_alter_table('network_lagginterfacemembers', schema=None) as batch_op:
        batch_op.drop_constraint(batch_op.f('fk_network_lagginterfacemembers_lagg_interfacegroup_id_network_lagginterface'), type_='foreignkey')
    with op.batch_alter_table('network_lagginterface', schema=None) as batch_op:
        batch_op.drop_constraint(batch_op.f('fk_network_lagginterface_lagg_interface_id_network_interfaces'), type_='foreignkey')
    with op.batch_alter_table('network_bridge', schema=None) as batch_op:
        batch_op.drop_constraint(batch_op.f('fk_network_bridge_interface_id_network_interfaces'), type_='foreignkey')
    with op.batch_alter_table('network_alias', schema=None) as batch_op:
        batch_op.drop_constraint(batch_op.f('fk_network_alias_alias_interface_id_network_interfaces'), type_='foreignkey')
    with op.batch_alter_table('directoryservice_activedirectory', schema=None) as batch_op:
        batch_op.drop_constraint(batch_op.f('fk_directoryservice_activedirectory_ad_kerberos_realm_id_directoryservice_kerberosrealm'), type_='foreignkey')
    with op.batch_alter_table('account_bsdgroupmembership', schema=None) as batch_op:
        batch_op.drop_constraint(batch_op.f('fk_account_bsdgroupmembership_bsdgrpmember_user_id_account_bsdusers'), type_='foreignkey')
        batch_op.drop_constraint(batch_op.f('fk_account_bsdgroupmembership_bsdgrpmember_group_id_account_bsdgroups'), type_='foreignkey')
    # ### end Alembic commands ###
| 11,213 | Python | .py | 105 | 100.552381 | 259 | 0.736495 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,777 | 2020-10-07_19-01_replication_encryption.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-10-07_19-01_replication_encryption.py | """Replication target dataset encryption
Revision ID: 1a191726e5ea
Revises: a3298f120609
Create Date: 2020-10-07 19:01:33.955108+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1a191726e5ea'
down_revision = 'a3298f120609'
branch_labels = None
depends_on = None
def upgrade():
    """Add target-dataset encryption settings to replication tasks."""
    new_columns = (
        sa.Column('repl_encryption', sa.Boolean(), nullable=True),
        sa.Column('repl_encryption_key', sa.Text(), nullable=True),
        sa.Column('repl_encryption_key_format', sa.String(length=120), nullable=True),
        sa.Column('repl_encryption_key_location', sa.Text(), nullable=True),
    )
    with op.batch_alter_table('storage_replication', schema=None) as table:
        for column in new_columns:
            table.add_column(column)
    # Backfill the flag before tightening it to NOT NULL.
    op.execute('UPDATE storage_replication SET repl_encryption = FALSE')
    with op.batch_alter_table('storage_replication', schema=None) as table:
        table.alter_column('repl_encryption',
                           existing_type=sa.BOOLEAN(),
                           nullable=False)
def downgrade():
    """Remove all replication encryption columns."""
    with op.batch_alter_table('storage_replication', schema=None) as table:
        for name in ('repl_encryption_key_location',
                     'repl_encryption_key_format',
                     'repl_encryption_key',
                     'repl_encryption'):
            table.drop_column(name)
| 1,601 | Python | .py | 33 | 43.090909 | 106 | 0.705656 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,778 | 2020-02-24_17-48_webdav_enable.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-02-24_17-48_webdav_enable.py | """webdav_enable
Revision ID: b5cac06345ea
Revises: 06bfbd354deb
Create Date: 2020-02-24 17:48:00.013754+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b5cac06345ea'
down_revision = '06bfbd354deb'
branch_labels = None
depends_on = None
def upgrade():
    """Add a per-share ``webdav_enabled`` flag, defaulting to enabled."""
    enabled_flag = sa.Column('webdav_enabled', sa.Boolean(), nullable=False, server_default='1')
    with op.batch_alter_table('sharing_webdav_share', schema=None) as table:
        table.add_column(enabled_flag)
def downgrade():
    """Drop the ``webdav_enabled`` flag."""
    with op.batch_alter_table('sharing_webdav_share', schema=None) as table:
        table.drop_column('webdav_enabled')
| 857 | Python | .py | 22 | 35.681818 | 106 | 0.717576 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,779 | 2020-10-20_22-08_merge.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-10-20_22-08_merge.py | """Merge
Revision ID: 56016e596321
Revises: 1a191726e5ea, b410e4d0145f
Create Date: 2020-10-20 22:08:42.985220+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '56016e596321'
down_revision = ('1a191726e5ea', 'b410e4d0145f')
branch_labels = None
depends_on = None
def upgrade():
    # No schema changes: this revision only merges two migration heads
    # (1a191726e5ea and b410e4d0145f) back into a single lineage.
    pass
def downgrade():
    # Merge-point revision: nothing to revert.
    pass
| 381 | Python | .py | 16 | 21.8125 | 48 | 0.778711 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,780 | 2020-03-10_09-03_replication_readonly.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-03-10_09-03_replication_readonly.py | """Replication readonly
Revision ID: 8f874e6e40bc
Revises: b5cac06345ea
Create Date: 2020-03-10 09:03:42.449016+00:00
"""
import os
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8f874e6e40bc'
down_revision = 'b5cac06345ea'
branch_labels = None
depends_on = None
def upgrade():
    """Add ``repl_readonly``; its default depends on a license file.

    The migration is idempotent: it returns early when the column is
    already present.
    """
    conn = op.get_bind()
    if any(col["name"] == "repl_readonly"
           for col in conn.execute("PRAGMA TABLE_INFO(storage_replication)")):
        return
    # Licensed (HA) systems default to REQUIRE, others to SET.
    readonly = "REQUIRE" if os.path.exists("/data/license") else "SET"
    with op.batch_alter_table('storage_replication', schema=None) as table:
        table.add_column(sa.Column('repl_readonly', sa.String(length=120), nullable=False, server_default=readonly))
        # The server default only exists to populate existing rows;
        # clear it once the column is in place.
        table.alter_column('repl_readonly',
                           existing_type=sa.TEXT(),
                           server_default=None)
def downgrade():
    """Drop the ``repl_readonly`` column."""
    with op.batch_alter_table('storage_replication', schema=None) as table:
        table.drop_column('repl_readonly')
| 1,245 | Python | .py | 31 | 35.193548 | 119 | 0.691091 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,781 | 2020-05-21_11-45_make-netatalk-logging-configurable.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-05-21_11-45_make-netatalk-logging-configurable.py | """make netatalk logging configurable
Revision ID: e9e611d7c5a7
Revises: 730c995cbd37
Create Date: 2020-05-21 11:45:15.691643+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e9e611d7c5a7'
down_revision = '730c995cbd37'
branch_labels = None
depends_on = None
def upgrade():
    """Add a configurable AFP log level, backfilled to MINIMUM."""
    with op.batch_alter_table('services_afp', schema=None) as table:
        table.add_column(sa.Column('afp_srv_loglevel', sa.String(length=120), nullable=True))
    op.execute('UPDATE services_afp SET afp_srv_loglevel="MINIMUM"')
    with op.batch_alter_table('services_afp', schema=None) as table:
        table.alter_column('afp_srv_loglevel', nullable=False)
def downgrade():
    """Drop the AFP log level column."""
    with op.batch_alter_table('services_afp', schema=None) as table:
        table.drop_column('afp_srv_loglevel')
| 1,063 | Python | .py | 25 | 38.8 | 96 | 0.719298 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,782 | 2020-06-26_11-26_add-smb-secrets.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-06-26_11-26_add-smb-secrets.py | """Add SMB secrets
Revision ID: c01e9d77922e
Revises: 8ac8158773c4
Create Date: 2020-06-26 11:26:47.888532+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c01e9d77922e'
down_revision = '8ac8158773c4'
branch_labels = None
depends_on = None
def upgrade():
    """Add a nullable TEXT column for storing SMB secrets."""
    secrets_column = sa.Column('cifs_srv_secrets', sa.TEXT(), nullable=True)
    with op.batch_alter_table('services_cifs', schema=None) as table:
        table.add_column(secrets_column)
def downgrade():
    """Drop the SMB secrets column."""
    with op.batch_alter_table('services_cifs', schema=None) as table:
        table.drop_column('cifs_srv_secrets')
| 825 | Python | .py | 22 | 34.227273 | 84 | 0.712484 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,783 | 2021-09-30_19-28_v4_owner_major.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2021-09-30_19-28_v4_owner_major.py | """add nfsv4 owner_major column
Revision ID: fee786dfe121
Revises: 45d6f6f07b0f
Create Date: 2021-09-14 19:28:42.914039+00:00
"""
from alembic import op
import sqlalchemy as sa
revision = 'fee786dfe121'
down_revision = '45d6f6f07b0f'
branch_labels = None
depends_on = None
def upgrade():
    """Add ``nfs_srv_v4_owner_major``, backfilled to the empty string."""
    with op.batch_alter_table('services_nfs', schema=None) as table:
        table.add_column(sa.Column('nfs_srv_v4_owner_major', sa.String(length=1023), nullable=True))
    op.execute('UPDATE services_nfs SET nfs_srv_v4_owner_major = ""')
    with op.batch_alter_table('services_nfs', schema=None) as table:
        table.alter_column('nfs_srv_v4_owner_major', existing_type=sa.VARCHAR(1023), nullable=False)
def downgrade():
    """Drop the NFSv4 owner-major column."""
    with op.batch_alter_table('services_nfs', schema=None) as table:
        table.drop_column('nfs_srv_v4_owner_major')
| 864 | Python | .py | 20 | 39.7 | 103 | 0.736211 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,784 | 2020-01-15_21-13_fix_shell_choices.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-01-15_21-13_fix_shell_choices.py | """Fix user shell choices
Revision ID: f3875acb8d76
Revises: 39a133a04496
Create Date: 2020-01-15 21:13:01.570666+00:00
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'f3875acb8d76'
down_revision = '39a133a04496'
branch_labels = None
depends_on = None
def upgrade():
    """Repair user shells stored with a doubled leading slash."""
    conn = op.get_bind()
    params = ('/etc/netcli.sh', '//etc/netcli.sh')
    conn.execute('UPDATE account_bsdUsers SET bsdusr_shell = ? WHERE bsdusr_shell = ?', params)
def downgrade():
    # Data fix only; the malformed '//etc/netcli.sh' value is not restored.
    pass
| 500 | Python | .py | 18 | 24.944444 | 89 | 0.72093 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,785 | 2019-10-07_08-39_failover_master_node.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2019-10-07_08-39_failover_master_node.py | """Failover master node
Revision ID: 74cf6ec20dcd
Revises: d38e9cc6174c
Create Date: 2019-10-07 08:39:20.884714+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '74cf6ec20dcd'
down_revision = 'd38e9cc6174c'
branch_labels = None
depends_on = None
def upgrade():
    """Replace the boolean ``master`` column with a ``master_node`` letter."""
    with op.batch_alter_table('system_failover', schema=None) as table:
        table.add_column(sa.Column('master_node', sa.String(length=1), nullable=True))
    # Mark every existing row as node 'A' before enforcing NOT NULL.
    op.execute("UPDATE system_failover SET master_node = 'A'")
    with op.batch_alter_table('system_failover', schema=None) as table:
        table.alter_column('master_node',
                           existing_type=sa.VARCHAR(length=1),
                           nullable=False)
        table.drop_column('master')
def downgrade():
    """Restore the boolean ``master`` column and drop ``master_node``."""
    with op.batch_alter_table('system_failover', schema=None) as table:
        table.add_column(sa.Column('master', sa.BOOLEAN(), nullable=False))
        table.drop_column('master_node')
| 1,219 | Python | .py | 29 | 37.103448 | 89 | 0.692699 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,786 | 2021-03-31_09-08_fix_ldap_pwenc.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2021-03-31_09-08_fix_ldap_pwenc.py | """Fix LDAP password encryption
Revision ID: 50c8360d9616
Revises: dc143ce20fcd
Create Date: 2021-03-31 09:08:41.264829+00:00
"""
from alembic import op
import sqlalchemy as sa
from middlewared.plugins.pwenc import encrypt, decrypt
# revision identifiers, used by Alembic.
revision = '50c8360d9616'
down_revision = 'dc143ce20fcd'
branch_labels = None
depends_on = None
def upgrade():
    """Swap mistakenly-encrypted LDAP credentials back into place.

    Rows where ldap_binddn decrypts successfully were written by the
    buggy code path; for those, the binddn is stored decrypted and the
    bindpw is (re-)encrypted.
    """
    conn = op.get_bind()
    for row in conn.execute("SELECT * FROM directoryservice_ldap").fetchall():
        if row["ldap_binddn"] and decrypt(row["ldap_binddn"]):
            # New (>= 12.0) configurations have ldap_binddn erroneously encrypted instead of ldap_bindpw
            # due to fd623d849d1abee8c5786128b150e92209ba1f69
            conn.execute("UPDATE directoryservice_ldap SET ldap_binddn = ?, ldap_bindpw = ? WHERE id = ?", [
                decrypt(row["ldap_binddn"]),
                encrypt(row["ldap_bindpw"]),
                row["id"],
            ])
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # The credential repair is one-way; nothing to restore.
    pass
    # ### end Alembic commands ###
| 1,131 | Python | .py | 29 | 33.241379 | 108 | 0.671245 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,787 | 2020-05-18_14-29_nmbclusters-fix.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-05-18_14-29_nmbclusters-fix.py | """remove nmbclusters sysctl
Revision ID: 730c995cbd37
Revises: 434ea5397cd3
Create Date: 2020-05-18 14:29:59.887895+00:00
"""
# revision identifiers, used by Alembic.
revision = '730c995cbd37'
down_revision = '434ea5397cd3'
branch_labels = None
depends_on = None
TABLE = 'system_tunable'
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # No-op here. NOTE(review): the revision title says "remove
    # nmbclusters sysctl" and a TABLE = 'system_tunable' constant exists
    # above, but no statement in this file touches it — the actual
    # removal presumably happens elsewhere; confirm before relying on it.
    pass
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Nothing to revert: upgrade() makes no schema changes.
    pass
    # ### end Alembic commands ###
| 550 | Python | .py | 19 | 26.210526 | 65 | 0.710728 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,788 | 2019-11-19_21-01_2fa.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2019-11-19_21-01_2fa.py | """Two-Factor auth
Revision ID: f2e8d8e7fd57
Revises: 514ce6934952
Create Date: 2019-11-19 21:01:50.682690+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f2e8d8e7fd57'
down_revision = '514ce6934952'
branch_labels = None
depends_on = None
def upgrade():
    """Create the two-factor authentication configuration table."""
    op.create_table(
        'system_twofactorauthentication',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('otp_digits', sa.Integer(), nullable=False),
        sa.Column('secret', sa.String(length=16), nullable=True),
        sa.Column('window', sa.Integer(), nullable=False),
        sa.Column('interval', sa.Integer(), nullable=False),
        sa.Column('services', sa.TEXT(), nullable=False),
        sa.Column('enabled', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint('id', name=op.f('pk_system_twofactorauthentication')),
    )
def downgrade():
    """Drop the two-factor authentication table."""
    op.drop_table('system_twofactorauthentication')
| 1,131 | Python | .py | 29 | 35.655172 | 81 | 0.710238 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,789 | 2020-01-14_13-26_add_service_announcments.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-01-14_13-26_add_service_announcments.py | """gc_service_announcement
Revision ID: 39a133a04496
Revises: 133f2d9049d2
Create Date: 2020-01-14 13:26:02.169132+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '39a133a04496'
down_revision = '133f2d9049d2'
branch_labels = None
depends_on = None
def upgrade():
    """Consolidate service-announcement flags into one JSON column."""
    with op.batch_alter_table('network_globalconfiguration', schema=None) as table:
        table.add_column(sa.Column('gc_service_announcement', sa.Text(), nullable=True))
    # Backfill every row with all announcement protocols enabled.
    defaults = '{"mdns": true, "wsd": true, "netbios": true}'
    op.execute(f"UPDATE network_globalconfiguration SET gc_service_announcement = \'{defaults}\'")
    with op.batch_alter_table('network_globalconfiguration', schema=None) as table:
        table.alter_column('gc_service_announcement', nullable=False)
    # The SMB-specific zeroconf toggle is superseded by the JSON column.
    with op.batch_alter_table('services_cifs', schema=None) as table:
        table.drop_column('cifs_srv_zeroconf')
def downgrade():
    """Restore the legacy SMB zeroconf flag and drop the JSON announcement column."""
    with op.batch_alter_table('services_cifs', schema=None) as batch_op:
        batch_op.add_column(sa.Column('cifs_srv_zeroconf', sa.BOOLEAN(), nullable=False))
    with op.batch_alter_table('network_globalconfiguration', schema=None) as batch_op:
        batch_op.drop_column('gc_service_announcement')
| 1,485 | Python | .py | 30 | 45.266667 | 98 | 0.719834 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,790 | 2021-08-26_13-25_s3_bindip.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2021-08-26_13-25_s3_bindip.py | """
Normalize s3 bindip
Revision ID: 26de83f45a9d
Revises: 2e2c8b0e787b
Create Date: 2021-08-26 13:25:29.872200+00:00
"""
from alembic import op
revision = '26de83f45a9d'
down_revision = '2e2c8b0e787b'
branch_labels = None
depends_on = None
def upgrade():
    """Normalize an empty S3 bind address to the IPv4 wildcard ``0.0.0.0``."""
    op.get_bind().execute(
        "UPDATE services_s3 SET s3_bindip = '0.0.0.0' WHERE s3_bindip = ''"
    )
def downgrade():
    """No-op: the original empty bind address is not worth restoring."""
| 401 | Python | .py | 16 | 22.8125 | 85 | 0.737401 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,791 | 2020-10-23_14-18_replication_properties_exclude.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-10-23_14-18_replication_properties_exclude.py | """Replication properties exclude
Revision ID: 90b815426c10
Revises: c09a16ca3399
Create Date: 2020-10-23 14:18:55.023736+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '90b815426c10'
down_revision = 'c09a16ca3399'
branch_labels = None
depends_on = None
def upgrade():
    """Add ``repl_properties_exclude`` (a JSON list) to replication tasks.

    The column is first created NULLable, every existing row is seeded with
    an empty list, and only then is the NOT NULL constraint applied.
    """
    with op.batch_alter_table('storage_replication', schema=None) as batch_op:
        batch_op.add_column(sa.Column('repl_properties_exclude', sa.TEXT(), nullable=True))
    op.execute("UPDATE storage_replication SET repl_properties_exclude = '[]'")
    with op.batch_alter_table('storage_replication', schema=None) as batch_op:
        batch_op.alter_column('repl_properties_exclude', existing_type=sa.TEXT(), nullable=False)
def downgrade():
    """Drop the ``repl_properties_exclude`` column again."""
    with op.batch_alter_table('storage_replication', schema=None) as batch_op:
        batch_op.drop_column('repl_properties_exclude')
| 1,155 | Python | .py | 27 | 38.148148 | 91 | 0.708781 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,792 | 2020-02-10_09-47_ssh_weak_ciphers.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-02-10_09-47_ssh_weak_ciphers.py | """SSH weak ciphers
Revision ID: 06bfbd354deb
Revises: 4abbf75347b2
Create Date: 2020-02-10 09:47:12.017225+00:00
"""
import json
import re
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '06bfbd354deb'
down_revision = '4abbf75347b2'
branch_labels = None
depends_on = None
def upgrade():
    """Migrate weak-cipher configuration out of free-form ``ssh_options``.

    Historically a ``NoneEnabled yes/no`` directive in the auxiliary options
    and a custom ``Ciphers`` line controlled weak-cipher support.  Convert
    both into the new ``ssh_weak_ciphers`` JSON list and strip the
    ``NoneEnabled`` directive from the stored options.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('services_ssh', schema=None) as batch_op:
        batch_op.add_column(sa.Column('ssh_weak_ciphers', sa.TEXT(), nullable=True))
    conn = op.get_bind()
    for row in conn.execute("SELECT * FROM services_ssh").fetchall():
        row = dict(row)
        # Both weak ciphers are enabled by default; explicit configuration
        # found in ssh_options opts back out below.
        ssh_weak_ciphers = ['AES128-CBC', 'NONE']
        # Raw string: '\s' inside a plain literal is an invalid escape
        # sequence (DeprecationWarning since Python 3.6).
        m = re.search(r'NoneEnabled\s+(yes|no)', row['ssh_options'], flags=re.IGNORECASE)
        if m:
            # The directive is now represented by ssh_weak_ciphers, so
            # remove it from the auxiliary options.
            row['ssh_options'] = row['ssh_options'].replace(m.group(0), '')
            if m.group(1).lower() == 'no':
                ssh_weak_ciphers.remove('NONE')
        # A custom Ciphers line means the admin already restricted ciphers.
        if 'Ciphers' in row['ssh_options']:
            ssh_weak_ciphers.remove('AES128-CBC')
        conn.execute("UPDATE services_ssh SET ssh_weak_ciphers = :ssh_weak_ciphers, "
                     "ssh_options = :ssh_options WHERE id = :id",
                     ssh_weak_ciphers=json.dumps(ssh_weak_ciphers),
                     ssh_options=row["ssh_options"],
                     id=row["id"])
    with op.batch_alter_table('services_ssh', schema=None) as batch_op:
        batch_op.alter_column('ssh_weak_ciphers',
                              existing_type=sa.TEXT(),
                              nullable=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``ssh_weak_ciphers`` column introduced by ``upgrade``."""
    with op.batch_alter_table('services_ssh', schema=None) as batch_op:
        batch_op.drop_column('ssh_weak_ciphers')
| 1,887 | Python | .py | 44 | 35.272727 | 88 | 0.624863 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,793 | 2020-06-29_16-10_rsyncmod_enabled.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-06-29_16-10_rsyncmod_enabled.py | """
Add Rsyncmod enabled field
Revision ID: 71a8d1e504a7
Revises: c01e9d77922e
Create Date: 2020-06-19 16:10:59.501147+00:00
"""
from alembic import op
import sqlalchemy as sa
revision = '71a8d1e504a7'
down_revision = 'c01e9d77922e'
branch_labels = None
depends_on = None
def upgrade():
    # Add a per-module enable switch to rsync modules.
    # NOTE(review): ``default=True`` is a client-side (ORM/Core insert)
    # default only; existing rows receive NULL from ADD COLUMN and no
    # server_default is set -- confirm callers treat NULL as enabled.
    with op.batch_alter_table('services_rsyncmod', schema=None) as batch_op:
        batch_op.add_column(sa.Column('rsyncmod_enabled', sa.Boolean(), default=True))
| 456 | Python | .py | 15 | 28.266667 | 86 | 0.768349 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,794 | 2019-10-22_17-52_rename_indexes.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2019-10-22_17-52_rename_indexes.py | """Rename indexes
Revision ID: ed69a9a6fab1
Revises: 74cf6ec20dcd
Create Date: 2019-10-22 17:52:07.184559+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ed69a9a6fab1'
down_revision = '74cf6ec20dcd'
branch_labels = None
depends_on = None
def upgrade():
    """Rename legacy Django-era hash-suffixed indexes to Alembic's
    ``ix_<table>_<column>`` naming convention.

    For each affected table the conventionally named replacement index is
    created first, then the old auto-generated one is dropped.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('account_bsdusers', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_account_bsdusers_bsdusr_group_id'), ['bsdusr_group_id'], unique=False)
        batch_op.drop_index('account_bsdusers_30f2801f')
    with op.batch_alter_table('directoryservice_activedirectory', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_directoryservice_activedirectory_ad_certificate_id'), ['ad_certificate_id'], unique=False)
        batch_op.create_index(batch_op.f('ix_directoryservice_activedirectory_ad_kerberos_realm_id'), ['ad_kerberos_realm_id'], unique=False)
        batch_op.drop_index('directoryservice_activedirectory_a4250fac')
        batch_op.drop_index('directoryservice_activedirectory_b03e01d8')
    with op.batch_alter_table('directoryservice_idmap_ldap', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_directoryservice_idmap_ldap_idmap_ldap_certificate_id'), ['idmap_ldap_certificate_id'], unique=False)
        batch_op.drop_index('directoryservice_idmap_ldap_592ad9d0')
    with op.batch_alter_table('directoryservice_idmap_rfc2307', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_directoryservice_idmap_rfc2307_idmap_rfc2307_certificate_id'), ['idmap_rfc2307_certificate_id'], unique=False)
        batch_op.drop_index('directoryservice_idmap_rfc2307_869bf111')
    with op.batch_alter_table('directoryservice_ldap', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_directoryservice_ldap_ldap_certificate_id'), ['ldap_certificate_id'], unique=False)
        batch_op.create_index(batch_op.f('ix_directoryservice_ldap_ldap_kerberos_realm_id'), ['ldap_kerberos_realm_id'], unique=False)
        batch_op.drop_index('directoryservice_ldap_9a19be3d')
        batch_op.drop_index('directoryservice_ldap_c6ef382f')
    with op.batch_alter_table('network_alias', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_network_alias_alias_interface_id'), ['alias_interface_id'], unique=False)
    # These legacy alias indexes may or may not exist on a given install, so
    # drop each in its own batch operation and ignore failures.
    for index in ['network_alias_5f318ef4', 'network_alias_9ab5564d']:
        try:
            with op.batch_alter_table('network_alias', schema=None) as batch_op:
                batch_op.drop_index(index)
        except Exception:
            pass
    with op.batch_alter_table('network_lagginterfacemembers', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_network_lagginterfacemembers_lagg_interfacegroup_id'), ['lagg_interfacegroup_id'], unique=False)
        batch_op.drop_index('network_lagginterfacemembers_14f52ba0')
    with op.batch_alter_table('services_fibrechanneltotarget', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_services_fibrechanneltotarget_fc_target_id'), ['fc_target_id'], unique=False)
        batch_op.drop_index('services_fiberchanneltotarget_1d6856ca')
    with op.batch_alter_table('services_ftp', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_services_ftp_ftp_ssltls_certificate_id'), ['ftp_ssltls_certificate_id'], unique=False)
        batch_op.drop_index('services_ftp_f897b229')
    with op.batch_alter_table('services_iscsitargetgroups', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_services_iscsitargetgroups_iscsi_target_id'), ['iscsi_target_id'], unique=False)
        batch_op.create_index(batch_op.f('ix_services_iscsitargetgroups_iscsi_target_initiatorgroup_id'), ['iscsi_target_initiatorgroup_id'], unique=False)
        batch_op.create_index(batch_op.f('ix_services_iscsitargetgroups_iscsi_target_portalgroup_id'), ['iscsi_target_portalgroup_id'], unique=False)
        batch_op.drop_index('services_iscsitargetgroups_39e2d7df')
        batch_op.drop_index('services_iscsitargetgroups_c939c4d7')
        batch_op.drop_index('services_iscsitargetgroups_dcc120ea')
    with op.batch_alter_table('services_iscsitargetportalip', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_services_iscsitargetportalip_iscsi_target_portalip_portal_id'), ['iscsi_target_portalip_portal_id'], unique=False)
    # Same best-effort cleanup for the portal-IP table's legacy indexes.
    for index in ['services_iscsitargetportalip_fe35c684', 'services_iscsitargetportalip_914529ba']:
        try:
            with op.batch_alter_table('services_iscsitargetportalip', schema=None) as batch_op:
                batch_op.drop_index(index)
        except Exception:
            pass
    with op.batch_alter_table('services_iscsitargettoextent', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_services_iscsitargettoextent_iscsi_extent_id'), ['iscsi_extent_id'], unique=False)
        batch_op.create_index(batch_op.f('ix_services_iscsitargettoextent_iscsi_target_id'), ['iscsi_target_id'], unique=False)
        batch_op.drop_index('services_iscsitargettoextent_74972900')
        batch_op.drop_index('services_iscsitargettoextent_8c3551d7')
    with op.batch_alter_table('services_s3', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_services_s3_s3_certificate_id'), ['s3_certificate_id'], unique=False)
        batch_op.drop_index('services_s3_3f8aa88e')
    with op.batch_alter_table('sharing_cifs_share', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_sharing_cifs_share_cifs_storage_task_id'), ['cifs_storage_task_id'], unique=False)
        batch_op.drop_index('sharing_cifs_share_d7a6a3ae')
    with op.batch_alter_table('storage_replication', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_storage_replication_repl_ssh_credentials_id'), ['repl_ssh_credentials_id'], unique=False)
        batch_op.drop_index('storage_replication_d46a5b35')
    with op.batch_alter_table('system_acmeregistrationbody', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_system_acmeregistrationbody_acme_id'), ['acme_id'], unique=False)
        batch_op.drop_index('system_acmeregistrationbody_1ece6752')
    with op.batch_alter_table('system_certificate', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_system_certificate_cert_acme_id'), ['cert_acme_id'], unique=False)
        batch_op.create_index(batch_op.f('ix_system_certificate_cert_signedby_id'), ['cert_signedby_id'], unique=False)
        batch_op.drop_index('system_certificate_8dc6a655')
        batch_op.drop_index('system_certificate_c172260b')
    with op.batch_alter_table('system_certificateauthority', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_system_certificateauthority_cert_signedby_id'), ['cert_signedby_id'], unique=False)
        batch_op.drop_index('system_certificateauthority_c172260b')
    with op.batch_alter_table('system_settings', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_system_settings_stg_guicertificate_id'), ['stg_guicertificate_id'], unique=False)
        batch_op.drop_index('system_settings_cf5c60c6')
    with op.batch_alter_table('tasks_cloudsync', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_tasks_cloudsync_credential_id'), ['credential_id'], unique=False)
        batch_op.drop_index('tasks_cloudsync_3472cfe9')
    with op.batch_alter_table('vm_device', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_vm_device_vm_id'), ['vm_id'], unique=False)
        batch_op.drop_index('vm_device_0e0cecb8')
    # ### end Alembic commands ###
def downgrade():
    """Restore the legacy hash-suffixed index names.

    Mirrors ``upgrade`` in reverse: for each table the old auto-generated
    index is re-created first, then the conventionally named one is dropped.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('vm_device', schema=None) as batch_op:
        batch_op.create_index('vm_device_0e0cecb8', ['vm_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_vm_device_vm_id'))
    with op.batch_alter_table('tasks_cloudsync', schema=None) as batch_op:
        batch_op.create_index('tasks_cloudsync_3472cfe9', ['credential_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_tasks_cloudsync_credential_id'))
    with op.batch_alter_table('system_settings', schema=None) as batch_op:
        batch_op.create_index('system_settings_cf5c60c6', ['stg_guicertificate_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_system_settings_stg_guicertificate_id'))
    with op.batch_alter_table('system_certificateauthority', schema=None) as batch_op:
        batch_op.create_index('system_certificateauthority_c172260b', ['cert_signedby_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_system_certificateauthority_cert_signedby_id'))
    with op.batch_alter_table('system_certificate', schema=None) as batch_op:
        batch_op.create_index('system_certificate_c172260b', ['cert_signedby_id'], unique=False)
        batch_op.create_index('system_certificate_8dc6a655', ['cert_acme_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_system_certificate_cert_signedby_id'))
        batch_op.drop_index(batch_op.f('ix_system_certificate_cert_acme_id'))
    with op.batch_alter_table('system_acmeregistrationbody', schema=None) as batch_op:
        batch_op.create_index('system_acmeregistrationbody_1ece6752', ['acme_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_system_acmeregistrationbody_acme_id'))
    with op.batch_alter_table('storage_replication', schema=None) as batch_op:
        batch_op.create_index('storage_replication_d46a5b35', ['repl_ssh_credentials_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_storage_replication_repl_ssh_credentials_id'))
    with op.batch_alter_table('sharing_cifs_share', schema=None) as batch_op:
        batch_op.create_index('sharing_cifs_share_d7a6a3ae', ['cifs_storage_task_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_sharing_cifs_share_cifs_storage_task_id'))
    with op.batch_alter_table('services_s3', schema=None) as batch_op:
        batch_op.create_index('services_s3_3f8aa88e', ['s3_certificate_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_services_s3_s3_certificate_id'))
    with op.batch_alter_table('services_iscsitargettoextent', schema=None) as batch_op:
        batch_op.create_index('services_iscsitargettoextent_8c3551d7', ['iscsi_extent_id'], unique=False)
        batch_op.create_index('services_iscsitargettoextent_74972900', ['iscsi_target_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_services_iscsitargettoextent_iscsi_target_id'))
        batch_op.drop_index(batch_op.f('ix_services_iscsitargettoextent_iscsi_extent_id'))
    # NOTE(review): upgrade() dropped two legacy portal-IP indexes
    # best-effort; only one is restored here (matches the original code).
    with op.batch_alter_table('services_iscsitargetportalip', schema=None) as batch_op:
        batch_op.create_index('services_iscsitargetportalip_fe35c684', ['iscsi_target_portalip_portal_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_services_iscsitargetportalip_iscsi_target_portalip_portal_id'))
    with op.batch_alter_table('services_iscsitargetgroups', schema=None) as batch_op:
        batch_op.create_index('services_iscsitargetgroups_dcc120ea', ['iscsi_target_portalgroup_id'], unique=False)
        batch_op.create_index('services_iscsitargetgroups_c939c4d7', ['iscsi_target_id'], unique=False)
        batch_op.create_index('services_iscsitargetgroups_39e2d7df', ['iscsi_target_initiatorgroup_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_services_iscsitargetgroups_iscsi_target_portalgroup_id'))
        batch_op.drop_index(batch_op.f('ix_services_iscsitargetgroups_iscsi_target_initiatorgroup_id'))
        batch_op.drop_index(batch_op.f('ix_services_iscsitargetgroups_iscsi_target_id'))
    with op.batch_alter_table('services_ftp', schema=None) as batch_op:
        batch_op.create_index('services_ftp_f897b229', ['ftp_ssltls_certificate_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_services_ftp_ftp_ssltls_certificate_id'))
    with op.batch_alter_table('services_fibrechanneltotarget', schema=None) as batch_op:
        batch_op.create_index('services_fiberchanneltotarget_1d6856ca', ['fc_target_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_services_fibrechanneltotarget_fc_target_id'))
    with op.batch_alter_table('network_lagginterfacemembers', schema=None) as batch_op:
        batch_op.create_index('network_lagginterfacemembers_14f52ba0', ['lagg_interfacegroup_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_network_lagginterfacemembers_lagg_interfacegroup_id'))
    with op.batch_alter_table('network_alias', schema=None) as batch_op:
        batch_op.create_index('network_alias_5f318ef4', ['alias_interface_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_network_alias_alias_interface_id'))
    with op.batch_alter_table('directoryservice_ldap', schema=None) as batch_op:
        batch_op.create_index('directoryservice_ldap_c6ef382f', ['ldap_certificate_id'], unique=False)
        batch_op.create_index('directoryservice_ldap_9a19be3d', ['ldap_kerberos_realm_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_directoryservice_ldap_ldap_kerberos_realm_id'))
        batch_op.drop_index(batch_op.f('ix_directoryservice_ldap_ldap_certificate_id'))
    with op.batch_alter_table('directoryservice_idmap_rfc2307', schema=None) as batch_op:
        batch_op.create_index('directoryservice_idmap_rfc2307_869bf111', ['idmap_rfc2307_certificate_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_directoryservice_idmap_rfc2307_idmap_rfc2307_certificate_id'))
    with op.batch_alter_table('directoryservice_idmap_ldap', schema=None) as batch_op:
        batch_op.create_index('directoryservice_idmap_ldap_592ad9d0', ['idmap_ldap_certificate_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_directoryservice_idmap_ldap_idmap_ldap_certificate_id'))
    with op.batch_alter_table('directoryservice_activedirectory', schema=None) as batch_op:
        batch_op.create_index('directoryservice_activedirectory_b03e01d8', ['ad_kerberos_realm_id'], unique=False)
        batch_op.create_index('directoryservice_activedirectory_a4250fac', ['ad_certificate_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_directoryservice_activedirectory_ad_kerberos_realm_id'))
        batch_op.drop_index(batch_op.f('ix_directoryservice_activedirectory_ad_certificate_id'))
    with op.batch_alter_table('account_bsdusers', schema=None) as batch_op:
        batch_op.create_index('account_bsdusers_30f2801f', ['bsdusr_group_id'], unique=False)
        batch_op.drop_index(batch_op.f('ix_account_bsdusers_bsdusr_group_id'))
    # ### end Alembic commands ###
| 14,845 | Python | .py | 178 | 75.640449 | 159 | 0.725917 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,795 | 2020-03-24_11-29_api_key.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-03-24_11-29_api_key.py | """API Key
Revision ID: 6602857581db
Revises: a06b1946f6a3
Create Date: 2020-03-24 11:29:26.865110+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6602857581db'
down_revision = 'a06b1946f6a3'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``account_api_key`` table backing middleware API keys."""
    op.create_table(
        'account_api_key',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=200), nullable=False),
        sa.Column('key', sa.Text(), nullable=False),
        sa.PrimaryKeyConstraint('id', name=op.f('pk_account_api_key')),
    )
def downgrade():
    """Remove the ``account_api_key`` table."""
    op.drop_table('account_api_key')
| 846 | Python | .py | 25 | 30.76 | 66 | 0.697417 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,796 | 2020-05-13_12-08_encrypt-smbhash.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-05-13_12-08_encrypt-smbhash.py | """encrypt smbhash
Revision ID: 434ea5397cd3
Revises: 22230265ab30
Create Date: 2020-05-13 12:08:48.976300+00:00
"""
from alembic import op
import sqlalchemy as sa
from middlewared.plugins.pwenc import encrypt
# revision identifiers, used by Alembic.
revision = '434ea5397cd3'
down_revision = '22230265ab30'
branch_labels = None
depends_on = None
def upgrade():
    """Encrypt stored SMB password hashes with the middleware pwenc key.

    Only accounts that actually have a password (``bsdusr_unixhash != '*'``)
    are touched; locked/passwordless accounts keep their placeholder value.
    """
    table = "account_bsdusers"
    conn = op.get_bind()
    for row in conn.execute(f"SELECT id, bsdusr_smbhash FROM {table} WHERE bsdusr_unixhash != '*'").fetchall():
        encrypted_hash = encrypt(row["bsdusr_smbhash"])
        # Bind both values as parameters instead of interpolating the id
        # into the SQL text, for consistency and safety.
        conn.execute(f"UPDATE {table} SET bsdusr_smbhash = ? WHERE id = ?",
                     encrypted_hash, row["id"])
def downgrade():
    """No-op: the encrypted hashes are left in place."""
    pass
| 824 | Python | .py | 23 | 32.695652 | 111 | 0.718434 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,797 | 2020-05-08_15-18_birthday.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-05-08_15-18_birthday.py | """birthday
Revision ID: a3ac49efb063
Revises: e9e611d7c5a7
Create Date: 2020-05-08 15:18:35.333997+00:00
"""
import os
from alembic import op
import sqlalchemy as sa
from datetime import datetime
# revision identifiers, used by Alembic.
revision = 'a3ac49efb063'
down_revision = 'e9e611d7c5a7'
branch_labels = None
depends_on = None
def upgrade():
    """Record a system "birthday" timestamp on the settings row.

    Fresh installations (``FREENAS_INSTALL=yes``) leave the column NULL;
    upgrading systems are seeded with the Unix epoch.
    """
    with op.batch_alter_table('system_settings', schema=None) as batch_op:
        batch_op.add_column(sa.Column('stg_birthday', sa.DateTime(), nullable=True))
    if os.environ.get("FREENAS_INSTALL") != "yes":
        epoch = datetime(1970, 1, 1, 0, 0, 0)
        op.execute("UPDATE system_settings SET stg_birthday = '%s'" % epoch)
def downgrade():
    """Drop the ``stg_birthday`` column again."""
    with op.batch_alter_table('system_settings', schema=None) as batch_op:
        batch_op.drop_column('stg_birthday')
| 1,018 | Python | .py | 26 | 35.769231 | 108 | 0.702648 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,798 | 2021-12-22_12-59_add_server_tls_uri_to_s3_config.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2021-12-22_12-59_add_server_tls_uri_to_s3_config.py | """Add tls_server_uri to s3 config
Revision ID: 9c11f6c6f152
Revises: fee786dfe121
Create Date: 2021-12-22 12:59:17.737066+00:00
"""
import re
from alembic import op
import sqlalchemy as sa
from OpenSSL import crypto
# revision identifiers, used by Alembic.
revision = '9c11f6c6f152'
down_revision = 'fee786dfe121'
branch_labels = None
depends_on = None
# Pattern is taken from middlewared.validators.Hostname
hostname_re = re.compile(r'^[a-z\.\-0-9]*[a-z0-9]$', flags=re.IGNORECASE)
def is_valid_hostname(hostname: str):
    """Return a truthy match object when *hostname* is a plain hostname --
    letters, digits, dots and dashes, ending in a letter or digit, with no
    wildcard characters -- and ``None`` otherwise.
    """
    return hostname_re.match(hostname)
def upgrade():
    """Add ``s3_tls_server_uri`` and derive its initial value from the
    configured S3 certificate: the common name is used first, then the
    first valid subjectAltName overrides it; 'localhost' is the fallback.
    """
    with op.batch_alter_table('services_s3', schema=None) as batch_op:
        batch_op.add_column(sa.Column('s3_tls_server_uri', sa.String(length=128), nullable=True))
    # Try to get tls_server_uri in following order:
    # 1. SAN from certificate
    # 2. Common name from certificate
    # 3. Fallback to localhost
    conn = op.get_bind()
    # Only seed the column when a certificate is actually assigned.
    if s3_conf := conn.execute("SELECT s3_certificate_id FROM services_s3 WHERE s3_certificate_id IS NOT NULL").fetchone():
        if cert_data := conn.execute("SELECT cert_certificate FROM system_certificate WHERE id = :cert_id", cert_id=s3_conf[0]).fetchone():
            s3_tls_server_uri = 'localhost'
            # Any parsing failure falls through to the 'localhost' default.
            try:
                cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_data[0])
                cert_cn = cert.get_subject().CN
                if cert_cn and is_valid_hostname(cert_cn):
                    s3_tls_server_uri = cert_cn
                cert_sans = []
                # Iterate the certificate's extensions, skipping undefined
                # ones.  NOTE(review): load_certificate returns X509, so the
                # get_extensions() branch looks unreachable here -- confirm.
                for ext in filter(lambda e: e.get_short_name().decode() != 'UNDEF', (
                    map(lambda i: cert.get_extension(i), range(cert.get_extension_count()))
                    if isinstance(cert, crypto.X509)
                    else cert.get_extensions()
                )):
                    if 'subjectAltName' == ext.get_short_name().decode():
                        cert_sans = [s.strip() for s in ext.__str__().split(',') if s]
                # Entries look like 'DNS:name'; the first valid hostname wins
                # (overriding the CN chosen above).
                for cert_san in cert_sans:
                    san = cert_san.split(':')[-1].strip()
                    if san and is_valid_hostname(san):
                        s3_tls_server_uri = san
                        break
            except Exception:
                pass
            conn.execute(
                "UPDATE services_s3 SET s3_tls_server_uri = :s3_tls_server_uri",
                s3_tls_server_uri=s3_tls_server_uri
            )
def downgrade():
    """Drop the ``s3_tls_server_uri`` column again."""
    with op.batch_alter_table('services_s3', schema=None) as batch_op:
        batch_op.drop_column('s3_tls_server_uri')
| 2,674 | Python | .py | 60 | 34.866667 | 139 | 0.603846 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,799 | 2020-06-24_17-14_cleanup-ad-parameters.py | truenas_middleware/src/middlewared/middlewared/alembic/versions/12.0/2020-06-24_17-14_cleanup-ad-parameters.py | """cleanup AD parameters
Revision ID: 25962b409a1e
Revises: 71a8d1e504a7
Create Date: 2020-06-24 17:14:26.706480+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '25962b409a1e'
down_revision = '71a8d1e504a7'
branch_labels = None
depends_on = None
def upgrade():
    """Remove deprecated Active Directory LDAPS/certificate settings."""
    with op.batch_alter_table('directoryservice_activedirectory', schema=None) as batch_op:
        batch_op.drop_index('ix_directoryservice_activedirectory_ad_certificate_id')
        for column in (
            'ad_ldap_sasl_wrapping',
            'ad_certificate_id',
            'ad_ssl',
            'ad_validate_certificates',
        ):
            batch_op.drop_column(column)
| 807 | Python | .py | 21 | 34.619048 | 91 | 0.732991 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |