id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
24,100 | hostname.py | truenas_middleware/src/middlewared/middlewared/etc_files/hostname.py | from socket import sethostname
from middlewared.service import CallError
def render(service, middleware):
    """Write the locally-configured hostname to /etc/hostname and apply it to the running kernel."""
    net_config = middleware.call_sync("network.configuration.config")
    hostname = net_config['hostname_local']
    with open("/etc/hostname", "w") as f:
        f.write(hostname)
    # Persisting the file alone is not enough; push the name into the kernel too.
    try:
        sethostname(hostname)
    except Exception as e:
        raise CallError(f'Failed to set hostname: {e}')
| 424 | Python | .py | 11 | 33.090909 | 85 | 0.715686 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,101 | pam_tdb.py | truenas_middleware/src/middlewared/middlewared/etc_files/pam_tdb.py | from middlewared.utils.user_api_key import (
UserApiKey,
PamTdbEntry,
flush_user_api_keys
)
def convert_keys(username, keys) -> PamTdbEntry:
    """Convert API-key query results for `username` into a PamTdbEntry.

    Revoked keys are always skipped, keys without an expiration date get an
    expiry of 0, and all others carry their expiration as a unix timestamp.
    """
    user_api_keys = []
    for key in keys:
        if key['revoked']:
            # Backstop. We filter these out when we etc.generate, but we don't
            # want to have an avenue to accidentally insert revoked keys.
            # This check must come first: previously a revoked key whose
            # expires_at was None was still inserted (with expiry 0).
            continue
        elif key['expires_at'] is None:
            expiry = 0
        else:
            expiry = int(key['expires_at'].timestamp())

        user_api_keys.append(UserApiKey(
            expiry=expiry,
            dbid=key['id'],
            userhash=key['keyhash']
        ))

    return PamTdbEntry(
        username=username,
        keys=user_api_keys
    )
def render(service, middleware, render_ctx):
    """Group queried API keys by username and flush them to the pam tdb file."""
    keys_by_user = {}
    for key in render_ctx['api_key.query']:
        keys_by_user.setdefault(key['username'], []).append(key)

    pdb_entries = [convert_keys(user, keys) for user, keys in keys_by_user.items()]
    flush_user_api_keys(pdb_entries)
| 1,220 | Python | .py | 38 | 24.026316 | 78 | 0.59335 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,102 | udev.py | truenas_middleware/src/middlewared/middlewared/etc_files/udev.py | import pathlib
def render(service, middleware):
    """Regenerate /etc/udev/rules.d from the enabled UDEV tunables."""
    rules_dir = pathlib.Path("/etc/udev/rules.d")
    # Start from a clean slate so rules for deleted tunables do not linger.
    for entry in rules_dir.iterdir():
        if entry.is_file():
            entry.unlink()

    filters = [["type", "=", "UDEV"], ["enabled", "=", True]]
    for tunable in middleware.call_sync("tunable.query", filters):
        (rules_dir / f"{tunable['var']}.rules").write_text(tunable["value"] + "\n")
| 359 | Python | .py | 8 | 38.375 | 106 | 0.585014 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,103 | krb5.keytab.py | truenas_middleware/src/middlewared/middlewared/etc_files/krb5.keytab.py | import logging
import os
import base64
import subprocess
import stat
from contextlib import suppress
logger = logging.getLogger(__name__)
kdir = "/etc/kerberos"
keytabfile = "/etc/krb5.keytab"
unified_keytab = os.path.join(kdir, 'tmp_keytab')
def mit_copy(temp_keytab):
    """Merge `temp_keytab` into the unified keytab via MIT ktutil.

    Failures are logged rather than raised so one bad keytab does not stop
    the remaining keytabs from being merged.
    """
    kt_copy = subprocess.run(
        ['ktutil'],
        input=f'rkt {temp_keytab}\nwkt {unified_keytab}'.encode(),
        capture_output=True
    )
    if kt_copy.stderr:
        # Fixed typo in log message ("uinified" -> "unified").
        logger.error("%s: failed to add to unified keytab: %s",
                     temp_keytab, kt_copy.stderr.decode())
def write_keytab(db_keytabname, db_keytabfile):
    """Write raw keytab bytes into the kerberos dir and merge them into the unified keytab.

    The temporary keytab is created 0o600 inside the 0o700 kerberos directory
    and removed again once merged.
    """
    dirfd = None

    def opener(path, flags):
        # Open relative to the kerberos directory fd with a restrictive mode.
        return os.open(path, flags, mode=0o600, dir_fd=dirfd)

    with suppress(FileExistsError):
        os.mkdir(kdir, mode=0o700)

    try:
        dirfd = os.open(kdir, os.O_DIRECTORY)
        st = os.fstat(dirfd)
        if stat.S_IMODE(st.st_mode) != 0o700:
            os.fchmod(dirfd, 0o700)

        with open(db_keytabname, "wb", opener=opener) as f:
            f.write(db_keytabfile)
            kt_name = os.readlink(f'/proc/self/fd/{f.fileno()}')
            mit_copy(kt_name)

        os.remove(db_keytabname, dir_fd=dirfd)
    finally:
        # dirfd stays None when os.open() itself fails; calling os.close(None)
        # would raise a TypeError that masks the original exception.
        if dirfd is not None:
            os.close(dirfd)
def render(service, middleware, render_ctx):
    """Materialize all database keytabs into one unified keytab and return its bytes."""
    keytabs = middleware.call_sync('kerberos.keytab.query')
    if not keytabs:
        logger.trace('No keytabs in configuration database, skipping keytab generation')
        return

    for entry in keytabs:
        raw_keytab = base64.b64decode(entry['file'].encode())
        write_keytab(f'keytab_{entry["id"]}', raw_keytab)

    with open(unified_keytab, 'rb') as f:
        keytab_bytes = f.read()

    # The unified keytab is a scratch file; remove it once read.
    os.unlink(unified_keytab)
    return keytab_bytes
| 1,798 | Python | .py | 50 | 29.32 | 88 | 0.652802 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,104 | libvirt.py | truenas_middleware/src/middlewared/middlewared/etc_files/libvirt.py | import os
from middlewared.utils.io import write_if_changed
LIBVIRTD_CONF_PATH = '/etc/libvirt/libvirtd.conf'
def render(service, middleware):
    """Ensure the libvirt runtime directory exists and write libvirtd's socket configuration."""
    os.makedirs('/run/truenas_libvirt', exist_ok=True)
    # Only rewrites the file when contents differ, avoiding spurious mtime changes.
    write_if_changed(LIBVIRTD_CONF_PATH, 'unix_sock_dir = "/run/truenas_libvirt"')
| 286 | Python | .py | 6 | 44.5 | 82 | 0.763636 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,105 | grub.py | truenas_middleware/src/middlewared/middlewared/etc_files/grub.py | import subprocess
from middlewared.utils import run
async def render(service, middleware):
    """Regenerate the TrueNAS grub configuration, then rebuild the grub menu.

    Logs stderr and re-raises CalledProcessError if either command fails.
    """
    try:
        await run(["truenas-grub.py"], encoding="utf-8", errors="ignore")
    except subprocess.CalledProcessError as e:
        middleware.logger.error("truenas-grub.py error:\n%s", e.stderr)
        raise
    try:
        await run(["update-grub"], encoding="utf-8", errors="ignore")
    except subprocess.CalledProcessError as e:
        # Log message previously said "update-grub.py" although the command
        # actually run is "update-grub".
        middleware.logger.error("update-grub error:\n%s", e.stderr)
        raise
| 522 | Python | .py | 13 | 33.923077 | 73 | 0.689109 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,106 | localtime_config.py | truenas_middleware/src/middlewared/middlewared/etc_files/localtime_config.py | import contextlib
import os
import subprocess
def localtime_configuration(middleware):
    """Point /etc/localtime at the configured timezone, then reload systemd."""
    system_config = middleware.call_sync('system.general.config')
    # Fall back to a default zone when none has been configured.
    timezone = system_config['timezone'] or 'America/Los_Angeles'

    with contextlib.suppress(OSError):
        os.unlink('/etc/localtime')
    os.symlink(os.path.join('/usr/share/zoneinfo', timezone), '/etc/localtime')

    proc = subprocess.run(
        ['systemctl', 'daemon-reload'], stdout=subprocess.DEVNULL, stderr=subprocess.PIPE
    )
    if proc.returncode:
        middleware.logger.error(
            'Failed to reload systemctl daemon after timezone configuration: %s', proc.stderr.decode()
        )
def render(service, middleware):
    # etc.generate entry point: delegate to the timezone configuration helper.
    localtime_configuration(middleware)
| 818 | Python | .py | 20 | 35.25 | 97 | 0.711223 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,107 | fstab_configure.py | truenas_middleware/src/middlewared/middlewared/etc_files/fstab_configure.py | import subprocess
def fstab_configuration(middleware):
    """Reload systemd units and remount local filesystems after /etc/fstab changes.

    Command failures are logged (debug level) but never raised.
    """
    for command in [
        ['systemctl', 'daemon-reload'],
        ['systemctl', 'restart', 'local-fs.target'],
    ]:
        ret = subprocess.run(command, capture_output=True)
        if ret.returncode:
            # Lazy %-formatting: the message is only built when the record
            # is actually emitted.
            middleware.logger.debug(
                'Failed to execute "%s": %s', ' '.join(command), ret.stderr.decode()
            )
def render(service, middleware):
    # etc.generate entry point: delegate to the fstab reload helper.
    fstab_configuration(middleware)
| 438 | Python | .py | 11 | 33.363636 | 102 | 0.664303 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,108 | truenas_nvdimm.py | truenas_middleware/src/middlewared/middlewared/etc_files/truenas_nvdimm.py | from subprocess import run
def render(service, middleware):
    """Apply nvdimm configuration via the truenas-nvdimm.py helper script.

    Failures are logged and then propagated to the caller.
    """
    try:
        run(['truenas-nvdimm.py'], check=True)
    except Exception as exc:
        middleware.logger.error("truenas-nvdimm.py error:\n%s", exc)
        raise
| 226 | Python | .py | 7 | 26.428571 | 66 | 0.672811 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,109 | fips.py | truenas_middleware/src/middlewared/middlewared/etc_files/fips.py | import subprocess
def render(service, middleware):
    """Run the FIPS configuration helper, logging stderr and re-raising on failure."""
    try:
        subprocess.run(['configure_fips'], capture_output=True, check=True)
    except subprocess.CalledProcessError as exc:
        middleware.logger.error('configure_fips error:\n%s', exc.stderr)
        raise
| 270 | Python | .py | 7 | 32.714286 | 75 | 0.716475 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,110 | generate_ssl_certs.py | truenas_middleware/src/middlewared/middlewared/etc_files/generate_ssl_certs.py | import itertools
import os
import shutil
import subprocess
from middlewared.main import Middleware
from middlewared.service import CallError, Service
def write_certificates(certs: list) -> set:
    """Write certificate, private key and CSR files for every cert in `certs`.

    Also rebuilds the system trusted CA store from certs flagged with
    `add_to_trusted_store`. Returns the set of file paths written so the
    caller can prune anything else found on disk.
    """
    expected_files = set()
    for cert in certs:
        if cert['chain_list']:
            expected_files.add(cert['certificate_path'])
            with open(cert['certificate_path'], 'w') as f:
                f.write('\n'.join(cert['chain_list']))
        if cert['privatekey']:
            expected_files.add(cert['privatekey_path'])
            with open(cert['privatekey_path'], 'w') as f:
                # Keys must not be world-readable; chmod the fd before writing.
                os.fchmod(f.fileno(), 0o400)
                f.write(cert['privatekey'])
        # 0x20 appears to be the CSR flag in the cert 'type' bitmask --
        # TODO confirm against the certificate plugin's type constants.
        if cert['type'] & 0x20 and cert['CSR']:
            expected_files.add(cert['csr_path'])
            with open(cert['csr_path'], 'w') as f:
                f.write(cert['CSR'])

    # trusted_cas_path is a ZFS dataset mountpoint and so it does
    # not need to be recreated after the rmtree. This call is simply
    # to forcibly remove all locally-added CAs.
    trusted_cas_path = '/var/local/ca-certificates'
    shutil.rmtree(trusted_cas_path, ignore_errors=True)
    for cert in filter(lambda c: c['chain_list'] and c['add_to_trusted_store'], certs):
        cert_type = 'ca' if cert['cert_type'] == 'CA' else 'cert'
        with open(os.path.join(trusted_cas_path, f'{cert_type}_{cert["name"]}.crt'), 'w') as f:
            f.write('\n'.join(cert['chain_list']))

    cp = subprocess.Popen('update-ca-certificates', stdout=subprocess.DEVNULL, stderr=subprocess.PIPE)
    err = cp.communicate()[1]
    if cp.returncode:
        raise CallError(f'Failed to update system\'s trusted certificate store: {err.decode()}')

    return expected_files
def write_crls(cas: list, middleware: Middleware) -> set:
    """Generate and write a CRL for each CA that has revoked certificates in its chain.

    Returns the set of CRL file paths written.
    """
    expected_files = set()
    for ca in cas:
        chain = middleware.call_sync('certificateauthority.get_ca_chain', ca['id'])
        revoked = [cert for cert in chain if cert['revoked_date']]
        crl = middleware.call_sync('cryptokey.generate_crl', ca, revoked)
        if crl:
            expected_files.add(ca['crl_path'])
            with open(ca['crl_path'], 'w') as f:
                f.write(crl)
    return expected_files
def render(service: Service, middleware: Middleware) -> None:
    """Write all certificates, CAs and CRLs under /etc/certificates and prune stale files."""
    os.makedirs('/etc/certificates', 0o755, exist_ok=True)
    os.makedirs('/etc/certificates/CA', 0o755, exist_ok=True)

    certs = middleware.call_sync('certificate.query')
    cas = middleware.call_sync('certificateauthority.query')

    expected_files = {'/etc/certificates/CA'}
    expected_files |= write_certificates(certs + cas)
    expected_files |= write_crls(cas, middleware)

    # We would like to remove certificates which have been deleted
    found_files = {
        base + '/' + name
        for base in ('/etc/certificates', '/etc/certificates/CA')
        for name in os.listdir(base)
    }
    for stale in found_files - expected_files:
        if os.path.isdir(stale):
            shutil.rmtree(stale)
        else:
            os.unlink(stale)
| 3,275 | Python | .py | 73 | 35.808219 | 102 | 0.614878 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,111 | smartd.py | truenas_middleware/src/middlewared/middlewared/etc_files/smartd.py | import logging
import re
import shlex
import subprocess
import json
from middlewared.common.smart.smartctl import get_smartctl_args, smartctl, SMARTCTX
from middlewared.plugins.smart_.schedule import SMARTD_SCHEDULE_PIECES, smartd_schedule_piece
from middlewared.schema import Cron
from middlewared.utils.asyncio_ import asyncio_map
logger = logging.getLogger(__name__)
async def annotate_disk_for_smart(context, disk, smartoptions):
    """Return (disk, {'smartctl_args': [...]}) when SMART is usable on `disk`, else None."""
    args = await get_smartctl_args(context, disk, smartoptions)
    if not args:
        return None
    # On enterprise hardware SMART is assumed usable; otherwise try to enable it.
    if context.enterprise_hardware or await ensure_smart_enabled(args):
        args.extend(["-a"])
        args.extend(["-d", "removable"])
        return disk, dict(smartctl_args=args)
async def ensure_smart_enabled(args):
    """Ensure SMART is enabled on the device described by `args`; return success as bool."""
    # NVMe devices are treated as always SMART-capable; no probe needed.
    if any(arg.startswith("/dev/nvme") for arg in args):
        return True

    p = await smartctl(args + ["-i", "--json=c"], check=False, stderr=subprocess.STDOUT, encoding="utf8", errors="ignore")
    support = json.loads(p.stdout)["smart_support"]
    if not support["available"]:
        logger.debug("SMART is not supported on %r", args)
        return False
    if support["enabled"]:
        return True

    # SMART available but disabled: try to switch it on.
    p = await smartctl(args + ["-s", "on"], check=False, stderr=subprocess.STDOUT)
    if p.returncode != 0:
        logger.debug("Unable to enable smart on %r", args)
        return False
    return True
def get_smartd_config(disk):
    """Build the smartd.conf directive line for one disk dictionary."""
    # Per-disk thresholds override the global SMART service settings.
    critical = disk['disk_critical'] if disk['disk_critical'] is not None else disk['smart_critical']
    difference = disk['disk_difference'] if disk['disk_difference'] is not None else disk['smart_difference']
    informational = disk['disk_informational'] if disk['disk_informational'] is not None else disk['smart_informational']

    config = (
        f"{shlex.join(disk['smartctl_args'])} -n {disk['smart_powermode']} -W {difference},"
        f"{informational},{critical}"
        " -m root -M exec /usr/local/libexec/smart_alert.py"
    )

    if disk.get('smarttest_type'):
        config += f"\\\n-s {disk['smarttest_type']}/" + get_smartd_schedule(disk) + "\\\n"

    return config
def get_smartd_schedule(disk):
    """Render the smartd "-s" schedule string: the schedule pieces joined by '/'."""
    pieces = []
    for piece in SMARTD_SCHEDULE_PIECES:
        value = disk["smarttest_schedule"][piece.key]
        pieces.append(smartd_schedule_piece(value, piece.min, piece.max, piece.enum, piece.map))
    return "/".join(pieces)
def write_config(config):
    """Persist the generated smartd configuration to /etc/smartd.conf."""
    with open("/etc/smartd.conf", "w") as f:
        f.write(config)
async def render(service, middleware):
    """Generate /etc/smartd.conf from the SMART service config and per-disk settings."""
    smart_config = await middleware.call("datastore.query", "services.smart", [], {"get": True})

    # Join disks with their S.M.A.R.T. test tasks; a task may target specific
    # disks or every disk via smarttest_all_disks.
    disks = await middleware.call("datastore.sql", """
        SELECT *
        FROM storage_disk d
        LEFT JOIN tasks_smarttest_smarttest_disks sd ON sd.disk_id = d.disk_identifier
        LEFT JOIN tasks_smarttest s ON s.id = sd.smarttest_id OR s.smarttest_all_disks = true
        WHERE disk_togglesmart = 1 AND disk_expiretime IS NULL AND disk_name NOT LIKE 'pmem%'
    """)

    if await middleware.call("failover.licensed") and (await middleware.call("failover.status") != "MASTER"):
        # If failover is licensed and we are not a `MASTER` node, only monitor boot pool disks to avoid
        # reservation conflicts
        boot_pool_disks = set(await middleware.call("boot.get_disks"))
        disks = [disk for disk in disks if disk["disk_name"] in boot_pool_disks]

    # Merge global service settings into each disk row (disk keys win on clash).
    disks = [dict(disk, **smart_config) for disk in disks]

    for disk in disks:
        Cron.convert_db_format_to_schedule(disk, "smarttest_schedule", "smarttest_")

    devices = await middleware.call("device.get_disks")
    hardware = await middleware.call("truenas.is_ix_hardware")
    context = SMARTCTX(devices=devices, enterprise_hardware=hardware, middleware=middleware)

    # Probe disks concurrently (16 at a time). annotate_disk_for_smart yields
    # None for disks smartctl cannot handle; filter(None, ...) drops those.
    annotated = dict(filter(None, await asyncio_map(
        lambda disk: annotate_disk_for_smart(context, disk["disk_name"], disk["disk_smartoptions"]),
        [disk for disk in disks if disk["disk_name"] is not None],
        16
    )))
    disks = [dict(disk, **annotated[disk["disk_name"]]) for disk in disks if disk["disk_name"] in annotated]

    config = ""
    for disk in disks:
        config += get_smartd_config(disk) + "\n"

    await middleware.run_in_thread(write_config, config)
| 4,238 | Python | .py | 81 | 46.08642 | 122 | 0.684262 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,112 | systemd.py | truenas_middleware/src/middlewared/middlewared/etc_files/systemd.py | import json
import re
import os
from middlewared.service import CallError
from middlewared.utils import run
RE_IS_NOT_A_NATIVE_SERVICE = re.compile(r"(.+)\.service is not a native service, redirecting to systemd-sysv-install\.")
async def render(service, middleware):
    """Sync systemd unit enable/disable state with the services database table.

    Also records the resulting per-unit desired state in /data/user-services.json.
    """
    services = []
    services_enabled = {}
    # NOTE(review): the loop variable shadows the `service` parameter; the
    # parameter is not used later, so behavior is unaffected.
    for service in await middleware.call("datastore.query", "services.services", [], {"prefix": "srv_"}):
        for unit in await middleware.call("service.systemd_units", service["service"]):
            services.append(unit)
            services_enabled[unit] = service["enable"]

    p = await run(["systemctl", "is-enabled"] + services, check=False, encoding="utf-8", errors="ignore")
    are_enabled = p.stdout.strip().split()
    if len(are_enabled) != len(services):
        raise CallError(p.stderr.strip())

    # sysv inits are handled first by systemd
    # https://github.com/systemd/systemd/blob/161bc1b62777b3f32ce645a8e128007a654a2300/src/systemctl/systemctl.c#L7093
    # Move the redirected sysv units to the front of `services` so that
    # zip(services, are_enabled) pairs each unit with its reported state.
    services_native = []
    for line in p.stderr.splitlines():
        if m := RE_IS_NOT_A_NATIVE_SERVICE.match(line):
            service = m.group(1)
            services.remove(service)
            services_native.append(service)
    services = services_native + services

    for service, is_enabled in zip(services, are_enabled):
        enable = services_enabled[service]
        # NOTE(review): systemctl can also report states such as "static" or
        # "masked", which would raise KeyError here -- TODO confirm those
        # cannot occur for the units we manage.
        is_enabled = {"enabled": True, "disabled": False}[is_enabled]
        if enable != is_enabled:
            await run(["systemctl", "enable" if enable else "disable", service])

    # Write out a user enabled services to json file which shows which services user has enabled/disabled
    with open('/data/user-services.json', 'w') as f:
        os.fchmod(f.fileno(), 0o600)
        f.write(json.dumps(services_enabled))
| 1,805 | Python | .py | 35 | 44.857143 | 120 | 0.682747 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,113 | ca.crt.py | truenas_middleware/src/middlewared/middlewared/etc_files/ipa/ca.crt.py | from middlewared.plugins.etc import FileShouldNotExist
from middlewared.utils.directoryservices.ipa_constants import IpaConfigName
from middlewared.utils.directoryservices.constants import DSType
def render(service, middleware, render_ctx):
    """Emit the IPA CA certificate, or no file when IPA is not the active directory service."""
    if render_ctx['directoryservices.status']['type'] != DSType.IPA.value:
        raise FileShouldNotExist()

    results = middleware.call_sync(
        'certificateauthority.query',
        [['name', '=', IpaConfigName.IPA_CACERT.value]]
    )
    if not results:
        raise FileShouldNotExist()

    return results[0]['certificate']
| 568 | Python | .py | 12 | 42.166667 | 75 | 0.758182 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,114 | default_conf.py | truenas_middleware/src/middlewared/middlewared/etc_files/ipa/default_conf.py | from middlewared.plugins.etc import FileShouldNotExist
from middlewared.utils.directoryservices.ipa import generate_ipa_default_config
from middlewared.utils.directoryservices.constants import DSType
def render(service, middleware, render_ctx):
    """Generate /etc/ipa/default.conf contents when IPA is the active directory service."""
    if render_ctx['directoryservices.status']['type'] != DSType.IPA.value:
        raise FileShouldNotExist()

    conf = middleware.call_sync('ldap.ipa_config')
    return generate_ipa_default_config(
        conf['host'], conf['basedn'], conf['domain'], conf['realm'], conf['target_server']
    )
| 579 | Python | .py | 14 | 35.5 | 79 | 0.721925 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,115 | smb.keytab.py | truenas_middleware/src/middlewared/middlewared/etc_files/ipa/smb.keytab.py | from base64 import b64decode
from middlewared.plugins.etc import FileShouldNotExist
from middlewared.utils.directoryservices.ipa_constants import IpaConfigName
from middlewared.utils.directoryservices.constants import DSType
def render(service, middleware, render_ctx):
    """Return the decoded IPA SMB keytab bytes, or no file when IPA is inactive or the keytab is absent."""
    if render_ctx['directoryservices.status']['type'] != DSType.IPA.value:
        raise FileShouldNotExist()

    entries = middleware.call_sync(
        'kerberos.keytab.query',
        [['name', '=', IpaConfigName.IPA_SMB_KEYTAB.value]]
    )
    if not entries:
        raise FileShouldNotExist()

    return b64decode(entries[0]['file'])
| 595 | Python | .py | 13 | 40.846154 | 75 | 0.756522 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,116 | daemon.json.py | truenas_middleware/src/middlewared/middlewared/etc_files/docker/daemon.json.py | import json
import os
import subprocess
from middlewared.plugins.etc import FileShouldNotExist
from middlewared.plugins.docker.state_utils import IX_APPS_MOUNT_PATH
from middlewared.utils.gpu import get_nvidia_gpus
def render(service, middleware):
    """Produce /etc/docker/daemon.json, or no file when no apps pool is configured."""
    config = middleware.call_sync('docker.config')
    if not config['pool']:
        raise FileShouldNotExist()

    # We need to do this so that proxy changes are respected by systemd on docker daemon start
    subprocess.run(['systemctl', 'daemon-reload'], capture_output=True, check=True)
    os.makedirs('/etc/docker', exist_ok=True)

    conf = {
        'data-root': os.path.join(IX_APPS_MOUNT_PATH, 'docker'),
        'exec-opts': ['native.cgroupdriver=cgroupfs'],
        'iptables': True,
        'storage-driver': 'overlay2',
        'default-address-pools': config['address_pools'],
    }

    # Enable the nvidia runtime when at least one non-isolated nvidia GPU exists.
    isolated = middleware.call_sync('system.advanced.config')['isolated_gpu_pci_ids']
    if any(gpu not in isolated for gpu in get_nvidia_gpus()):
        conf.update({
            'runtimes': {
                'nvidia': {
                    'path': '/usr/bin/nvidia-container-runtime',
                    'runtimeArgs': []
                }
            },
            'default-runtime': 'nvidia',
        })

    return json.dumps(conf)
| 1,342 | Python | .py | 34 | 31.705882 | 94 | 0.638249 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,117 | DEV_INFO.service.py | truenas_middleware/src/middlewared/middlewared/etc_files/local/avahi/services/DEV_INFO.service.py | from middlewared.plugins.etc import FileShouldNotExist
from middlewared.utils import mdns
"""
Device Info:
-------------------------
The TXTRecord string here determines the icon that will be displayed in Finder on MacOS
clients. Default is to use MacRack which will display the icon for a rackmounted server.
"""
def render(service, middleware, render_ctx):
    """Emit the avahi DEV_INFO record (controls the icon MacOS Finder shows for this host)."""
    txt_records = [f'model={mdns.DevType.MACPRORACK}']
    try:
        return mdns.generate_avahi_srv_record('DEV_INFO', txt_records=txt_records)
    except Exception:
        middleware.logger.error(
            'Failed to generate mDNS SRV record for the DEV_INFO service',
            exc_info=True
        )
        raise FileShouldNotExist()
| 700 | Python | .py | 19 | 31.368421 | 88 | 0.690828 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,118 | HTTP.service.py | truenas_middleware/src/middlewared/middlewared/etc_files/local/avahi/services/HTTP.service.py | from middlewared.plugins.etc import FileShouldNotExist
from middlewared.utils import mdns
def render(service, middleware, render_ctx):
    """Emit the avahi HTTP service record, limited to configured UI addresses if any."""
    conf = render_ctx['system.general.config']
    iindexes = None
    # A wildcard bind (0.0.0.0) means advertise on every interface.
    if conf['ui_address'][0] != '0.0.0.0':
        iindexes = mdns.ip_addresses_to_interface_indexes(
            render_ctx['interface.query'], conf['ui_address']
        )

    try:
        return mdns.generate_avahi_srv_record('HTTP', iindexes, custom_port=conf['ui_port'])
    except Exception:
        middleware.logger.error(
            'Failed to generate mDNS SRV record for the HTTP service',
            exc_info=True
        )
        raise FileShouldNotExist()
| 712 | Python | .py | 20 | 27.95 | 70 | 0.643377 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,119 | nut.service.py | truenas_middleware/src/middlewared/middlewared/etc_files/local/avahi/services/nut.service.py | from middlewared.plugins.etc import FileShouldNotExist
from middlewared.utils import mdns
def render(service, middleware, render_ctx):
    """Emit the avahi NUT (UPS) service record when the UPS service is started or enabled."""
    conf = render_ctx['ups.config']
    if not render_ctx['ups.service.started_or_enabled']:
        raise FileShouldNotExist()

    try:
        return mdns.generate_avahi_srv_record('NUT', custom_port=conf['remoteport'])
    except Exception:
        middleware.logger.error(
            'Failed to generate mDNS SRV record for the nut service',
            exc_info=True
        )
        raise FileShouldNotExist()
| 554 | Python | .py | 14 | 32.785714 | 84 | 0.702804 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,120 | ADISK.service.py | truenas_middleware/src/middlewared/middlewared/etc_files/local/avahi/services/ADISK.service.py | from middlewared.plugins.etc import FileShouldNotExist
from middlewared.utils import mdns
"""
Time Machine (adisk):
-------------------------
sys=adVF=0x100 -- this is required when _adisk._tcp is present on device. When it is
set, the MacOS client will send a NetShareEnumAll IOCTL and shares will be visible.
Otherwise, Finder will only see the Time Machine share. In the absence of _adisk._tcp
MacOS will _always_ send NetShareEnumAll IOCTL.
waMa=0 -- MacOS server uses waMa=0, while embedded devices have it set to their Mac Address.
Speculation in Samba-Technical indicates that this stands for "Wireless ADisk Mac Address".
adVU -- ADisk Volume UUID.
dk(n)=adVF=
0xa1, 0x81 - AFP support
0xa2, 0x82 - SMB support
0xa3, 0x83 - AFP and SMB support
adVN -- AirDisk Volume Name. We set this to the share name.
network analysis indicates that current MacOS Time Machine shares set the port for adisk to 311.
"""
def render(service, middleware, render_ctx):
    """Emit the avahi ADISK (Time Machine) record for enabled, unlocked time machine shares."""
    conf = render_ctx['smb.config']
    if not render_ctx['service.started_or_enabled']:
        raise FileShouldNotExist()

    shares = middleware.call_sync('sharing.smb.query', [
        ['OR', [['purpose', 'in', ['TIMEMACHINE', 'ENHANCED_TIMEMACHINE']], ['timemachine', '=', True]]],
        ['enabled', '=', True], ['locked', '=', False]
    ])
    if not shares:
        raise FileShouldNotExist()

    iindexes = None
    if conf['bindip']:
        iindexes = mdns.ip_addresses_to_interface_indexes(
            render_ctx['interface.query'], conf['bindip']
        )

    # One dk{n} entry per share; adVF=0x82 advertises SMB support.
    txt_records = ['sys=waMa=0,adVF=0x100']
    txt_records.extend(
        f'dk{dkno}=adVN={share["name"]},adVF=0x82,adVU={share["vuid"]}'
        for dkno, share in enumerate(shares)
    )

    try:
        return mdns.generate_avahi_srv_record('ADISK', iindexes, txt_records=txt_records)
    except Exception:
        middleware.logger.error(
            'Failed to generate mDNS SRV record for the ADISK service',
            exc_info=True
        )
        raise FileShouldNotExist()
| 2,053 | Python | .py | 48 | 37.458333 | 105 | 0.68191 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,121 | SMB.service.py | truenas_middleware/src/middlewared/middlewared/etc_files/local/avahi/services/SMB.service.py | from middlewared.plugins.etc import FileShouldNotExist
from middlewared.utils import mdns
def render(service, middleware, render_ctx):
    """Emit the avahi SMB service record, limited to configured bind IPs if any."""
    conf = render_ctx['smb.config']
    if not render_ctx['service.started_or_enabled']:
        raise FileShouldNotExist()

    iindexes = None
    if conf['bindip']:
        iindexes = mdns.ip_addresses_to_interface_indexes(
            render_ctx['interface.query'], conf['bindip']
        )

    try:
        return mdns.generate_avahi_srv_record('SMB', iindexes)
    except Exception:
        middleware.logger.error(
            'Failed to generate mDNS SRV record for the SMB service',
            exc_info=True
        )
        raise FileShouldNotExist()
| 713 | Python | .py | 20 | 28.35 | 69 | 0.668122 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,122 | config.py | truenas_middleware/src/middlewared/middlewared/etc_files/local/ssh/config.py | import base64
import os
import re
import stat
import shutil
from contextlib import suppress
SSH_CONFIG_PATH = '/etc/ssh'
SSH_KEYS = [
'ssh_host_key', 'ssh_host_key.pub', 'ssh_host_dsa_key', 'ssh_host_dsa_key.pub', 'ssh_host_dsa_key-cert.pub',
'ssh_host_ecdsa_key', 'ssh_host_ecdsa_key.pub', 'ssh_host_ecdsa_key-cert.pub', 'ssh_host_rsa_key',
'ssh_host_rsa_key.pub', 'ssh_host_rsa_key-cert.pub', 'ssh_host_ed25519_key', 'ssh_host_ed25519_key.pub',
'ssh_host_ed25519_key-cert.pub'
]
DEFAULT_FILES = ['moduli', 'ssh_config', 'ssh_config.d', 'sshd_config', 'sshd_config.d']
def generate_ssh_config(middleware, ssh_config, dirfd):
    """Write SSH host keys from the database into /etc/ssh and prune unexpected entries.

    `dirfd` is an open fd for /etc/ssh; all key files are created relative to
    it. Public keys are written 0o644, private keys 0o600, and any pre-existing
    file with wrong mode/ownership is corrected via the open fd.
    """
    mode = 0o600

    def opener(path, flags):
        # Reads `mode` and `dirfd` from the enclosing scope at open() time, so
        # the per-key `mode` assignment below must happen before each open().
        return os.open(path, flags, mode=mode, dir_fd=dirfd)

    for k in SSH_KEYS:
        # Map the file name to its database column, e.g.
        # "ssh_host_rsa_key.pub" -> "host_rsa_key_pub".
        s_key = re.sub(r'([.-])', '_', k).replace('ssh_', '', 1)
        if ssh_config[s_key]:
            decoded_key = base64.b64decode(ssh_config[s_key])
            if decoded_key:
                mode = 0o644 if k.endswith('.pub') else 0o600
                with open(k, 'wb', opener=opener) as f:
                    st = os.fstat(f.fileno())
                    if stat.S_ISREG(st.st_mode) == 0:
                        # Something replaced the key with a non-regular file
                        # (e.g. a symlink target); remove it defensively.
                        middleware.logger.warning(
                            "%s/%s: is not a regular file and will be removed. "
                            "This may impact SSH access to the server.",
                            SSH_CONFIG_PATH, k
                        )
                        with suppress(FileNotFoundError):
                            os.remove(os.path.join(SSH_CONFIG_PATH, k))
                    if stat.S_IMODE(st.st_mode) != mode:
                        middleware.logger.debug(
                            "%s/%s: file has unexpected permissions [%s]. "
                            "Changing to new value [%s].",
                            SSH_CONFIG_PATH, k, stat.S_IMODE(st.st_mode), mode
                        )
                        os.fchmod(f.fileno(), mode)
                    if st.st_uid != 0 or st.st_gid != 0:
                        middleware.logger.debug(
                            "%s/%s: unexpected user or group ownership [%d:%d]. "
                            "Changing to new value [0:0]. ",
                            SSH_CONFIG_PATH, k, st.st_uid, st.st_gid
                        )
                        os.fchown(f.fileno(), 0, 0)
                    f.write(decoded_key)

    # Remove anything in /etc/ssh that is neither a managed key nor one of the
    # stock openssh config files/directories.
    expected_files = SSH_KEYS + DEFAULT_FILES
    with os.scandir(dirfd) as entries:
        for entry in filter(lambda x: x.name not in expected_files, entries):
            if entry.is_dir():
                middleware.logger.debug("%s: removing unexpected directory.",
                                        os.path.join(SSH_CONFIG_PATH, entry.name))
                shutil.rmtree(os.path.join(SSH_CONFIG_PATH, entry.name))
            else:
                middleware.logger.debug("%s: removing unexpected file.",
                                        os.path.join(SSH_CONFIG_PATH, entry.name))
                os.remove(entry.name, dir_fd=dirfd)
def render(service, middleware, render_ctx):
    """Open /etc/ssh and regenerate its key/config contents from the database."""
    dirfd = os.open(SSH_CONFIG_PATH, os.O_RDONLY | os.O_DIRECTORY)
    try:
        generate_ssh_config(middleware, render_ctx['ssh.config'], dirfd)
    finally:
        # Always release the directory fd, even if generation fails.
        os.close(dirfd)
| 3,318 | Python | .py | 66 | 35.166667 | 112 | 0.520235 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,123 | ups_config.py | truenas_middleware/src/middlewared/middlewared/etc_files/local/nut/ups_config.py | import os
import shutil
UPS_CONFPATH = '/etc/nut'
UPS_USER = 'nut'
UPS_VARPATH = '/var/run/nut'
UPSSCHED_VARPATH = '/var/run/nut/private'
def generate_ups_config(middleware):
    """Recreate NUT config/runtime directories with the expected ownership and modes."""
    # Rebuild the config directory from scratch so stale files do not persist.
    if os.path.isdir(UPS_CONFPATH):
        shutil.rmtree(UPS_CONFPATH)
    os.makedirs(UPS_CONFPATH)

    for var_dir in (UPS_VARPATH, UPSSCHED_VARPATH):
        os.makedirs(var_dir, exist_ok=True)

    ups_group = middleware.call_sync('group.query', [['group', '=', UPS_USER]], {'get': True})
    os.chown(UPS_VARPATH, 0, ups_group['gid'])
    os.chmod(UPS_VARPATH, 0o775)
    os.chmod(UPSSCHED_VARPATH, 0o770)
def render(service, middleware):
    # etc.generate entry point: delegate to the NUT directory setup helper.
    generate_ups_config(middleware)
| 660 | Python | .py | 18 | 32.777778 | 94 | 0.706625 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,124 | ups_perms.py | truenas_middleware/src/middlewared/middlewared/etc_files/local/nut/ups_perms.py | import os
import pathlib
from middlewared.plugins.ups import UPS_POWERDOWN_FLAG_FILE
UPS_CONFPATH = '/etc/nut'
UPS_VARPATH = '/var/run/nut'
UPS_CONFIG = f'{UPS_CONFPATH}/ups.conf'
UPS_MONFILE = f'{UPS_CONFPATH}/upsmon.conf'
UPS_SCHEDFILE = f'{UPS_CONFPATH}/upssched.conf'
UPS_USERSFILE = f'{UPS_CONFPATH}/upsd.users'
UPS_DAEMONFILE = f'{UPS_CONFPATH}/upsd.conf'
def ups_config_perms(middleware):
    """Remove master-mode-only NUT config files when the UPS service is not in master mode.

    Always clears the stale powerdown flag file.
    """
    ups_config = middleware.call_sync('ups.config')
    master_mode_files = (UPS_CONFIG, UPS_USERSFILE, UPS_DAEMONFILE)
    # The mode test does not depend on the file, so evaluate it once instead
    # of once per file.
    if ups_config['mode'].lower() != 'master':
        for file in master_mode_files:
            os.remove(file)

    pathlib.Path(UPS_POWERDOWN_FLAG_FILE).unlink(missing_ok=True)
def render(service, middleware):
    # etc.generate entry point: delegate to the NUT permissions helper.
    ups_config_perms(middleware)
| 771 | Python | .py | 19 | 37 | 67 | 0.732167 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,125 | schedule.py | truenas_middleware/src/middlewared/middlewared/alert/schedule.py | from datetime import datetime
from dateutil.tz import tzlocal
class BaseSchedule:
    """Interface for alert-source run schedules."""

    def should_run(self, now, last_run):
        # Subclasses decide whether a source is due given the current time
        # and the time it last ran.
        raise NotImplementedError
class IntervalSchedule:
    """Run whenever at least `interval` has elapsed since the last run."""

    def __init__(self, interval):
        self.interval = interval

    def should_run(self, now, last_run):
        due_at = last_run + self.interval
        return now >= due_at
class CrontabSchedule:
    """Run once per local-time day, at the configured local hour."""

    def __init__(self, hour):
        self.hour = hour

    def should_run(self, now, last_run):
        # A last_run of datetime.min is the "never ran" sentinel: run now.
        if last_run == datetime.min:
            return True
        local_now = now + tzlocal().utcoffset(now)
        local_last_run = last_run + tzlocal().utcoffset(last_run)
        if local_now.hour != self.hour:
            return False
        # Already ran today (local time)? Then wait for tomorrow.
        return local_last_run.date() != local_now.date()
| 731 | Python | .py | 19 | 31.947368 | 88 | 0.662873 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,126 | base.py | truenas_middleware/src/middlewared/middlewared/alert/base.py | from datetime import datetime, timedelta
import enum
import json
import logging
from typing import Any, TypeAlias
import html2text
from middlewared.alert.schedule import IntervalSchedule
from middlewared.utils import ProductName, ProductType
from middlewared.utils.lang import undefined
# Public API of this module; everything else is an implementation detail.
__all__ = [
    "UnavailableException", "AlertClass", "OneShotAlertClass", "SimpleOneShotAlertClass", "DismissableAlertClass",
    "AlertCategory", "AlertLevel", "Alert", "AlertSource", "ThreadedAlertSource", "AlertService",
    "ThreadedAlertService", "ProThreadedAlertService", "format_alerts", "ellipsis"
]

logger = logging.getLogger(__name__)
class UnavailableException(Exception):
    """Signals that an alert check could not be performed at this time.

    NOTE(review): semantics inferred from the name — presumably raised by
    alert sources when the checked subsystem is temporarily unavailable;
    confirm against the alert runner's handling of this exception.
    """
    pass
class AlertClassMeta(type):
    """Metaclass that auto-registers every AlertClass subclass.

    Each concrete subclass must be named ``<Something>AlertClass``; its short
    name (``<Something>``) is stored as ``cls.name`` and used as the key in
    ``AlertClass.class_by_name``.
    """

    def __init__(cls, name, bases, dct):
        super().__init__(name, bases, dct)

        # The abstract base itself is not registered.
        if cls.__name__ == "AlertClass":
            return

        if not cls.__name__.endswith("AlertClass"):
            raise NameError(f"Invalid alert class name {cls.__name__}")

        cls.name = cls.__name__.replace("AlertClass", "")
        AlertClass.classes.append(cls)
        AlertClass.class_by_name[cls.name] = cls
class AlertClass(metaclass=AlertClassMeta):
    """
    Alert class: describes one specific kind of issue the system can report.

    :cvar category: `AlertCategory` value
    :cvar level: default `AlertLevel` (users may override it later)
    :cvar title: short description of the class (e.g. "An SSL certificate is expiring")
    :cvar text: format string for a class instance (e.g. "%(name)s SSL certificate is expiring")
    :cvar exclude_from_list: `True` hides the class from the UI configuration (e.g. rare
        legacy-hardware alerts). Such alerts are still sent when they occur, but users
        cannot disable them or change their level.
    :cvar products: `system.product_type` return values on which this class may be emitted.
    :cvar proactive_support: `True` to open a support ticket on alert creation for systems
        with a corresponding support license.
    :cvar proactive_support_notify_gone: `True` to open a support ticket on alert removal
        for systems with a corresponding support license.
    """

    # Registry populated by AlertClassMeta.
    classes = []
    class_by_name = {}

    category = NotImplemented
    level = NotImplemented
    title = NotImplemented
    text = None

    exclude_from_list = False
    products = ("CORE", "ENTERPRISE", ProductType.SCALE, ProductType.SCALE_ENTERPRISE)
    proactive_support = False
    proactive_support_notify_gone = False

    def __init__(self, middleware):
        self.middleware = middleware

    @classmethod
    def format(cls, args):
        """Render the human-readable message for an alert with the given args."""
        template = cls.text
        if template is None:
            return cls.title
        if args is None:
            return template
        if isinstance(args, list):
            # %-interpolation needs a tuple for positional arguments.
            return template % tuple(args)
        return template % args
class OneShotAlertClass:
    """
    One-shot alert mixin: add this to `AlertClass` superclass list to the alerts that are created not by an
    `AlertSource` but using `alert.oneshot_create` API method.

    :cvar deleted_automatically: Set this to `false` if there is no one to call `alert.oneshot_delete` when the alert
        situation is resolved. In that case, the alert will be deleted when the user dismisses it.
    :cvar expires_after: Lifetime for the alert.
    """
    deleted_automatically = True
    expires_after = None

    async def create(self, args):
        """
        Returns an `Alert` instance created using `args` that were passed to `alert.oneshot_create`.

        :param args: free-form data that was passed to `alert.oneshot_create`.
        :return: an `Alert` instance.
        """
        raise NotImplementedError

    async def delete(self, alerts, query):
        """
        Returns only those `alerts` that do not match `query` that was passed to `alert.oneshot_delete`.

        :param alerts: all the alerts of this class.
        :param query: free-form data that was passed to `alert.oneshot_delete`.
        :return: `alerts` that do not match query (e.g. `query` specifies `{"certificate_id": "xxx"}` and the method
            implementation returns all `alerts` except the ones related to the certificate `xxx`).
        """
        raise NotImplementedError

    async def load(self, alerts):
        """
        This is called on system startup. Returns only those `alerts` that are still applicable to this system (i.e.,
        corresponding resources still exist).

        :param alerts: all the existing alerts of the class
        :return: `alerts` that should exist on this system.
        """
        return alerts
class SimpleOneShotAlertClass(OneShotAlertClass):
    """
    Ready-made `OneShotAlertClass` implementation: the `args` passed to
    `alert.oneshot_create` become the `Alert`'s args, and `alert.oneshot_delete`
    matches alerts by comparing their args (or a subset of keys) to the query.

    :cvar keys: controls how alerts are deleted:
        `keys = ["id", "name"]` — only the listed args keys are compared
        `keys = []` — every alert of this class matches (all deleted)
        `keys = None` — the whole args dict must equal the delete query (default)
    """

    keys = None

    async def create(self, args):
        return Alert(self.__class__, args)

    async def delete(self, alerts, query):
        def matches(alert):
            if self.keys is None:
                return alert.args == query
            return all(alert.args[k] == query[k] for k in self.keys)

        # Keep only the alerts that do NOT match the delete query.
        return [alert for alert in alerts if not matches(alert)]
class DismissableAlertClass:
    """Mixin for alert classes with custom dismissal logic.

    `dismiss` receives all current alerts of the class plus the one being
    dismissed; presumably it returns the alerts that should remain —
    confirm against the alert plugin's caller.
    """
    async def dismiss(self, alerts, alert):
        raise NotImplementedError
class AlertCategory(enum.Enum):
    """Broad grouping of alert classes, used for per-category configuration.

    NOTE(review): AUDIT's value is "Audit" while every other member's value
    equals its name — this looks unintentional, but the value may already be
    persisted/serialized elsewhere, so it is left unchanged here.
    """
    APPLICATIONS = "APPLICATIONS"
    AUDIT = "Audit"
    CERTIFICATES = "CERTIFICATES"
    CLUSTERING = "CLUSTERING"
    DIRECTORY_SERVICE = "DIRECTORY_SERVICE"
    HA = "HA"
    HARDWARE = "HARDWARE"
    KMIP = "KMIP"
    PLUGINS = "PLUGINS"
    NETWORK = "NETWORK"
    REPORTING = "REPORTING"
    SHARING = "SHARING"
    STORAGE = "STORAGE"
    SYSTEM = "SYSTEM"
    TASKS = "TASKS"
    UPS = "UPS"
# Human-readable display titles for each AlertCategory member.
alert_category_names = {
    AlertCategory.APPLICATIONS: "Applications",
    AlertCategory.AUDIT: "Audit",
    AlertCategory.CERTIFICATES: "Certificates",
    AlertCategory.CLUSTERING: "Clustering",
    AlertCategory.DIRECTORY_SERVICE: "Directory Service",
    AlertCategory.HA: "High-Availability",
    AlertCategory.HARDWARE: "Hardware",
    AlertCategory.KMIP: "Key Management Interoperability Protocol (KMIP)",
    AlertCategory.PLUGINS: "Plugins",
    AlertCategory.NETWORK: "Network",
    AlertCategory.REPORTING: "Reporting",
    AlertCategory.SHARING: "Sharing",
    AlertCategory.STORAGE: "Storage",
    AlertCategory.SYSTEM: "System",
    AlertCategory.TASKS: "Tasks",
    AlertCategory.UPS: "UPS",
}
class AlertLevel(enum.Enum):
    """Alert severity, ordered ascending from INFO to EMERGENCY."""
    INFO = 1
    NOTICE = 2
    WARNING = 3
    ERROR = 4
    CRITICAL = 5
    ALERT = 6
    EMERGENCY = 7
# Alias for datetime.datetime: inside class Alert the name `datetime` is used
# for an instance attribute, so annotations refer to this alias instead.
DateTimeType: TypeAlias = datetime
class Alert:
    """
    Alert: a message about a single issue in the system (or a group of similar issues that can be potentially resolved
    with a single action).

    :ivar klass: Alert class: generic description of the alert (e.g. `CertificateIsExpiringAlertClass`)
    :ivar args: specific description of the alert (e.g. `{"name": "my certificate", "days": 3}`).
        The resulting alert text will be obtained by doing `klass.text % args`
    :ivar key: the information that will be used to distinguish this alert from the others of the same class. If empty,
        will default to `args`, which is the most common use case. Can be anything that can be JSON serialized.
        However, for some alerts it makes sense to pass only a subset of args as the key. For example, for a
        `CertificateIsExpiringAlertClass` you may only want to include the certificate name as the key and omit how
        many days are left before the certificate expires. That way, at day change, the alerts "certificate xxx expires
        in 3 days" and "certificate xxx expires in 2 days" will be considered the same alert (as only certificate name
        will be compared) and the newer one will silently replace the old one (in opposite case, an E-Mail would be
        sent claiming that one alert was cleared and another one was added).
    :ivar datetime: timestamp when the alert was first seen.
    :ivar last_occurrence: timestamp when the alert was last seen.
    :ivar node: HA node when the alert was seen.
    :ivar dismissed: whether the alert was dismissed by user.
    :ivar mail: if this parameter is not null, it will be an argument to an extra call to `mail.send` that will be made
        when the alert is first seen.
    """
    klass: type[AlertClass]
    args: dict[str, Any] | list
    key: Any
    datetime: DateTimeType
    last_occurrence: DateTimeType
    node: str | None
    dismissed: bool
    mail: dict | None

    def __init__(self, klass, args=None, key=undefined, datetime=None, last_occurrence=None, node=None, dismissed=None,
                 mail=None, _uuid=None, _source=None, _key=None, _text=None):
        # Underscore-prefixed parameters are used when rehydrating an already
        # persisted alert (uuid/source/key/text were computed previously);
        # the regular parameters are used when constructing a fresh alert.
        self.uuid = _uuid
        self.source = _source
        self.klass = klass
        self.args = args

        self.node = node

        if _key is None:
            # `key` defaults to `args` (see class docstring). It is stored
            # JSON-serialized (sorted keys) so it can be compared and
            # persisted as a stable string.
            if key is undefined:
                key = args
            self.key = json.dumps(key, sort_keys=True)
        else:
            self.key = _key

        self.datetime = datetime
        self.last_occurrence = last_occurrence or datetime
        self.dismissed = dismissed
        self.mail = mail

        # Raw message template, used as a fallback by `formatted`.
        self.text = _text or self.klass.text or self.klass.title

    def __eq__(self, other):
        # NOTE(review): assumes `other` has a __dict__ (i.e. is another
        # Alert); comparing with an unrelated type may raise AttributeError.
        return self.__dict__ == other.__dict__

    def __repr__(self):
        return repr(self.__dict__)

    @property
    def formatted(self):
        # Human-readable message for this alert.
        try:
            return self.klass.format(self.args)
        except Exception:
            # Fall back to the raw template if interpolation fails (e.g. the
            # stored args no longer match the class's format string).
            return self.text
class AlertSource:
    """
    Alert source: periodically checks for a specific erroneous condition and
    yields zero or more `Alert` instances.

    :cvar schedule: schedule object (see `middlewared.alert.schedule`) deciding
        when this source runs; the default zero-interval schedule makes the
        checker run every cycle (every minute).
    :cvar products: `system.product_type` return values for which this source is run.
    :cvar failover_related: `True` for HA-failover-related sources; such sources
        are not run within a certain interval after failover, to avoid false positives.
    :cvar run_on_backup_node: set to `False` to skip this source on the HA `BACKUP` node.
    """

    schedule = IntervalSchedule(timedelta())
    products = ("CORE", "ENTERPRISE", ProductType.SCALE, ProductType.SCALE_ENTERPRISE)
    failover_related = False
    run_on_backup_node = True

    def __init__(self, middleware):
        self.middleware = middleware

    @property
    def name(self):
        # e.g. "VolumeStatusAlertSource" -> "VolumeStatus"
        return type(self).__name__.replace("AlertSource", "")

    async def check(self):
        """Run the check; return an `Alert`, a list of `Alert`s, or `None`."""
        raise NotImplementedError
class ThreadedAlertSource(AlertSource):
    """AlertSource whose check is written as blocking code; it is executed in
    a worker thread so the event loop is not blocked."""
    async def check(self):
        return await self.middleware.run_in_thread(self.check_sync)

    def check_sync(self):
        # Blocking check implementation, provided by subclasses.
        raise NotImplementedError
class AlertService:
    """Base class for alert delivery backends (e-mail, Slack, SNMP, ...).

    :cvar title: human-readable service name
    :cvar schema: schema describing the service's `attributes` dict
    :cvar html: whether `_format_alerts` should return HTML (`True`) or plain text
    """

    title = NotImplementedError
    schema = NotImplementedError

    html = False

    def __init__(self, middleware, attributes):
        self.middleware = middleware
        self.attributes = attributes

        self.logger = logging.getLogger(self.__class__.__name__)

        # Attributes removed from the schema may still be present in the
        # database; accept them instead of failing validation.
        lenient_schema = self.schema.copy()
        lenient_schema.additional_attrs = True
        # Attributes added after the row was written get their defaults.
        self.attributes = lenient_schema.clean(self.attributes)

    @classmethod
    def name(cls):
        return cls.__name__.replace("AlertService", "")

    @classmethod
    def validate(cls, attributes):
        cls.schema.validate(attributes)

    async def send(self, alerts, gone_alerts, new_alerts):
        raise NotImplementedError

    async def _format_alerts(self, alerts, gone_alerts, new_alerts):
        hostname = await self.middleware.call("system.hostname")
        node_map = None
        if await self.middleware.call("system.is_enterprise"):
            node_map = await self.middleware.call("alert.node_map")
        rendered = format_alerts(ProductName.PRODUCT_NAME, hostname, node_map, alerts, gone_alerts, new_alerts)
        if self.html:
            return rendered
        return html2text.html2text(rendered).rstrip()
class ThreadedAlertService(AlertService):
    """AlertService whose delivery code is blocking; `send` runs it in a worker thread."""

    async def send(self, alerts, gone_alerts, new_alerts):
        return await self.middleware.run_in_thread(self.send_sync, alerts, gone_alerts, new_alerts)

    def send_sync(self, alerts, gone_alerts, new_alerts):
        raise NotImplementedError

    def _format_alerts(self, alerts, gone_alerts, new_alerts):
        # Synchronous counterpart of AlertService._format_alerts.
        hostname = self.middleware.call_sync("system.hostname")
        node_map = None
        if self.middleware.call_sync("system.is_enterprise"):
            node_map = self.middleware.call_sync("alert.node_map")
        return format_alerts(ProductName.PRODUCT_NAME, hostname, node_map, alerts, gone_alerts, new_alerts)
class ProThreadedAlertService(ThreadedAlertService):
    """Threaded service for backends with per-alert create/delete semantics
    (e.g. incident trackers): each gone alert is closed and each new alert is
    opened individually. Failures are logged, and the last one is re-raised
    after all alerts have been attempted."""

    def send_sync(self, alerts, gone_alerts, new_alerts):
        failure = None

        for alert in gone_alerts:
            try:
                self.delete_alert(alert)
            except Exception as err:
                self.logger.warning("An exception occurred while deleting alert", exc_info=True)
                failure = err

        for alert in new_alerts:
            try:
                self.create_alert(alert)
            except Exception as err:
                self.logger.warning("An exception occurred while creating alert", exc_info=True)
                failure = err

        if failure is not None:
            raise failure

    def create_alert(self, alert):
        raise NotImplementedError

    def delete_alert(self, alert):
        raise NotImplementedError
def format_alerts(product_name, hostname, node_map, alerts, gone_alerts, new_alerts):
    """Render the alert digest as HTML.

    :param product_name: product name for the header line.
    :param hostname: system hostname for the header line.
    :param node_map: HA node-name map (or a falsy value on non-HA systems).
    :param alerts: all currently-active alerts.
    :param gone_alerts: alerts cleared since the last notification.
    :param new_alerts: alerts created since the last notification.
    :return: HTML string.
    """
    text = f"{product_name} @ {hostname}<br><br>"

    # Special case: the single synthetic "Test" alert emitted when the user
    # tests an alert service gets a fixed short body.
    if len(alerts) == 1 and len(gone_alerts) == 0 and len(new_alerts) == 1 and new_alerts[0].klass.name == "Test":
        return text + "This is a test alert"

    if new_alerts:
        # Fixed: this previously tested len(gone_alerts), so a single new
        # alert was announced with the plural "New alerts" header.
        if len(new_alerts) == 1:
            text += "New alert"
        else:
            text += "New alerts"
        text += ":\n<ul>" + "".join([
            "<li>%s</li>\n" % format_alert(alert, node_map)
            for alert in new_alerts
        ]) + "</ul>"

    if gone_alerts:
        if len(gone_alerts) == 1:
            text += "The following alert has been cleared"
        else:
            text += "These alerts have been cleared"
        text += ":\n<ul>" + "".join([
            "<li>%s</li>\n" % format_alert(alert, node_map)
            for alert in gone_alerts
        ]) + "</ul>\n"

    if alerts:
        text += "Current alerts:\n<ul>" + "".join([
            "<li>%s</li>\n" % format_alert(alert, node_map)
            for alert in alerts
        ]) + "</ul>\n"

    return text


def format_alert(alert, node_map):
    """Render one alert line, prefixed with its HA node name when a node map is given."""
    return (f"{node_map[alert.node]} - " if node_map else "") + alert.formatted
def ellipsis(a, b):
    """Truncate string `a` to at most `b` characters, appending '…' when cut."""
    return a if len(a) <= b else a[:b - 1] + "…"
| 16,017 | Python | .py | 349 | 38.404011 | 119 | 0.666624 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
import pysnmp.hlapi
import pysnmp.smi
from middlewared.alert.base import ThreadedAlertService
from middlewared.schema import Bool, Dict, Int, Str
class SNMPTrapAlertService(ThreadedAlertService):
    """Sends SNMP traps (defined in TRUENAS-MIB) for new and cleared alerts,
    using SNMP v1/v2c (community string) or v3 (USM credentials)."""

    title = "SNMP Trap"

    schema = Dict(
        "snmp_attributes",
        Str("host", required=True),
        Int("port", required=True),
        Bool("v3", required=True),
        # v1/v2
        Str("community", null=True, default=None, empty=False),
        # v3
        Str("v3_username", null=True, default=None, empty=False),
        Str("v3_authkey", null=True, default=None),
        Str("v3_privkey", null=True, default=None),
        Str("v3_authprotocol", enum=[None, "MD5", "SHA", "128SHA224", "192SHA256", "256SHA384", "384SHA512"],
            null=True, default=None),
        Str("v3_privprotocol", enum=[None, "DES", "3DESEDE", "AESCFB128", "AESCFB192", "AESCFB256",
                                     "AESBLUMENTHALCFB192", "AESBLUMENTHALCFB256"],
            null=True, default=None),
        strict=True,
    )

    def __init__(self, middleware, attributes):
        super().__init__(middleware, attributes)

        # SNMP engine and MIB setup are expensive, so they are performed
        # lazily on the first send (see send_sync) and cached on self.
        self.initialized = False

    def send_sync(self, alerts, gone_alerts, new_alerts):
        # When trapping to the local host, skip sending unless the local SNMP
        # service is actually running.
        if self.attributes["host"] in ("localhost", "127.0.0.1", "::1"):
            if not self.middleware.call_sync("service.started", "snmp"):
                self.logger.trace("Local SNMP service not started, not sending traps")
                return

        if not self.initialized:
            self.snmp_engine = pysnmp.hlapi.SnmpEngine()
            if self.attributes["v3"]:
                # SNMPv3: USM user with the configured auth/priv protocols.
                self.auth_data = pysnmp.hlapi.UsmUserData(
                    self.attributes["v3_username"] or "",
                    self.attributes["v3_authkey"],
                    self.attributes["v3_privkey"],
                    {
                        None: pysnmp.hlapi.usmNoAuthProtocol,
                        "MD5": pysnmp.hlapi.usmHMACMD5AuthProtocol,
                        "SHA": pysnmp.hlapi.usmHMACSHAAuthProtocol,
                        "128SHA224": pysnmp.hlapi.usmHMAC128SHA224AuthProtocol,
                        "192SHA256": pysnmp.hlapi.usmHMAC192SHA256AuthProtocol,
                        "256SHA384": pysnmp.hlapi.usmHMAC256SHA384AuthProtocol,
                        "384SHA512": pysnmp.hlapi.usmHMAC384SHA512AuthProtocol,
                    }[self.attributes["v3_authprotocol"]],
                    {
                        None: pysnmp.hlapi.usmNoPrivProtocol,
                        "DES": pysnmp.hlapi.usmDESPrivProtocol,
                        "3DESEDE": pysnmp.hlapi.usm3DESEDEPrivProtocol,
                        "AESCFB128": pysnmp.hlapi.usmAesCfb128Protocol,
                        "AESCFB192": pysnmp.hlapi.usmAesCfb192Protocol,
                        "AESCFB256": pysnmp.hlapi.usmAesCfb256Protocol,
                        "AESBLUMENTHALCFB192": pysnmp.hlapi.usmAesBlumenthalCfb192Protocol,
                        "AESBLUMENTHALCFB256": pysnmp.hlapi.usmAesBlumenthalCfb256Protocol,
                    }[self.attributes["v3_privprotocol"]],
                )
            else:
                # SNMPv1/v2c: plain community string.
                self.auth_data = pysnmp.hlapi.CommunityData(self.attributes["community"])

            self.transport_target = pysnmp.hlapi.UdpTransportTarget((self.attributes["host"], self.attributes["port"]))
            self.context_data = pysnmp.hlapi.ContextData()

            # Load TRUENAS-MIB and resolve the OIDs used in the trap varbinds.
            mib_builder = pysnmp.smi.builder.MibBuilder()
            mib_sources = mib_builder.getMibSources() + (
                pysnmp.smi.builder.DirMibSource("/usr/local/share/pysnmp/mibs"),)
            mib_builder.setMibSources(*mib_sources)
            mib_builder.loadModules("TRUENAS-MIB")
            self.snmp_alert_level_type = mib_builder.importSymbols("TRUENAS-MIB", "AlertLevelType")[0]
            mib_view_controller = pysnmp.smi.view.MibViewController(mib_builder)
            self.snmp_alert = pysnmp.hlapi.ObjectIdentity("TRUENAS-MIB", "alert"). \
                resolveWithMib(mib_view_controller)
            self.snmp_alert_id = pysnmp.hlapi.ObjectIdentity("TRUENAS-MIB", "alertId"). \
                resolveWithMib(mib_view_controller)
            self.snmp_alert_level = pysnmp.hlapi.ObjectIdentity("TRUENAS-MIB", "alertLevel"). \
                resolveWithMib(mib_view_controller)
            self.snmp_alert_message = pysnmp.hlapi.ObjectIdentity("TRUENAS-MIB", "alertMessage"). \
                resolveWithMib(mib_view_controller)
            self.snmp_alert_cancellation = pysnmp.hlapi.ObjectIdentity("TRUENAS-MIB", "alertCancellation"). \
                resolveWithMib(mib_view_controller)

            self.initialized = True

        # Per-class level overrides configured by the user.
        classes = (self.middleware.call_sync("alertclasses.config"))["classes"]

        # Cancellation traps for cleared alerts, identified by alert uuid.
        for alert in gone_alerts:
            error_indication, error_status, error_index, var_binds = next(
                pysnmp.hlapi.sendNotification(
                    self.snmp_engine,
                    self.auth_data,
                    self.transport_target,
                    self.context_data,
                    "trap",
                    pysnmp.hlapi.NotificationType(self.snmp_alert_cancellation).addVarBinds(
                        (pysnmp.hlapi.ObjectIdentifier(self.snmp_alert_id),
                         pysnmp.hlapi.OctetString(alert.uuid))
                    )
                )
            )
            if error_indication:
                # NOTE(review): this branch logs at error level while the
                # new-alert branch below logs at warning — looks inconsistent.
                self.logger.error("Failed to send SNMP trap: %s", error_indication)

        # Alert traps for new alerts: uuid, (possibly overridden) level, message.
        for alert in new_alerts:
            error_indication, error_status, error_index, var_binds = next(
                pysnmp.hlapi.sendNotification(
                    self.snmp_engine,
                    self.auth_data,
                    self.transport_target,
                    self.context_data,
                    "trap",
                    pysnmp.hlapi.NotificationType(self.snmp_alert).addVarBinds(
                        (pysnmp.hlapi.ObjectIdentifier(self.snmp_alert_id),
                         pysnmp.hlapi.OctetString(alert.uuid)),
                        (pysnmp.hlapi.ObjectIdentifier(self.snmp_alert_level),
                         self.snmp_alert_level_type(
                             self.snmp_alert_level_type.namedValues.getValue(
                                 classes.get(alert.klass.name, {}).get("level", alert.klass.level.name).lower()))),
                        (pysnmp.hlapi.ObjectIdentifier(self.snmp_alert_message),
                         pysnmp.hlapi.OctetString(alert.formatted))
                    )
                )
            )
            if error_indication:
                self.logger.warning("Failed to send SNMP trap: %s", error_indication)
| 6,714 | Python | .py | 120 | 39.5 | 119 | 0.572883 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
import html
import json
import html2text
import requests
from middlewared.alert.base import ThreadedAlertService
from middlewared.schema import Dict, Str
from middlewared.utils.network import INTERNET_TIMEOUT
class SlackAlertService(ThreadedAlertService):
    """Posts alert digests to a Slack incoming-webhook URL."""

    title = "Slack"

    schema = Dict(
        "slack_attributes",
        Str("url", required=True, empty=False),
        strict=True,
    )

    def send_sync(self, alerts, gone_alerts, new_alerts):
        # Slack expects plain text: convert the HTML digest, then escape it.
        plain = html2text.html2text(self._format_alerts(alerts, gone_alerts, new_alerts))
        payload = {"text": html.escape(plain, quote=False)}
        response = requests.post(
            self.attributes["url"],
            headers={"Content-type": "application/json"},
            data=json.dumps(payload),
            timeout=INTERNET_TIMEOUT,
        )
        response.raise_for_status()
| 969 | Python | .py | 30 | 22.033333 | 59 | 0.570204 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
import json
import requests
from middlewared.alert.base import ThreadedAlertService
from middlewared.schema import Dict, Str
from middlewared.utils.network import INTERNET_TIMEOUT
class MattermostAlertService(ThreadedAlertService):
    """Posts alert digests to a Mattermost incoming webhook."""

    title = "Mattermost"

    schema = Dict(
        "mattermost_attributes",
        Str("url", required=True, empty=False),
        Str("username", required=True, empty=False),
        Str("channel", default=""),
        Str("icon_url", default=""),
        strict=True,
    )

    def send_sync(self, alerts, gone_alerts, new_alerts):
        payload = {
            "username": self.attributes["username"],
            "channel": self.attributes["channel"],
            "icon_url": self.attributes["icon_url"],
            "text": self._format_alerts(alerts, gone_alerts, new_alerts),
        }
        response = requests.post(
            self.attributes["url"],
            headers={"Content-type": "application/json"},
            data=json.dumps(payload),
            timeout=INTERNET_TIMEOUT,
        )
        response.raise_for_status()
| 1,064 | Python | .py | 28 | 29.25 | 77 | 0.617847 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
import json
import requests
from middlewared.alert.base import ProThreadedAlertService, ellipsis
from middlewared.schema import Dict, Str
from middlewared.utils.network import INTERNET_TIMEOUT
class OpsGenieAlertService(ProThreadedAlertService):
    """Creates/closes OpsGenie alerts, using the middleware alert uuid as the alias."""

    title = "OpsGenie"

    schema = Dict(
        "opsgenie_attributes",
        Str("api_key", required=True, empty=False),
        Str("api_url", default=""),
        strict=True,
    )

    def _base_url(self):
        # An empty api_url falls back to the public OpsGenie endpoint.
        return self.attributes.get("api_url") or "https://api.opsgenie.com"

    def _auth_header(self):
        return f"GenieKey {self.attributes['api_key']}"

    def create_alert(self, alert):
        response = requests.post(
            self._base_url() + "/v2/alerts",
            headers={"Authorization": self._auth_header(),
                     "Content-type": "application/json"},
            data=json.dumps({
                # Truncated — presumably the OpsGenie field limits (130 for
                # message, 15000 for description); confirm against API docs.
                "message": ellipsis(alert.formatted, 130),
                "alias": alert.uuid,
                "description": ellipsis(alert.formatted, 15000),
            }),
            timeout=INTERNET_TIMEOUT,
        )
        response.raise_for_status()

    def delete_alert(self, alert):
        response = requests.delete(
            self._base_url() + "/v2/alerts/" + alert.uuid,
            params={"identifierType": "alias"},
            headers={"Authorization": self._auth_header()},
            timeout=INTERNET_TIMEOUT,
        )
        response.raise_for_status()
| 1,390 | Python | .py | 34 | 31.558824 | 104 | 0.607407 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
import json
import html2text
import requests
from middlewared.alert.base import ProThreadedAlertService, ellipsis
from middlewared.schema import Dict, Str
from middlewared.utils.network import INTERNET_TIMEOUT
class PagerDutyAlertService(ProThreadedAlertService):
    """Triggers/resolves PagerDuty incidents via the generic events API,
    using the middleware alert uuid as the incident key."""

    title = "PagerDuty"

    schema = Dict(
        "pagerduty_attributes",
        Str("service_key", required=True, empty=False),
        Str("client_name", required=True, empty=False),
        strict=True,
    )

    _EVENTS_URL = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"

    def _post_event(self, alert, event_type, description):
        # Common payload for both trigger and resolve events.
        response = requests.post(
            self._EVENTS_URL,
            headers={"Content-type": "application/json"},
            data=json.dumps({
                "service_key": self.attributes["service_key"],
                "event_type": event_type,
                "description": description,
                "incident_key": alert.uuid,
                "client": self.attributes["client_name"],
            }),
            timeout=INTERNET_TIMEOUT,
        )
        response.raise_for_status()

    def create_alert(self, alert):
        self._post_event(alert, "trigger", ellipsis(html2text.html2text(alert.formatted), 1024))

    def delete_alert(self, alert):
        self._post_event(alert, "resolve", "")
| 1,665 | Python | .py | 42 | 29.428571 | 84 | 0.596535 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
import boto3
from middlewared.alert.base import ThreadedAlertService
from middlewared.schema import Dict, Password, Str
class AWSSNSAlertService(ThreadedAlertService):
    """Publishes alert digests to an AWS SNS topic."""

    title = "AWS SNS"

    schema = Dict(
        "awssns_attributes",
        Str("region", required=True, empty=False),
        Str("topic_arn", required=True, empty=False),
        Str("aws_access_key_id", required=True, empty=False),
        Password("aws_secret_access_key", required=True, empty=False),
        strict=True,
    )

    def send_sync(self, alerts, gone_alerts, new_alerts):
        sns = boto3.client(
            "sns",
            region_name=self.attributes["region"],
            aws_access_key_id=self.attributes["aws_access_key_id"],
            aws_secret_access_key=self.attributes["aws_secret_access_key"],
        )
        sns.publish(
            TopicArn=self.attributes["topic_arn"],
            Subject="Alerts",
            Message=self._format_alerts(alerts, gone_alerts, new_alerts),
        )
| 1,011 | Python | .py | 25 | 32 | 75 | 0.642857 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
# -*- coding=utf-8 -*-
import logging

logger = logging.getLogger(__name__)

__all__ = []  # intentionally empty: this package exposes no names via star-import
| 90 | Python | .py | 4 | 21 | 36 | 0.619048 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
import html
import json
import html2text
import requests
from middlewared.alert.base import ThreadedAlertService
from middlewared.schema import Dict, Str, List, Int
from middlewared.utils.network import INTERNET_TIMEOUT
class TelegramAlertService(ThreadedAlertService):
    """Sends alert digests to one or more Telegram chats via a bot."""

    title = "Telegram"

    schema = Dict(
        "telegram_attributes",
        Str("bot_token", required=True, empty=False),
        List("chat_ids", empty=False, items=[Int("chat_id")]),
        strict=True,
    )

    def send_sync(self, alerts, gone_alerts, new_alerts):
        token = self.attributes["bot_token"]
        # The message is identical for every chat and _format_alerts performs
        # middleware calls, so render it once instead of once per chat
        # (previously recomputed inside the loop).
        text = html.escape(html2text.html2text(self._format_alerts(alerts, gone_alerts, new_alerts)))
        for chat_id in self.attributes["chat_ids"]:
            r = requests.post(
                f"https://api.telegram.org/bot{token}/sendMessage",
                headers={"Content-type": "application/json"},
                data=json.dumps({
                    "chat_id": chat_id,
                    "text": text,
                    "parse_mode": "HTML",
                }),
                timeout=INTERNET_TIMEOUT,
            )
            r.raise_for_status()
| 1,178 | Python | .py | 30 | 29.8 | 115 | 0.611208 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
from influxdb import InfluxDBClient
from middlewared.alert.base import ThreadedAlertService
from middlewared.schema import Dict, Str
class InfluxDBAlertService(ThreadedAlertService):
    """Writes the current alert list as points into an InfluxDB series."""

    title = "InfluxDB"

    schema = Dict(
        "influxdb_attributes",
        Str("host", required=True, empty=False),
        Str("username", required=True, empty=False),
        Str("password", required=True, empty=False),
        Str("database", required=True, empty=False),
        Str("series_name", required=True, empty=False),
        strict=True,
    )

    def send_sync(self, alerts, gone_alerts, new_alerts):
        # Connects on the default InfluxDB port (8086).
        client = InfluxDBClient(
            self.attributes["host"],
            8086,
            self.attributes["username"],
            self.attributes["password"],
            self.attributes["database"],
        )

        points = []
        for alert in alerts:
            points.append({
                "measurement": self.attributes["series_name"],
                "tags": {},
                "time": alert.datetime.isoformat(),
                "fields": {
                    "formatted": alert.formatted,
                },
            })
        client.write_points(points)
| 1,132 | Python | .py | 28 | 29.821429 | 120 | 0.591447 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
import json
import requests
from middlewared.alert.base import ProThreadedAlertService
from middlewared.schema import Dict, Str
from middlewared.utils.network import INTERNET_TIMEOUT
class VictorOpsAlertService(ProThreadedAlertService):
    """Raises/resolves VictorOps incidents through the generic REST integration."""

    title = "VictorOps"

    schema = Dict(
        "victorops_attributes",
        Str("api_key", required=True, empty=False),
        Str("routing_key", required=True, empty=False),
        strict=True,
    )

    def _post(self, message_type, alert):
        # CRITICAL raises an incident; RECOVERY resolves it. Both carry the
        # same entity payload keyed by the middleware alert uuid.
        response = requests.post(
            f"https://alert.victorops.com/integrations/generic/20131114/alert/{self.attributes['api_key']}/"
            f"{self.attributes['routing_key']}",
            headers={"Content-type": "application/json"},
            data=json.dumps({
                "message_type": message_type,
                "entity_id": alert.uuid,
                "entity_display_name": alert.formatted,
                "state_message": alert.formatted,
            }),
            timeout=INTERNET_TIMEOUT,
        )
        response.raise_for_status()

    def create_alert(self, alert):
        self._post("CRITICAL", alert)

    def delete_alert(self, alert):
        self._post("RECOVERY", alert)
| 1,637 | Python | .py | 41 | 29.707317 | 108 | 0.598113 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
from middlewared.alert.base import AlertService
from middlewared.schema import Dict, Str
class MailAlertService(AlertService):
    """Sends alert digests by e-mail through the system mail configuration."""

    title = "Email"

    schema = Dict(
        "mail_attributes",
        Str("email", default=""),
        strict=True,
    )

    html = True

    async def send(self, alerts, gone_alerts, new_alerts):
        explicit = self.attributes["email"]
        if explicit:
            recipients = [explicit]
        else:
            # No explicit address: fall back to the local administrators.
            recipients = await self.middleware.call("mail.local_administrators_emails")
            if not recipients:
                self.logger.trace("No e-mail address configured for any of the local administrators, not sending email")
                return

        await self.middleware.call("mail.send", {
            "subject": "Alerts",
            "html": await self._format_alerts(alerts, gone_alerts, new_alerts),
            "to": recipients,
        })
| 889 | Python | .py | 23 | 29.565217 | 120 | 0.613953 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, AlertSource
from middlewared.utils.zfs import query_imported_fast_impl
class VolumeStatusAlertClass(AlertClass):
    """A storage pool is not healthy; the alert text lists the unhealthy devices."""
    category = AlertCategory.STORAGE
    level = AlertLevel.CRITICAL
    title = "Pool Status Is Not Healthy"
    text = "Pool %(volume)s state is %(state)s: %(status)s%(devices)s"
    proactive_support = True
class BootPoolStatusAlertClass(AlertClass):
    """The boot pool is not healthy."""
    category = AlertCategory.SYSTEM
    level = AlertLevel.CRITICAL
    title = "Boot Pool Is Not Healthy"
    text = "Boot pool status is %(status)s: %(status_detail)s."
    proactive_support = True
class VolumeStatusAlertSource(AlertSource):
    """Checks the health of the boot pool and all storage pools.

    The full pool inspection is skipped when the quick imported-pools state
    snapshot is unchanged since the previous run; in that case the previously
    computed alerts are returned from cache.
    """

    async def check(self):
        if not await self.enabled():
            return

        # Previous quick snapshot of imported pools (None on first run /
        # cache miss).
        try:
            states = await self.middleware.call("cache.get", "VolumeStatusAlertSource.pools_states")
        except KeyError:
            states = None

        current_states = await self.middleware.run_in_thread(query_imported_fast_impl)
        await self.middleware.call("cache.put", "VolumeStatusAlertSource.pools_states", current_states)

        # If nothing changed, try to reuse the cached alert list.
        alerts = None
        if current_states == states:
            try:
                alerts = await self.middleware.call("cache.get", "VolumeStatusAlerts")
            except KeyError:
                pass

        if alerts is None:
            # Alerts are cached as [class name, args] pairs (JSON-friendly)
            # and rehydrated into Alert objects at the end.
            alerts = []

            boot_pool = await self.middleware.call("boot.pool_name")
            for pool in await self.middleware.call("zfs.pool.query", [["id", "=", boot_pool]]):
                if not pool["healthy"]:
                    alerts.append([
                        "BootPoolStatusAlertClass",
                        {
                            "status": pool["status"],
                            "status_detail": pool["status_detail"],
                        },
                    ])

            for pool in await self.middleware.call("pool.query"):
                # A warning caused solely by disabled features is not alerted.
                if not pool["healthy"] or (pool["warning"] and pool["status_code"] != "FEAT_DISABLED"):
                    # Collect the non-ONLINE disks for the alert body.
                    bad_vdevs = []
                    if pool["topology"]:
                        for vdev in await self.middleware.call("pool.flatten_topology", pool["topology"]):
                            if vdev["type"] == "DISK" and vdev["status"] != "ONLINE":
                                name = vdev["guid"]
                                if vdev.get("unavail_disk"):
                                    # Prefer model/serial over the bare guid
                                    # when the unavailable disk is known.
                                    name = f'{vdev["unavail_disk"]["model"]} {vdev["unavail_disk"]["serial"]}'
                                bad_vdevs.append(f"Disk {name} is {vdev['status']}")
                    if bad_vdevs:
                        devices = (f"<br>The following devices are not healthy:"
                                   f"<ul><li>{'</li><li>'.join(bad_vdevs)}</li></ul>")
                    else:
                        devices = ""

                    alerts.append([
                        "VolumeStatusAlertClass",
                        {
                            "volume": pool["name"],
                            "state": pool["status"],
                            "status": pool["status_detail"],
                            "devices": devices,
                        }
                    ])

            await self.middleware.call("cache.put", "VolumeStatusAlerts", alerts)

        return [
            Alert(
                {
                    "BootPoolStatusAlertClass": BootPoolStatusAlertClass,
                    "VolumeStatusAlertClass": VolumeStatusAlertClass,
                }[alert[0]],
                alert[1]
            )
            for alert in alerts
        ]

    async def enabled(self):
        # On HA enterprise systems, only run on the active (or sole) node.
        if await self.middleware.call("system.is_enterprise"):
            status = await self.middleware.call("failover.status")
            return status in ("MASTER", "SINGLE")

        return True
| 3,908 | Python | .py | 82 | 31.658537 | 110 | 0.5164 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,139 | web_ui_bind_address.py | truenas_middleware/src/middlewared/middlewared/alert/source/web_ui_bind_address.py | from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, OneShotAlertClass
class WebUiBindAddressV2AlertClass(AlertClass, OneShotAlertClass):
    """One-shot warning raised when the Web UI cannot bind to its configured
    address(es) and falls back to the wildcard address.

    Fix: the title previously contained a Cyrillic capital Es (U+0421) in
    place of the Latin 'C' in "Could", which breaks text search and renders
    inconsistently in some fonts.
    """
    category = AlertCategory.SYSTEM
    level = AlertLevel.WARNING
    title = "The Web Interface Could Not Bind to Configured Address"
    text = "The Web interface could not bind to %(addresses)s. Using %(wildcard)s instead."
    async def create(self, args):
        # args presumably carries 'addresses', 'wildcard' and 'family' keys;
        # 'family' is what delete() filters on — confirm against callers.
        return Alert(self.__class__, args)
    async def delete(self, alerts, query):
        """Remove only the alerts whose address family matches `query`, keeping others."""
        return list(filter(
            lambda alert: alert.args["family"] != query,
            alerts
        ))
| 634 | Python | .py | 13 | 41.923077 | 98 | 0.708266 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,140 | ldap.py | truenas_middleware/src/middlewared/middlewared/alert/source/ldap.py | from datetime import timedelta
from middlewared.alert.base import AlertClass, AlertCategory, Alert, AlertLevel, AlertSource
from middlewared.alert.schedule import IntervalSchedule
from middlewared.plugins.directoryservices import DSStatus, DSType
from middlewared.utils.directoryservices.health import DSHealthObj, KRB5HealthError, LDAPHealthError
class LDAPBindAlertClass(AlertClass):
    """Warning raised when the LDAP directory-service bind is unhealthy."""
    category = AlertCategory.DIRECTORY_SERVICE
    level = AlertLevel.WARNING
    title = "LDAP Bind Is Not Healthy"
    text = "%(ldaperr)s."
class LDAPBindAlertSource(AlertSource):
    """Periodically verifies LDAP directory-service health, attempting
    automatic recovery before alerting."""
    schedule = IntervalSchedule(timedelta(minutes=10))
    run_on_backup_node = False
    async def check(self):
        """Run a health check when LDAP is the active directory service.

        Returns an LDAPBindAlertClass Alert only when a recoverable health
        error occurred and the recovery attempt itself failed; otherwise None.
        """
        if DSHealthObj.dstype is not DSType.LDAP:
            return None
        try:
            await self.middleware.call('directoryservices.health.check')
        except (LDAPHealthError, KRB5HealthError):
            # Known, potentially transient failures: try automatic recovery first.
            try:
                await self.middleware.call('directoryservices.health.recover')
            except Exception as recovery_error:
                # Recovery did not help — surface the failure to the user.
                return Alert(LDAPBindAlertClass, {'ldaperr': str(recovery_error)}, key=None)
        except Exception:
            # Anything else is unexpected; log it rather than alerting.
            self.logger.error("Unexpected error while performing health check.", exc_info=True)
        return None
| 1,465 | Python | .py | 32 | 36.1875 | 100 | 0.683731 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,141 | ssh_login_failures.py | truenas_middleware/src/middlewared/middlewared/alert/source/ssh_login_failures.py | from collections import deque
from datetime import datetime, timedelta
from systemd import journal
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, ThreadedAlertSource
class SSHLoginFailuresAlertClass(AlertClass):
    """Warning summarizing failed SSH password logins over the last 24 hours."""
    category = AlertCategory.SYSTEM
    level = AlertLevel.WARNING
    title = "SSH Login Failures"
    text = "%(count)d SSH login failures in the last 24 hours:\n%(failures)s"
class SSHLoginFailuresAlertSource(ThreadedAlertSource):
    """Scans the systemd journal for sshd password failures in the last 24 hours."""
    def check_sync(self):
        """Return an SSHLoginFailuresAlertClass Alert when failures were found, else None."""
        reader = journal.Reader()
        reader.add_match("SYSLOG_IDENTIFIER=sshd")
        reader.seek_realtime(datetime.now() - timedelta(days=1))
        total = 0
        recent = deque([], 4)  # only the four newest messages are shown verbatim
        for entry in reader:
            if entry["MESSAGE"].startswith("Failed password for"):
                total += 1
                stamp = entry['__REALTIME_TIMESTAMP'].strftime('%d %b %H:%M:%S')
                recent.append(f"{stamp}: {entry['MESSAGE']}")
        if total == 0:
            return None
        lines = list(recent)
        if total > len(recent):
            # Older failures are summarized rather than listed.
            lines.insert(0, f"... first {total - len(recent)} messages skipped ...")
        return Alert(
            SSHLoginFailuresAlertClass,
            {"count": total, "failures": "\n".join(lines)},
            key=list(recent),
        )
| 1,377 | Python | .py | 31 | 33.870968 | 115 | 0.601195 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,142 | failover.py | truenas_middleware/src/middlewared/middlewared/alert/source/failover.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import errno
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, AlertSource, UnavailableException
from middlewared.utils import ProductType
from middlewared.service_exception import CallError
class FailoverInterfaceNotFoundAlertClass(AlertClass):
    """Critical: the internal interface used for HA failover heartbeat is missing."""
    category = AlertCategory.HA
    level = AlertLevel.CRITICAL
    title = 'Failover Internal Interface Not Found'
    text = 'Failover internal interface not found. Contact support.'
    products = (ProductType.SCALE_ENTERPRISE,)
class TrueNASVersionsMismatchAlertClass(AlertClass):
    """Critical: the two HA controllers are running different TrueNAS versions."""
    category = AlertCategory.HA
    level = AlertLevel.CRITICAL
    title = 'TrueNAS Software Versions Must Match Between Storage Controllers'
    text = 'TrueNAS software versions must match between storage controllers.'
    products = (ProductType.SCALE_ENTERPRISE,)
class FailoverStatusCheckFailedAlertClass(AlertClass):
    """Critical: querying the peer controller's failover status failed."""
    category = AlertCategory.HA
    level = AlertLevel.CRITICAL
    title = 'Failed to Check Failover Status with the Other Controller'
    text = 'Failed to check failover status with the other controller: %s.'
    products = (ProductType.SCALE_ENTERPRISE,)
class FailoverFailedAlertClass(AlertClass):
    """Critical: failover itself failed; details are in /var/log/failover.log."""
    category = AlertCategory.HA
    level = AlertLevel.CRITICAL
    title = 'Failover Failed'
    text = 'Failover failed. Check /var/log/failover.log on both controllers.'
    products = (ProductType.SCALE_ENTERPRISE,)
class VRRPStatesDoNotAgreeAlertClass(AlertClass):
    """Critical: the VRRP state reported by each controller is inconsistent."""
    category = AlertCategory.HA
    level = AlertLevel.CRITICAL
    title = 'Controllers VRRP States Do Not Agree'
    text = 'Controllers VRRP states do not agree: %(error)s.'
    products = (ProductType.SCALE_ENTERPRISE,)
class FailoverAlertSource(AlertSource):
    """Top-level HA health check: verifies the internal link, matching software
    versions and agreeing VRRP states between the two storage controllers."""
    products = (ProductType.SCALE_ENTERPRISE,)
    failover_related = True
    run_on_backup_node = False
    async def check(self):
        """Return a list of failover-related Alerts (empty list when healthy)."""
        if not await self.middleware.call('failover.internal_interfaces'):
            return [Alert(FailoverInterfaceNotFoundAlertClass)]
        try:
            # If the peer is not ready yet, defer alerting until the next run.
            if not await self.middleware.call('failover.call_remote', 'system.ready'):
                raise UnavailableException()
            local_version = await self.middleware.call('system.version')
            remote_version = await self.middleware.call('failover.call_remote', 'system.version')
            if local_version != remote_version:
                return [Alert(TrueNASVersionsMismatchAlertClass)]
            local = await self.middleware.call('failover.vip.get_states')
            remote = await self.middleware.call('failover.call_remote', 'failover.vip.get_states')
            if err := await self.middleware.call('failover.vip.check_states', local, remote):
                return [Alert(VRRPStatesDoNotAgreeAlertClass, {'error': i}) for i in err]
        except CallError as e:
            # ECONNREFUSED just means the peer is down; anything else is a real
            # status-check failure worth surfacing.
            if e.errno != errno.ECONNREFUSED:
                return [Alert(FailoverStatusCheckFailedAlertClass, [str(e)])]
        if await self.middleware.call('failover.status') in ('ERROR', 'UNKNOWN'):
            return [Alert(FailoverFailedAlertClass)]
        return []
| 3,276 | Python | .py | 62 | 46.032258 | 114 | 0.73325 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,143 | web_ui_root_login.py | truenas_middleware/src/middlewared/middlewared/alert/source/web_ui_root_login.py | from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, OneShotAlertClass
class WebUiRootLoginAlertClass(AlertClass, OneShotAlertClass):
    """One-shot warning that root can still log in to the Web UI because no
    other user holds the Local Administrator privilege."""
    category = AlertCategory.SYSTEM
    level = AlertLevel.WARNING
    title = "Root User Can Still Log In To The Web UI"
    text = (
        "Root user has their password disabled, but as there are no other users granted with a privilege of Local "
        "Administrator, they can still log in to the Web UI. Please create a separate user for the administrative "
        "purposes in order to forbid root from logging in to the Web UI."
    )
    exclude_from_list = True
    async def create(self, args):
        """Create the one-shot alert with the given args."""
        return Alert(self.__class__, args)
    async def delete(self, alerts, query):
        """Deleting this alert always clears every instance (no query filtering)."""
        return []
| 781 | Python | .py | 15 | 45.933333 | 115 | 0.722733 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,144 | datasets.py | truenas_middleware/src/middlewared/middlewared/alert/source/datasets.py | from datetime import timedelta
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, AlertSource, Alert
from middlewared.alert.schedule import IntervalSchedule
class EncryptedDatasetAlertClass(AlertClass):
    """Warning listing unencrypted child datasets found inside encrypted datasets."""
    category = AlertCategory.SYSTEM
    level = AlertLevel.WARNING
    title = 'Unencrypted datasets detected within encrypted datasets'
    text = (
        'The following datasets are not encrypted but are within an encrypted dataset: %(datasets)r which is '
        'not supported behaviour and may lead to various issues.'
    )
class UnencryptedDatasetsAlertSource(AlertSource):
    """Every 12 hours, flags plaintext children living inside encrypted datasets."""
    schedule = IntervalSchedule(timedelta(hours=12))
    async def check(self):
        """Return an Alert listing offending child datasets, or None when clean."""
        offenders = []
        encrypted_parents = await self.middleware.call('pool.dataset.query', [['encrypted', '=', True]])
        for parent in encrypted_parents:
            for child in parent['children']:
                # The system-managed applications datasets are deliberately
                # exempt from this check.
                app_roots = (f'{child["pool"]}/ix-applications', f'{child["pool"]}/ix-apps')
                if child['name'] in app_roots:
                    continue
                if child['name'].startswith(tuple(f'{root}/' for root in app_roots)):
                    continue
                if not child['encrypted']:
                    offenders.append(child['name'])
        if not offenders:
            return None
        return Alert(EncryptedDatasetAlertClass, {'datasets': ', '.join(offenders)})
| 1,422 | Python | .py | 27 | 42.592593 | 110 | 0.654401 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,145 | iscsi.py | truenas_middleware/src/middlewared/middlewared/alert/source/iscsi.py | from datetime import timedelta
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, AlertSource
from middlewared.alert.schedule import IntervalSchedule
class ISCSIPortalIPAlertClass(AlertClass):
    """Warning raised when iSCSI portal addresses are not present on the system."""
    category = AlertCategory.SHARING
    level = AlertLevel.WARNING
    title = 'IP Addresses Bound to an iSCSI Portal Were Not Found'
    text = 'These IP addresses are bound to an iSCSI Portal but not found: %s.'
class ISCSIPortalIPAlertSource(AlertSource):
    """Hourly check that every IP referenced by iSCSI targets via portals
    actually exists on a local interface."""
    schedule = IntervalSchedule(timedelta(minutes=60))
    async def check(self):
        """Return an Alert naming missing portal IPs, or None."""
        try:
            started = await self.middleware.call('service.started', 'iscsitarget')
        except Exception:
            # during upgrade this crashed in `pystemd.dbusexc.DBusTimeoutError: [err -110]: b'Connection timed out'`
            # so don't pollute the webUI with tracebacks
            return
        else:
            if not started:
                return
        in_use_ips = {i['address'] for i in await self.middleware.call('interface.ip_in_use', {'any': True})}
        portals = {p['id']: p for p in await self.middleware.call('iscsi.portal.query')}
        # Collect portal listen addresses referenced by targets that are not
        # present on any local interface.
        ips = set()
        for target in await self.middleware.call('iscsi.target.query'):
            for group in target['groups']:
                ips.update(
                    map(
                        lambda ip: ip['ip'],
                        filter(lambda a: a['ip'] not in in_use_ips, portals[group['portal']]['listen'])
                    )
                )
        if ips and await self.middleware.call('iscsi.global.alua_enabled'):
            # When ALUA is enabled on HA, the STANDBY node will report the
            # virtual IPs as missing. Remove them if the corresponding
            # underlying IP is in use.
            choices = await self.middleware.call('iscsi.portal.listen_ip_choices')
            node = await self.middleware.call('failover.node')
            if node in ['A', 'B']:
                index = ['A', 'B'].index(node)
                # choices maps a VIP to "addrA/addrB"; pick this node's half.
                vips = {k: v.split('/')[index] for k, v in choices.items() if v.find('/') != -1}
                ok = {ip for ip in ips if ip in vips and vips[ip] in in_use_ips}
                ips -= ok
        if ips:
            return Alert(ISCSIPortalIPAlertClass, ', '.join(ips))
| 2,306 | Python | .py | 44 | 40.931818 | 116 | 0.606747 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,146 | failover_sync.py | truenas_middleware/src/middlewared/middlewared/alert/source/failover_sync.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from middlewared.alert.base import (
Alert, AlertClass, SimpleOneShotAlertClass, AlertCategory, AlertLevel, OneShotAlertClass
)
from middlewared.utils import ProductType
class FailoverSyncFailedAlertClass(AlertClass, SimpleOneShotAlertClass):
    """Critical one-shot alert: automatic config sync to the standby controller failed."""
    category = AlertCategory.HA
    level = AlertLevel.CRITICAL
    title = "Automatic Sync to Peer Failed"
    text = (
        "Tried for %(mins)d minutes to sync configuration information to "
        "the standby storage controller but failed. Use Sync to Peer on the "
        "System/Failover page to try and perform a manual sync."
    )
    products = (ProductType.SCALE_ENTERPRISE,)
    async def create(self, args):
        """Create the alert; only the 'mins' key of args is used in the message."""
        return Alert(FailoverSyncFailedAlertClass, {'mins': args['mins']})
    async def delete(self, alerts, query):
        """Deleting always clears every instance of this alert."""
        return []
class FailoverKeysSyncFailedAlertClass(AlertClass, SimpleOneShotAlertClass):
    """Critical one-shot alert: syncing encryption passphrases to the standby
    controller failed; requires manual intervention (not auto-deleted)."""
    deleted_automatically = False
    category = AlertCategory.HA
    level = AlertLevel.CRITICAL
    title = "Syncing Encryption Keys to Peer Failed"
    text = (
        "The automatic synchronization of encryption passphrases with the standby "
        "controller has failed. Please go to System > Failover and manually sync to peer."
    )
    products = (ProductType.SCALE_ENTERPRISE,)
class FailoverKMIPKeysSyncFailedAlertClass(AlertClass, OneShotAlertClass):
    """Critical one-shot alert: syncing KMIP keys to the standby controller
    failed; requires manual intervention (not auto-deleted)."""
    deleted_automatically = False
    category = AlertCategory.HA
    level = AlertLevel.CRITICAL
    title = "Syncing KMIP Keys to Peer Failed"
    text = (
        "The automatic synchronization of KMIP keys with the standby "
        "controller has failed due to %(error)s. Please go to System > Failover and manually sync to peer."
    )
    products = (ProductType.SCALE_ENTERPRISE,)
    async def create(self, args):
        """Create the alert; args must provide the 'error' key used in the message."""
        return Alert(FailoverKMIPKeysSyncFailedAlertClass, args)
    async def delete(self, alerts, query):
        """Deleting always clears every instance of this alert."""
        return []
| 2,082 | Python | .py | 46 | 39.891304 | 107 | 0.741473 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,147 | sata_dom_wear.py | truenas_middleware/src/middlewared/middlewared/alert/source/sata_dom_wear.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from datetime import timedelta
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, AlertSource, IntervalSchedule
from middlewared.utils import ProductType
class SATADOMWearWarningAlertClass(AlertClass):
    """Warning: boot SATA DOM has less than 20% estimated lifetime remaining."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.WARNING
    title = "SATA DOM Lifetime: Less Than 20% Left"
    text = "%(lifetime)d%% of lifetime left on SATA DOM %(disk)s."
    products = (ProductType.SCALE_ENTERPRISE,)
class SATADOMWearCriticalAlertClass(AlertClass):
    """Critical: boot SATA DOM has less than 10% estimated lifetime remaining."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.CRITICAL
    title = "SATA DOM Lifetime: Less Than 10% Left"
    text = "%(lifetime)d%% of lifetime left on SATA DOM %(disk)s."
    products = (ProductType.SCALE_ENTERPRISE,)
class SATADOMWearAlertSource(AlertSource):
    """Hourly check of remaining SATA DOM lifetime on M/Z-series boot devices."""
    schedule = IntervalSchedule(timedelta(hours=1))
    products = (ProductType.SCALE_ENTERPRISE,)
    async def check(self):
        """Return wear alerts for boot SATA DOMs at <=20% (warning) or <=10% (critical) lifetime."""
        dmi = await self.middleware.call("system.dmidecode_info")
        if not dmi["system-product-name"].startswith(("TRUENAS-M", "TRUENAS-Z")):
            return []
        # (threshold, alert class) pairs, most severe first.
        thresholds = (
            (0.1, SATADOMWearCriticalAlertClass),
            (0.2, SATADOMWearWarningAlertClass),
        )
        alerts = []
        for disk in await self.middleware.call("boot.get_disks"):
            if not disk.startswith("sda"):
                continue
            lifetime = await self.middleware.call("disk.sata_dom_lifetime_left", disk)
            if lifetime is None:
                continue
            for threshold, alert_class in thresholds:
                if lifetime <= threshold:
                    alerts.append(Alert(alert_class, {
                        "disk": disk,
                        # round the fractional lifetime to the nearest whole percent
                        "lifetime": int(lifetime * 100 + 0.5),
                    }))
                    break
        return alerts
| 2,038 | Python | .py | 43 | 37.651163 | 110 | 0.643469 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,148 | failover_interfaces.py | truenas_middleware/src/middlewared/middlewared/alert/source/failover_interfaces.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, AlertSource, Alert
from middlewared.utils import ProductType
class NoCriticalFailoverInterfaceFoundAlertClass(AlertClass):
    """Critical: no network interface is marked critical for failover."""
    category = AlertCategory.HA
    level = AlertLevel.CRITICAL
    title = 'At Least 1 Network Interface Is Required To Be Marked Critical For Failover'
    text = 'At least 1 network interface is required to be marked critical for failover.'
    products = (ProductType.SCALE_ENTERPRISE,)
class FailoverCriticalAlertSource(AlertSource):
    """Alerts when no network interface is marked critical for failover."""
    products = (ProductType.SCALE_ENTERPRISE,)
    failover_related = True
    run_on_backup_node = False
    async def check(self):
        """Return the missing-critical-interface alert, or an empty list."""
        critical_ifaces = await self.middleware.call('interface.query', [('failover_critical', '=', True)])
        if critical_ifaces:
            return []
        return [Alert(NoCriticalFailoverInterfaceFoundAlertClass)]
| 1,046 | Python | .py | 21 | 44.904762 | 97 | 0.76055 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,149 | proactive_support.py | truenas_middleware/src/middlewared/middlewared/alert/source/proactive_support.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, AlertSource
from middlewared.utils import ProductType
class ProactiveSupportAlertClass(AlertClass):
    """Warning that Proactive Support is not (fully) configured."""
    category = AlertCategory.SYSTEM
    level = AlertLevel.WARNING
    title = "Proactive Support Is Not Configured"
    text = "%s"
    products = (ProductType.SCALE_ENTERPRISE,)
class ProactiveSupportAlertSource(AlertSource):
    """Warns when Proactive Support is unconfigured or missing required fields."""
    products = (ProductType.SCALE_ENTERPRISE,)
    run_on_backup_node = False
    async def check(self):
        """Return a ProactiveSupportAlertClass Alert describing what to fill in, or None."""
        webui_page = 'System Settings->General->Support page'
        support = await self.middleware.call('support.config')
        available = await self.middleware.call('support.is_available')
        if available and support['enabled'] is None:
            return Alert(ProactiveSupportAlertClass, f'Proactive support is not configured. Review the {webui_page}.')
        if not support['enabled']:
            return None
        # Users who enabled the old "ix alert" feature may never have filled
        # the fields added later for Proactive Support.
        unfilled = [
            verbose_name
            for name, verbose_name in await self.middleware.call('support.fields')
            if not support[name]
        ]
        if unfilled:
            return Alert(
                ProactiveSupportAlertClass,
                f'Please complete these fields on the {webui_page}: {", ".join(unfilled)}'
            )
        return None
| 1,646 | Python | .py | 33 | 41 | 118 | 0.68162 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,150 | ipa.py | truenas_middleware/src/middlewared/middlewared/alert/source/ipa.py | import logging
from datetime import timedelta
from middlewared.alert.base import AlertClass, AlertCategory, Alert, AlertLevel, AlertSource, SimpleOneShotAlertClass
from middlewared.alert.schedule import IntervalSchedule
from middlewared.plugins.directoryservices import DSStatus, DSType
from middlewared.utils.directoryservices.health import DSHealthObj, IPAHealthError, KRB5HealthError
log = logging.getLogger("ipa_check_alertmod")
class IPADomainBindAlertClass(AlertClass):
    """Warning raised when the IPA domain connection is unhealthy."""
    category = AlertCategory.DIRECTORY_SERVICE
    level = AlertLevel.WARNING
    title = "IPA Domain Connection Is Not Healthy"
    text = "%(err)s."
class IPADomainBindAlertSource(AlertSource):
    """Periodic IPA domain health check with an automatic recovery attempt
    before alerting."""
    schedule = IntervalSchedule(timedelta(minutes=10))
    run_on_backup_node = False
    async def check(self):
        """Return an IPADomainBindAlertClass Alert when recovery fails, else None."""
        if DSHealthObj.dstype is not DSType.IPA:
            return
        # Don't alert while a join/leave is in flight — transient
        # unhealthiness is expected then.
        if DSHealthObj.status in (DSStatus.JOINING, DSStatus.LEAVING):
            return
        try:
            await self.middleware.call('directoryservices.health.check')
        except (KRB5HealthError, IPAHealthError):
            # this is potentially recoverable
            try:
                await self.middleware.call('directoryservices.health.recover')
            except Exception as e:
                # Recovery failed, generate an alert
                return Alert(
                    IPADomainBindAlertClass,
                    {'err': str(e)},
                    key=None
                )
        except Exception:
            # We shouldn't be raising other sorts of errors
            self.logger.error("Unexpected error while performing health check.", exc_info=True)
class IPALegacyConfigurationAlertClass(AlertClass, SimpleOneShotAlertClass):
    """Warning that the IPA join fell back to plain-LDAP compatibility mode."""
    category = AlertCategory.DIRECTORY_SERVICE
    level = AlertLevel.WARNING
    title = "IPA domain configuration is using LDAP compatibility"
    text = (
        "Attempt to fully join IPA domain failed. TrueNAS will continue to act as "
        "an IPA client but with diminished capabilities including lack of support "
        "for kerberos security for NFS and SMB protocols. %(errmsg)s"
    )
    async def delete(self, alerts, query):
        """Deleting always clears every instance of this alert."""
        return []
| 2,202 | Python | .py | 47 | 38.276596 | 117 | 0.702287 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,151 | smb.py | truenas_middleware/src/middlewared/middlewared/alert/source/smb.py | import time
from middlewared.alert.base import AlertClass, AlertCategory, Alert, AlertLevel, AlertSource
from middlewared.alert.schedule import CrontabSchedule
from middlewared.service_exception import ValidationErrors
def generate_alert_text(auth_log):
    """Summarize SMB audit authentication entries as human-readable strings.

    Entries are grouped by "<account> - <workstation-or-address>"; each group
    is rendered as "<client> at <address> (<count> times)", keeping the
    address of the first entry seen for that client.
    """
    summary = {}
    for entry in auth_log:
        account = entry["clientAccount"] or entry["becameAccount"]
        origin = entry["workstation"] or entry["address"]
        client = f"{account} - {origin}"
        if client in summary:
            summary[client][1] += 1
        else:
            summary[client] = [entry["address"], 1]
    return [
        f"{client} at {address} ({count} times)"
        for client, (address, count) in summary.items()
    ]
class SMBLegacyProtocolAlertClass(AlertClass):
    """Notice listing clients that established SMB1 sessions in the last 24 hours."""
    category = AlertCategory.SHARING
    level = AlertLevel.NOTICE
    title = "SMB1 connections to TrueNAS server have been performed in last 24 hours"
    text = "The following clients have established SMB1 sessions: %(err)s."
class NTLMv1AuthenticationAlertClass(AlertClass):
    """Warning listing clients that attempted NTLMv1 authentication in the last 24 hours."""
    category = AlertCategory.SHARING
    level = AlertLevel.WARNING
    title = "NTLMv1 authentication has been attempted in the last 24 hours"
    text = "The following clients have attempted NTLMv1 authentication: %(err)s"
class SMBPathAlertClass(AlertClass):
    """Critical alert describing SMB share path validation failures."""
    category = AlertCategory.SHARING
    level = AlertLevel.CRITICAL
    title = "SMB share path has unresolvable issues"
    text = "SMB shares have path-related configuration issues that may impact service stability: %(err)s"
class SMBLegacyProtocolAlertSource(AlertSource):
    """Daily scan of the SMB audit log for SMB1 sessions in the last 24 hours."""
    schedule = CrontabSchedule(hour=1)  # every 24 hours
    run_on_backup_node = False
    async def check(self):
        """Return a notice listing SMB1 clients, or None when there were none."""
        if not await self.middleware.call('service.started', 'cifs'):
            return
        now = time.time()
        # serviceDescription == 'SMB' appears to identify legacy SMB1 session
        # setups (as opposed to 'SMB2') — TODO confirm against the Samba audit schema.
        if not (auth_log := await self.middleware.call('audit.query', {
            'services': ['SMB'],
            'query-filters': [
                ['event', '=', 'AUTHENTICATION'],
                ['message_timestamp', '>', now - 86400],
                ['event_data.serviceDescription', '=', 'SMB']
            ],
            'query-options': {'select': [
                ['event_data.clientAccount', 'clientAccount'],
                ['event_data.becameAccount', 'becameAccount'],
                ['event_data.workstation', 'workstation'],
                'address'
            ]}
        })):
            return
        return Alert(
            SMBLegacyProtocolAlertClass,
            {'err': ', '.join(generate_alert_text(auth_log))},
            key=None
        )
class NTLMv1AuthenticationAlertSource(AlertSource):
    """Daily scan of the SMB audit log for NTLMv1 authentication attempts."""
    schedule = CrontabSchedule(hour=0)  # every 24 hours
    run_on_backup_node = False
    async def check(self):
        """Return a warning listing NTLMv1 clients, or None.

        Skipped entirely when the administrator has deliberately enabled
        NTLMv1 auth in the SMB configuration.
        """
        if not await self.middleware.call('service.started', 'cifs'):
            return
        smb_conf = await self.middleware.call('smb.config')
        if smb_conf['ntlmv1_auth']:
            return
        now = time.time()
        if not (auth_log := await self.middleware.call('audit.query', {
            'services': ['SMB'],
            'query-filters': [
                ['event', '=', 'AUTHENTICATION'],
                ['message_timestamp', '>', now - 86400],
                ['event_data.serviceDescription', '=', 'SMB'],
                ['event_data.passwordType', '=', 'NTLMv1']
            ],
            'query-options': {'select': [
                ['event_data.clientAccount', 'clientAccount'],
                ['event_data.becameAccount', 'becameAccount'],
                ['event_data.workstation', 'workstation'],
                'address'
            ]}
        })):
            return
        return Alert(
            NTLMv1AuthenticationAlertClass,
            {'err': ', '.join(generate_alert_text(auth_log))},
            key=None
        )
class SMBPathAlertSource(AlertSource):
    """Daily validation of the paths of enabled, unlocked SMB shares."""
    schedule = CrontabSchedule(hour=1)  # every 24 hours
    run_on_backup_node = False
    async def smb_path_alert_format(self, verrors):
        """Render collected validation errors as comma-joined '<share>: <message>' pairs."""
        return ', '.join(f'{e[0].split(":")[0]}: {e[1]}' for e in verrors)
    async def check(self):
        """Return an SMBPathAlertClass Alert when any share path fails validation, else None."""
        verrors = ValidationErrors()
        share_filters = [['enabled', '=', True], ['locked', '=', False]]
        for share in await self.middleware.call('sharing.smb.query', share_filters):
            try:
                await self.middleware.call(
                    'sharing.smb.validate_path_field',
                    share, f'{share["name"]}:', verrors
                )
            except Exception:
                # A crashing validator must not abort the whole alert run.
                self.middleware.logger.error('Failed to validate path field', exc_info=True)
        if not verrors:
            return None
        try:
            msg = await self.smb_path_alert_format(verrors)
        except Exception:
            self.middleware.logger.error('Failed to format error message', exc_info=True)
            return None
        return Alert(SMBPathAlertClass, {'err': msg}, key=None)
| 5,014 | Python | .py | 118 | 32.279661 | 119 | 0.5889 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,152 | legacy_mini_bmc.py | truenas_middleware/src/middlewared/middlewared/alert/source/legacy_mini_bmc.py | from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, AlertSource, Alert
from middlewared.utils import ProductType
URL = "https://www.truenas.com/docs/hardware/legacyhardware/miniseries/freenas-minis-2nd-gen/freenasminibmcwatchdog/"
class TrueNASMiniBMCAlertClass(AlertClass):
    """Critical: affected FreeNAS Mini boards need an IPMI firmware update."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.CRITICAL
    title = "Critical IPMI Firmware Update Available"
    text = (
        "A critical IPMI firmware update is available for this system. Please see "
        f"<a href=\"{URL}\" target=\"_blank\">"
        "ASRock Rack C2750D4I BMC Watchdog Issue</a> for details."
    )
    products = (ProductType.SCALE,)
class TrueNASMiniBMCAlertSource(AlertSource):
    """Checks legacy FreeNAS Mini (ASRock C2750D4I) boards for the BMC watchdog
    firmware issue."""
    products = (ProductType.SCALE,)
    async def check(self):
        """Alert when BMC firmware parses as major 0 with minor < 30 on affected boards."""
        dmi = await self.middleware.call("system.dmidecode_info")
        if "freenas" in dmi["system-product-name"].lower() and dmi["baseboard-product-name"] == "C2750D4I":
            if (fwver := (await self.middleware.call("ipmi.mc.info")).get("firmware_revision", None)):
                try:
                    fwver = [int(i) for i in fwver.split(".")]
                    # Only firmware 0.x with x < 30 is affected; bail out otherwise.
                    if len(fwver) < 2 or not (fwver[0] == 0 and fwver[1] < 30):
                        return
                except ValueError:
                    # Unparseable version string — assume not affected.
                    return
                return Alert(TrueNASMiniBMCAlertClass)
| 1,376 | Python | .py | 26 | 43.5 | 117 | 0.656739 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,153 | sensors.py | truenas_middleware/src/middlewared/middlewared/alert/source/sensors.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, AlertSource, Alert
from middlewared.utils import ProductType
class SensorAlertClass(AlertClass):
    """Critical: an IPMI sensor reading breached a lower or upper threshold."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.CRITICAL
    title = "Sensor Value Is Outside of Working Range"
    text = "Sensor %(name)s is %(relative)s %(level)s value: %(value)s %(event)s"
    products = (ProductType.SCALE_ENTERPRISE,)
class PowerSupplyAlertClass(AlertClass):
    """Critical: an IPMI power-supply sensor reported error events."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.CRITICAL
    title = "Power Supply Error"
    text = "%(psu)s is %(state)s showing: %(errors)s"
    products = (ProductType.SCALE_ENTERPRISE,)
class SensorsAlertSource(AlertSource):
    """Raises hardware alerts from IPMI sensor readings on R-series and
    M-series platforms."""
    async def should_alert(self):
        """Only R-series (by DMI product name) and M-series (ECHOWARP hardware)
        platforms are monitored."""
        if (await self.middleware.call('system.dmidecode_info'))['system-product-name'].startswith('TRUENAS-R'):
            # r-series
            return True
        elif await self.middleware.call('failover.hardware') == 'ECHOWARP':
            # m-series
            return True
        return False
    async def check(self):
        """Return Alerts for every sensor outside its nominal state."""
        alerts = []
        if not await self.should_alert():
            return alerts
        for sensor in await self.middleware.call('ipmi.sensors.query'):
            # Only non-nominal sensors with an actual reading are interesting.
            if sensor['state'] == 'Nominal' or sensor['reading'] == 'N/A':
                continue
            if sensor['type'] == 'Power Supply' and sensor['event']:
                alerts.append(Alert(
                    PowerSupplyAlertClass,
                    {'psu': sensor['name'], 'state': sensor['state'], 'errors': ', '.join(sensor['event'])}
                ))
            elif (alert := await self.produce_sensor_alert(sensor)) is not None:
                alerts.append(alert)
        return alerts
    async def produce_sensor_alert(self, sensor):
        """Return a SensorAlertClass Alert if the sensor reading breaches a
        lower or upper threshold, else None.

        Fix: the lower- and upper-threshold handling was two copy-pasted
        loops; they are folded into a single table-driven loop here, with
        identical ordering and results.
        """
        reading = sensor['reading']
        for prefix, relative, breached in (
            ('lower', 'below', lambda limit: reading < limit),
            ('upper', 'above', lambda limit: reading > limit),
        ):
            # Thresholds are checked most-severe first, matching the original order.
            for suffix in ('non-recoverable', 'critical', 'non-critical'):
                limit = sensor[f'{prefix}-{suffix}']
                if limit != 'N/A' and breached(limit):
                    return Alert(SensorAlertClass, {
                        'name': sensor['name'],
                        'relative': relative,
                        # only the "non-critical" threshold is merely "recommended"
                        'level': 'recommended' if suffix == 'non-critical' else 'critical',
                        'value': reading,
                        'event': ', '.join(sensor['event'])
                    })
        return None
| 3,081 | Python | .py | 65 | 35.476923 | 112 | 0.571952 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,154 | ipmi_sel.py | truenas_middleware/src/middlewared/middlewared/alert/source/ipmi_sel.py | from collections import defaultdict
from datetime import datetime, timedelta
from middlewared.alert.base import AlertClass, DismissableAlertClass, AlertCategory, AlertLevel, Alert, AlertSource
from middlewared.alert.schedule import IntervalSchedule
def remove_deasserted_records(records):
    """Drop SEL assertion records that were later deasserted, together with the
    deassertion records themselves.

    Records are matched on the (name, event) pair; a deassertion cancels every
    earlier, still-open assertion with the same pair. The input list is not
    modified; relative order of surviving records is preserved.
    """
    kept = list(records)
    open_assertions = defaultdict(lambda: defaultdict(set))
    for idx, record in enumerate(kept):
        pending = open_assertions[record["name"]][record["event"]]
        direction = record["event_direction"]
        if direction == "Assertion Event":
            pending.add(idx)
        elif direction == "Deassertion Event":
            for prior in pending:
                kept[prior] = None
            kept[idx] = None
            pending.clear()
    return [record for record in kept if record]
class IPMISELAlertClass(AlertClass, DismissableAlertClass):
    """Warning for an IPMI System Event Log entry; dismissing one alert also
    dismisses all older entries via a persisted timestamp watermark."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.WARNING
    title = "IPMI System Event"
    text = "Sensor: '%(name)s' had an '%(event_direction)s' (%(event)s)"
    async def dismiss(self, alerts, alert):
        """Dismiss `alert` and everything older, advancing the stored watermark."""
        datetimes = [a.datetime for a in alerts if a.datetime <= alert.datetime]
        if await self.middleware.call("keyvalue.has_key", IPMISELAlertSource.dismissed_datetime_kv_key):
            d = await self.middleware.call("keyvalue.get", IPMISELAlertSource.dismissed_datetime_kv_key)
            # Stored value may carry a timezone; comparisons below use naive datetimes.
            d = d.replace(tzinfo=None)
            datetimes.append(d)
        await self.middleware.call("keyvalue.set", IPMISELAlertSource.dismissed_datetime_kv_key, max(datetimes))
        return [a for a in alerts if a.datetime > alert.datetime]
class IPMISELSpaceLeftAlertClass(AlertClass):
    """Warning that the IPMI System Event Log is running out of space."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.WARNING
    title = "IPMI System Event Log Low Space Left"
    text = "IPMI System Event Log low space left: %(free)s (%(used)s)."
class IPMISELAlertSource(AlertSource):
schedule = IntervalSchedule(timedelta(minutes=5))
dismissed_datetime_kv_key = "alert:ipmi_sel:dismissed_datetime"
    async def get_sensor_values(self):
        """Return the static tables that drive SEL record filtering.

        Returns a 3-tuple:
          * sensor type prefixes that are always alert-worthy,
          * (type, event) pairs to additionally alert on,
          * (type, event) pairs to always ignore.
        """
        # https://github.com/openbmc/ipmitool/blob/master/include/ipmitool/ipmi_sel.h#L297
        sensor_types = (
            "Redundancy State",
            "Temperature",
            "Voltage",
            "Current",
            "Fan",
            "Physical Security",
            "Platform Security",
            "Processor",
            "Power Supply",
            "Memory",
            "System Firmware Error",
            "Critical Interrupt",
            "Management Subsystem Health",
            "Battery",
        )
        sensor_events_to_alert_on = (
            ("Power Unit", "Soft-power control failure"),
            ("Power Unit", "Failure detected"),
            ("Power Unit", "Predictive failure"),
            ("Event Logging Disabled", "Log full"),
            ("Event Logging Disabled", "Log almost full"),
            ("System Event", "Undetermined system hardware failure"),
            ("Cable/Interconnect", "Config Error"),
        )
        sensor_events_to_ignore = (
            ("Redundancy State", "Fully Redundant"),
            ("Processor", "Presence detected"),
            ("Power Supply", "Presence detected"),
            ("Power Supply", "Fully Redundant"),
        )
        return sensor_types, sensor_events_to_alert_on, sensor_events_to_ignore
async def produce_sel_elist_alerts(self):
stypes, do_alert, ignore = await self.get_sensor_values()
records = []
for i in (await (await self.middleware.call("ipmi.sel.elist")).wait()):
found_alert1 = i["type"].startswith(stypes)
found_alert2 = any(i["type"].startswith(s) and i["event"] == e for s, e in do_alert)
ignore_alert = any(i["type"].startswith(s) and i["event"] == e for s, e in ignore)
if (found_alert1 or found_alert2) and not ignore_alert:
try:
i.update({"datetime": datetime.strptime(f"{i['date']}{i['time']}", "%b-%d-%Y%H:%M:%S")})
except ValueError:
# no guarantee of the format that is used in the ipmi sel
continue
else:
records.append(i)
records = remove_deasserted_records(records)
alerts = []
if records:
if await self.middleware.call("keyvalue.has_key", self.dismissed_datetime_kv_key):
dismissed_datetime = (
(await self.middleware.call("keyvalue.get", self.dismissed_datetime_kv_key)).replace(tzinfo=None)
)
else:
# Prevent notifying about existing alerts on first install/upgrade
dismissed_datetime = max(record["datetime"] for record in records)
await self.middleware.call("keyvalue.set", self.dismissed_datetime_kv_key, dismissed_datetime)
alerts_by_key = {}
for record in sorted(
filter(lambda x: x["datetime"] > dismissed_datetime, records),
key=lambda x: x["datetime"],
):
record.pop("id")
dt = record.pop("datetime")
alert = Alert(
IPMISELAlertClass,
{"name": record["name"], "event_direction": record["event_direction"], "event": record["event"]},
key=[record, dt.isoformat()],
datetime=dt,
)
alerts_by_key[alert.key] = alert
alerts = list(alerts_by_key.values())
return alerts
async def produce_sel_low_space_alert(self):
info = (await (await self.middleware.call("ipmi.sel.info")).wait())
alloc_tot = alloc_us = None
if (free_bytes := info.get("free_space_remaining")) is not None:
free_bytes = free_bytes.split(" ", 1)[0]
if (alloc_tot := info.get("number_of_possible_allocation_units")) is not None:
if (alloc_us := info.get("allocation_unit_size")) is not None:
alloc_us = alloc_us.split(" ", 1)[0]
alert = None
upper_threshold = 90 # percent
if all((i is not None and i.isdigit()) for i in (free_bytes, alloc_tot, alloc_us)):
free_bytes = int(free_bytes)
total_bytes_avail = int(alloc_us) * int(alloc_tot)
used_bytes = total_bytes_avail - free_bytes
if (used_bytes / 100) > upper_threshold:
alert = Alert(
IPMISELSpaceLeftAlertClass,
{"free": f"{free_bytes} bytes free", "used": f"{used_bytes} bytes used"},
key=None,
)
return alert
async def check(self):
if not await self.middleware.call("truenas.is_ix_hardware"):
return
alerts = []
alerts.extend(await self.produce_sel_elist_alerts())
platform = await self.middleware.call('truenas.get_chassis_hardware')
if platform.startswith(('TRUENAS-F', 'TRUENAS-H', 'TRUENAS-R30')):
# the f, h and r30 platforms use a FIFO for sel so it will
# never "run out of space" since the newest log overwrites
# the oldest log
return alerts
if (low_space_alert := await self.produce_sel_low_space_alert()) is not None:
alerts.append(low_space_alert)
return alerts
| 7,425 | Python | .py | 149 | 38.073826 | 117 | 0.593875 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,155 | certificates.py | truenas_middleware/src/middlewared/middlewared/alert/source/certificates.py | from datetime import datetime
from middlewared.alert.base import AlertClass, SimpleOneShotAlertClass, AlertCategory, AlertLevel, Alert, AlertSource
from middlewared.alert.schedule import CrontabSchedule
from middlewared.utils.time_utils import utc_now
class CertificateIsExpiringAlertClass(AlertClass):
    """Notice: a certificate will expire within the reported number of days."""
    category = AlertCategory.CERTIFICATES
    level = AlertLevel.NOTICE
    title = "Certificate Is Expiring"
    text = "Certificate %(name)r is expiring within %(days)d days."
class CertificateIsExpiringSoonAlertClass(AlertClass):
    """Warning: a certificate is very close to expiration (2 days or fewer)."""
    category = AlertCategory.CERTIFICATES
    level = AlertLevel.WARNING
    title = "Certificate Is Expiring Soon"
    text = "Certificate %(name)r is expiring within %(days)d days."
class CertificateExpiredAlertClass(AlertClass):
    """Critical: a certificate's validity period has already ended."""
    category = AlertCategory.CERTIFICATES
    level = AlertLevel.CRITICAL
    title = "Certificate Has Expired"
    text = "Certificate %(name)r has expired."
class CertificateParsingFailedAlertClass(AlertClass):
    """Warning: a stored certificate/CA could not be parsed."""
    category = AlertCategory.CERTIFICATES
    level = AlertLevel.WARNING
    title = "Certificate Parsing Failed"
    text = "Failed to parse %(type)s %(name)r."
class CertificateRevokedAlertClass(AlertClass):
    """Critical: a certificate attached to a core service was revoked."""
    category = AlertCategory.CERTIFICATES
    level = AlertLevel.CRITICAL
    title = 'Certificate Revoked'
    text = '%(service)s %(type)s has been revoked. Please replace the certificate immediately.'
class WebUiCertificateSetupFailedAlertClass(AlertClass, SimpleOneShotAlertClass):
    """One-shot alert raised when nginx could not use the configured UI cert."""
    # this is consumed in nginx.conf in the etc plugin
    # you don't have to specify the `AlertClass` verbiage
    # of the class name when calling it
    category = AlertCategory.CERTIFICATES
    level = AlertLevel.CRITICAL
    title = "Web UI HTTPS Certificate Setup Failed"
    text = "Web UI HTTPS certificate setup failed."
class CertificateChecksAlertSource(AlertSource):
    """Daily check of certificates/CAs.

    Reports parse failures, impending or past expiration, and revocation of
    certificates attached to core services (FTP, Web UI, Syslog).
    """
    schedule = CrontabSchedule(hour=0)  # every 24 hours
    run_on_backup_node = False

    async def _get_service_certs(self):
        """Return the certificate ids currently assigned to core services,
        tagged with the service name for alert messages."""
        _type = 'certificate'
        service_certs = [
            {
                'id': (await self.middleware.call('ftp.config'))['ssltls_certificate'],
                'service': 'FTP',
                'type': _type,
            },
            {
                'id': (await self.middleware.call('system.general.config'))['ui_certificate']['id'],
                'service': 'Web UI',
                'type': _type,
            },
            {
                'id': (await self.middleware.call('system.advanced.config'))['syslog_tls_certificate'],
                'service': 'Syslog',
                'type': _type,
            },
        ]
        return service_certs

    async def check(self):
        alerts = []
        # system certs/cas
        certs = await self.middleware.call('certificate.query', [['certificate', '!=', None]])
        certs.extend(await self.middleware.call('certificateauthority.query'))
        # service certs/cas
        check_for_revocation = await self._get_service_certs()

        parsed = {}
        for cert in certs:
            # make the sure certs have been parsed correctly
            if not cert['parsed']:
                alerts.append(Alert(
                    CertificateParsingFailedAlertClass,
                    {"type": cert["cert_type"].capitalize(), "name": cert["name"]},
                ))
            else:
                # check the parsed certificate(s) for expiration.
                # BUGFIX: str.capitalize() produces 'Certificate' (first letter
                # upper, rest lower), so the old comparison against the
                # all-caps literal 'CERTIFICATE' was always False and the
                # expiration check below never ran. Compare case-insensitively.
                if cert['cert_type'].upper() == 'CERTIFICATE':
                    diff = (datetime.strptime(cert['until'], '%a %b %d %H:%M:%S %Y') - utc_now()).days
                    if diff < 10:
                        if diff >= 0:
                            alerts.append(Alert(
                                CertificateIsExpiringSoonAlertClass if diff <= 2 else CertificateIsExpiringAlertClass,
                                {'name': cert['name'], 'days': diff}, key=[cert['name']],
                            ))
                        else:
                            alerts.append(Alert(
                                CertificateExpiredAlertClass,
                                {'name': cert['name']}, key=[cert['name']]
                            ))

                parsed[cert['id']] = cert['revoked']

        # check the parsed certificate(s) for revocation
        for i in filter(lambda i: parsed.get(i['id']), check_for_revocation):
            alerts.append(Alert(
                CertificateRevokedAlertClass,
                {'service': i['service'], 'type': i['type']}
            ))

        return alerts
| 4,589 | Python | .py | 98 | 35.387755 | 118 | 0.604073 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,156 | admin_user.py | truenas_middleware/src/middlewared/middlewared/alert/source/admin_user.py | from datetime import timedelta
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, AlertSource, IntervalSchedule
from middlewared.plugins.account import ADMIN_UID
from middlewared.service_exception import MatchNotFound
class AdminUserIsOverriddenAlertClass(AlertClass):
    """Warning: NSS (e.g. LDAP/AD) shadows the local admin account's data."""
    category = AlertCategory.SYSTEM
    level = AlertLevel.WARNING
    title = "Admin User Is Overridden"
    text = "NSS query results are different for the locally set up `%(username)s` user."
class AdminUserAlertSource(AlertSource):
    """Daily check that NSS resolves the built-in admin UID to the same
    account data that is configured locally.

    There are ways (unsupported) via auxiliary parameters that users can
    intentionally enable mappings for LDAP and AD that go below UID 1000,
    which would shadow the local admin account.
    """
    schedule = IntervalSchedule(timedelta(hours=24))

    async def check(self):
        try:
            admin = await self.middleware.call(
                "datastore.query",
                "account.bsdusers",
                [["uid", "=", ADMIN_UID]],
                {"get": True, "prefix": "bsdusr_"},
            )
        except MatchNotFound:
            # No local account at the admin UID; nothing to compare.
            return

        user_obj = await self.middleware.call("user.get_user_obj", {"uid": ADMIN_UID})
        # Pairs of (NSS-resolved value, locally configured value) that must agree.
        expected = (
            (user_obj["pw_name"], admin["username"]),
            (user_obj["pw_gid"], admin["group"]["bsdgrp_gid"]),
            (user_obj["pw_gecos"], admin["full_name"]),
            (user_obj["pw_dir"], admin["home"]),
        )
        if any(nss_value != local_value for nss_value, local_value in expected):
            return Alert(AdminUserIsOverriddenAlertClass, {"username": admin["username"]})
| 1,569 | Python | .py | 35 | 35.228571 | 118 | 0.623607 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,157 | lagg.py | truenas_middleware/src/middlewared/middlewared/alert/source/lagg.py | from collections import defaultdict
try:
import netif
except ImportError:
netif = None
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, ThreadedAlertSource
class LAGGInactivePortsAlertClass(AlertClass):
    """Critical: some member ports of a LACP LAGG are not ACTIVE."""
    category = AlertCategory.NETWORK
    level = AlertLevel.CRITICAL
    title = "Ports are Not ACTIVE on LAGG Interface"
    text = "These ports are not ACTIVE on LAGG interface %(name)s: %(ports)s. Please check cabling and switch."
class LAGGNoActivePortsAlertClass(AlertClass):
    """Critical: a FAILOVER LAGG does not have exactly one ACTIVE port."""
    category = AlertCategory.NETWORK
    level = AlertLevel.CRITICAL
    title = "There are No ACTIVE Ports on LAGG Interface"
    text = "There are no ACTIVE ports on LAGG interface %(name)s. Please check cabling and switch."
class LAGGStatus(ThreadedAlertSource):
    # Consecutive bad-state observations per interface name. Class-level so
    # the streak persists across successive check runs of this source.
    count = defaultdict(int)

    def check_sync(self):
        """Check every LAGG interface for inactive member ports.

        An alert is only raised after more than two consecutive bad
        observations (see #24160) to avoid flapping on transient states.
        """
        if not netif:
            # netif failed to import on this platform; nothing to check.
            return []

        alerts = []
        for iface in netif.list_interfaces().values():
            if not isinstance(iface, netif.LaggInterface):
                continue

            active = []
            inactive = []
            for name, flags in iface.ports:
                if netif.LaggPortFlags.ACTIVE not in flags:
                    inactive.append(name)
                else:
                    active.append(name)

            # ports that are not ACTIVE and LACP
            if inactive and iface.protocol == netif.AggregationProtocol.LACP:
                # Only alert if this has happened more than twice, see #24160
                self.count[iface.name] += 1
                if self.count[iface.name] > 2:
                    alerts.append(Alert(
                        LAGGInactivePortsAlertClass,
                        {"name": iface.name, "ports": ", ".join(inactive)},
                    ))
            # For FAILOVER protocol we should have one ACTIVE port
            elif len(active) != 1 and iface.protocol == netif.AggregationProtocol.FAILOVER:
                # Only alert if this has happened more than twice, see #24160
                self.count[iface.name] += 1
                if self.count[iface.name] > 2:
                    alerts.append(Alert(
                        LAGGNoActivePortsAlertClass,
                        {"name": iface.name},
                    ))
            else:
                # Healthy (or not applicable): reset the streak counter.
                self.count[iface.name] = 0

        return alerts
| 2,380 | Python | .py | 53 | 32.830189 | 111 | 0.598446 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,158 | usb_storage.py | truenas_middleware/src/middlewared/middlewared/alert/source/usb_storage.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from pathlib import Path
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, ThreadedAlertSource
from middlewared.utils import ProductType
class USBStorageAlertClass(AlertClass):
    """Critical: a USB storage device is attached to an Enterprise system."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.CRITICAL
    title = 'A USB Storage Device Has Been Connected to This System'
    text = ('A USB storage device %r has been connected to this system. Please remove that USB device to '
            'prevent problems with system boot or HA failover.')

    products = (ProductType.SCALE_ENTERPRISE,)
    proactive_support = True
class USBStorageAlertSource(ThreadedAlertSource):
    """Raises one alert per USB storage device found in /dev/disk/by-id."""
    products = (ProductType.SCALE_ENTERPRISE,)

    def check_sync(self):
        alerts = []
        by_id = Path('/dev/disk/by-id')
        # BUGFIX: Path.iterdir() raises FileNotFoundError when the directory
        # does not exist (e.g. no disks enumerated by udev yet), which would
        # crash the whole alert check. Treat that as "no USB devices".
        if not by_id.is_dir():
            return alerts
        for usb in filter(lambda x: x.stem.startswith('usb-'), by_id.iterdir()):
            # Skip partition symlinks so each device is only reported once.
            if '-part' not in usb.as_posix():
                alerts.append(Alert(USBStorageAlertClass, usb.resolve().as_posix()))
        return alerts
| 1,163 | Python | .py | 23 | 45.086957 | 106 | 0.734334 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,159 | vmware_snapshot.py | truenas_middleware/src/middlewared/middlewared/alert/source/vmware_snapshot.py | from middlewared.alert.base import AlertClass, OneShotAlertClass, AlertCategory, AlertLevel, Alert
class VMWareSnapshotCreateFailedAlertClass(AlertClass, OneShotAlertClass):
    """One-shot alert raised when creating a VMWare snapshot fails."""
    category = AlertCategory.TASKS
    level = AlertLevel.WARNING
    title = "Creating VMWare Snapshot Failed"
    text = "Creating VMWare snapshot %(snapshot)s of VM %(vm)s at %(hostname)s failed: %(error)s."

    deleted_automatically = False

    async def create(self, args):
        return Alert(VMWareSnapshotCreateFailedAlertClass, args)

    async def delete(self, alerts, query):
        # Intentionally a no-op: these alerts are never deleted in bulk and
        # must be dismissed by the user.
        pass
class VMWareSnapshotDeleteFailedAlertClass(AlertClass, OneShotAlertClass):
    """One-shot alert raised when deleting a VMWare snapshot fails."""
    category = AlertCategory.TASKS
    level = AlertLevel.WARNING
    title = "VMWare Snapshot Deletion Failed"
    text = "Deletion of VMWare snapshot %(snapshot)s of VM %(vm)s on %(hostname)s failed: %(error)s."

    deleted_automatically = False

    async def create(self, args):
        return Alert(VMWareSnapshotDeleteFailedAlertClass, args)

    async def delete(self, alerts, query):
        # Intentionally a no-op: these alerts are never deleted in bulk and
        # must be dismissed by the user.
        pass
| 1,062 | Python | .py | 21 | 44.904762 | 101 | 0.753637 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,160 | kmip.py | truenas_middleware/src/middlewared/middlewared/alert/source/kmip.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from middlewared.alert.base import AlertClass, SimpleOneShotAlertClass, AlertCategory, AlertLevel
class KMIPConnectionFailedAlertClass(AlertClass, SimpleOneShotAlertClass):
    """Critical one-shot: cannot reach the configured KMIP server."""
    category = AlertCategory.KMIP
    level = AlertLevel.CRITICAL
    title = 'Failed to Communicate with KMIP Server'
    text = 'Failed to connect to %(server)s KMIP Server: %(error)s.'

    deleted_automatically = False
class KMIPZFSDatasetsSyncFailureAlertClass(AlertClass, SimpleOneShotAlertClass):
    """Critical one-shot: ZFS dataset keys could not be synced to KMIP."""
    category = AlertCategory.KMIP
    level = AlertLevel.CRITICAL
    title = 'Failed to Sync ZFS Keys with KMIP Server'
    text = 'Failed to sync %(datasets)s dataset(s) keys with KMIP Server.'

    deleted_automatically = False
class KMIPSEDDisksSyncFailureAlertClass(AlertClass, SimpleOneShotAlertClass):
    """Critical one-shot: SED disk keys could not be synced to KMIP."""
    category = AlertCategory.KMIP
    level = AlertLevel.CRITICAL
    title = 'Failed to Sync SED Keys with KMIP Server'
    text = 'Failed to sync %(disks)s disk(s) keys with KMIP Server.'

    deleted_automatically = False
class KMIPSEDGlobalPasswordSyncFailureAlertClass(AlertClass, SimpleOneShotAlertClass):
    """Critical one-shot: the SED global password could not be synced to KMIP."""
    category = AlertCategory.KMIP
    level = AlertLevel.CRITICAL
    title = 'Failed to Sync SED Global Password with KMIP Server'
    text = 'Failed to sync SED global password with KMIP Server.'

    deleted_automatically = False
| 1,506 | Python | .py | 29 | 47.724138 | 97 | 0.784836 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,161 | deprecated_service.py | truenas_middleware/src/middlewared/middlewared/alert/source/deprecated_service.py | from middlewared.alert.base import Alert, AlertClass, SimpleOneShotAlertClass, AlertCategory, AlertLevel
URL = "https://www.truenas.com/docs/scale/scaledeprecatedfeatures/"
class DeprecatedServiceAlertClass(AlertClass, SimpleOneShotAlertClass):
    """One-shot warning per deprecated-but-running service, keyed by name."""
    category = AlertCategory.SHARING
    level = AlertLevel.WARNING
    title = "Deprecated Service is Running"
    text = (
        "The following active service is deprecated %(service)s. "
        "This service is scheduled for removal in a future version of SCALE. "
        f"Before upgrading, please check {URL} to confirm whether or not "
        "the service has been removed in the next version of SCALE."
    )

    async def create(self, args):
        # Key on the service name so each deprecated service gets one alert.
        return Alert(DeprecatedServiceAlertClass, args, key=args['service'])

    async def delete(self, alerts, query):
        # Drop the alert whose service name matches *query*; keep the rest.
        return [existing for existing in alerts if existing.args['service'] != query]
| 941 | Python | .py | 19 | 42.578947 | 104 | 0.712105 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,162 | failover_nics.py | truenas_middleware/src/middlewared/middlewared/alert/source/failover_nics.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, AlertSource
from middlewared.utils import ProductType
TITLE = 'Missing Network Interface On '
TEXT = 'Network interfaces %(interfaces)s present on '
class NetworkCardsMismatchOnStandbyNodeAlertClass(AlertClass):
    """Critical: NICs present on the active node are missing on standby."""
    category = AlertCategory.HA
    level = AlertLevel.CRITICAL
    title = TITLE + 'Standby Storage Controller'
    text = TEXT + 'active storage controller but missing on standby storage controller.'

    products = (ProductType.SCALE_ENTERPRISE,)
class NetworkCardsMismatchOnActiveNodeAlertClass(AlertClass):
    """Critical: NICs present on the standby node are missing on active."""
    category = AlertCategory.HA
    level = AlertLevel.CRITICAL
    title = TITLE + 'Active Storage Controller'
    text = TEXT + 'standby storage controller but missing on active storage controller.'

    products = (ProductType.SCALE_ENTERPRISE,)
class FailoverNetworkCardsAlertSource(AlertSource):
    """Alerts when the two HA controllers do not have matching NICs."""
    products = (ProductType.SCALE_ENTERPRISE,)
    failover_related = True
    run_on_backup_node = False

    async def check(self):
        mismatch = await self.middleware.call('failover.mismatch_nics')
        if mismatch:
            missing_remote = mismatch['missing_remote']
            if missing_remote:
                return [Alert(
                    NetworkCardsMismatchOnStandbyNodeAlertClass, {'interfaces': ', '.join(missing_remote)}
                )]
            missing_local = mismatch['missing_local']
            if missing_local:
                return [Alert(
                    NetworkCardsMismatchOnActiveNodeAlertClass, {'interfaces': ', '.join(missing_local)}
                )]
        return []
| 1,740 | Python | .py | 35 | 42.742857 | 120 | 0.71875 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,163 | jbof.py | truenas_middleware/src/middlewared/middlewared/alert/source/jbof.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import datetime
from middlewared.alert.base import Alert, AlertCategory, AlertClass, AlertLevel, AlertSource, SimpleOneShotAlertClass
from middlewared.alert.schedule import IntervalSchedule
from middlewared.plugins.enclosure_.enums import ElementStatus, ElementType
from middlewared.utils import ProductType
class JBOFTearDownFailureAlertClass(AlertClass, SimpleOneShotAlertClass):
    """One-shot warning: JBOF removal left state that needs a reboot."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.WARNING
    title = "JBOF removal may require reboot"
    text = "Incomplete removal of JBOF requires a reboot to cleanup."

    async def delete(self, alerts, query):
        # Dismissing always clears every instance of this alert.
        return []
class JBOFRedfishCommAlertClass(AlertClass):
    """Critical: a configured JBOF's Redfish interface is unreachable."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.CRITICAL
    title = 'Failed to Communicate with JBOF'
    text = 'JBOF: "%(desc)s" (%(ip1)s/%(ip2)s) Failed to communicate with redfish interface.'

    products = (ProductType.SCALE_ENTERPRISE,)
class JBOFInvalidDataAlertClass(AlertClass):
    """Critical: a JBOF returned malformed enclosure data for some keys."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.CRITICAL
    title = 'JBOF has invalid data'
    text = 'JBOF: "%(desc)s" (%(ip1)s/%(ip2)s) does not provide valid data for: %(keys)s'

    products = (ProductType.SCALE_ENTERPRISE,)
class JBOFElementWarningAlertClass(AlertClass):
    """Warning: a JBOF enclosure element reports a noncritical status."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.WARNING
    title = 'JBOF element non-critical'
    text = 'JBOF: "%(desc)s" (%(ip1)s/%(ip2)s) %(etype)s %(key)s is noncritical: %(value)s'

    products = (ProductType.SCALE_ENTERPRISE,)
class JBOFElementCriticalAlertClass(AlertClass):
    """Critical: a JBOF enclosure element reports a critical status."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.CRITICAL
    title = 'JBOF element critical'
    text = 'JBOF: "%(desc)s" (%(ip1)s/%(ip2)s) %(etype)s %(key)s is critical: %(value)s'

    products = (ProductType.SCALE_ENTERPRISE,)
class JBOFAlertSource(AlertSource):
    """Every 5 minutes, cross-check configured JBOFs against enclosure data
    and raise alerts for unreachable JBOFs, malformed data, or degraded
    enclosure elements."""
    products = (ProductType.SCALE_ENTERPRISE,)
    run_on_backup_node = False
    schedule = IntervalSchedule(datetime.timedelta(minutes=5))

    def produce_alerts(self, jbof_config, jbof_data, alerts):
        # Appends to *alerts* in place; returns nothing.
        for jbof in jbof_config:
            # Common identification fields merged into every alert's args.
            jbof_id_dict = {'desc': jbof['description'], 'ip1': jbof['mgmt_ip1'], 'ip2': jbof['mgmt_ip2']}
            data = None
            # First check that each configured JBOF has enclosure data returned.
            for _data in jbof_data:
                if jbof['uuid'] == _data['id']:
                    # Matched UUID
                    data = _data
                    break
            if data is None:
                # Did not find data for this JBOF
                alerts.append(Alert(JBOFRedfishCommAlertClass, jbof_id_dict))
                continue

            # Make sure the data seems to have the correct shape
            elements = data.get('elements')
            if not elements or not isinstance(elements, dict):
                alerts.append(Alert(JBOFInvalidDataAlertClass, {'keys': 'elements'} | jbof_id_dict))
                continue

            bad_keys = []
            for etype in ElementType:
                if edata := elements.get(etype.value):
                    if not isinstance(edata, dict):
                        # Element category present but not a mapping: report it.
                        bad_keys.append(etype.value)
                        continue
                    for key, v in edata.items():
                        # Map element status onto alert severity.
                        match v['status']:
                            case ElementStatus.NONCRITICAL.value:
                                alerts.append(Alert(JBOFElementWarningAlertClass, {'etype': etype.value,
                                                                                  'key': key,
                                                                                  'value': v.get('value', '')
                                                                                  } | jbof_id_dict))
                            case ElementStatus.CRITICAL.value:
                                alerts.append(Alert(JBOFElementCriticalAlertClass, {'etype': etype.value,
                                                                                    'key': key,
                                                                                    'value': v.get('value', '')
                                                                                    } | jbof_id_dict))
                            case _:
                                # OK / informational statuses produce no alert.
                                pass
            if bad_keys:
                alerts.append(Alert(JBOFInvalidDataAlertClass, {'keys': ','.join(bad_keys)} | jbof_id_dict))

    async def check(self):
        alerts = []
        jbof_config = await self.middleware.call('jbof.query')
        if jbof_config:
            jbof_data = await self.middleware.call('enclosure2.map_jbof')
            self.produce_alerts(jbof_config, jbof_data, alerts)
        return alerts
| 4,944 | Python | .py | 92 | 38.315217 | 117 | 0.570393 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,164 | license_status.py | truenas_middleware/src/middlewared/middlewared/alert/source/license_status.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from collections import defaultdict
from datetime import date, timedelta
import textwrap
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, ThreadedAlertSource
from middlewared.alert.schedule import IntervalSchedule
from middlewared.utils import ProductType
from middlewared.utils.license import LICENSE_ADDHW_MAPPING
class LicenseAlertClass(AlertClass):
    """Critical: generic license problem (mismatch, unsupported hardware...)."""
    category = AlertCategory.SYSTEM
    level = AlertLevel.CRITICAL
    title = "TrueNAS License Issue"
    text = "%s"

    products = (ProductType.SCALE_ENTERPRISE,)
class LicenseIsExpiringAlertClass(AlertClass):
    """Warning: the support contract approaches its expiration date."""
    category = AlertCategory.SYSTEM
    level = AlertLevel.WARNING
    title = "TrueNAS License Is Expiring"
    text = "%s"

    products = (ProductType.SCALE_ENTERPRISE,)
class LicenseHasExpiredAlertClass(AlertClass):
    """Critical: the support contract has already expired."""
    category = AlertCategory.SYSTEM
    level = AlertLevel.CRITICAL
    title = "TrueNAS License Has Expired"
    text = "%s"

    products = (ProductType.SCALE_ENTERPRISE,)
class LicenseStatusAlertSource(ThreadedAlertSource):
    """Daily license sanity checks.

    Verifies the serial numbers of both HA nodes against the license,
    compares the detected chassis model to the licensed model, validates
    licensed expansion-shelf counts, and emits staged support-contract
    expiration reminders (with renewal e-mails cc'd to iXsystems).
    """
    products = (ProductType.SCALE_ENTERPRISE,)
    run_on_backup_node = False
    schedule = IntervalSchedule(timedelta(hours=24))

    def check_sync(self):
        alerts = []
        local_license = self.middleware.call_sync('system.license')
        if local_license is None:
            return Alert(LicenseAlertClass, "Your TrueNAS has no license, contact support.")

        # check if this node's system serial matches the serial in the license
        local_serial = self.middleware.call_sync('system.dmidecode_info')['system-serial-number']
        if local_serial not in (local_license['system_serial'], local_license['system_serial_ha']):
            alerts.append(Alert(LicenseAlertClass, 'System serial does not match license.'))

        standby_license = standby_serial = None
        try:
            if local_license['system_serial_ha']:
                standby_license = self.middleware.call_sync('failover.call_remote', 'system.license')
                standby_serial = self.middleware.call_sync(
                    'failover.call_remote', 'system.dmidecode_info')['system-serial-number']
        except Exception:
            # Standby node may be down or unreachable; skip its checks.
            pass

        if standby_license and standby_serial is not None:
            # check if the remote node's system serial matches the serial in the license
            if standby_serial not in (standby_license['system_serial'], standby_license['system_serial_ha']):
                alerts.append(Alert(LicenseAlertClass, 'System serial of standby node does not match license.',))

        model = self.middleware.call_sync('truenas.get_chassis_hardware').removeprefix('TRUENAS-').split('-')[0]
        if model == 'UNKNOWN':
            alerts.append(Alert(LicenseAlertClass, 'TrueNAS is running on unsupported hardware.'))
        else:
            # f-series has 2 license models, e.g. F60 and F60-NR (NR is
            # single-node only); both correspond to a chassis detected as "F60".
            # BUGFIX: the previous `any((...))` also compared the detected
            # model against the *full* license model unconditionally, so a
            # valid "F60-NR" license on an F60 chassis always raised a false
            # mismatch alert. `startswith` also avoids an IndexError on an
            # empty license model.
            if local_license['model'].startswith('F'):
                licensed_model = local_license['model'].split('-')[0]
            else:
                licensed_model = local_license['model']
            if model != licensed_model:
                alerts.append(Alert(
                    LicenseAlertClass,
                    (
                        f'Your license was issued for model {local_license["model"]!r} '
                        f'but the system was detected as model {model!r}'
                    )
                ))

        # Count attached (non-controller) expansion shelves by model.
        enc_nums = defaultdict(lambda: 0)
        for enc in filter(lambda x: not x['controller'], self.middleware.call_sync('enclosure2.query')):
            enc_nums[enc['model']] += 1
        if local_license['addhw']:
            for quantity, code in local_license['addhw']:
                if code not in LICENSE_ADDHW_MAPPING:
                    self.middleware.logger.warning('Unknown additional hardware code %d', code)
                    continue
                name = LICENSE_ADDHW_MAPPING[code]
                if name == 'ES60':
                    continue
                if enc_nums[name] != quantity:
                    alerts.append(Alert(
                        LicenseAlertClass,
                        (
                            'License expects %(license)s units of %(name)s Expansion shelf but found %(found)s.' % {
                                'license': quantity,
                                'name': name,
                                'found': enc_nums[name]
                            }
                        )
                    ))
        elif enc_nums:
            alerts.append(Alert(
                LicenseAlertClass,
                'Unlicensed Expansion shelf detected. This system is not licensed for additional expansion shelves.'
            ))

        # Contract-expiration reminders: fire at the first (smallest) window
        # that already contains the contract end date, then stop.
        for days in [0, 14, 30, 90, 180]:
            if local_license['contract_end'] <= date.today() + timedelta(days=days):
                serial_numbers = ", ".join(list(filter(None, [local_license['system_serial'],
                                                              local_license['system_serial_ha']])))
                contract_start = local_license['contract_start'].strftime("%B %-d, %Y")
                contract_expiration = local_license['contract_end'].strftime("%B %-d, %Y")
                contract_type = local_license['contract_type'].lower()
                customer_name = local_license['customer_name']
                if days == 0:
                    alert_klass = LicenseHasExpiredAlertClass
                    alert_text = textwrap.dedent("""\
                        SUPPORT CONTRACT EXPIRATION. To reactivate and continue to receive technical support and
                        assistance, contact iXsystems @ telephone: 1-855-473-7449
                    """)
                    subject = "Your TrueNAS support contract has expired"
                    opening = textwrap.dedent("""\
                        As of today, your support contract has ended. You will no longer be eligible for technical
                        support and assistance for your TrueNAS system.
                    """)
                    encouraging = textwrap.dedent("""\
                        It is still not too late to renew your contract but you must do so as soon as possible by
                        contacting your authorized TrueNAS Reseller or iXsystems (sales@iXsystems.com) today to avoid
                        additional costs and lapsed-contract fees.
                    """)
                else:
                    alert_klass = LicenseIsExpiringAlertClass
                    alert_text = textwrap.dedent(f"""\
                        RENEW YOUR SUPPORT contract. To continue to receive technical support and assistance without
                        any service interruptions, please renew your support contract by {contract_expiration}.
                    """)
                    days_left = (local_license['contract_end'] - date.today()).days
                    subject = f"Your TrueNAS support contract will expire in {days_left} days"
                    if days == 14:
                        opening = textwrap.dedent(f"""\
                            This is the final reminder regarding the impending expiration of your TrueNAS
                            {contract_type} support contract. As of today, it is set to expire in 2 weeks.
                        """)
                        encouraging = textwrap.dedent("""\
                            We encourage you to urgently contact your authorized TrueNAS Reseller or iXsystems
                            (sales@iXsystems.com) directly to renew your contract before expiration so that you continue
                            to enjoy the peace of mind and benefits that come with our support contracts.
                        """)
                    else:
                        opening = textwrap.dedent(f"""\
                            Your TrueNAS {contract_type} support contract will expire in {days_left} days.
                            When that happens, technical support and assistance for this particular TrueNAS storage
                            array will no longer be available. Please review the wide array of services that are
                            available to you as an active support contract customer at:
                            https://www.ixsystems.com/support/ and click on the “TrueNAS Arrays” tab.
                        """)
                        encouraging = textwrap.dedent("""\
                            We encourage you to contact your authorized TrueNAS Reseller or iXsystems directly
                            (sales@iXsystems.com) to renew your contract before expiration. Doing so ensures that
                            you continue to enjoy the peace of mind and benefits that come with support coverage.
                        """)

                alerts.append(Alert(
                    alert_klass,
                    alert_text,
                    mail={
                        "cc": ["support-renewal@ixsystems.com"],
                        "subject": subject,
                        "text": textwrap.dedent("""\
                            Hello, {customer_name}
                            {opening}
                            Product: {chassis_hardware}
                            Serial Numbers: {serial_numbers}
                            Support Contract Start Date: {contract_start}
                            Support Contract Expiration Date: {contract_expiration}
                            {encouraging}
                            If the contract expires, you will still be able to access your TrueNAS systems. However,
                            you will no longer be eligible for support from iXsystems. If you choose to renew your
                            support contract after it has expired, there are additional costs associated with
                            contract reactivation and lapsed-contract fees.
                            Sincerely,
                            iXsystems
                            Web: support.iXsystems.com
                            Email: support@iXsystems.com
                            Telephone: 1-855-473-7449
                        """).format(**{
                            "customer_name": customer_name,
                            "opening": opening,
                            "chassis_hardware": model,
                            "serial_numbers": serial_numbers,
                            "contract_start": contract_start,
                            "contract_expiration": contract_expiration,
                            "encouraging": encouraging,
                        })
                    },
                ))
                break

        return alerts
| 10,885 | Python | .py | 187 | 40.160428 | 120 | 0.55617 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,165 | auth.py | truenas_middleware/src/middlewared/middlewared/alert/source/auth.py | from middlewared.alert.base import Alert, AlertCategory, AlertClass, AlertLevel, AlertSource
from middlewared.alert.schedule import CrontabSchedule
from middlewared.utils import ProductType
from middlewared.utils.audit import UNAUTHENTICATED
from time import time
class AdminSessionAlertClass(AlertClass):
    """Warning: default admin accounts authenticated to the UI/API recently."""
    category = AlertCategory.SYSTEM
    level = AlertLevel.WARNING
    title = "Administrator account activity"
    text = (
        "The root or default system administrator account was used to authenticate "
        "to the UI / API %(count)d times in the last 24 hours:\n%(sessions)s.\n"
        "To improve security, create one or more administrator accounts (see "
        "[documentation](https://www.truenas.com/docs/scale/scaletutorials/credentials/adminroles/)) "
        "with unique usernames and passwords and disable password access for default "
        "administrator accounts (**root**, **admin**, or **truenas_admin**)."
    )
class APIFailedLoginAlertClass(AlertClass):
    """Warning: failed API authentication attempts in the last 24 hours."""
    category = AlertCategory.SYSTEM
    level = AlertLevel.WARNING
    title = "API Login Failures"
    text = (
        "%(count)d API login failures in the last 24 hours:\n%(sessions)s"
    )
def audit_entry_to_msg(entry):
    """Format one audit-log entry as a compact parenthesized summary."""
    username = entry["username"]
    session_id = entry["session"]
    address = entry["address"]
    return f'(username={username},session_id={session_id},address={address})'
class AdminSessionAlertSource(AlertSource):
    """Daily summary of successful root/default-admin UI/API logins."""
    schedule = CrontabSchedule(hour=1)  # every 24 hours
    run_on_backup_node = True
    products = (ProductType.SCALE_ENTERPRISE,)

    async def check(self):
        cutoff = int(time()) - 86400
        admin_logins = await self.middleware.call('audit.query', {
            'services': ['MIDDLEWARE'],
            'query-filters': [
                ['message_timestamp', '>', cutoff],
                ['event', '=', 'AUTHENTICATION'],
                ['username', 'in', ['root', 'admin', 'truenas_admin']],
                ['success', '=', True]
            ],
            'query-options': {
                'select': [
                    'message_timestamp',
                    'event',
                    'session',
                    'username',
                    'address',
                    'success'
                ]
            }
        })
        if not admin_logins:
            return

        sessions = ','.join(audit_entry_to_msg(entry) for entry in admin_logins)
        return Alert(
            AdminSessionAlertClass,
            {'count': len(admin_logins), 'sessions': sessions},
            key=None,
        )
class APIFailedLoginAlertSource(AlertSource):
    """Daily summary of failed (but not unauthenticated) API logins."""
    schedule = CrontabSchedule(hour=1)  # every 24 hours
    run_on_backup_node = True

    async def check(self):
        cutoff = int(time()) - 86400
        auth_failures = await self.middleware.call('audit.query', {
            'services': ['MIDDLEWARE'],
            'query-filters': [
                ['message_timestamp', '>', cutoff],
                ['event', '=', 'AUTHENTICATION'],
                ['username', '!=', UNAUTHENTICATED],
                ['success', '=', False]
            ],
            'query-options': {
                'select': [
                    'message_timestamp',
                    'event',
                    'session',
                    'username',
                    'address',
                    'success'
                ]
            }
        })
        if not auth_failures:
            return

        sessions = ','.join(audit_entry_to_msg(entry) for entry in auth_failures)
        return Alert(
            APIFailedLoginAlertClass,
            {'count': len(auth_failures), 'sessions': sessions},
            key=None,
        )
| 3,686 | Python | .py | 95 | 28.010526 | 102 | 0.55801 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,166 | applications.py | truenas_middleware/src/middlewared/middlewared/alert/source/applications.py | from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, OneShotAlertClass
class FailuresInAppMigrationAlertClass(AlertClass, OneShotAlertClass):
    """One-shot error alert raised when one or more apps fail to migrate."""

    deleted_automatically = False
    category = AlertCategory.APPLICATIONS
    level = AlertLevel.ERROR
    title = 'App(s) failed to migrate'
    text = 'App(s) failed to migrate, please check /var/log/app_migrations.log for more details.'

    async def create(self, args):
        alert = Alert(FailuresInAppMigrationAlertClass, args)
        return alert

    async def delete(self, alerts, query):
        # This alert is only ever dismissed manually.
        return list()
class ApplicationsConfigurationFailedAlertClass(AlertClass, OneShotAlertClass):
    """One-shot critical alert: docker could not be configured for apps."""

    deleted_automatically = False
    level = AlertLevel.CRITICAL
    category = AlertCategory.APPLICATIONS
    title = 'Unable to Configure Applications'
    text = 'Failed to configure docker for Applications: %(error)s'

    async def create(self, args):
        alert = Alert(ApplicationsConfigurationFailedAlertClass, args)
        return alert

    async def delete(self, alerts, query):
        # This alert is only ever dismissed manually.
        return list()
class ApplicationsStartFailedAlertClass(AlertClass, OneShotAlertClass):
    """One-shot critical alert: docker could not be started for apps."""

    deleted_automatically = False
    level = AlertLevel.CRITICAL
    category = AlertCategory.APPLICATIONS
    title = 'Unable to Start Applications'
    text = 'Failed to start docker for Applications: %(error)s'

    async def create(self, args):
        alert = Alert(ApplicationsStartFailedAlertClass, args)
        return alert

    async def delete(self, alerts, query):
        # This alert is only ever dismissed manually.
        return list()
class AppUpdateAlertClass(AlertClass, OneShotAlertClass):
    """One-shot informational alert raised per app with an available update."""
    deleted_automatically = False
    category = AlertCategory.APPLICATIONS
    level = AlertLevel.INFO
    title = 'Application Update Available'
    text = 'An update is available for "%(name)s" application.'

    async def create(self, args):
        # NOTE(review): `_key=` differs from the `key=` keyword used by the
        # other one-shot alert classes in this package — confirm Alert()
        # supports `_key` here and that it is intentional.
        return Alert(AppUpdateAlertClass, args, _key=args['name'])

    async def delete(self, alerts, query):
        # Keep every alert whose key does not match the dismissed app's name.
        return list(filter(
            lambda alert: alert.key != query,
            alerts
        ))
| 2,015 | Python | .py | 44 | 39.659091 | 98 | 0.738351 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,167 | pools.py | truenas_middleware/src/middlewared/middlewared/alert/source/pools.py | from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, OneShotAlertClass
class PoolUSBDisksAlertClass(AlertClass, OneShotAlertClass):
    """One-shot warning raised when a pool is built on USB-attached disks."""
    category = AlertCategory.STORAGE
    level = AlertLevel.WARNING
    title = 'Pool consuming USB disks'
    text = '%(pool)r is consuming USB devices %(disks)r which is not recommended.'

    async def get_usb_disks(self, pool_name, disks):
        """Return the pool's member disks that `disks` reports as USB-attached.

        `disks` maps disk name -> disk info (must include a 'bus' key).
        Best-effort: any failure (e.g. pool not imported) yields an empty list.
        """
        try:
            # Simplified from a redundant list-comprehension-over-filter to a
            # single comprehension; behavior is unchanged.
            return [
                disk for disk in await self.middleware.call('zfs.pool.get_disks', pool_name)
                if disk in disks and disks[disk]['bus'] == 'USB'
            ]
        except Exception:
            return []

    async def create(self, args):
        """Raise the alert (keyed by pool name) if the pool uses USB disks."""
        pool_name = args['pool_name']
        disks = args['disks']
        if usb_disks := await self.get_usb_disks(pool_name, disks):
            return Alert(PoolUSBDisksAlertClass, {'pool': pool_name, 'disks': ', '.join(usb_disks)}, key=pool_name)

    async def delete(self, alerts, query):
        """Drop alerts for the pool named by `query`; keep the rest."""
        return list(filter(lambda x: x.args['pool'] != query, alerts))
class PoolUpgradedAlertClass(AlertClass, OneShotAlertClass):
    """One-shot notice that a pool can be upgraded to newer ZFS feature flags."""
    category = AlertCategory.STORAGE
    level = AlertLevel.NOTICE
    title = "New Feature Flags Are Available for Pool"
    text = (
        "New ZFS version or feature flags are available for pool '%s'. Upgrading pools is a one-time process that can "
        "prevent rolling the system back to an earlier TrueNAS version. It is recommended to read the TrueNAS release "
        "notes and confirm you need the new ZFS feature flags before upgrading a pool."
    )

    async def is_upgraded(self, pool_name):
        """Return True/False from ZFS, or None when the pool cannot be queried."""
        try:
            return await self.middleware.call('zfs.pool.is_upgraded', pool_name)
        except Exception:
            return None

    async def create(self, args):
        pool = args['pool_name']
        boot_pool = await self.middleware.call('boot.pool_name')
        if pool == boot_pool:
            # The boot pool intentionally keeps certain features disabled.
            return

        upgraded = await self.is_upgraded(pool)
        if upgraded is False:
            # Alert only on an explicit False — None means the pool was not found.
            return Alert(PoolUpgradedAlertClass, pool, key=pool)

    async def delete(self, alerts, query):
        return [a for a in alerts if a.args != query]
| 2,374 | Python | .py | 46 | 42.891304 | 119 | 0.661631 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,168 | enclosure_status.py | truenas_middleware/src/middlewared/middlewared/alert/source/enclosure_status.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from dataclasses import dataclass
from middlewared.utils import ProductType
from middlewared.alert.base import (
AlertClass,
AlertCategory,
AlertLevel,
Alert,
AlertSource,
)
@dataclass(slots=True, frozen=True, kw_only=True)
class BadElement:
    """Immutable identity of one unhealthy enclosure element, compared
    probe-to-probe by EnclosureStatusAlertSource to count consecutive
    unhealthy readings."""
    enc_name: str
    descriptor: str
    status: str
    value: str
    value_raw: int

    def args(self):
        # Positional args for EnclosureUnhealthyAlertClass.text, in %s order.
        return [self.enc_name, self.descriptor, self.status, self.value, self.value_raw]
class EnclosureUnhealthyAlertClass(AlertClass):
    """Critical alert for an enclosure element reporting an unhealthy status."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.CRITICAL
    title = "Enclosure Status Is Not Healthy"
    # %s order matches BadElement.args(): name, descriptor, status, value, raw value.
    text = 'Enclosure (%s): Element "%s" is reporting a status of "%s" with a value of "%s". (raw value "%s")'
    products = (ProductType.SCALE_ENTERPRISE,)
class EnclosureHealthyAlertClass(AlertClass):
    """Informational alert confirming an enclosure reports no bad elements."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.INFO
    title = "Enclosure Status Is Healthy"
    text = "Enclosure (%s) is healthy."
    products = (ProductType.SCALE_ENTERPRISE,)
class EnclosureStatusAlertSource(AlertSource):
    """Polls enclosure2.query and raises an alert for any enclosure element
    that has reported an unhealthy status for 5 consecutive probes; every
    enclosure with no such element gets a "healthy" alert instead.
    """
    products = (ProductType.SCALE_ENTERPRISE,)
    failover_related = True
    run_on_backup_node = False
    # element statuses considered unhealthy
    bad = ("critical", "noncritical", "unknown", "unrecoverable")
    # (BadElement, consecutive-probe count) pairs carried over from the previous run
    bad_elements: list = list()

    async def should_report(self, ele_type: str, ele_value: dict):
        """We only want to raise an alert for an element's status
        if it meets a certain criteria"""
        if not ele_value["value"]:
            # if we don't have an actual value, it doesn't matter what status
            # the element reports; skip it so we don't alarm the user needlessly
            return False
        return ele_value["status"].lower() in self.bad

    async def check(self):
        # Map enclosure name -> alert args so that an unhealthy enclosure can
        # be removed from the "healthy" set by name. (Previously this was a
        # list of one-element lists, so remove(enc_name) never matched and
        # unhealthy enclosures were also reported as healthy.)
        healthy = {}
        bad_elements = []
        for enc in await self.middleware.call("enclosure2.query"):
            healthy[enc["name"]] = [f"{enc['name']} (id: {enc['id']})"]
            # don't care about disk slots; pop with default so a payload
            # without that key can't raise KeyError
            enc["elements"].pop("Array Device Slot", None)
            for element_type, element_values in enc["elements"].items():
                for ele_value in element_values.values():
                    if not await self.should_report(element_type, ele_value):
                        continue

                    current = BadElement(
                        enc_name=enc["name"],
                        descriptor=ele_value["descriptor"],
                        status=ele_value["status"],
                        value=ele_value["value"],
                        value_raw=ele_value["value_raw"],
                    )
                    for previous, count in self.bad_elements:
                        if previous == current:
                            bad_elements.append((current, count + 1))
                            break
                    else:
                        bad_elements.append((current, 1))

        self.bad_elements = bad_elements

        alerts = []
        for current, count in bad_elements:
            # We only report unhealthy enclosure elements if they were
            # unhealthy 5 probes in a row (1 probe = 1 minute)
            if count >= 5:
                healthy.pop(current.enc_name, None)
                alerts.append(
                    Alert(EnclosureUnhealthyAlertClass, args=current.args())
                )

        for args in healthy.values():
            alerts.append(Alert(EnclosureHealthyAlertClass, args=args))

        return alerts
| 4,002 | Python | .py | 89 | 33.786517 | 110 | 0.604108 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,169 | active_directory.py | truenas_middleware/src/middlewared/middlewared/alert/source/active_directory.py | from datetime import timedelta
import errno
import logging
from middlewared.alert.base import AlertClass, AlertCategory, Alert, AlertLevel, AlertSource
from middlewared.alert.schedule import CrontabSchedule, IntervalSchedule
from middlewared.plugins.directoryservices import DSStatus
from middlewared.service_exception import CallError
from middlewared.utils.directoryservices.constants import DSType
from middlewared.utils.directoryservices.health import DSHealthObj, ADHealthError, KRB5HealthError
log = logging.getLogger("activedirectory_check_alertmod")
class ActiveDirectoryDomainBindAlertClass(AlertClass):
    """Warns that the AD domain bind health check failed and recovery did not succeed."""
    category = AlertCategory.DIRECTORY_SERVICE
    level = AlertLevel.WARNING
    title = "Active Directory Bind Is Not Healthy"
    # %(wberr)s is the stringified recovery failure.
    text = "%(wberr)s."
class ActiveDirectoryDomainHealthAlertClass(AlertClass):
    """Warns that validation of the configured AD domain failed."""
    category = AlertCategory.DIRECTORY_SERVICE
    level = AlertLevel.WARNING
    title = "Active Directory Domain Validation Failed"
    text = "Domain validation failed with error: %(verrs)s"
class ActiveDirectoryDomainHealthAlertSource(AlertSource):
    """Once a day, validate that the configured AD domain's nameservers check out."""
    schedule = CrontabSchedule(hour=1)
    run_on_backup_node = False

    async def check(self):
        if DSHealthObj.dstype is not DSType.AD:
            return

        config = await self.middleware.call("activedirectory.config")
        try:
            await self.middleware.call(
                "activedirectory.check_nameservers", config["domainname"], config["site"]
            )
        except CallError as e:
            return Alert(
                ActiveDirectoryDomainHealthAlertClass,
                {'verrs': e.errmsg},
                key=None,
            )
class ActiveDirectoryDomainBindAlertSource(AlertSource):
    """Every 10 minutes, verify the AD bind health and attempt automatic recovery."""
    schedule = IntervalSchedule(timedelta(minutes=10))
    run_on_backup_node = False

    async def check(self):
        if DSHealthObj.dstype is not DSType.AD:
            return

        if DSHealthObj.status in (DSStatus.JOINING, DSStatus.LEAVING):
            # Mid join/leave is a transient state; skip the health check.
            return

        try:
            await self.middleware.call('directoryservices.health.check')
        except (KRB5HealthError, ADHealthError):
            # this is potentially recoverable
            try:
                await self.middleware.call('directoryservices.health.recover')
            except Exception as e:
                # Recovery failed, generate an alert
                return Alert(
                    ActiveDirectoryDomainBindAlertClass,
                    {'wberr': str(e)},
                    key=None
                )
        except Exception:
            # We shouldn't be raising other sorts of errors.
            # Fix: use the module-level logger defined at the top of this file;
            # the previous `self.logger` is not assigned anywhere in this module.
            log.error("Unexpected error while performing health check.", exc_info=True)
| 2,681 | Python | .py | 59 | 36.661017 | 109 | 0.70234 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,170 | replication.py | truenas_middleware/src/middlewared/middlewared/alert/source/replication.py | from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, AlertSource
class SnapshotFailedAlertClass(AlertClass):
    """Critical alert: a periodic snapshot task ended in the ERROR state."""
    category = AlertCategory.TASKS
    level = AlertLevel.CRITICAL
    title = "Snapshot Task Failed"
    text = "Snapshot Task For Dataset \"%(name)s\" failed: %(message)s."
class ReplicationSuccessAlertClass(AlertClass):
    """Informational alert: a replication task finished successfully."""
    category = AlertCategory.TASKS
    level = AlertLevel.INFO
    title = "Replication Succeeded"
    text = "Replication \"%(name)s\" succeeded."
class ReplicationFailedAlertClass(AlertClass):
    """Critical alert: a replication task ended in the ERROR state."""
    category = AlertCategory.TASKS
    level = AlertLevel.CRITICAL
    title = "Replication Failed"
    text = "Replication \"%(name)s\" failed: %(message)s."
class ReplicationAlertSource(AlertSource):
    """Surface the most recent state of enabled snapshot and replication tasks
    as alerts, keyed on the task id plus the state's timestamp."""

    async def check(self):
        alerts = []

        for task in await self.middleware.call("pool.snapshottask.query", [["enabled", "=", True]]):
            state = task["state"]
            if state["state"] == "ERROR":
                alerts.append(Alert(
                    SnapshotFailedAlertClass,
                    {
                        "name": task["dataset"],
                        "message": state["error"],
                    },
                    key=[task["id"], state["datetime"].isoformat()],
                    datetime=state["datetime"],
                ))

        for replication in await self.middleware.call("replication.query", [["enabled", "=", True]]):
            state = replication["state"]
            if state["state"] == "FINISHED":
                alerts.append(Alert(
                    ReplicationSuccessAlertClass,
                    {
                        "name": replication["name"],
                    },
                    key=[replication["id"], state["datetime"].isoformat()],
                    datetime=state["datetime"],
                ))
            if state["state"] == "ERROR":
                alerts.append(Alert(
                    ReplicationFailedAlertClass,
                    {
                        "name": replication["name"],
                        "message": state["error"],
                    },
                    key=[replication["id"], state["datetime"].isoformat()],
                    datetime=state["datetime"],
                ))

        return alerts
| 2,637 | Python | .py | 57 | 30.421053 | 108 | 0.512451 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,171 | api_key.py | truenas_middleware/src/middlewared/middlewared/alert/source/api_key.py | import json
from middlewared.alert.base import Alert, AlertClass, SimpleOneShotAlertClass, AlertCategory, AlertLevel
class ApiKeyRevokedAlertClass(AlertClass, SimpleOneShotAlertClass):
    """One-shot warning raised per revoked API key; cleared once the key
    is renewed or deleted."""
    category = AlertCategory.SYSTEM
    level = AlertLevel.WARNING
    title = "API Key Revoked"
    # Fix: "renwed" -> "renewed" in the user-facing message.
    text = (
        "%(key_name)s: API key has been revoked and must either be renewed or deleted. "
        "Once the maintenance is complete, API client configuration must be updated to "
        "use the renewed API key."
    )

    async def create(self, args):
        # Key each alert on the API key's name so each revoked key gets its own alert.
        return Alert(ApiKeyRevokedAlertClass, args, key=args['key_name'])

    async def delete(self, alerts, key_name_set):
        """Return the alerts that remain after removing those whose key name
        is in `key_name_set`.

        Fix: the previous condition was inverted — it kept exactly the alerts
        that were named for deletion, so dismissals never took effect.
        """
        remaining = []
        for alert in alerts:
            # alert.key is the json.dumps()'d key passed to create()
            if json.loads(alert.key) in key_name_set:
                continue
            remaining.append(alert)
        return remaining
| 874 | Python | .py | 20 | 36.15 | 104 | 0.690673 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,172 | snapshot_count.py | truenas_middleware/src/middlewared/middlewared/alert/source/snapshot_count.py | from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, AlertSource
from middlewared.alert.schedule import CrontabSchedule
from middlewared.utils.path import FSLocation, path_location
class SnapshotTotalCountAlertClass(AlertClass):
    """Warns that the system-wide snapshot count exceeds the recommended maximum."""
    category = AlertCategory.STORAGE
    level = AlertLevel.WARNING
    title = "Too Many Snapshots Exist"
    text = (
        "Your system has more snapshots (%(count)d) than recommended (%(max)d). Performance or functionality "
        "might degrade."
    )
class SnapshotCountAlertClass(AlertClass):
    """Warns that a single SMB-shared dataset has too many snapshots."""
    category = AlertCategory.STORAGE
    level = AlertLevel.WARNING
    title = "Too Many Snapshots Exist For Dataset"
    text = (
        "SMB share %(dataset)s has more snapshots (%(count)d) than recommended (%(max)d). File Explorer may not "
        "display all snapshots in the Previous Versions tab."
    )
class SnapshotCountAlertSource(AlertSource):
    """Nightly check that snapshot counts stay within the recommended limits,
    both per SMB-shared dataset and system-wide."""
    schedule = CrontabSchedule(hour=1)
    run_on_backup_node = False

    async def _check_total(self, snapshot_counts: dict[str, int]) -> list[Alert]:
        """Return an `Alert` if the total number of snapshots exceeds the limit."""
        limit = await self.middleware.call("pool.snapshottask.max_total_count")
        total = sum(snapshot_counts.values())
        if total <= limit:
            return []
        return [Alert(
            SnapshotTotalCountAlertClass,
            {"count": total, "max": limit},
            key=None,
        )]

    async def _check_smb(self, snapshot_counts: dict[str, int]) -> list[Alert]:
        """Return an `Alert` for every locally-backed SMB share whose dataset
        has more snapshots than recommended."""
        limit = await self.middleware.call("pool.snapshottask.max_count")
        alerts = []
        for share in await self.middleware.call("sharing.smb.query"):
            if path_location(share["path"]) != FSLocation.LOCAL:
                continue
            dataset = share["path"].removeprefix("/mnt/")
            count = snapshot_counts.get(dataset, 0)
            if count <= limit:
                continue
            alerts.append(Alert(
                SnapshotCountAlertClass,
                {"dataset": dataset, "count": count, "max": limit},
                key=dataset,
            ))
        return alerts

    async def check(self):
        counts = await self.middleware.call("zfs.snapshot.count")
        return await self._check_smb(counts) + await self._check_total(counts)
| 2,566 | Python | .py | 54 | 38.277778 | 113 | 0.646259 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,173 | scheduled_reboot.py | truenas_middleware/src/middlewared/middlewared/alert/source/scheduled_reboot.py | from middlewared.alert.base import Alert, AlertCategory, AlertClass, SimpleOneShotAlertClass, AlertLevel
class FailoverRebootAlertClass(AlertClass, SimpleOneShotAlertClass):
    """One-shot notice that a failover event rebooted this controller."""
    category = AlertCategory.SYSTEM
    level = AlertLevel.WARNING
    title = "Failover Event Caused System Reboot"
    text = (
        "%(fqdn)s had a failover event. The system was rebooted to ensure a "
        "proper failover occurred. The operating system successfully came "
        "back online at %(now)s."
    )

    async def create(self, args):
        details = {'fqdn': args['fqdn'], 'now': args['now']}
        return Alert(FailoverRebootAlertClass, details)

    async def delete(self, *args, **kwargs):
        # Nothing is ever kept — the alert is cleared wholesale.
        return []
class FencedRebootAlertClass(AlertClass, SimpleOneShotAlertClass):
    """One-shot notice that fenced rebooted this controller after persistent
    SCSI reservations were lost/cleared."""
    category = AlertCategory.SYSTEM
    level = AlertLevel.WARNING
    title = "Fenced Caused System Reboot"
    text = (
        '%(fqdn)s had a failover event. The system was rebooted because persistent '
        'SCSI reservations were lost and/or cleared. The operating system successfully '
        'came back online at %(now)s.'
    )

    async def create(self, args):
        details = {'fqdn': args['fqdn'], 'now': args['now']}
        return Alert(FencedRebootAlertClass, details)

    async def delete(self, *args, **kwargs):
        # Nothing is ever kept — the alert is cleared wholesale.
        return []
| 1,288 | Python | .py | 27 | 41.37037 | 104 | 0.699122 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,174 | quota.py | truenas_middleware/src/middlewared/middlewared/alert/source/quota.py | from datetime import timedelta
import logging
import os
try:
from bsd import getmntinfo
except ImportError:
getmntinfo = None
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, ThreadedAlertSource
from middlewared.alert.schedule import IntervalSchedule
from middlewared.utils.size import format_size
from middlewared.plugins.zfs_.utils import TNUserProp
logger = logging.getLogger(__name__)
class QuotaWarningAlertClass(AlertClass):
    """Warning tier for a dataset whose quota/refquota usage crossed the warn threshold."""
    category = AlertCategory.STORAGE
    level = AlertLevel.WARNING
    title = "Quota Exceeded on Dataset"
    text = "%(name)s exceeded on dataset %(dataset)s. Used %(used_fraction).2f%% (%(used)s of %(quota_value)s)."
class QuotaCriticalAlertClass(AlertClass):
    """Critical tier for a dataset whose quota/refquota usage crossed the crit threshold."""
    category = AlertCategory.STORAGE
    level = AlertLevel.CRITICAL
    title = "Critical Quota Exceeded on Dataset"
    text = "%(name)s exceeded on dataset %(dataset)s. Used %(used_fraction).2f%% (%(used)s of %(quota_value)s)."
class QuotaAlertSource(ThreadedAlertSource):
    """Every 5 minutes, compare each dataset's quota/refquota usage against its
    per-dataset warn/crit threshold properties and raise the matching alert,
    optionally e-mailing the dataset owner."""
    schedule = IntervalSchedule(timedelta(minutes=5))

    def check_sync(self):
        alerts = []

        datasets = self.middleware.call_sync("zfs.dataset.query_for_quota_alert")
        # Total size of each top-level pool dataset (available + used),
        # used below to skip quotas that can never be exceeded.
        pool_sizes = {}
        for d in datasets:
            # Flatten the raw ZFS property dict to its string value.
            d["name"] = d["name"]["rawvalue"]

            if "/" not in d["name"]:
                pool_sizes[d["name"]] = int(d["available"]["rawvalue"]) + int(d["used"]["rawvalue"])

            # Normalize the warn/crit threshold user-properties to ints,
            # falling back to each property's default when missing/invalid.
            for k, default in TNUserProp.quotas():
                try:
                    d[k] = int(d[k]["rawvalue"])
                except (KeyError, ValueError):
                    d[k] = default

        # call this outside the for loop since we don't need to check
        # for every dataset that could be potentially be out of quota...
        hostname = self.middleware.call_sync("system.hostname")
        datasets = sorted(datasets, key=lambda ds: ds["name"])

        for dataset in datasets:
            for quota_property in ["quota", "refquota"]:
                warn_prop = TNUserProp[f"{quota_property.upper()}_WARN"]
                crit_prop = TNUserProp[f"{quota_property.upper()}_CRIT"]
                try:
                    quota_value = int(dataset[quota_property]["rawvalue"])
                except (AttributeError, KeyError, ValueError):
                    continue

                # quota == 0 means "no quota set".
                if quota_value == 0:
                    continue

                if quota_property == "quota":
                    # We can't use "used" property since it includes refreservation
                    # But if "refquota" is smaller than "quota", then "available" will be reported with regards to
                    # that smaller value, and we will get false positive
                    try:
                        refquota_value = int(dataset["refquota"]["rawvalue"])
                    except (AttributeError, KeyError, ValueError):
                        continue
                    else:
                        if refquota_value and refquota_value < quota_value:
                            continue

                    # Quota larger than dataset available size will never be exceeded,
                    # but will break out logic
                    if quota_value > pool_sizes[dataset["name"].split("/")[0]]:
                        continue

                    used = quota_value - int(dataset["available"]["rawvalue"])
                elif quota_property == "refquota":
                    used = int(dataset["usedbydataset"]["rawvalue"])
                else:
                    # Unreachable given the loop literal above.
                    raise RuntimeError()

                used_fraction = 100 * used / quota_value

                critical_threshold = dataset[crit_prop.value]
                warning_threshold = dataset[warn_prop.value]
                # A threshold of 0 disables that tier.
                if critical_threshold != 0 and used_fraction >= critical_threshold:
                    klass = QuotaCriticalAlertClass
                elif warning_threshold != 0 and used_fraction >= warning_threshold:
                    klass = QuotaWarningAlertClass
                else:
                    continue

                # "quota" -> "Quota", "refquota" -> "Refquota" for display.
                quota_name = quota_property[0].upper() + quota_property[1:]

                args = {
                    "name": quota_name,
                    "dataset": dataset["name"],
                    "used_fraction": used_fraction,
                    "used": format_size(used),
                    "quota_value": format_size(quota_value),
                }

                # Optionally e-mail the mountpoint owner (if not root and the
                # owner has an e-mail address on file).
                mail = None
                owner = self._get_owner(dataset)
                if owner != 0:
                    try:
                        self.middleware.call_sync(
                            'user.get_user_obj', {'uid': owner}
                        )
                    except KeyError:
                        to = None
                        logger.debug("Unable to query user with uid %r", owner)
                    else:
                        try:
                            bsduser = self.middleware.call_sync(
                                "datastore.query",
                                "account.bsdusers",
                                [["bsdusr_uid", "=", owner]],
                                {"get": True},
                            )
                            to = bsduser["bsdusr_email"] or None
                        except IndexError:
                            # "get": True raises IndexError when no row matches.
                            to = None

                    if to is not None:
                        mail = {
                            "to": [to],
                            "subject": f"{hostname}: {quota_name} exceeded on dataset {dataset['name']}",
                            "text": klass.text % args
                        }

                alerts.append(Alert(
                    klass,
                    args=args,
                    key=[dataset["name"], quota_property],
                    mail=mail,
                ))

        return alerts

    def _get_owner(self, dataset):
        """Return the uid owning the dataset's mountpoint (0/root as fallback
        when the dataset is not mounted or cannot be stat'ed)."""
        mountpoint = None
        if dataset["mounted"]["value"] == "yes":
            if dataset["mountpoint"]["value"] == "legacy":
                # Resolve legacy mountpoints via the mount table (BSD only;
                # getmntinfo is None when the bsd module is unavailable).
                for m in (getmntinfo() if getmntinfo else []):
                    if m.source == dataset["name"]:
                        mountpoint = m.dest
                        break
            else:
                mountpoint = dataset["mountpoint"]["value"]
        if mountpoint is None:
            logger.debug("Unable to get mountpoint for dataset %r, assuming owner = root", dataset["name"])
            uid = 0
        else:
            try:
                stat_info = os.stat(mountpoint)
            except Exception:
                logger.debug("Unable to stat mountpoint %r, assuming owner = root", mountpoint)
                uid = 0
            else:
                uid = stat_info.st_uid

        return uid
| 6,815 | Python | .py | 144 | 30.875 | 114 | 0.507076 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,175 | ntp.py | truenas_middleware/src/middlewared/middlewared/alert/source/ntp.py | from datetime import timedelta
from middlewared.alert.base import (Alert, AlertCategory, AlertClass,
AlertLevel, AlertSource)
from middlewared.alert.schedule import IntervalSchedule
from middlewared.plugins.ntp import NTPPeer
class NTPHealthCheckAlertClass(AlertClass):
    """Warns that NTP peers are missing, inactive, or too far offset."""
    category = AlertCategory.SYSTEM
    level = AlertLevel.WARNING
    text = "NTP health check failed - %(reason)s"
    title = "NTP Health Check Failed"
class NTPHealthCheckAlertSource(AlertSource):
    """Twice a day, verify that an active NTP peer exists and that its offset
    is within tolerance."""
    schedule = IntervalSchedule(timedelta(hours=12))
    run_on_backup_node = False

    async def check(self):
        # Give time sync a chance to converge after boot.
        if (await self.middleware.call("system.time_info"))["uptime_seconds"] < 300:
            return

        try:
            peers = [NTPPeer(p) for p in (await self.middleware.call("system.ntpserver.peers"))]
        except Exception:
            self.middleware.logger.warning("Failed to retrieve peers.", exc_info=True)
            peers = []

        if not peers:
            return

        active_peer = [x for x in peers if x.is_active()]
        if not active_peer:
            # Fix: the previous comprehension wrapped each peer string in a
            # one-element set ({str(x)}), producing "[{'...'}, ...]" in the
            # alert text; render a plain list of strings instead.
            return Alert(
                NTPHealthCheckAlertClass,
                {'reason': f'No Active NTP peers: {[str(x) for x in peers]}'}
            )

        peer = active_peer[0]
        # NOTE(review): this compares the offset without abs(); a large
        # negative offset would pass silently. Confirm whether
        # NTPPeer.offset_in_secs is normalized before tightening this.
        if peer.offset_in_secs < 300:
            return

        msg = f'{peer.remote} has an offset of {peer.offset_in_secs}, which exceeds permitted value of 5 minutes.'
        return Alert(NTPHealthCheckAlertClass, {'reason': msg})
| 1,534 | Python | .py | 34 | 35.794118 | 114 | 0.646743 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,176 | discovery_auth.py | truenas_middleware/src/middlewared/middlewared/alert/source/discovery_auth.py | from middlewared.alert.base import AlertCategory, AlertClass, AlertLevel, SimpleOneShotAlertClass
UPGRADE_ALERTS = ['ISCSIDiscoveryAuthMixed', 'ISCSIDiscoveryAuthMultipleCHAP', 'ISCSIDiscoveryAuthMultipleMutualCHAP']
class ISCSIDiscoveryAuthMixedAlertClass(AlertClass, SimpleOneShotAlertClass):
category = AlertCategory.SHARING
level = AlertLevel.WARNING
title = "iSCSI Discovery Authorization Global"
text = "Prior to upgrade had specified iSCSI discovery auth on only some portals, now applies globally. May need to update client configuration when using %(ips)s"
class ISCSIDiscoveryAuthMultipleCHAPAlertClass(AlertClass, SimpleOneShotAlertClass):
category = AlertCategory.SHARING
level = AlertLevel.WARNING
title = "iSCSI Discovery Authorization merged"
text = "Prior to upgrade different portals had different iSCSI discovery auth, now applies globally."
class ISCSIDiscoveryAuthMultipleMutualCHAPAlertClass(AlertClass, SimpleOneShotAlertClass):
category = AlertCategory.SHARING
level = AlertLevel.WARNING
title = "iSCSI Discovery Authorization Multiple Mutual CHAP"
text = "Multiple mutual CHAP peers defined for discovery auth, but only first one (\"%(peeruser)s\") applies. May need to update client configuration."
| 1,281 | Python | .py | 17 | 71.117647 | 168 | 0.814638 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,177 | failover_remote_inaccessible.py | truenas_middleware/src/middlewared/middlewared/alert/source/failover_remote_inaccessible.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import time
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, AlertSource, UnavailableException
from middlewared.utils import ProductType
from middlewared.utils.crypto import generate_token
class FailoverRemoteSystemInaccessibleAlertClass(AlertClass):
    """Critical HA alert: the peer controller has been unreachable for hours."""
    category = AlertCategory.HA
    level = AlertLevel.CRITICAL
    title = 'Other Controller is Inaccessible'
    # %s is a random incident token generated when the alert first fires.
    text = 'Other TrueNAS controller is inaccessible. Contact support. Incident ID: %s.'
    products = (ProductType.SCALE_ENTERPRISE,)
    proactive_support = True
    proactive_support_notify_gone = True
class FailoverRemoteSystemInaccessibleAlertSource(AlertSource):
    """Raise a critical alert once the peer controller has been unreachable
    for more than four hours; otherwise keep the previous alert state."""
    products = (ProductType.SCALE_ENTERPRISE,)
    failover_related = True
    run_on_backup_node = False

    def __init__(self, middleware):
        super().__init__(middleware)
        # Monotonic timestamp of the last successful ping of the peer.
        self.last_available = time.monotonic()
        # Incident token, generated once per outage and reused until recovery.
        self.incident_id = None

    async def check(self):
        try:
            await self.middleware.call('failover.call_remote', 'core.ping', [], {'timeout': 2})
        except Exception:
            unreachable_for = time.monotonic() - self.last_available
            if unreachable_for <= 4 * 3600:
                # Too early to alarm; leave the existing alert state untouched.
                raise UnavailableException()
            if self.incident_id is None:
                self.incident_id = generate_token(16, url_safe=True)
            return [Alert(FailoverRemoteSystemInaccessibleAlertClass, args=[self.incident_id])]

        self.last_available = time.monotonic()
        self.incident_id = None
        return []
| 1,703 | Python | .py | 37 | 38.945946 | 114 | 0.706699 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,178 | zpool_capacity.py | truenas_middleware/src/middlewared/middlewared/alert/source/zpool_capacity.py | from datetime import timedelta
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, AlertSource, UnavailableException
from middlewared.alert.schedule import IntervalSchedule
class ZpoolCapacityNoticeAlertClass(AlertClass):
    """Notice tier: pool usage is at or above 70%."""
    category = AlertCategory.STORAGE
    level = AlertLevel.NOTICE
    title = "Pool Space Usage Is Above 70%"
    text = (
        "Space usage for pool \"%(volume)s\" is %(capacity)d%%. "
        "Optimal pool performance requires used space remain below 80%%."
    )
    proactive_support = True
class ZpoolCapacityWarningAlertClass(AlertClass):
    """Warning tier: pool usage is at or above 80%."""
    category = AlertCategory.STORAGE
    level = AlertLevel.WARNING
    title = "Pool Space Usage Is Above 80%"
    text = (
        "Space usage for pool \"%(volume)s\" is %(capacity)d%%. "
        "Optimal pool performance requires used space remain below 80%%."
    )
    proactive_support = True
class ZpoolCapacityCriticalAlertClass(AlertClass):
    """Critical tier: pool usage is at or above 90%."""
    category = AlertCategory.STORAGE
    level = AlertLevel.CRITICAL
    title = "Pool Space Usage Is Above 90%"
    text = (
        "Space usage for pool \"%(volume)s\" is %(capacity)d%%. "
        "Optimal pool performance requires used space remain below 80%%."
    )
    proactive_support = True
class ZpoolCapacityAlertSource(AlertSource):
    """Every five minutes, map each pool's capacity to the matching alert tier."""
    schedule = IntervalSchedule(timedelta(minutes=5))

    async def check(self):
        alerts = []
        for pool in await self.middleware.call("zfs.pool.query"):
            try:
                capacity = int(pool["properties"]["capacity"]["parsed"])
            except (KeyError, ValueError):
                continue

            # Tiers checked from most to least severe; first match wins.
            for threshold, klass in (
                (90, ZpoolCapacityCriticalAlertClass),
                (80, ZpoolCapacityWarningAlertClass),
                (70, ZpoolCapacityNoticeAlertClass),
            ):
                if capacity >= threshold:
                    alerts.append(Alert(
                        klass,
                        {
                            "volume": pool["name"],
                            "capacity": capacity,
                        },
                        key=[pool["name"]],
                    ))
                    break
                if capacity == threshold - 1:
                    # Hysteresis: at 69/79/89% keep the alert in its previous
                    # state so it doesn't flap around the threshold value.
                    raise UnavailableException()
        return alerts
| 2,639 | Python | .py | 61 | 31.262295 | 117 | 0.587593 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,179 | memory_errors.py | truenas_middleware/src/middlewared/middlewared/alert/source/memory_errors.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from middlewared.alert.base import Alert, AlertCategory, AlertClass, AlertLevel, AlertSource
from middlewared.alert.schedule import CrontabSchedule
from middlewared.utils import ProductType
from middlewared.utils.size import format_size
class MemoryErrorsAlertClass(AlertClass):
    """Warns about uncorrected ECC memory errors detected on a controller/DIMM."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.WARNING
    title = 'Uncorrected Memory Errors Detected'
    text = '%(count)d total uncorrected errors detected for %(loc)s.'
    products = (ProductType.SCALE_ENTERPRISE,)
    proactive_support = True
class MemorySizeMismatchAlertClass(AlertClass):
    """Warns that the two HA controllers report different physical memory sizes."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.WARNING
    title = 'Memory Size Mismatch Detected'
    text = 'Memory size on this controller %(r1)s doesn\'t match other controller %(r2)s'
    products = (ProductType.SCALE_ENTERPRISE,)
    proactive_support = True
class MemoryErrorsAlertSource(AlertSource):
    """Once a day, scan hardware.memory.error_info for uncorrected errors.

    NOTE(review): the expected payload shape (keys like
    'uncorrected_errors_with_no_dimm_info' and per-'dimm*'/'rank*' sub-dicts)
    is inferred from usage below — confirm against hardware.memory.error_info.
    """
    schedule = CrontabSchedule(hour=1)  # every 24hrs

    async def check(self):
        alerts = []
        for mem_ctrl, info in (await self.middleware.call('hardware.memory.error_info')).items():
            location = f'memory controller {mem_ctrl}'
            if (val := info['uncorrected_errors_with_no_dimm_info']) is not None and val > 0:
                # this means that there were uncorrected errors where no additional information
                # is available. These errors occur when the system detects an uncorrectable memory
                # error, but specific details about the error are not provided or accessible.
                # Because of this fact, we'll just report the error count without the DIMM information.
                alerts.append(Alert(MemoryErrorsAlertClass, {'count': val, 'loc': location}))
            elif (val := info['uncorrected_errors']) is not None and val > 0:
                # this means that there were uncorrected errors where the dimm information was able
                # to be obtained.
                for dimm_key in filter(lambda x: x.startswith(('dimm', 'rank')), info):
                    if (val2 := info[dimm_key]['uncorrected_errors']) is not None and val2 > 0:
                        # the specific dimm
                        alerts.append(Alert(
                            MemoryErrorsAlertClass, {'count': val2, 'loc': location + f' on dimm {dimm_key}'}
                        ))
        return alerts
class MemorySizeMismatchAlertSource(AlertSource):
    """On licensed HA systems, warn daily when the two controllers report
    different physical memory sizes."""
    schedule = CrontabSchedule(hour=1)  # every 24hrs
    run_on_backup_node = False

    async def check(self):
        alerts = []
        if not await self.middleware.call('failover.licensed'):
            return alerts

        local_size = (await self.middleware.call('system.mem_info'))['physmem_size']
        if local_size is None:
            return alerts

        try:
            # Fix: pass the remote method's args as a list (cf. the
            # 'core.ping' call in this package) with options separate;
            # previously the options dict occupied the args position.
            remote_info = await self.middleware.call(
                'failover.call_remote', 'system.mem_info', [], {'raise_connect_error': False}
            )
            remote_size = remote_info['physmem_size']
            if remote_size is None:
                return alerts
        except Exception:
            # Best-effort: an unreachable peer is handled by its own alert source.
            return alerts

        # Fix: compare the extracted sizes — previously the local size (an int)
        # was compared against the entire remote info dict, which never matched.
        if local_size != remote_size:
            alerts.append(Alert(
                MemorySizeMismatchAlertClass,
                {'r1': format_size(local_size), 'r2': format_size(remote_size)}
            ))
        return alerts
| 3,460 | Python | .py | 68 | 40.823529 | 109 | 0.648104 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,180 | kdump.py | truenas_middleware/src/middlewared/middlewared/alert/source/kdump.py | from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, SimpleOneShotAlertClass
class KdumpNotReadyAlertClass(AlertClass, SimpleOneShotAlertClass):
    """One-shot alert raised when the system is not configured/ready for Kdump."""
    deleted_automatically = False  # persists until the user dismisses it
    level = AlertLevel.WARNING
    category = AlertCategory.SYSTEM
    title = 'System Not Ready For Kdump'
    text = 'System is not ready for Kdump, please refer to kdump-config status.'
| 391 | Python | .py | 7 | 51.714286 | 97 | 0.801047 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,181 | cores.py | truenas_middleware/src/middlewared/middlewared/alert/source/cores.py | from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, AlertSource, Alert
class CoreFilesArePresentAlertClass(AlertClass):
    """Alert raised when core dumps from monitored services are found on disk."""
    category = AlertCategory.SYSTEM
    level = AlertLevel.WARNING
    title = "Core Files Detected"
    # Static guidance text; the alert args carry the actual list of core files.
    text = (
        "Core files for executables have been found in /var/db/system/cores/."
        "Please open the shell, copy any core files present in /var/db/system/cores/ "
        "and then generate a system debug. Next, create a ticket at https://ixsystems.atlassian.net/ "
        "and attach the core files and debug. After creating the ticket, the core files can be removed "
        "from the system by opening shell and entering 'rm /var/db/system/cores/*'."
    )
    products = ("SCALE",)
class CoreFilesArePresentAlertSource(AlertSource):
    """Scans systemd coredump records and alerts on cores from monitored services."""
    products = ("SCALE",)

    # systemd units whose core dumps warrant an alert.
    _MONITORED_UNITS = (
        # NFS related service(s)
        "nfs-blkmap.service",
        "nfs-idmapd.service",
        "nfs-mountd.service",
        "nfsdcld.service",
        "rpc-statd.service",
        "rpcbind.service",
        # SMB related service(s)
        "smbd.service",
        "winbind.service",
        "nmbd.service",
        "wsdd.service",
        # SCST related service(s)
        "scst.service",
        # ZFS related (userspace) service(s)
        "zfs-zed.service",
    )

    async def should_alert(self, core):
        """Return True when the dump is still on disk and belongs to a monitored unit."""
        unit = core["unit"]
        if core["corefile"] != "present" or not unit:
            # Either no core file exists on disk, or the dump has no associated
            # systemd unit (unlikely, but guard anyway).
            return False
        return unit.startswith(self._MONITORED_UNITS)

    async def check(self):
        corefiles = [
            f"{dump['exe']} ({dump['time']})"
            for dump in await self.middleware.call("system.coredumps")
            if await self.should_alert(dump)
        ]
        if corefiles:
            return Alert(CoreFilesArePresentAlertClass, {"corefiles": ", ".join(corefiles)})
| 2,046 | Python | .py | 45 | 35.577778 | 104 | 0.620171 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,182 | dif_formatted_disks.py | truenas_middleware/src/middlewared/middlewared/alert/source/dif_formatted_disks.py | from middlewared.alert.base import Alert, AlertClass, AlertCategory, OneShotAlertClass, AlertLevel
class DifFormattedAlertClass(AlertClass, OneShotAlertClass):
    """One-shot alert for disks formatted with DIF, which is unsupported."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.CRITICAL
    title = 'Disk(s) Are Formatted With Data Integrity Feature (DIF).'
    text = 'Disk(s): %s are formatted with Data Integrity Feature (DIF) which is unsupported.'

    async def create(self, disks):
        # Single shared alert (key=None) listing every affected disk.
        disk_list = ', '.join(disks)
        return Alert(DifFormattedAlertClass, disk_list, key=None)

    async def delete(self, alerts, query):
        # This alert is never removed via delete(); always clears everything.
        return []
| 569 | Python | .py | 10 | 51.5 | 98 | 0.745946 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,183 | syslog_ng.py | truenas_middleware/src/middlewared/middlewared/alert/source/syslog_ng.py | import subprocess
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, ThreadedAlertSource
class SyslogNgAlertClass(AlertClass):
    """Alert raised when the syslog-ng service is not running."""
    category = AlertCategory.REPORTING
    level = AlertLevel.WARNING
    title = "syslog-ng Is Not Running"
    # %s is the raw output of the service status command.
    text = "%s"
class SyslogNgAlertSource(ThreadedAlertSource):
    """Checks whether syslog-ng is running via its service status command."""

    def check_sync(self):
        # subprocess.run is the modern, simpler replacement for
        # Popen + communicate(); behavior (captured combined output,
        # returncode check) is unchanged.
        proc = subprocess.run(
            ["/usr/sbin/service", "syslog-ng", "status"],
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf8",
        )
        if proc.returncode == 1:
            # Service reported not-running: surface its status output verbatim.
            return Alert(SyslogNgAlertClass, proc.stdout)
| 655 | Python | .py | 14 | 39.285714 | 100 | 0.701258 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,184 | nfs_bindaddr.py | truenas_middleware/src/middlewared/middlewared/alert/source/nfs_bindaddr.py | from middlewared.alert.base import AlertClass, SimpleOneShotAlertClass, AlertCategory, AlertLevel
class NFSBindAddressAlertClass(AlertClass, SimpleOneShotAlertClass):
    """One-shot alert raised when NFS falls back to binding on 0.0.0.0."""
    category = AlertCategory.SHARING
    level = AlertLevel.WARNING
    title = "NFS Services Could Not Bind to Specific IP Addresses, Using 0.0.0.0"
    text = "NFS services could not bind to specific IP addresses, using 0.0.0.0."
| 401 | Python | .py | 6 | 62.833333 | 97 | 0.796438 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,185 | deprecated_config.py | truenas_middleware/src/middlewared/middlewared/alert/source/deprecated_config.py | import json
from middlewared.alert.base import Alert, AlertClass, SimpleOneShotAlertClass, AlertCategory, AlertLevel
URL = "https://www.truenas.com/docs/scale/scaledeprecatedfeatures/"
class DeprecatedServiceConfigurationAlertClass(AlertClass, SimpleOneShotAlertClass):
    """One-shot alert flagging service configuration that SCALE has deprecated."""
    category = AlertCategory.SHARING
    level = AlertLevel.WARNING
    title = "Deprecated Service Configuration Detected"
    text = (
        "The following service configuration is deprecated %(config)s. "
        "This functionality is scheduled for removal in a future version of SCALE. "
        f"Before upgrading, please check {URL} for more information."
    )

    async def create(self, args):
        # One alert per deprecated configuration, keyed by its description.
        return Alert(DeprecatedServiceConfigurationAlertClass, args, key=args['config'])

    async def delete(self, alerts, query):
        # Alert keys are stored JSON-encoded; decode each before comparing with
        # the stringified query so only the matching alert is removed.
        kept = []
        for alert in alerts:
            if json.loads(alert.key) != str(query):
                kept.append(alert)
        return kept
| 934 | Python | .py | 19 | 42.578947 | 104 | 0.732673 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,186 | catalogs.py | truenas_middleware/src/middlewared/middlewared/alert/source/catalogs.py | from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, OneShotAlertClass
class CatalogNotHealthyAlertClass(AlertClass, OneShotAlertClass):
    """One-shot alert for a catalog containing unhealthy applications."""
    deleted_automatically = False
    level = AlertLevel.WARNING
    category = AlertCategory.APPLICATIONS
    title = 'Catalog Not Healthy'
    text = '%(apps)s Applications in %(catalog)s Catalog are not healthy.'

    async def create(self, args):
        # Keyed by catalog name so each catalog gets at most one alert.
        return Alert(CatalogNotHealthyAlertClass, args, key=args['catalog'])

    async def delete(self, alerts, query):
        # Keep only alerts that belong to catalogs other than the queried one.
        return [alert for alert in alerts if alert.args['catalog'] != query]
class CatalogSyncFailedAlertClass(AlertClass, OneShotAlertClass):
    """One-shot alert raised when syncing a catalog fails."""
    deleted_automatically = False
    level = AlertLevel.CRITICAL
    category = AlertCategory.APPLICATIONS
    title = 'Unable to Sync Catalog'
    text = 'Failed to sync %(catalog)s catalog: %(error)s'

    async def create(self, args):
        # Keyed by catalog name so each catalog gets at most one alert.
        return Alert(CatalogSyncFailedAlertClass, args, key=args['catalog'])

    async def delete(self, alerts, query):
        # Keep only alerts that belong to catalogs other than the queried one.
        return [alert for alert in alerts if alert.args['catalog'] != query]
| 1,199 | Python | .py | 27 | 37.481481 | 98 | 0.704467 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,187 | mseries_nvdimm_and_bios.py | truenas_middleware/src/middlewared/middlewared/alert/source/mseries_nvdimm_and_bios.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import datetime
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, ThreadedAlertSource
from middlewared.alert.schedule import IntervalSchedule
from middlewared.utils import ProductType
# Shared sentence appended to alert texts that direct the user to file a support ticket.
WEBUI_SUPPORT_FORM = (
    'Please contact iXsystems Support using the "File Ticket" button in the System Settings->General->Support form'
)
class NVDIMMAlertClass(AlertClass):
    """Generic alert for an NVDIMM reporting an unhealthy status value."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.WARNING
    title = 'There Is An Issue With NVDIMM'
    text = 'NVDIMM: "%(dev)s" is reporting "%(value)s" with status "%(status)s".'
    products = (ProductType.SCALE_ENTERPRISE,)
class NVDIMMESLifetimeWarningAlertClass(AlertClass):
    """Warning-level alert for NVDIMM energy-source lifetime between 11% and 19%."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.WARNING
    title = 'NVDIMM Energy Source Lifetime Is Less Than 20%'
    text = 'NVDIMM Energy Source Remaining Lifetime for %(dev)s is %(value)d%%.'
    products = (ProductType.SCALE_ENTERPRISE,)
class NVDIMMESLifetimeCriticalAlertClass(AlertClass):
    """Critical-level alert for NVDIMM energy-source lifetime at 10% or below."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.CRITICAL
    title = 'NVDIMM Energy Source Lifetime Is Less Than 10%'
    text = 'NVDIMM Energy Source Remaining Lifetime for %(dev)s is %(value)d%%.'
    products = (ProductType.SCALE_ENTERPRISE,)
class NVDIMMMemoryModLifetimeWarningAlertClass(AlertClass):
    """Warning-level alert for NVDIMM memory-module lifetime between 11% and 19%."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.WARNING
    title = 'NVDIMM Memory Module Lifetime Is Less Than 20%'
    text = 'NVDIMM Memory Module Remaining Lifetime for %(dev)s is %(value)d%%.'
    products = (ProductType.SCALE_ENTERPRISE,)
class NVDIMMMemoryModLifetimeCriticalAlertClass(AlertClass):
    """Critical-level alert for NVDIMM memory-module lifetime at 10% or below."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.CRITICAL
    title = 'NVDIMM Memory Module Lifetime Is Less Than 10%'
    text = 'NVDIMM Memory Module Remaining Lifetime for %(dev)s is %(value)d%%.'
    products = (ProductType.SCALE_ENTERPRISE,)
class NVDIMMInvalidFirmwareVersionAlertClass(AlertClass):
    """Alert raised when an NVDIMM runs firmware outside the qualified list."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.CRITICAL
    title = 'Invalid NVDIMM Firmware Version'
    text = f'NVDIMM: "%(dev)s" is running invalid firmware. {WEBUI_SUPPORT_FORM}'
    products = (ProductType.SCALE_ENTERPRISE,)
    proactive_support = True  # forwarded to iXsystems proactive support
class NVDIMMRecommendedFirmwareVersionAlertClass(AlertClass):
    """Alert raised when NVDIMM firmware is qualified but not the recommended version."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.CRITICAL
    title = 'NVDIMM Firmware Version Should Be Upgraded'
    # %(rv)s = running version, %(uv)s = upgrade (recommended) version.
    text = (
        'NVDIMM: "%(dev)s" is running firmware version "%(rv)s" which can be upgraded to '
        f'"%(uv)s". {WEBUI_SUPPORT_FORM}'
    )
    products = (ProductType.SCALE_ENTERPRISE,)
    proactive_support = True  # forwarded to iXsystems proactive support
class OldBiosVersionAlertClass(AlertClass):
    """Alert raised when the system BIOS is detected to be an old version."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.WARNING
    title = 'Old BIOS Version'
    text = f'This system is running an old BIOS version. {WEBUI_SUPPORT_FORM}'
    products = (ProductType.SCALE_ENTERPRISE,)
    proactive_support = True  # forwarded to iXsystems proactive support
class NVDIMMAndBIOSAlertSource(ThreadedAlertSource):
    """Polls M-series NVDIMM health/firmware state and BIOS age, producing alerts."""
    schedule = IntervalSchedule(datetime.timedelta(minutes=5))
    products = (ProductType.SCALE_ENTERPRISE,)

    def produce_alerts(self, nvdimm, alerts, old_bios):
        """Append alerts for one NVDIMM (an entry from mseries.nvdimm.info).

        Alerts are appended to the caller-supplied `alerts` list. `old_bios`
        tells us the caller already emitted an OldBiosVersionAlertClass alert,
        so we avoid duplicating it here.
        """
        # Bits in critical_health_info that are informational rather than errors.
        persistency_restored = 0x4
        arm_info = 0x40
        dev = nvdimm['dev']
        old_bios_alert_already_generated = old_bios
        for _hex, vals in nvdimm['critical_health_info'].items():
            hex_int = int(_hex, 16)
            # Any bit set besides the two informational ones indicates a problem.
            if hex_int & ~(persistency_restored | arm_info):
                alerts.append(Alert(
                    NVDIMMAlertClass,
                    {'dev': dev, 'value': _hex, 'status': ','.join(vals)}
                ))
            # For spec revision >= 22 the "armed" bit must be set.
            if nvdimm['specrev'] >= 22 and not (hex_int & arm_info):
                alerts.append(Alert(
                    NVDIMMAlertClass,
                    {'dev': dev, 'value': 'ARM STATUS', 'status': 'NOT ARMED'}
                ))

        # Any non-zero value in these status maps is reported verbatim.
        for i in ('nvm_health_info', 'nvm_error_threshold_status', 'nvm_warning_threshold_status'):
            for _hex, vals in nvdimm[i].items():
                if int(_hex, 16) != 0:
                    alerts.append(Alert(
                        NVDIMMAlertClass,
                        {'dev': dev, 'value': _hex, 'status': ','.join(vals)}
                    ))

        # Memory-module lifetime: warning at 11-19%, critical at 10% or below.
        if (val := int(nvdimm['nvm_lifetime'].rstrip('%'))) < 20:
            alert = NVDIMMMemoryModLifetimeWarningAlertClass if val > 10 else NVDIMMMemoryModLifetimeCriticalAlertClass
            alerts.append(Alert(alert, {'dev': dev, 'value': val}))

        # Energy-source lifetime: same thresholds, but only for the slot-0 NVDIMM.
        if nvdimm['index'] == 0 and (val := int(nvdimm['es_lifetime'].rstrip('%'))) < 20:
            # we only check this value for the 0th slot nvdimm since M60 has 2 and the way
            # they're physically cabled, prevents monitoring the 2nd nvdimm's energy source
            # (it always reports -1%)
            alert = NVDIMMESLifetimeWarningAlertClass if val > 10 else NVDIMMESLifetimeCriticalAlertClass
            alerts.append(Alert(alert, {'dev': dev, 'value': val}))

        # Explicit "not armed" state flag also raises the ARM STATUS alert.
        if 'not_armed' in nvdimm['state_flags']:
            alerts.append(Alert(
                NVDIMMAlertClass,
                {'dev': dev, 'value': 'ARM STATUS', 'status': 'NOT ARMED'}
            ))

        # Running firmware must be qualified, and ideally the recommended version.
        if (run_fw := nvdimm['running_firmware']) is not None:
            if run_fw not in nvdimm['qualified_firmware']:
                alerts.append(Alert(NVDIMMInvalidFirmwareVersionAlertClass, {'dev': dev}))
            elif run_fw != nvdimm['recommended_firmware']:
                alerts.append(Alert(
                    NVDIMMRecommendedFirmwareVersionAlertClass,
                    {'dev': dev, 'rv': run_fw, 'uv': nvdimm['recommended_firmware']}
                ))

        if not old_bios_alert_already_generated and nvdimm['old_bios']:
            alerts.append(Alert(OldBiosVersionAlertClass))
            old_bios_alert_already_generated = True

    def check_sync(self):
        """Run the NVDIMM/BIOS checks; only applies to M40/M50/M60 platforms."""
        alerts = []
        sys = ('TRUENAS-M40', 'TRUENAS-M50', 'TRUENAS-M60')
        if self.middleware.call_sync('truenas.get_chassis_hardware').startswith(sys):
            old_bios = self.middleware.call_sync('mseries.bios.is_old_version')
            if old_bios:
                alerts.append(Alert(OldBiosVersionAlertClass))
            for nvdimm in self.middleware.call_sync('mseries.nvdimm.info'):
                try:
                    self.produce_alerts(nvdimm, alerts, old_bios)
                except Exception:
                    # One bad NVDIMM record must not abort checking the rest.
                    self.middleware.logger.exception('Unexpected failure processing NVDIMM alerts')
        return alerts
| 6,729 | Python | .py | 130 | 42.607692 | 119 | 0.65951 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,188 | nfs_host.py | truenas_middleware/src/middlewared/middlewared/alert/source/nfs_host.py | from middlewared.alert.base import AlertCategory, AlertClass, AlertLevel, SimpleOneShotAlertClass
class NFSHostnameLookupFailAlertClass(AlertClass, SimpleOneShotAlertClass):
    """One-shot alert listing NFS share hosts that failed name resolution."""
    category = AlertCategory.SHARING
    level = AlertLevel.WARNING
    title = "NFS shares reference hosts that could not be resolved"
    text = "NFS shares refer to the following unresolvable hosts: %(hosts)s"

    async def delete(self, alerts, query):
        # delete() always clears every alert of this class, regardless of query.
        return []
| 451 | Python | .py | 8 | 51.5 | 97 | 0.784091 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,189 | vmware_login.py | truenas_middleware/src/middlewared/middlewared/alert/source/vmware_login.py | from middlewared.alert.base import AlertClass, OneShotAlertClass, AlertCategory, AlertLevel, Alert
class VMWareLoginFailedAlertClass(AlertClass, OneShotAlertClass):
    """One-shot alert raised when logging in to a VMWare host fails."""
    category = AlertCategory.TASKS
    level = AlertLevel.WARNING
    title = "VMWare Login Failed"
    text = "VMWare login to %(hostname)s failed: %(error)s."

    async def create(self, args):
        return Alert(VMWareLoginFailedAlertClass, args)

    async def delete(self, alerts, query):
        # Drop every alert raised for the queried hostname.
        return [alert for alert in alerts if alert.args["hostname"] != query]
| 609 | Python | .py | 14 | 36.428571 | 98 | 0.705085 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,190 | rsync.py | truenas_middleware/src/middlewared/middlewared/alert/source/rsync.py | from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, OneShotAlertClass, Alert
class RsyncSuccessAlertClass(AlertClass, OneShotAlertClass):
    """One-shot informational alert raised when an rsync task completes."""
    deleted_automatically = False
    category = AlertCategory.TASKS
    level = AlertLevel.INFO
    title = 'Rsync Task Succeeded'
    text = 'Rsync "%(direction)s" task for "%(path)s" succeeded.'

    async def create(self, args):
        # Keyed by the task's database id.
        return Alert(RsyncSuccessAlertClass, args, key=args['id'])

    async def delete(self, alerts, query):
        # Alert keys are stringified task ids; drop the one matching the query.
        task_key = str(query)
        return [alert for alert in alerts if alert.key != task_key]
class RsyncFailedAlertClass(AlertClass, OneShotAlertClass):
    """One-shot critical alert raised when an rsync task fails."""
    deleted_automatically = False
    category = AlertCategory.TASKS
    level = AlertLevel.CRITICAL
    title = 'Rsync Task Failed'
    text = 'Rsync "%(direction)s" task for "%(path)s" failed.'

    async def create(self, args):
        # Keyed by the task's database id.
        return Alert(RsyncFailedAlertClass, args, key=args['id'])

    async def delete(self, alerts, query):
        # Alert keys are stringified task ids; drop the one matching the query.
        task_key = str(query)
        return [alert for alert in alerts if alert.key != task_key]
| 1,129 | Python | .py | 27 | 34.814815 | 98 | 0.684066 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,191 | truecommand.py | truenas_middleware/src/middlewared/middlewared/alert/source/truecommand.py | from middlewared.alert.base import Alert, AlertClass, AlertCategory, AlertLevel, OneShotAlertClass
class TruecommandConnectionDisabledAlertClass(AlertClass, OneShotAlertClass):
    """One-shot alert: the TrueCommand API key was disabled by the iX portal."""
    deleted_automatically = False
    category = AlertCategory.SYSTEM
    level = AlertLevel.CRITICAL
    title = 'TrueCommand API Key Disabled by iX Portal'
    text = 'TrueCommand API Key has been disabled by iX Portal: %(error)s'

    async def create(self, args):
        return Alert(TruecommandConnectionDisabledAlertClass, args)

    async def delete(self, alerts, query):
        # delete() always clears every alert of this class, regardless of query.
        return []
class TruecommandConnectionPendingAlertClass(AlertClass, OneShotAlertClass):
    """One-shot alert: the TrueCommand API key awaits confirmation from the iX portal."""
    deleted_automatically = False
    category = AlertCategory.SYSTEM
    level = AlertLevel.INFO
    title = 'Pending Confirmation From iX Portal for TrueCommand API Key'
    text = 'Confirmation is pending for TrueCommand API Key from iX Portal: %(error)s'

    async def create(self, args):
        return Alert(TruecommandConnectionPendingAlertClass, args)

    async def delete(self, alerts, query):
        # delete() always clears every alert of this class, regardless of query.
        return []
class TruecommandConnectionHealthAlertClass(AlertClass, OneShotAlertClass):
    """One-shot alert: the TrueCommand service failed its scheduled health check."""
    deleted_automatically = False
    category = AlertCategory.SYSTEM
    level = AlertLevel.CRITICAL
    title = 'TrueCommand Service Failed Scheduled Health Check'
    text = 'TrueCommand service failed scheduled health check, please confirm NAS ' \
           'has been registered with TrueCommand and TrueCommand is able to access NAS.'

    async def create(self, args):
        return Alert(TruecommandConnectionHealthAlertClass, args)

    async def delete(self, alerts, query):
        # delete() always clears every alert of this class, regardless of query.
        return []
class TruecommandContainerHealthAlertClass(AlertClass, OneShotAlertClass):
    """One-shot alert: the TrueCommand container failed its scheduled health check."""
    deleted_automatically = False
    category = AlertCategory.SYSTEM
    level = AlertLevel.CRITICAL
    title = 'TrueCommand Container Failed Scheduled Health Check'
    text = 'TrueCommand container failed scheduled health check, please contact Truecommand support.'

    async def create(self, args):
        return Alert(TruecommandContainerHealthAlertClass, args)

    async def delete(self, alerts, query):
        # delete() always clears every alert of this class, regardless of query.
        return []
| 2,174 | Python | .py | 42 | 45.833333 | 101 | 0.765152 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,192 | update.py | truenas_middleware/src/middlewared/middlewared/alert/source/update.py | from datetime import timedelta
from middlewared.alert.base import Alert, AlertClass, AlertCategory, AlertLevel, AlertSource
from middlewared.alert.schedule import IntervalSchedule
class HasUpdateAlertClass(AlertClass):
    """Informational alert raised when a system update is available."""
    category = AlertCategory.SYSTEM
    level = AlertLevel.INFO
    title = "Update Available"
    text = "A system update is available. Go to System Settings → Update to download and apply the update."
class HasUpdateAlertSource(AlertSource):
    """Hourly check for availability of a system update."""
    schedule = IntervalSchedule(timedelta(hours=1))
    run_on_backup_node = False

    async def check(self):
        try:
            status = (await self.middleware.call("update.check_available"))["status"]
        except Exception:
            # Best-effort: update checks routinely fail (e.g. no connectivity).
            # Previously the error was silently swallowed; log it at debug level
            # so failures remain diagnosable without spamming the log.
            self.middleware.logger.debug("Failed to check for available update", exc_info=True)
            return None
        if status == "AVAILABLE":
            return Alert(HasUpdateAlertClass)
| 783 | Python | .py | 17 | 39.764706 | 109 | 0.736842 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,193 | audit.py | truenas_middleware/src/middlewared/middlewared/alert/source/audit.py | from datetime import timedelta
import logging
from middlewared.alert.base import AlertClass, AlertCategory, Alert, AlertLevel, AlertSource, SimpleOneShotAlertClass
from middlewared.alert.schedule import IntervalSchedule
# Module-level logger for this alert module.
log = logging.getLogger("audit_check_alertmod")
# -------------- OneShot Alerts ------------------
class AuditBackendSetupAlertClass(AlertClass, SimpleOneShotAlertClass):
    """One-shot alert raised when an audit service backend fails to set up."""
    category = AlertCategory.AUDIT
    level = AlertLevel.ERROR
    title = "Audit Service Backend Failed"
    text = "Audit service failed backend setup: %(service)s. See /var/log/middlewared.log"
class AuditSetupAlertClass(AlertClass, SimpleOneShotAlertClass):
    """One-shot alert raised when overall audit service setup fails."""
    category = AlertCategory.AUDIT
    level = AlertLevel.ERROR
    title = "Audit Service Setup Failed"
    text = "Audit service failed to complete setup. See /var/log/middlewared.log"
# --------------- Monitored Alerts ----------------
class AuditServiceHealthAlertClass(AlertClass):
    """Alert raised when the periodic audit-query health check fails."""
    category = AlertCategory.AUDIT
    level = AlertLevel.ERROR
    text = "Failed to perform audit query: %(verrs)s"
    title = "Audit Service Health Failure"
class AuditServiceHealthAlertSource(AlertSource):
    """Runs a trivial audit query every 20 minutes as a health check."""
    schedule = IntervalSchedule(timedelta(minutes=20))
    run_on_backup_node = False

    async def check(self):
        query_opts = {"query-options": {"count": True}}
        try:
            await self.middleware.call('audit.query', query_opts)
        except Exception as err:
            # Any failure of this minimal query means the audit service
            # (or its backend) is unhealthy.
            return Alert(AuditServiceHealthAlertClass, {'verrs': str(err)}, key=None)
| 1,694 | Python | .py | 41 | 34.317073 | 117 | 0.671942 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,194 | failover_disks.py | truenas_middleware/src/middlewared/middlewared/alert/source/failover_disks.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, AlertSource
from middlewared.utils import ProductType
# Shared title/text prefixes for the two alert classes below; each class
# appends which controller the disks are missing from.
TITLE = 'Disks Missing On '
TEXT = 'Disks with serial %(serials)s present on '
class DisksAreNotPresentOnStandbyNodeAlertClass(AlertClass):
    """Alert: disks seen on the active controller are missing on the standby."""
    category = AlertCategory.HA
    level = AlertLevel.CRITICAL
    title = TITLE + 'Standby Storage Controller'
    text = TEXT + 'active storage controller but missing on standby storage controller.'
    products = (ProductType.SCALE_ENTERPRISE,)
class DisksAreNotPresentOnActiveNodeAlertClass(AlertClass):
    """Alert: disks seen on the standby controller are missing on the active."""
    category = AlertCategory.HA
    level = AlertLevel.CRITICAL
    title = TITLE + 'Active Storage Controller'
    text = TEXT + 'standby storage controller but missing on active storage controller.'
    products = (ProductType.SCALE_ENTERPRISE,)
class FailoverDisksAlertSource(AlertSource):
    """Alerts when disks present on one HA controller are missing on the other."""
    products = (ProductType.SCALE_ENTERPRISE,)
    failover_related = True
    run_on_backup_node = False

    async def check(self):
        mismatch = await self.middleware.call('failover.mismatch_disks')
        if mismatch:
            if mismatch['missing_remote']:
                # Present locally (active) but absent on the standby controller.
                serials = ', '.join(mismatch['missing_remote'])
                return [Alert(DisksAreNotPresentOnStandbyNodeAlertClass, {'serials': serials})]
            if mismatch['missing_local']:
                # Present on the standby controller but absent locally (active).
                serials = ', '.join(mismatch['missing_local'])
                return [Alert(DisksAreNotPresentOnActiveNodeAlertClass, {'serials': serials})]
        return []
| 1,664 | Python | .py | 35 | 40.571429 | 107 | 0.705556 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,195 | scrub_paused.py | truenas_middleware/src/middlewared/middlewared/alert/source/scrub_paused.py | from datetime import datetime, timedelta
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, ThreadedAlertSource
class ScrubPausedAlertClass(AlertClass):
    """Alert raised when a pool scrub stays paused for over eight hours."""
    category = AlertCategory.STORAGE
    level = AlertLevel.WARNING
    # %r is filled with the pool name.
    title = "Scrub Is Paused"
    text = "Scrub for pool %r is paused for more than 8 hours."
class ScrubPausedAlertSource(ThreadedAlertSource):
    """Alerts when any pool's scrub has been paused for more than eight hours."""
    # NOTE(review): this subclasses ThreadedAlertSource yet defines an async
    # check() rather than check_sync() — confirm this is intentional.
    run_on_backup_node = False

    async def check(self):
        alerts = []
        for pool in await self.middleware.call("pool.query"):
            scan = pool["scan"]
            if scan is None or scan["pause"] is None:
                continue
            if scan["pause"] < datetime.now() - timedelta(hours=8):
                alerts.append(Alert(ScrubPausedAlertClass, pool["name"]))
        return alerts
| 825 | Python | .py | 17 | 40.117647 | 100 | 0.673317 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,196 | smart.py | truenas_middleware/src/middlewared/middlewared/alert/source/smart.py | from middlewared.alert.base import AlertClass, OneShotAlertClass, AlertCategory, AlertLevel, Alert
class SMARTAlertClass(AlertClass, OneShotAlertClass):
    """One-shot alert carrying a S.M.A.R.T. error message for a disk device."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.CRITICAL
    title = "S.M.A.R.T. Error"
    text = "%(message)s."
    proactive_support = True
    deleted_automatically = False

    async def create(self, args):
        # Normalize the device name to its full /dev path before storing.
        if not args["device"].startswith("/dev/"):
            args["device"] = "/dev/" + args["device"]
        return Alert(SMARTAlertClass, args)

    async def delete(self, alerts, query):
        # Apply the same /dev normalization to the query, then drop alerts
        # belonging to that device.
        device = query if query.startswith("/dev/") else "/dev/" + query
        return [alert for alert in alerts if alert.args["device"] != device]
| 798 | Python | .py | 20 | 32.05 | 98 | 0.642393 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,197 | smartd.py | truenas_middleware/src/middlewared/middlewared/alert/source/smartd.py | from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, ThreadedAlertSource
class SmartdAlertClass(AlertClass):
    """Alert raised when smartd is enabled but not running."""
    category = AlertCategory.HARDWARE
    level = AlertLevel.WARNING
    title = "smartd Is Not Running"
    text = "smartd is not running."
class SmartdAlertSource(ThreadedAlertSource):
    """Alerts when the smartd service is enabled but the daemon is not running."""

    def check_sync(self):
        call = self.middleware.call_sync
        enabled = call(
            "datastore.query", "services.services",
            [("srv_service", "=", "smartd"), ("srv_enable", "=", True)],
        )
        if not enabled:
            # Service isn't enabled; nothing to check.
            return
        if call("system.vm"):
            # Skip inside virtual machines.
            return
        if call("system.is_enterprise") and call("failover.status") != "MASTER":
            # On enterprise/HA systems, only the active controller runs smartd.
            return
        if not call("service.started", "smartd"):
            return Alert(SmartdAlertClass)
| 943 | Python | .py | 17 | 41.764706 | 109 | 0.606522 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,198 | ups.py | truenas_middleware/src/middlewared/middlewared/alert/source/ups.py | from middlewared.alert.base import SimpleOneShotAlertClass, AlertClass, AlertCategory, AlertLevel
class UPSBatteryLowAlertClass(AlertClass, SimpleOneShotAlertClass):
    """One-shot alert: the UPS reports a low battery level."""
    category = AlertCategory.UPS
    level = AlertLevel.ALERT
    title = 'UPS Battery LOW'
    text = 'UPS %(ups)s battery level low.%(body)s'
    deleted_automatically = False
    keys = []
class UPSOnlineAlertClass(AlertClass, SimpleOneShotAlertClass):
    """One-shot informational alert: the UPS is back on line power."""
    category = AlertCategory.UPS
    level = AlertLevel.INFO
    title = 'UPS On Line Power'
    text = 'UPS %(ups)s is on line power.%(body)s'
    deleted_automatically = False
    keys = []
class UPSOnBatteryAlertClass(AlertClass, SimpleOneShotAlertClass):
    """One-shot critical alert: the UPS is running on battery power."""
    category = AlertCategory.UPS
    level = AlertLevel.CRITICAL
    title = 'UPS On Battery'
    text = 'UPS %(ups)s is on battery power.%(body)s'
    deleted_automatically = False
    keys = []
class UPSCommbadAlertClass(AlertClass, SimpleOneShotAlertClass):
    """One-shot critical alert: communication with the UPS was lost."""
    category = AlertCategory.UPS
    level = AlertLevel.CRITICAL
    title = 'UPS Communication Lost'
    text = 'Communication with UPS %(ups)s lost.%(body)s'
    deleted_automatically = False
    keys = []
class UPSCommokAlertClass(AlertClass, SimpleOneShotAlertClass):
    """One-shot informational alert: communication with the UPS was (re)established."""
    category = AlertCategory.UPS
    level = AlertLevel.INFO
    title = 'UPS Communication Established'
    text = 'Communication with UPS %(ups)s established.%(body)s'
    deleted_automatically = False
    keys = []
class UPSReplbattAlertClass(AlertClass, SimpleOneShotAlertClass):
    """One-shot critical alert: the UPS battery needs replacement."""
    category = AlertCategory.UPS
    level = AlertLevel.CRITICAL
    title = 'UPS Battery Needs Replacement'
    text = 'UPS %(ups)s Battery needs replacement.%(body)s'
    deleted_automatically = False
    keys = []
| 1,733 | Python | .py | 43 | 35.534884 | 97 | 0.739833 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,199 | nfs_exportsd.py | truenas_middleware/src/middlewared/middlewared/alert/source/nfs_exportsd.py | from middlewared.alert.base import AlertCategory, AlertClass, AlertLevel, SimpleOneShotAlertClass
class NFSblockedByExportsDirAlertClass(AlertClass, SimpleOneShotAlertClass):
    """One-shot alert: stray /etc/exports.d entries are blocking NFS startup."""
    category = AlertCategory.SHARING
    level = AlertLevel.ERROR
    title = "NFS start is blocked by entries in /etc/exports.d"
    text = "/etc/exports.d contains entries that must be removed: %(entries)s"

    async def delete(self, alerts, query):
        # delete() always clears every alert of this class, regardless of query.
        return []
class NFSexportMappingInvalidNamesAlertClass(AlertClass, SimpleOneShotAlertClass):
    """One-shot alert: NFS shares with invalid names were excluded from exports."""
    category = AlertCategory.SHARING
    level = AlertLevel.ERROR
    title = "NFS export entry blocked"
    text = "NFS shares have invalid names:\n%(share_list)s"

    async def delete(self, alerts, query):
        # delete() always clears every alert of this class, regardless of query.
        return []
| 760 | Python | .py | 15 | 45.533333 | 97 | 0.76184 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |