id
int64 0
458k
| file_name
stringlengths 4
119
| file_path
stringlengths 14
227
| content
stringlengths 24
9.96M
| size
int64 24
9.96M
| language
stringclasses 1
value | extension
stringclasses 14
values | total_lines
int64 1
219k
| avg_line_length
float64 2.52
4.63M
| max_line_length
int64 5
9.91M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 7
101
| repo_stars
int64 100
139k
| repo_forks
int64 0
26.4k
| repo_open_issues
int64 0
2.27k
| repo_license
stringclasses 12
values | repo_extraction_date
stringclasses 433
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
10,400
|
event.py
|
zatosource_zato/code/zato-server/src/zato/server/file_transfer/event.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import os
from datetime import datetime
from io import StringIO
from logging import getLogger
from traceback import format_exc
# Watchdog
from watchdog.events import DirCreatedEvent
# Zato
from zato.common.util.api import hot_deploy, spawn_greenlet
if 0:
from bunch import Bunch
from zato.server.file_transfer.api import FileTransferAPI
from zato.server.file_transfer.observer.base import BaseObserver, PathCreatedEvent
from zato.server.file_transfer.snapshot import BaseRemoteSnapshotMaker
Bunch = Bunch
FileTransferAPI = FileTransferAPI
BaseObserver = BaseObserver
BaseRemoteSnapshotMaker = BaseRemoteSnapshotMaker
PathCreatedEvent = PathCreatedEvent
# ################################################################################################################################
# ################################################################################################################################_s
logger = getLogger('zato')
# ################################################################################################################################
# ################################################################################################################################_s
singleton = object()
# ################################################################################################################################
# ################################################################################################################################
class FileTransferEvent:
    """ Encapsulates information about a file picked up from file system.
    """
    # When this event took place
    timestamp: datetime

    # True if it was a creation event, False if it was a modification event
    is_create: bool

    # This is the directory where the file is located
    base_dir = 'not-set' # type: str

    # This is the directory of the file relative to the server's base directory.
    # It will stay None if self.full_path is an absolute directory.
    relative_dir = 'not-set' # type: str

    # This is the file name only
    file_name = 'not-set' # type: str

    # Full path to the file
    full_path = 'not-set' # type: str

    # Name of the file transfer channel that picked up this file
    channel_name = 'not-set' # type: str

    # UTC timestamp of the event, as text
    ts_utc = 'not-set' # type: str

    # The file's contents as read from disk, decoded to str if needed
    raw_data = 'not-set' # type: str

    # Parsed form of raw_data. The module-level singleton marker distinguishes
    # "never parsed" from parsed data that happens to be falsy - callers compare
    # against the marker with `is not singleton`.
    data = singleton # type: str

    # True if raw_data was read for this event
    has_raw_data = 'not-set' # type: bool

    # True if raw_data was successfully parsed into self.data
    has_data = 'not-set' # type: bool

    # Traceback text if parsing raw_data failed
    parse_error = 'not-set' # type: str
# ################################################################################################################################
# ################################################################################################################################
class FileTransferEventHandler:
    """ Receives file creation/modification events from an observer, turns them into
    FileTransferEvent objects and dispatches them to the manager's callbacks.
    """

    def __init__(self, manager:'FileTransferAPI', channel_name:'str', config:'Bunch') -> 'None':
        self.manager = manager
        self.channel_name = channel_name
        self.config = config

        # Some parsers require their input to be a StringIO object instead of a plain str.
        self.config.parser_needs_string_io = self._check_if_parser_needs_string_io(self.config)

# ################################################################################################################################

    def _check_if_parser_needs_string_io(self, config:'Bunch'):
        # Currently, only the stdlib CSV reader expects a file-like object on input.
        return config.should_parse_on_pickup and \
            config.parse_with and \
            config.parse_with == 'py:csv.reader'

# ################################################################################################################################

    def _on_path_event_observed(
        self,
        transfer_event,      # type: PathCreatedEvent
        observer,            # type: BaseObserver
        snapshot_maker=None, # type: BaseRemoteSnapshotMaker | None
        *,
        is_create # type: bool
    ) -> 'None':
        """ Common handler for both creation and modification events - is_create says which one it was.
        Never raises - any exception is logged and swallowed so the observer keeps running.
        """
        try:
            # Ignore the event if it points to the directory itself,
            # as inotify will send CLOSE_WRITE when it is not a creation of a file
            # but a fact that a directory has been deleted that the event is about.
            # Note that we issue a log entry only if the path is not one of what
            # we observe, i.e. when one of our own directories is deleted, we do not log it here.

            # The path must have existed since we are being called
            # and we need to check why it does not exist anymore ..
            if not observer.path_exists(transfer_event.src_path, snapshot_maker):

                # .. if this type of an observer does not wait for paths, we can return immediately ..
                if not observer.should_wait_for_deleted_paths:
                    return

                # .. if it is one of the paths that we observe, it means that it has been just deleted,
                # so we need to run a background inspector which will wait until it is created once again ..
                if transfer_event.src_path in self.config.pickup_from_list:
                    self.manager.wait_for_deleted_path(transfer_event.src_path)
                else:
                    logger.info('Ignoring local file event; path not in pickup_from_list `%s` (%r -> %r)',
                        transfer_event.src_path, self.config.name, self.config.pickup_from_list)

                # .. in either case, there is nothing else we can do here.
                return

            # Get file name to check if we should handle it ..
            file_name = os.path.basename(transfer_event.src_path) # type: str

            # .. return if we should not.
            if not self.manager.should_handle(self.config.name, file_name):
                return

            # Fill in all the details of the event being processed.
            event = FileTransferEvent()
            event.timestamp = datetime.utcnow()
            event.is_create = is_create
            event.full_path = transfer_event.src_path
            event.file_name = file_name
            event.base_dir = os.path.dirname(event.full_path)
            event.relative_dir = self.manager.build_relative_dir(event.full_path)
            event.channel_name = self.channel_name

            # Hot-deployment events are handled in background and we return immediately afterwards.
            if self.config.is_hot_deploy:
                if transfer_event.is_directory:
                    if isinstance(transfer_event, DirCreatedEvent):
                        logger.info('About to add a new hot-deployment directory -> %s', event.full_path)
                        self.manager.add_pickup_dir(event.full_path, f'File transfer -> {self.channel_name}')
                else:
                    _ = spawn_greenlet(hot_deploy, self.manager.server, event.file_name, event.full_path,
                        self.config.should_delete_after_pickup, should_deploy_in_place=self.config.should_deploy_in_place)
                return

            if self.config.should_read_on_pickup:

                if snapshot_maker:
                    raw_data = snapshot_maker.get_file_data(event.full_path)
                else:
                    # Use a context manager - previously the file object was never closed
                    # because `f.close` was referenced but not actually called.
                    with open(event.full_path, 'rb') as f:
                        raw_data = f.read()

                event.raw_data = raw_data if isinstance(raw_data, str) else raw_data.decode(self.config.data_encoding) # type: str
                event.has_raw_data = True

                if self.config.should_parse_on_pickup:

                    try:
                        data_to_parse = StringIO(event.raw_data) if self.config.parser_needs_string_io else event.raw_data
                        parser = self.manager.get_parser(self.config.parse_with)
                        event.data = parser(data_to_parse)
                        event.has_data = True
                    except Exception:
                        # A parse error is recorded on the event but the callbacks are still invoked.
                        exception = format_exc()
                        event.parse_error = exception
                        logger.warning('File transfer parsing error (%s) e:`%s`', self.config.name, exception)

            # Invokes all callbacks for the event
            spawn_greenlet(self.manager.invoke_callbacks, event, self.config.service_list, self.config.topic_list,
                self.config.outconn_rest_list)

            # Performs cleanup actions
            self.manager.post_handle(event, self.config, observer, snapshot_maker)

        except Exception:
            logger.warning('Exception in pickup event handler `%s` (%s) `%s`',
                self.config.name, transfer_event.src_path, format_exc())

# ################################################################################################################################

    def on_created(
        self,
        transfer_event,     # type: PathCreatedEvent
        observer,           # type: BaseObserver
        snapshot_maker=None # type: BaseRemoteSnapshotMaker | None
    ) -> 'None':

        # Call the parent function indicating that it was a creation
        self._on_path_event_observed(
            transfer_event,
            observer,
            snapshot_maker,
            is_create=True
        )

# ################################################################################################################################

    def on_modified(
        self,
        transfer_event,     # type: PathCreatedEvent
        observer,           # type: BaseObserver
        snapshot_maker=None # type: BaseRemoteSnapshotMaker | None
    ) -> 'None':

        # Call the parent function indicating that it was a modification
        self._on_path_event_observed(
            transfer_event,
            observer,
            snapshot_maker,
            is_create=False
        )
# ################################################################################################################################
# ################################################################################################################################
| 10,086
|
Python
|
.py
| 179
| 46
| 132
| 0.500609
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,401
|
api.py
|
zatosource_zato/code/zato-server/src/zato/server/file_transfer/api.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2024, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import logging
import os
from datetime import datetime
from http.client import OK
from importlib import import_module
from mimetypes import guess_type as guess_mime_type
from pathlib import PurePath
from re import IGNORECASE
from sys import maxsize
from traceback import format_exc
# gevent
from gevent import sleep
from gevent.lock import RLock
# globre
import globre
# Zato
from zato.common.api import FILE_TRANSFER
from zato.common.typing_ import cast_
from zato.common.util.api import new_cid, spawn_greenlet
from zato.common.util.platform_ import is_linux, is_non_linux
from zato.server.file_transfer.common import source_type_ftp, source_type_local, source_type_sftp, \
source_type_to_snapshot_maker_class
from zato.server.file_transfer.event import FileTransferEventHandler, singleton
from zato.server.file_transfer.observer.base import BackgroundPathInspector, PathCreatedEvent
from zato.server.file_transfer.observer.local_ import LocalObserver
from zato.server.file_transfer.observer.ftp import FTPObserver
from zato.server.file_transfer.observer.sftp import SFTPObserver
# ################################################################################################################################
# ################################################################################################################################
if 0:
from bunch import Bunch
from requests import Response
from zato.common.typing_ import any_, anydict, anylist, list_
from zato.server.base.parallel import ParallelServer
from zato.server.base.worker import WorkerStore
from zato.server.file_transfer.event import FileTransferEvent
from zato.server.file_transfer.observer.base import BaseObserver
from zato.server.file_transfer.snapshot import BaseRemoteSnapshotMaker
# ################################################################################################################################
# ################################################################################################################################
logger = logging.getLogger(__name__)
# ################################################################################################################################
# ################################################################################################################################
# Maps each file transfer source type to the observer class that watches it.
source_type_to_observer_class = {
    source_type_ftp: FTPObserver,
    source_type_local: LocalObserver,
    source_type_sftp: SFTPObserver,
}

# ################################################################################################################################
# ################################################################################################################################

# Files whose names end in any of these suffixes are never picked up
# (editor temporary and backup files).
suffix_ignored = (
    '.swp',
    '~',
)
# ################################################################################################################################
# ################################################################################################################################
class FileTransferAPI:
""" Manages file transfer observers and callbacks.
"""
def __init__(self, server:'ParallelServer', worker_store:'WorkerStore') -> 'None':
    self.server = server
    self.worker_store = worker_store

    # Guards create/edit/delete operations on channels
    self.update_lock = RLock()

    # Checked by the inotify main loop - setting it to False stops the loop
    self.keep_running = True

    # A list of all observer objects
    self.observer_list:'list_[BaseObserver]' = []

    # A mapping of channel_id to an observer object associated with the channel.
    # Note that only non-inotify observers are added here.
    self.observer_dict = {}

    # Caches parser objects by their name
    self._parser_cache = {}

    # Information about what local paths should be ignored, i.e. we should not send events about them.
    self._local_ignored = set()

    if is_linux:

        # inotify_simple
        from inotify_simple import flags as inotify_flags, INotify

        self.inotify_lock = RLock()

        self.inotify = INotify()

        # React only to CLOSE_WRITE, i.e. when a file opened for writing is closed
        self.inotify_flags = inotify_flags.CLOSE_WRITE

        # Maps inotify watch descriptors to the directories they watch
        self.inotify_wd_to_path = {}

        # Maps each watched directory to the observers interested in it
        self.inotify_path_to_observer_list = {}

        # Inotify is used only under Linux
        self.observer_start_args = self.inotify, self.inotify_flags, self.inotify_lock, self.inotify_wd_to_path

    else:
        self.observer_start_args = ()

    # Maps channel name to a list of globre patterns for the channel's directories
    self.pattern_matcher_dict = {}
# ################################################################################################################################
def add_pickup_dir(self, path:'str', source:'str') -> 'None':
    """ Asks the server to start picking up files from a new local directory. """
    server = self.server
    server.add_pickup_conf_from_local_path(path, source)
# ################################################################################################################################
def _create(self, config:'Bunch') -> 'None':
    """ Low-level implementation of self.create.
    """
    flags = globre.EXACT

    if not config.is_case_sensitive:
        flags |= IGNORECASE

    # Normalize the input patterns to a list first - a single pattern may be given as a plain string.
    file_patterns = config.file_patterns
    pattern_matcher_list = [file_patterns] if not isinstance(file_patterns, list) else file_patterns

    # Compile the normalized list. Previously `file_patterns` was compiled directly,
    # which meant that a single pattern given as a string was iterated character by character.
    pattern_matcher_list = [globre.compile(elem, flags) for elem in pattern_matcher_list]
    self.pattern_matcher_dict[config.name] = pattern_matcher_list

    # This is optional and usually will not exist
    config.is_recursive = config.get('is_recursive') or False

    # This will be a list in the case of pickup.conf and not a list if read from ODB-based file transfer channels
    if isinstance(config.pickup_from_list, list):
        pickup_from_list = config.pickup_from_list # type: ignore
    else:
        pickup_from_list = str(config.pickup_from_list) # type: any_
        pickup_from_list = [elem.strip() for elem in pickup_from_list.splitlines()]
        pickup_from_list = cast_('anylist', pickup_from_list)

    # Make sure that a parser is given if we are to parse any input ..
    if config.should_parse_on_pickup:

        # .. log a warning and disable parsing if no parser was configured when it was expected.
        if not config.parse_with:
            logger.warning('Parsing is enabled but no parser is declared for file transfer channel `%s` (%s)',
                config.name, config.source_type)
            config.should_parse_on_pickup = False

    # Create an observer object ..
    observer_class = source_type_to_observer_class[config.source_type]
    observer = observer_class(self, config) # type: ignore
    observer = cast_('BaseObserver', observer)

    # .. and add it to data containers ..
    self.observer_list.append(observer)

    # .. but do not add it to the mapping dict because locally-defined observers (from pickup.conf)
    # may not have any ID, or to be more precise, they may have the same ID.
    if not observer.is_notify:
        self.observer_dict[observer.channel_id] = observer

    # .. finally, set up directories and callbacks for the observer.
    event_handler = FileTransferEventHandler(self, config.name, config)
    observer.set_up(event_handler, pickup_from_list, recursive=config.is_recursive)
# ################################################################################################################################
def create(self, config:'Bunch') -> 'None':
    """ Thread-safe wrapper around self._create - builds a file transfer channel without starting it. """
    with self.update_lock:
        self._create(config)
# ################################################################################################################################
def _delete(self, config:'Bunch') -> 'None':
""" Low-level implementation of self.delete.
"""
# Observer object to delete ..
observer_to_delete = None
# .. paths under which the observer may be listed (used only under Linux with inotify).
observer_path_list = [] # type: anylist
# .. have we preferred to use inotify for this channel ..
prefer_inotify = self.is_notify_preferred(config)
# .. stop its main loop ..
for observer in self.observer_list:
observer = cast_('LocalObserver', observer)
if observer.channel_id == config.id:
needs_log = observer.is_local and (not prefer_inotify)
observer.stop(needs_log=needs_log)
observer_to_delete = observer
observer_path_list[:] = observer.path_list
break
else:
raise ValueError('Could not find observer matching ID `%s` (%s)', config.id, config.type_)
# .. if the object was found ..
if observer_to_delete:
# .. delete it from the main list ..
self.observer_list.remove(observer_to_delete)
# .. delete it from the mapping of channels to observers as well ..
if not observer_to_delete.is_local:
self.observer_dict.pop(observer_to_delete.channel_id)
# .. for local transfer under Linux, delete it from any references among paths being observed via inotify.
if prefer_inotify and config.source_type == source_type_local:
for path in observer_path_list:
observer_list = self.inotify_path_to_observer_list.get(path) or [] # type: anylist
observer_list.remove(observer_to_delete)
# ################################################################################################################################
def delete(self, config:'Bunch') -> 'None':
    """ Thread-safe wrapper around self._delete - removes a file transfer channel. """
    with self.update_lock:
        self._delete(config)
# ################################################################################################################################
def edit(self, config:'Bunch') -> 'None':
    """ Edits a file transfer channel by deleting and recreating it. """
    with self.update_lock:

        # First, remove the existing channel ..
        self._delete(config)

        # .. then build it anew ..
        self._create(config)

        # .. start it if it is active, but only local channels,
        # because any other kind is triggered by the scheduler ..
        if config.is_active and config.source_type == source_type_local:
            self.start_observer(config.name, True)

        # .. look up the newly created observer object ..
        observer = self.get_observer_by_channel_id(config.id) # type: BaseObserver

        # .. and log that we are done.
        logger.info('%s file observer `%s` set up successfully (%s) (%s)',
            observer.observer_type_name_title, observer.name, observer.observer_type_impl, observer.path_list)
# ################################################################################################################################
def get_py_parser(self, name:'str') -> 'any_':
    """ Imports a Python object that represents a parser, given its dotted path. """
    module_path, _, callable_name = name.rpartition('.')
    module = import_module(module_path)
    return getattr(module, callable_name)
# ################################################################################################################################
def get_service_parser(self, name:'str') -> 'None':
    """ Returns a service that will act as a parser - not implemented yet. """
    raise NotImplementedError()
# ################################################################################################################################
def get_parser(self, parser_name:'str') -> 'any_':
    """ Returns a parser by name (may possibly return an already cached one). """
    # A cache hit means there is nothing else to do.
    try:
        return self._parser_cache[parser_name]
    except KeyError:
        pass

    # The name is in the form of 'prefix:dotted.path' where the prefix
    # decides whether it is a Python object or a service.
    prefix, name = parser_name.strip().split(':')

    if prefix == 'py':
        parser = self.get_py_parser(name)
    else:
        parser = self.get_service_parser(name)

    self._parser_cache[parser_name] = parser
    return parser
# ################################################################################################################################
def get_observer_by_channel_id(self, channel_id:'int') -> 'BaseObserver':
    # Raises KeyError if the channel has no observer registered.
    observer_map = self.observer_dict
    return observer_map[channel_id]
# ################################################################################################################################
def should_handle(self, channel_name:'str', file_name:'str') -> 'bool':
    """ Returns True if the file matches any of the channel's patterns
    and does not carry one of the ignored suffixes.
    """
    for pattern in self.pattern_matcher_dict[channel_name]:
        if pattern.match(file_name):
            # The first matching pattern decides - but editor temporary
            # and backup files are still rejected.
            return not file_name.endswith(suffix_ignored)
    return False
# ################################################################################################################################
def invoke_callbacks(
    self,
    event,            # type: FileTransferEvent
    service_list,     # type: anylist
    topic_list,       # type: anylist
    outconn_rest_list # type: anylist
) -> 'None':
    """ Builds a request describing the event and hands it over to all the
    service, topic and REST callbacks configured for the channel.
    """
    # Ignored paths produce no callbacks at all.
    if self.is_local_path_ignored(event.full_path):
        return

    config = self.worker_store.get_channel_file_transfer_config(event.channel_name)

    # The singleton marker means that no parsed data is available for this event.
    data = None if event.data is singleton else event.data

    request = {
        'full_path': event.full_path,
        'file_name': event.file_name,
        'relative_dir': event.relative_dir,
        'base_dir': event.base_dir,
        'channel_name': event.channel_name,
        'ts_utc': datetime.utcnow().isoformat(),
        'raw_data': event.raw_data,
        'data': data,
        'has_raw_data': event.has_raw_data,
        'has_data': event.has_data,
        'parse_error': event.parse_error,
        'config': config,
    }

    # Services
    self.invoke_service_callbacks(service_list, request)

    # Topics
    self.invoke_topic_callbacks(topic_list, request)

    # REST outgoing connections
    self.invoke_rest_outconn_callbacks(outconn_rest_list, request)
# ################################################################################################################################
def invoke_service_callbacks(self, service_list:'anylist', request:'anydict') -> 'None':
    """ Invokes each service from the list with the same request, each in its own greenlet. """
    for service_name in service_list: # type: str
        try:
            _ = spawn_greenlet(self.server.invoke, service_name, request)
        except Exception:
            logger.warning(format_exc())
# ################################################################################################################################
def invoke_topic_callbacks(self, topic_list:'anylist', request:'anydict') -> 'None':
    """ Publishes the request to each topic from the list, each in its own greenlet. """
    for topic_name in topic_list:
        topic_name = cast_('str', topic_name)
        try:
            _ = spawn_greenlet(self.server.invoke, topic_name, request)
        except Exception:
            logger.warning(format_exc())
# ################################################################################################################################
def _invoke_rest_outconn_callback(self, item_id:'str', request:'anydict') -> 'None':
    """ Pings a single REST outgoing connection and, if it responds, POSTs the picked-up file to it. """
    cid = new_cid()

    conn_item = self.worker_store.get_outconn_rest_by_id(item_id) # type: any_
    ping_response = conn_item.ping(cid, return_response=True, log_verbose=True) # type: Response

    # A failed ping means that there is no point in trying to send anything.
    if ping_response.status_code != OK:
        logger.warning('Could not ping file transfer connection for `%s` (%s); config:`%s`, r:`%s`, h:`%s`',
            request['full_path'], request['config'].name, conn_item.config, ping_response.text, ping_response.headers)
        return

    file_name = request['file_name']

    # Fall back to a generic binary type if the file name does not suggest anything more specific.
    mime_type = guess_mime_type(file_name, strict=False)
    mime_type = mime_type[0] if mime_type[0] else 'application/octet-stream'

    payload = request['raw_data']
    params = {'file_name': file_name, 'mime_type': mime_type}

    headers = {
        'X-Zato-File-Name': file_name,
        'X-Zato-Mime-Type': mime_type,
    }

    response = conn_item.conn.post(cid, payload, params, headers=headers) # type: Response

    if response.status_code != OK:
        logger.warning('Could not send file `%s` (%s) to `%s` (p:`%s`, h:`%s`), r:`%s`, h:`%s`',
            request['full_path'], request['config'].name, conn_item.config, params, headers,
            response.text, response.headers)
# ################################################################################################################################
def invoke_rest_outconn_callbacks(self, outconn_rest_list:'anylist', request:'anydict') -> 'None':
    """ Hands the request to each REST outgoing connection, each in its own greenlet. """
    for conn_id in outconn_rest_list: # type: int
        _ = spawn_greenlet(self._invoke_rest_outconn_callback, conn_id, request)
# ################################################################################################################################
def post_handle(
    self,
    event,          # type: FileTransferEvent
    config,         # type: Bunch
    observer,       # type: BaseObserver
    snapshot_maker  # type: BaseRemoteSnapshotMaker
) -> 'None':
    """ Runs after callback services have been invoked - moves and/or deletes the file if so configured. """
    move_to = config.move_processed_to

    if move_to:
        observer.move_file(event.full_path, move_to, observer, snapshot_maker)

    if config.should_delete_after_pickup:
        observer.delete_file(event.full_path, snapshot_maker)
# ################################################################################################################################
def _run_linux_inotify_loop(self) -> 'None':
    """ The inotify main loop - repeatedly reads events and dispatches each one
    to all the observers interested in the directory the event is about.
    """
    while self.keep_running:
        try:
            for event in self.inotify.read(0):
                try:
                    # Rebuild the full path of the file the event concerns
                    dir_name = self.inotify_wd_to_path[event.wd]
                    src_path = os.path.normpath(os.path.join(dir_name, event.name))

                    # Notify every observer watching this directory
                    for observer in self.inotify_path_to_observer_list[dir_name]: # type: LocalObserver
                        observer.event_handler.on_created(PathCreatedEvent(src_path, is_dir=False), observer)

                except Exception:
                    logger.warning('Exception in inotify handler `%s`', format_exc())

        except Exception:
            logger.warning('Exception in inotify.read() `%s`', format_exc())

        finally:
            sleep(0.25) # type: ignore
# ################################################################################################################################
def _run(self, name:'str'='', log_after_started:'bool'=False) -> 'None':
    """ Starts all local observers, or one selected by name, spawning background
    inspectors for paths that do not exist yet, and finally runs the inotify
    main loop if inotify is the preferred implementation.
    """
    # Under Linux, for each observer, map each of its watched directories
    # to the actual observer object so that when an event is emitted
    # we will know, based on the event's full path, which observers to notify.
    self.inotify_path_to_observer_list = {}

    for observer in self.observer_list: # type: LocalObserver
        for path in observer.path_list: # type: str
            observer_list:'anylist' = self.inotify_path_to_observer_list.setdefault(path, [])
            observer_list.append(observer)

    # Maps missing paths to all the observers interested in it.
    missing_path_to_inspector = {}

    # Start the observer objects, creating inotify watch descriptors (wd) in background ..
    for observer in self.observer_list: # type: BaseObserver
        try:
            # Skip non-local observers
            if not observer.is_local:
                continue

            # Filter out unneeded names
            if name and name != observer.name:
                continue

            # Quickly check if any of the observer's path is missing and if it is, do not start it now.
            # Instead, we will run a background task that will wait until the path becomes available and when it is,
            # it will add start the observer itself.
            for path in observer.path_list:
                if not observer.is_path_valid(path):
                    path_observer_list:'anylist' = missing_path_to_inspector.setdefault(path, [])
                    path_observer_list.append(BackgroundPathInspector(path, observer, self.observer_start_args))

            # Inotify-based observers are set up here but their main loop is in _run_linux_inotify_loop ..
            if self.is_notify_preferred(observer.channel_config):
                observer.start(self.observer_start_args)

            # .. whereas snapshot observers are started here.
            else:
                self._run_snapshot_observer(observer)

            if log_after_started:
                logger.info('Started file observer `%s` path:`%s`', observer.name, observer.path_list)

        except Exception:
            logger.warning('File observer `%s` could not be started, path:`%s`, e:`%s`',
                observer.name, observer.path_list, format_exc())

    # If there are any paths missing for any observer ..
    if missing_path_to_inspector:

        # .. wait for each such path in background.
        self.run_inspectors(missing_path_to_inspector)

    # Under Linux, run the inotify main loop for each watch descriptor created for paths that do exist.
    # Note that if we are not on Linux, each observer.start call above already ran a new greenlet with an observer
    # for a particular directory.
    # NOTE(review): `observer` here is whatever object the for loop above ended on - if observer_list
    # is empty this raises NameError; confirm that this reliance on the leaked loop variable is intentional.
    if self.is_notify_preferred(observer.channel_config): # type: ignore
        _ = spawn_greenlet(self._run_linux_inotify_loop)
# ################################################################################################################################
def get_inspector_list_by_path(self, path:'str') -> 'anydict':
    """ Returns a dict that maps the input path to background inspectors
    for every observer that watches this very path.
    """
    path_to_inspector = {}

    # Visit each observer to check whether it is interested in the path ..
    for observer in self.observer_list: # type: BaseObserver
        if path in observer.path_list:
            # .. it is, so an inspector pointing to this observer is appended.
            inspectors:'anylist' = path_to_inspector.setdefault(path, [])
            inspectors.append(BackgroundPathInspector(path, observer, self.observer_start_args))

    return path_to_inspector
# ################################################################################################################################
def run_inspectors(self, path_to_inspector_list:'anydict') -> 'None':
    """ Starts every background inspector waiting for each path from the input dict. """
    # The keys (paths) are not needed here, only the inspectors themselves.
    for inspector_list in path_to_inspector_list.values(): # type: list
        for inspector in inspector_list: # type: BackgroundPathInspector
            inspector.start()
# ################################################################################################################################
def wait_for_deleted_path(self, path:'str') -> 'None':
    """ Spawns background inspectors that will wait until the given path is recreated. """
    self.run_inspectors(self.get_inspector_list_by_path(path))
# ################################################################################################################################
def run(self):
    """ Starts all the observers - an alias for self._run with default arguments. """
    self._run()
# ################################################################################################################################
def start_observer(self, name:'str', log_after_started:'bool'=False) -> 'None':
    """ Starts a single observer, selected by its name. """
    self._run(name, log_after_started)
# ################################################################################################################################
def _run_snapshot_observer(self, observer:'BaseObserver', max_iters:'int'=maxsize) -> 'None':
    """ Connects a snapshot maker matching the observer's source type
    and observes each of the observer's paths in a background greenlet.
    """
    # Inactive observers are never started.
    if not observer.is_active:
        return

    channel_config = observer.channel_config
    snapshot_maker_class = source_type_to_snapshot_maker_class[channel_config.source_type]

    snapshot_maker = snapshot_maker_class(self, channel_config) # type: any_
    snapshot_maker = cast_('BaseRemoteSnapshotMaker', snapshot_maker)
    snapshot_maker.connect()

    for path in observer.path_list: # type: (str)
        _ = spawn_greenlet(observer.observe_with_snapshots, snapshot_maker, path, max_iters, False)
# ################################################################################################################################
def run_snapshot_observer(self, channel_id:'int', max_iters:'int') -> 'None':
    """ Looks up an observer by its channel ID and runs it in snapshot mode. """
    observer = self.get_observer_by_channel_id(channel_id) # type: BaseObserver
    self._run_snapshot_observer(observer, max_iters)
# ################################################################################################################################
def build_relative_dir(self, path:'str') -> 'str':
""" Builds a path based on input relative to the server's top-level directory.
I.e. it extracts what is known as pickup_from in pickup.conf from the incoming path.
"""
# By default, we have no result
relative_dir = FILE_TRANSFER.DEFAULT.RelativeDir
try:
server_base_dir = PurePath(self.server.base_dir)
path = PurePath(path) # type: ignore
relative_dir = path.relative_to(server_base_dir) # type: ignore
except Exception as e:
# This is used when the .relative_do was not able to build relative_dir
if isinstance(e, ValueError):
log_func = logger.info
else:
log_func = logger.warning
log_func('Could not build relative_dir from `%s` and `%s` (%s)', self.server.base_dir, path, e.args[0])
else:
# No ValueError = relative_dir was extracted, but it still contains the file name
# so we need to get the directory leading to it.
relative_dir = os.path.dirname(relative_dir)
finally:
# Now, we can return the result
return relative_dir
# ################################################################################################################################
    def add_local_ignored_path(self, path:'str') -> 'None':
        """ Adds a local path to the set of paths that file transfer should ignore.
        """
        self._local_ignored.add(path)
# ################################################################################################################################
def remove_local_ignored_path(self, path:'str') -> 'None':
try:
self._local_ignored.remove(path)
except KeyError:
logger.info('Path `%s` not among `%s`', path, sorted(self._local_ignored))
# ################################################################################################################################
    def is_local_path_ignored(self, path:'str') -> 'bool':
        """ Returns True if the path is among the locally ignored ones.
        """
        return path in self._local_ignored
# ################################################################################################################################
def is_notify_preferred(self, channel_config:'Bunch') -> 'bool':
""" Returns True if inotify is the preferred notification method for input configuration and current OS.
"""
# This will be set, for instance, if we run under Vagrant or a similar tool,
# and we need to share our pickup directories with the host. In such a case,
# we cannot rely on inotify at all.
env_key = 'ZATO_HOT_DEPLOY_PREFER_SNAPSHOTS'
# Our preference is not to use inotify
if os.environ.get(env_key):
return False
# We do not prefer inotify only if we need recursive scans or if we are not under Linux ..
if channel_config.is_recursive or is_non_linux:
return False
# .. otherwise, we prefer inotify.
else:
return True
# ################################################################################################################################
| 29,638
|
Python
|
.py
| 493
| 50.026369
| 130
| 0.507718
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,402
|
snapshot.py
|
zatosource_zato/code/zato-server/src/zato/server/file_transfer/snapshot.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import os
from datetime import datetime
from logging import getLogger
from pathlib import Path
from traceback import format_exc
# ciso8601
try:
from zato.common.util.api import parse_datetime
except ImportError:
from dateutil.parser import parse as parse_datetime
# Zato
from zato.common.json_ import dumps
from zato.common.odb.query.generic import FTPFileTransferWrapper, SFTPFileTransferWrapper
from zato.common.typing_ import cast_
from zato.server.connection.file_client.base import PathAccessException
from zato.server.connection.file_client.ftp import FTPFileClient
from zato.server.connection.file_client.sftp import SFTPFileClient
# ################################################################################################################################
# ################################################################################################################################
if 0:
from bunch import Bunch
from zato.common.typing_ import any_, anydict, anylist
from zato.server.connection.file_client.base import BaseFileClient
from zato.server.connection.ftp import FTPStore
from zato.server.file_transfer.api import FileTransferAPI
from zato.server.file_transfer.observer.base import BaseObserver
BaseFileClient = BaseFileClient
BaseObserver = BaseObserver
Bunch = Bunch
FileTransferAPI = FileTransferAPI
FTPStore = FTPStore
# ################################################################################################################################
# ################################################################################################################################
logger = getLogger('zato')
# ################################################################################################################################
# ################################################################################################################################
# This must be more than 1 because 1 second is the minimum time between two invocations of a scheduled job.
default_interval = 1.1
# ################################################################################################################################
# ################################################################################################################################
class ItemInfo:
    """ Information about a single file as found by a snapshot maker.
    """
    full_path: 'str'
    name: 'str'
    size: 'int' = -1
    last_modified: 'datetime'
    is_dir: 'bool'
    is_file: 'bool'

# ################################################################################################################################

    def to_dict(self):
        """ Returns this file's path, name, size and ISO-8601 last-modified timestamp as a dict.
        """
        return dict(
            full_path = self.full_path,
            name = self.name,
            size = self.size,
            last_modified = self.last_modified.isoformat(),
        )
# ################################################################################################################################
# ################################################################################################################################
class DirSnapshot:
    """ Represents the state of a given directory, i.e. a list of files in it.
    """
    def __init__(self, path:'str') -> 'None':
        self.path = path
        self.file_data = {}

# ################################################################################################################################

    def add_item(self, data:'anylist') -> 'None':
        """ Adds an ItemInfo entry, keyed by its full path, for each input dict.
        """
        for elem in data:
            elem = cast_('anydict', elem)

            info = ItemInfo()
            info.full_path = self.get_full_path(elem)
            info.name = elem['name']
            info.size = elem['size']
            info.is_dir = elem['is_dir']
            info.is_file = elem['is_file']

            # This may be either a string or a datetime object already
            mod = elem['last_modified']
            if not isinstance(mod, datetime):
                mod = parse_datetime(mod)
            info.last_modified = mod

            self.file_data[info.full_path] = info

# ################################################################################################################################

    def get_full_path(self, item:'anydict') -> 'str':
        """ Returns the item's full path, building one if only a file name is available.
        """
        # Notifiers will sometimes have access to a full path to a file (e.g. local ones)
        # but sometimes they will only have the file name and the full path has to be built
        # using the path assigned to us in the initialiser.
        return item.get('full_path') or os.path.join(self.path, item['name'])

# ################################################################################################################################

    def to_dict(self) -> 'anydict':
        """ Returns all the per-file dicts under the dir_snapshot_file_list key.
        """
        file_list = [elem.to_dict() for elem in self.file_data.values()]
        return {'dir_snapshot_file_list': file_list}

# ################################################################################################################################

    def to_json(self) -> 'str':
        """ Serialises this snapshot to JSON.
        """
        return dumps(self.to_dict())

# ################################################################################################################################

    @staticmethod
    def from_sql_dict(path:'str', sql_dict:'anydict') -> 'DirSnapshot':
        """ Builds a DirSnapshot object out of a dict read from the ODB.
        """
        out = DirSnapshot(path)
        out.add_item(sql_dict['dir_snapshot_file_list'])
        return out
# ################################################################################################################################
# ################################################################################################################################
class DirSnapshotDiff:
    """ A difference between two DirSnapshot objects, i.e. all the files created and modified.
    """
    def __init__(self, previous_snapshot:'DirSnapshot', current_snapshot:'DirSnapshot') -> 'None':

        # Start out empty - populated below only if both snapshots exist
        self.files_created = set()
        self.files_modified = set()

        # We require for both snapshots to exist, otherwise we just return.
        if not (previous_snapshot and current_snapshot):
            return

        previous_data = previous_snapshot.file_data
        current_data = current_snapshot.file_data

        # Anything present now that was absent previously is a new file ..
        self.files_created = set(current_data) - set(previous_data)

        # .. whereas a file already seen before is considered modified if either its size
        # or its last-modified time differs between the snapshots. If a file changed but both
        # its size and timestamp stayed the same, we cannot tell the difference without
        # downloading it, so such a file will not be reported as modified.
        for item in current_data.values():
            item = cast_('ItemInfo', item)
            previous = previous_data.get(item.full_path)
            if previous:
                if item.size != previous.size or item.last_modified != previous.last_modified:
                    self.files_modified.add(item.full_path)
# ################################################################################################################################
# ################################################################################################################################
class AbstractSnapshotMaker:
    """ Base class for objects that take snapshots of directories, local or remote.
    """
    # Set by subclasses that communicate with remote resources
    file_client: 'BaseFileClient'

    def __init__(self, file_transfer_api:'FileTransferAPI', channel_config:'any_') -> 'None':
        self.file_transfer_api = file_transfer_api
        self.channel_config = channel_config

        # A convenience link to the server's ODB layer
        self.odb = file_transfer_api.server.odb

# ################################################################################################################################

    def connect(self) -> 'None':
        """ Establishes a connection to the underlying resource, if needed.
        """
        raise NotImplementedError('Must be implemented in subclasses')

# ################################################################################################################################

    def get_snapshot(self, *args:'any_', **kwargs:'any_') -> 'None':
        """ Returns a snapshot of a directory.
        """
        raise NotImplementedError('Must be implemented in subclasses')

# ################################################################################################################################

    def get_file_data(self, *args:'any_', **kwargs:'any_') -> 'None':
        """ Returns the contents of an individual file.
        """
        raise NotImplementedError('Must be implemented in subclasses')

# ################################################################################################################################

    def store_snapshot(self, snapshot:'DirSnapshot') -> 'None':
        """ Persists a snapshot - a no-op by default.
        """
        pass
# ################################################################################################################################
# ################################################################################################################################
class LocalSnapshotMaker(AbstractSnapshotMaker):
    """ Takes snapshots of local directories, using the filesystem directly.
    """
    def connect(self):
        # Not used with local snapshots
        pass

    def get_snapshot(self, path:'str', *args:'any_', **kwargs:'any_') -> 'DirSnapshot':
        """ Returns a DirSnapshot describing all the files found recursively under path.
        """
        # Collect information about all the files under path, recursively ..
        file_list = [] # type: anylist

        for entry in Path(path).rglob('*'):
            entry_stat = entry.stat()
            file_list.append({
                'full_path': str(entry),
                'name': entry.name,
                'is_dir': entry.is_dir(),
                'is_file': entry.is_file(),
                'size': entry_stat.st_size,
                'last_modified': datetime.fromtimestamp(entry_stat.st_mtime)
            })

        # .. and populate the output snapshot with it.
        snapshot = DirSnapshot(path)
        snapshot.add_item(file_list)

        return snapshot

# ################################################################################################################################

    def get_file_data(self, path:'str') -> 'bytes':
        """ Reads a local file in as bytes.
        """
        with open(path, 'rb') as f:
            return f.read()
# ################################################################################################################################
# ################################################################################################################################
class BaseRemoteSnapshotMaker(AbstractSnapshotMaker):
    """ Functionality shared by FTP and SFTP.
    """
    # Filled in by subclasses:
    # .. the generic SQL wrapper class used to persist snapshots in the ODB ..
    transfer_wrapper_class:'any_' = None

    # .. the worker config attribute holding this source type's outgoing connections ..
    worker_config_out_name = '<invalid-worker_config_out_name>'

    # .. the channel config key with the ID of the outgoing connection to use ..
    source_id_attr_name = '<invalid-source_id_attr_name>'

    # .. the client class used to communicate with remote servers ..
    file_client_class:'any_' = None

    # .. and whether the connection store exposes a direct get_by_id method.
    has_get_by_id = False

# ################################################################################################################################

    def connect(self) -> 'None':
        """ Looks up the outgoing connection this channel uses, builds a file client around it
        and confirms that the connection actually works.
        """
        # Extract all the configuration ..
        store = getattr(self.file_transfer_api.server.worker_store.worker_config, self.worker_config_out_name)
        source_id = int(self.channel_config[self.source_id_attr_name])

        # Some connection types will directly expose this method ..
        if self.has_get_by_id:
            outconn = store.get_by_id(source_id)

        # .. while some will not.
        else:
            for value in store.values():
                config = value['config']
                if config['id'] == source_id:
                    outconn = value.conn
                    break
            else:
                # The for loop ran to completion without a break = no matching connection
                raise ValueError('ID not found in `{}`'.format(store.values()))

        # .. connect to the remote server ..
        self.file_client = self.file_client_class(outconn, self.channel_config)

        # .. and confirm that the connection works.
        self.file_client.ping()

# ################################################################################################################################

    def _get_current_snapshot(self, path:'str') -> 'DirSnapshot':
        """ Builds a snapshot out of the current, remote state of the given path.
        """
        # First, get a list of files under path ..
        result = self.file_client.list(path) # type: anydict

        # .. create a new container for the snapshot ..
        snapshot = DirSnapshot(path)

        if result:
            # .. now, populate with what we found ..
            snapshot.add_item(result['file_list'])

        # .. and return the result.
        return snapshot

# ################################################################################################################################

    def get_snapshot(
        self,
        path, # type: str
        ignored_is_recursive, # type: bool
        is_initial, # type: bool
        needs_store # type: bool
    ) -> 'DirSnapshot | None':
        """ Returns a snapshot of the given remote path, consulting and updating the ODB
        as requested. Returns None if the path could not be accessed.
        """
        # We are not sure yet if we are to need it.
        session = None

        # A combination of our channel's ID and directory we are checking is unique
        name = '{}; {}'.format(self.channel_config.id, path)

        try:
            # If we otherwise know that we will access the database,
            # we can create a new SQL session here.
            if needs_store:
                session = self.odb.session()
                wrapper = self.transfer_wrapper_class(session, self.file_transfer_api.server.cluster_id)

            # NOTE(review): if is_initial is True while needs_store is False, `wrapper` below
            # is never assigned - presumably callers always request storing for the initial
            # snapshot; confirm against call sites.

            # If this is the observer's initial snapshot ..
            if is_initial:

                # .. we need to check if we may perhaps have it in the ODB ..
                already_existing = wrapper.get(name)

                # .. if we do, we can return it ..
                if already_existing:
                    return DirSnapshot.from_sql_dict(path, already_existing)

                # .. otherwise, we return the current state of the remote resource.
                else:
                    return self._get_current_snapshot(path)

            # .. this is not the initial snapshot so we need to make one ..
            snapshot = self._get_current_snapshot(path)

            # .. store it if we are told to ..
            if needs_store:
                wrapper.store(name, snapshot.to_json())

            # .. and return the result to our caller.
            return snapshot

        except PathAccessException as e:
            # The remote path could not be accessed - log it and return None implicitly
            logger.warning('%s. File transfer channel `%s` (%s); e:`%s`',
                e.args[0], self.channel_config.name, self.channel_config.source_type, format_exc())

        except Exception:
            # Anything else is logged and re-raised
            logger.warning('Exception caught in get_snapshot (%s), e:`%s`', self.channel_config.source_type, format_exc())
            raise

        finally:
            if session:
                session.close()

# ################################################################################################################################

    def get_file_data(self, path:'str') -> 'bytes':
        """ Downloads a remote file's contents via the underlying file client.
        """
        return self.file_client.get(path)
# ################################################################################################################################
# ################################################################################################################################
class FTPSnapshotMaker(BaseRemoteSnapshotMaker):
    """ Takes snapshots of remote FTP directories.
    """
    # The generic SQL wrapper used to persist FTP snapshots in the ODB
    transfer_wrapper_class = FTPFileTransferWrapper

    # Outgoing FTP connections are kept under this worker config attribute
    worker_config_out_name = 'out_ftp'

    # The channel config key with the ID of the outgoing FTP connection to use
    source_id_attr_name = 'ftp_source_id'

    # The client class used to communicate with FTP servers
    file_client_class = FTPFileClient

    # The FTP connection store exposes get_by_id directly
    has_get_by_id = True
# ################################################################################################################################
# ################################################################################################################################
class SFTPSnapshotMaker(BaseRemoteSnapshotMaker):
    """ Takes snapshots of remote SFTP directories.
    """
    # The generic SQL wrapper used to persist SFTP snapshots in the ODB
    transfer_wrapper_class = SFTPFileTransferWrapper

    # Outgoing SFTP connections are kept under this worker config attribute
    worker_config_out_name = 'out_sftp'

    # The channel config key with the ID of the outgoing SFTP connection to use
    source_id_attr_name = 'sftp_source_id'

    # The client class used to communicate with SFTP servers
    file_client_class = SFTPFileClient

    # Note that has_get_by_id stays at the inherited default (False),
    # i.e. the SFTP connection is looked up by iterating the store's values.
# ################################################################################################################################
# ################################################################################################################################
| 17,058
|
Python
|
.py
| 299
| 49.013378
| 130
| 0.439013
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,403
|
__init__.py
|
zatosource_zato/code/zato-server/src/zato/server/file_transfer/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2020, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
| 154
|
Python
|
.py
| 5
| 29.4
| 64
| 0.687075
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,404
|
common.py
|
zatosource_zato/code/zato-server/src/zato/server/file_transfer/common.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2024, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# Zato
from zato.common.api import FILE_TRANSFER
from zato.server.file_transfer.snapshot import FTPSnapshotMaker, LocalSnapshotMaker, SFTPSnapshotMaker
# ################################################################################################################################
# ################################################################################################################################
# Shortcuts for the IDs of the supported file transfer source types
source_type_ftp = FILE_TRANSFER.SOURCE_TYPE.FTP.id
source_type_local = FILE_TRANSFER.SOURCE_TYPE.LOCAL.id
source_type_sftp = FILE_TRANSFER.SOURCE_TYPE.SFTP.id

# Maps each source type to the snapshot maker class handling it
source_type_to_snapshot_maker_class = {
    source_type_ftp: FTPSnapshotMaker,
    source_type_local: LocalSnapshotMaker,
    source_type_sftp: SFTPSnapshotMaker,
}
# ################################################################################################################################
# ################################################################################################################################
| 1,165
|
Python
|
.py
| 20
| 56.3
| 130
| 0.421793
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,405
|
sftp.py
|
zatosource_zato/code/zato-server/src/zato/server/file_transfer/observer/sftp.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
from logging import getLogger
# Zato
from .base import BaseObserver
# ################################################################################################################################
if 0:
from zato.server.file_transfer.event import FileTransferEvent
from zato.server.file_transfer.snapshot import BaseRemoteSnapshotMaker
BaseRemoteSnapshotMaker = BaseRemoteSnapshotMaker
FileTransferEvent = FileTransferEvent
# ################################################################################################################################
# ################################################################################################################################
logger = getLogger(__name__)
# ################################################################################################################################
# ################################################################################################################################
class SFTPObserver(BaseObserver):
    """ An observer checking remote SFTP directories.
    """
    observer_type_impl = 'sftp-snapshot'
    observer_type_name = 'SFTP'
    observer_type_name_title = observer_type_name

# ################################################################################################################################

    def path_exists(self, path:'str', snapshot_maker:'BaseRemoteSnapshotMaker') -> 'bool':
        """ Returns True if the path exists on the remote server.
        """
        return snapshot_maker.file_client.path_exists(path)

# ################################################################################################################################

    def path_is_directory(self, path:'str', snapshot_maker:'BaseRemoteSnapshotMaker') -> 'bool':
        # Not implemented for SFTP observers
        raise NotImplementedError()

# ################################################################################################################################

    def is_path_valid(self, path:'str') -> 'bool':
        # Not implemented for SFTP observers
        raise NotImplementedError()

# ################################################################################################################################

    def move_file(
        self,
        path_from, # type: str
        path_to, # type: str
        event, # type: FileTransferEvent
        snapshot_maker # type: BaseRemoteSnapshotMaker
    ) -> 'None':
        """ Moves a file to a selected directory.
        """
        #
        # 1) If we have the data to be moved in the event, we can just store it
        #    on the SFTP server and delete the path from which it was read.
        #
        # 2) If we have no data in the event, we tell the file client to move the file itself
        #
        # The reason we do not always choose path 2) is that a client move_file
        # needs to download the file first before it stores it in path_to,
        # and we can avoid this unnecessary step in path 1) whenever it is possible.
        #

        # Case 1)
        if event.has_raw_data:
            snapshot_maker.file_client.store(path_to, event.raw_data)
            snapshot_maker.file_client.delete_file(path_from)

        # Case 2)
        else:
            snapshot_maker.file_client.move_file(path_from, path_to)

# ################################################################################################################################

    def delete_file(self, path:'str', snapshot_maker:'BaseRemoteSnapshotMaker') -> 'None':
        """ Deletes a file pointed to by path.
        """
        snapshot_maker.file_client.delete_file(path)
# ################################################################################################################################
# ################################################################################################################################
| 3,973
|
Python
|
.py
| 70
| 50.828571
| 130
| 0.396596
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,406
|
local_.py
|
zatosource_zato/code/zato-server/src/zato/server/file_transfer/observer/local_.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2021, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
from traceback import format_exc
# stdlib
import os
from logging import getLogger
from shutil import copy as shutil_copy
# Zato
from zato.common.api import FILE_TRANSFER
from .base import BaseObserver
# ################################################################################################################################
# ################################################################################################################################
if 0:
from zato.common.typing_ import any_, anytuple
# ################################################################################################################################
# ################################################################################################################################
logger = getLogger(__name__)
# ################################################################################################################################
# ################################################################################################################################
class LocalObserver(BaseObserver):
    """ A local file-system observer.
    """
    observer_type_name = 'local'
    observer_type_name_title = observer_type_name.title()

    # Local paths may be deleted and recreated, so deleted paths should be waited for
    should_wait_for_deleted_paths = True

    def __init__(self, *args:'any_', **kwargs:'any_') -> 'None':
        super().__init__(*args, **kwargs)

        # Decide how this channel is observed - with inotify if it is preferred
        # for this configuration and OS, or with periodic snapshots otherwise.
        if self.manager.is_notify_preferred(self.channel_config):
            self.set_local_inotify_observer()
        else:
            self.set_local_snapshot_observer()

# ################################################################################################################################

    def set_local_inotify_observer(self):
        """ Switches this observer to the Linux inotify implementation.
        """
        self.observer_type_impl = FILE_TRANSFER.SOURCE_TYPE_IMPL.LOCAL_INOTIFY
        self._observe_func = self.observe_with_inotify

# ################################################################################################################################

    def set_local_snapshot_observer(self):
        """ Switches this observer to the snapshot-based implementation.
        """
        self.observer_type_impl = FILE_TRANSFER.SOURCE_TYPE_IMPL.LOCAL_SNAPSHOT
        self._observe_func = self.observe_with_snapshots

# ################################################################################################################################

    def path_exists(self, path:'str', _ignored_snapshot_maker:'any_'=None) -> 'bool':
        """ Returns True if the local path exists.
        """
        return os.path.exists(path)

# ################################################################################################################################

    def path_is_directory(self, path:'str', _ignored_snapshot_maker:'any_'=None) -> 'bool':
        """ Returns True if the local path is a directory.
        """
        return os.path.isdir(path)

# ################################################################################################################################

    def move_file(self, path_from:'str', path_to:'str', _ignored_event:'any_', _ignored_snapshot_maker:'any_') -> 'None':
        """ Moves a file to a selected directory.

        NOTE(review): this uses shutil.copy, which copies the file and leaves path_from
        in place - presumably deletion of the source is handled elsewhere; confirm.
        """
        shutil_copy(path_from, path_to)

# ################################################################################################################################

    def delete_file(self, path:'str', _ignored_snapshot_maker:'any_') -> 'None':
        """ Deletes a file pointed to by path.
        """
        os.remove(path)

# ################################################################################################################################

    def observe_with_inotify(self, path:'str', observer_start_args:'anytuple') -> 'None':
        """ Local observer's main loop for Linux, uses inotify.
        """
        try:
            # Shared inotify machinery, handed to us by the caller
            inotify, inotify_flags, lock_func, wd_to_path_map = observer_start_args

            # Create a new watch descriptor
            wd = inotify.add_watch(path, inotify_flags)

            # .. and map the input path to wd for use in higher-level layers.
            with lock_func:
                wd_to_path_map[wd] = path

        except Exception:
            logger.warning("Exception in inotify observer's main loop `%s`", format_exc())

# ################################################################################################################################

    def is_path_valid(self, path:'str') -> 'bool':
        """ Returns True if path exists and is a directory.
        """
        return self.path_exists(path) and self.path_is_directory(path)
# ################################################################################################################################
# ################################################################################################################################
| 4,828
|
Python
|
.py
| 78
| 56.102564
| 130
| 0.363868
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,407
|
__init__.py
|
zatosource_zato/code/zato-server/src/zato/server/file_transfer/observer/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2020, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
| 154
|
Python
|
.py
| 5
| 29.4
| 64
| 0.687075
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,408
|
ftp.py
|
zatosource_zato/code/zato-server/src/zato/server/file_transfer/observer/ftp.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
from logging import getLogger
# Zato
from .base import BaseObserver
# ################################################################################################################################
if 0:
from zato.server.file_transfer.event import FileTransferEvent
from zato.server.file_transfer.snapshot import BaseRemoteSnapshotMaker
BaseRemoteSnapshotMaker = BaseRemoteSnapshotMaker
FileTransferEvent = FileTransferEvent
# ################################################################################################################################
# ################################################################################################################################
logger = getLogger(__name__)
# ################################################################################################################################
# ################################################################################################################################
class FTPObserver(BaseObserver):
""" An observer checking remote FTP directories.
"""
observer_type_impl = 'ftp-snapshot'
observer_type_name = 'FTP'
observer_type_name_title = observer_type_name
# ################################################################################################################################
def path_exists(self, path:'str', snapshot_maker:'BaseRemoteSnapshotMaker') -> 'bool':
return snapshot_maker.file_client.path_exists(path)
# ################################################################################################################################
def path_is_directory(self, path:'str', snapshot_maker:'BaseRemoteSnapshotMaker') -> 'bool':
raise NotImplementedError()
# ################################################################################################################################
def is_path_valid(self, path:'str') -> 'bool':
raise NotImplementedError()
# ################################################################################################################################
def move_file(
self,
path_from, # type: str
path_to, # type: str
event, # type: FileTransferEvent
snapshot_maker # type: BaseRemoteSnapshotMaker
) -> 'None':
""" Moves a file to a selected directory.
"""
#
# 1) If we have the data to be moved in the event, we can just store it
# on the FTP server and delete the path from which it was read.
#
# 2) If we have no data in the event, we tell the file to move the file itself
#
# The reason we do not always choose path 2) is that a client move_file
# needs to download the file first before it stores it in path_to,
# and we can avoid this unnecessary step in path 1) whenever it is possible.
#
#
# Case 1)
if event.has_raw_data:
snapshot_maker.file_client.store(path_to, event.raw_data)
snapshot_maker.file_client.delete_file(path_from)
# Case 2)
else:
snapshot_maker.file_client.move_file(path_from, path_to)
# ################################################################################################################################
def delete_file(self, path:'str', snapshot_maker:'BaseRemoteSnapshotMaker') -> 'None':
    """ Deletes a remote file pointed to by path, using the client attached to snapshot_maker.
    """
    client = snapshot_maker.file_client
    client.delete_file(path)
# ################################################################################################################################
# ################################################################################################################################
| 3,968
|
Python
|
.py
| 70
| 50.771429
| 130
| 0.395973
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,409
|
base.py
|
zatosource_zato/code/zato-server/src/zato/server/file_transfer/observer/base.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2024, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import os
from datetime import datetime
from logging import getLogger
from sys import maxsize
from traceback import format_exc
# gevent
from gevent import sleep
# Watchdog
from watchdog.events import DirCreatedEvent, DirModifiedEvent, FileCreatedEvent, FileModifiedEvent
# Zato
from zato.common.api import FILE_TRANSFER
from zato.common.typing_ import cast_
from zato.common.util.api import spawn_greenlet
from zato.common.util.file_transfer import path_string_list_to_list
from zato.server.file_transfer.common import source_type_to_snapshot_maker_class
from zato.server.file_transfer.snapshot import default_interval, DirSnapshotDiff
# ################################################################################################################################
if 0:
from bunch import Bunch
from zato.common.typing_ import any_, anylist, anytuple, callable_
from zato.server.file_transfer.api import FileTransferAPI
from zato.server.file_transfer.snapshot import BaseRemoteSnapshotMaker
Bunch = Bunch
BaseRemoteSnapshotMaker = BaseRemoteSnapshotMaker
FileTransferAPI = FileTransferAPI
# ################################################################################################################################
# ################################################################################################################################
logger = getLogger(__name__)
# ################################################################################################################################
# ################################################################################################################################
class PathCreatedEvent:
    """ A lightweight event object indicating that a new path appeared in an observed location.
    """
    def __init__(self, src_path:'str', is_dir:'bool') -> 'None':

        # Full path to what was created
        self.src_path = src_path

        # True if the newly created path is a directory
        self.is_dir = is_dir
# ################################################################################################################################
# ################################################################################################################################
class BaseObserver:
    """ Base class for file transfer observers - both local ones and remote, snapshot-based ones (e.g. FTP or SFTP).
    Subclasses provide path validation and the actual file operations; this class runs the main observation loop.
    """
    # Type hints
    _observe_func: 'callable_'
    event_handler: 'any_'

    observer_type_impl = '<observer-type-impl-not-set>'
    observer_type_name = '<observer-type-name-not-set>'
    observer_type_name_title = observer_type_name.upper()
    should_wait_for_deleted_paths = False

    def __init__(self, manager:'FileTransferAPI', channel_config:'Bunch') -> 'None':
        self.manager = manager
        self.channel_config = channel_config
        self.channel_id = channel_config.id
        self.source_type = channel_config.source_type
        self.is_local = self.source_type == FILE_TRANSFER.SOURCE_TYPE.LOCAL.id
        self.is_notify = self.observer_type_impl == FILE_TRANSFER.SOURCE_TYPE_IMPL.LOCAL_INOTIFY
        self.name = channel_config.name
        self.is_active = channel_config.is_active
        self.path_list = ['<initial-observer>']
        self.is_recursive = False
        self.keep_running = True

        # An environment variable may override how often snapshots are taken,
        # otherwise the module-level default interval is used.
        if pickup_interval := (os.environ.get('Zato_Hot_Deploy_Interval') or os.environ.get('Zato_Hot_Deployment_Interval')):
            pickup_interval = int(pickup_interval)
        else:
            pickup_interval = default_interval

        self.sleep_time = pickup_interval

# ################################################################################################################################

    def set_up(self, event_handler:'any_', path_list:'anylist', recursive:'bool') -> 'None':
        """ Stores the event handler and normalizes the list of paths to observe.
        """
        self.event_handler = event_handler
        self.path_list = path_string_list_to_list('.', path_list)
        self.is_recursive = recursive

# ################################################################################################################################

    def start(self, observer_start_args:'anytuple') -> 'None':
        """ Starts the observer in a new greenlet, unless the channel is inactive.
        """
        if self.is_active:
            _ = spawn_greenlet(self._start, observer_start_args)
        else:
            logger.info('Skipping an inactive file transfer channel `%s` (%s)', self.name, self.path_list)

# ################################################################################################################################

    def stop(self, needs_log:'bool'=True) -> 'None':
        """ Signals the main loop to stop by flipping the keep_running flag.
        """
        if needs_log:
            logger.info('Stopping %s file transfer observer `%s`', self.observer_type_name, self.name)
        self.keep_running = False

# ################################################################################################################################

    def _start(self, observer_start_args:'any_') -> 'None':
        """ Connects the snapshot maker and spawns one observation greenlet per valid path.
        """
        # NOTE(review): source_type_to_snapshot_maker_class appears to map to a class object,
        # yet .connect() is invoked on it directly - confirm it is instantiated elsewhere
        # or that connect is a classmethod.
        snapshot_maker = source_type_to_snapshot_maker_class[self.source_type]
        snapshot_maker = cast_('BaseRemoteSnapshotMaker', snapshot_maker)
        snapshot_maker.connect()

        for path in self.path_list:

            # Start only for paths that are valid - all invalid ones
            # are handled by a background path inspector.
            if self.is_path_valid(path):
                logger.info('Starting %s file observer `%s` for `%s` (%s)',
                    self.observer_type_name, path, self.name, self.observer_type_impl)
                _ = spawn_greenlet(self._observe_func, snapshot_maker, path, maxsize, True, observer_start_args)
            else:
                logger.info('Skipping invalid path `%s` for `%s` (%s)', path, self.name, self.observer_type_impl)

# ################################################################################################################################

    def is_path_valid(self, *args:'any_', **kwargs:'any_') -> 'bool':
        """ Returns True if path can be used as a source for file transfer (e.g. it exists and it is a directory).
        """
        raise NotImplementedError('Must be implemented by subclasses')

# ################################################################################################################################

    def path_exists(self, path:'str', snapshot_maker:'BaseRemoteSnapshotMaker | None'=None) -> 'bool':
        """ Returns True if path exists, False otherwise.
        """
        raise NotImplementedError('Must be implemented by subclasses')

# ################################################################################################################################

    def path_is_directory(self, path:'str', snapshot_maker:'BaseRemoteSnapshotMaker | None'=None) -> 'bool':
        """ Returns True if path is a directory, False otherwise.
        """
        raise NotImplementedError('Must be implemented by subclasses')

# ################################################################################################################################

    def get_dir_snapshot(self, path:'str', is_recursive:'bool') -> 'str': # type: ignore
        """ Returns an implementation-specific snapshot of a directory.
        """
        # Fixed: the original signature was missing `self`, which made the instance
        # bind to `path` and turned any call into a TypeError instead of NotImplementedError.
        raise NotImplementedError()

# ################################################################################################################################

    def move_file(self, path_from:'str', path_to:'str', event:'any_', snapshot_maker:'BaseRemoteSnapshotMaker') -> 'None':
        """ Moves a file to a selected directory.
        """
        raise NotImplementedError()

# ################################################################################################################################

    def delete_file(self, path:'str', snapshot_maker:'BaseRemoteSnapshotMaker') -> 'None':
        """ Deletes a file pointed to by path.
        """
        raise NotImplementedError()

# ################################################################################################################################

    def wait_for_path(self, path:'str', observer_start_args:'anytuple') -> 'None':
        """ Blocks until path exists and is a directory, then starts the observer for it.
        Runs until the path is found or self.keep_running becomes False.
        """
        # Local aliases
        utcnow = datetime.utcnow

        # How many times we have tried to find the correct path and since when
        idx = 0
        start = utcnow()
        log_every = 2

        # A flag indicating if path currently exists
        is_ok = False

        # This becomes True only if we learn that there is something wrong with path
        error_found = False

        # Wait until the directory exists (possibly it does already but we do not know it yet)
        while not is_ok:

            idx += 1

            # Honour the main loop's status
            if not self.keep_running:
                logger.info('Stopped `%s` path lookup function for %s file transfer observer `%s` (not found) (%s)',
                    path, self.observer_type_name, self.name, self.observer_type_impl)
                return

            if self.path_exists(path):

                if self.path_is_directory(path):
                    is_ok = True
                else:
                    # Indicate that there was an error with path
                    error_found = True

                    if idx == 1 or (idx % log_every == 0):
                        logger.info('%s transfer path `%s` is not a directory (%s) (c:%s d:%s t:%s)',
                            self.observer_type_name_title,
                            path,
                            self.name,
                            idx,
                            utcnow() - start,
                            self.observer_type_impl
                            )
            else:
                # Indicate that there was an error with path
                error_found = True

                if idx == 1 or (idx % log_every == 0):
                    logger.info('%s transfer path `%r` does not exist (%s) (c:%s d:%s t:%s)',
                        self.observer_type_name_title,
                        path,
                        self.name,
                        idx,
                        utcnow() - start,
                        self.observer_type_impl
                        )

            if is_ok:

                # Log only if had an error previously, otherwise it would emit too much to logs ..
                if error_found:
                    logger.info('%s file transfer path `%s` found successfully (%s) (c:%s d:%s t:%s)',
                        self.observer_type_name_title,
                        path,
                        self.name,
                        idx,
                        utcnow() - start,
                        self.observer_type_impl
                        )

                # .. and start the observer now.
                self.start(observer_start_args)

            else:
                sleep(5)

# ################################################################################################################################

    def observe_with_snapshots(
        self,
        snapshot_maker,   # type: BaseRemoteSnapshotMaker
        path,             # type: str
        max_iters=maxsize, # type: int
        log_stop_event=True, # type: bool
        *args,    # type: any_
        **kwargs  # type: any_
    ) -> 'None':
        """ An observer's main loop that uses snapshots.
        Compares consecutive directory snapshots and fires created/modified events for the differences.
        """
        try:

            # How many times to run the loop - either given on input or, essentially, infinitely.
            current_iter = 0

            # Local aliases to avoid namespace lookups in self
            timeout = self.sleep_time
            handler_func = self.event_handler.on_created
            is_recursive = self.is_recursive

            # Take an initial snapshot
            snapshot = snapshot_maker.get_snapshot(path, is_recursive, True, True)

            while self.keep_running:

                if current_iter == max_iters:
                    break

                try:

                    # The latest snapshot ..
                    new_snapshot = snapshot_maker.get_snapshot(path, is_recursive, False, False)

                    # .. difference between the old and new will return, in particular, new or modified files ..
                    diff = DirSnapshotDiff(snapshot, new_snapshot) # type: ignore

                    for path_created in diff.files_created:

                        # .. ignore Python's own directories ..
                        if '__pycache__' in path_created:
                            continue

                        if os.path.isdir(path_created):
                            class_ = DirCreatedEvent
                        else:
                            class_ = FileCreatedEvent

                        event = class_(path_created)
                        handler_func(event, self, snapshot_maker)

                    for path_modified in diff.files_modified:

                        # .. ignore Python's own directories ..
                        if '__pycache__' in path_modified:
                            continue

                        if os.path.isdir(path_modified):
                            class_ = DirModifiedEvent
                        else:
                            class_ = FileModifiedEvent

                        event = class_(path_modified)
                        handler_func(event, self, snapshot_maker)

                    # .. a new snapshot which will be treated as the old one in the next iteration
                    snapshot = snapshot_maker.get_snapshot(path, is_recursive, False, True)

                # Note that this will be caught only with local files not with FTP, SFTP etc.
                except FileNotFoundError:

                    # Log the error ..
                    logger.warning('Path not found caught in %s file observer main loop (%s) `%s` (%s t:%s)',
                        self.observer_type_name, path, format_exc(), self.name, self.observer_type_impl)

                    # .. start a background inspector which will wait for the path to become available ..
                    self.manager.wait_for_deleted_path(path)

                    # .. and end the main loop.
                    return

                except Exception as e:
                    logger.warning('Exception %s in %s file observer main loop `%s` e:`%s (%s t:%s)',
                        type(e), self.observer_type_name, path, format_exc(), self.name, self.observer_type_impl)

                finally:

                    # Update loop counter after we completed current iteration
                    current_iter += 1

                    # Sleep for a while but only if we are a local observer because any other
                    # will be triggered from the scheduler and we treat the scheduler job's interval
                    # as the sleep time.
                    if self.is_local:
                        sleep(timeout) # type: ignore

        except Exception:
            logger.warning('Exception in %s file observer `%s` e:`%s (%s t:%s)',
                self.observer_type_name, path, format_exc(), self.name, self.observer_type_impl)

        if log_stop_event:
            logger.warning('Stopped %s file transfer observer `%s` for `%s` (snapshot:%s/%s)',
                self.observer_type_name, self.name, path, current_iter, max_iters) # type: ignore
# ################################################################################################################################
class BackgroundPathInspector:
    """ Waits in background, through its observer, until a given path becomes available.
    """
    def __init__(
        self,
        path,                     # type: str
        observer,                 # type: BaseObserver
        observer_start_args=None  # type: anytuple | None
    ) -> 'None':
        self.path = path
        self.observer = observer
        self.observer_start_args = observer_start_args

    def start(self):

        # Do nothing for observers that are not active
        if not self.observer.is_active:
            return

        # Wait for the path in a separate greenlet so as not to block the caller
        _ = spawn_greenlet(self.observer.wait_for_path, self.path, self.observer_start_args)
# ################################################################################################################################
| 16,267
|
Python
|
.py
| 283
| 45.130742
| 130
| 0.465832
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,410
|
__init__.py
|
zatosource_zato/code/zato-server/src/zato/server/groups/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2024, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
| 154
|
Python
|
.py
| 5
| 29.4
| 64
| 0.687075
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,411
|
ctx.py
|
zatosource_zato/code/zato-server/src/zato/server/groups/ctx.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2024, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
from logging import getLogger
from uuid import uuid4
# gevent
from gevent.lock import RLock
# Zato
from zato.common.api import Groups, Sec_Def_Type
from zato.common.crypto.api import is_string_equal
from zato.common.groups import Member
# ################################################################################################################################
# ################################################################################################################################
if 0:
from zato.common.typing_ import anydict, boolnone, dict_, intanydict, intlist, intnone, intset, list_, strlist
from zato.server.base.parallel import ParallelServer
# ################################################################################################################################
# ################################################################################################################################
logger = getLogger(__name__)
# ################################################################################################################################
# ################################################################################################################################
class _BasicAuthSecDef:
    """ Runtime details of a single HTTP Basic Auth security definition. """
    # ID of the underlying security definition
    security_id: 'int'
    # Expected username for this definition
    username: 'str'
    # Expected password for this definition
    password: 'str'
# ################################################################################################################################
# ################################################################################################################################
class _APIKeySecDef:
    """ Runtime details of a single API key security definition. """
    # ID of the underlying security definition
    security_id: 'int'
    # Expected value of the API key header
    header_value: 'str'
# ################################################################################################################################
# ################################################################################################################################
class SecurityGroupsCtx:
""" An instance of this class is attached to each channel using security groups.
"""
# ID of a channel this ctx object is attached to
channel_id: 'int'
# IDs of all the security groups attached to this channel
security_groups: 'intset'
# Maps group IDs to security IDs
group_to_sec_map: 'intanydict'
# Maps usernames to _BasicAuthSecDef objects
basic_auth_credentials: 'dict_[str, _BasicAuthSecDef]'
# Maps header values to _APIKeySecDef objects
apikey_credentials: 'dict_[str, _APIKeySecDef]'
def __init__(self, server:'ParallelServer') -> 'None':
self.server = server
self.group_to_sec_map = {}
self.security_groups = set()
self.basic_auth_credentials = {}
self.apikey_credentials = {}
self._lock = RLock()
# ################################################################################################################################
def check_security_basic_auth(self, cid:'str', channel_name:'str', username:'str', password:'str') -> 'intnone':
if sec_info := self.basic_auth_credentials.get(username):
if is_string_equal(password, sec_info.password):
return sec_info.security_id
else:
logger.info(f'Invalid password; username={username}; channel={channel_name}; cid={cid}')
else:
logger.info(f'Username not found; username={username}; channel={channel_name}; cid={cid}')
# ################################################################################################################################
def _get_basic_auth_by_security_id(self, security_id:'int') -> '_BasicAuthSecDef | None':
for value in self.basic_auth_credentials.values():
if value.security_id == security_id:
return value
# ################################################################################################################################
def _get_apikey_by_security_id(self, security_id:'int') -> '_APIKeySecDef | None':
for value in self.apikey_credentials.values():
if value.security_id == security_id:
return value
# ################################################################################################################################
def _has_security_id(self, security_id:'int') -> 'bool':
""" Returns True if input security_id is among any groups that we handle. Returns False otherwise.
"""
for sec_def_ids in self.group_to_sec_map.values():
if security_id in sec_def_ids:
return True
# If we are here, it means that do not have that security ID
return False
# ################################################################################################################################
def _after_auth_created(
self,
group_id:'int',
security_id:'int',
) -> 'None':
# Store information that we are aware of this group ..
self.security_groups.add(group_id)
# .. map the group ID to a list of security definitions that are related to it, ..
# .. note that a single group may point to multiple security IDs ..
sec_def_id_list = self.group_to_sec_map.setdefault(group_id, set())
sec_def_id_list.add(security_id)
# ################################################################################################################################
def _after_auth_deleted(self, security_id:'int') -> 'None':
for sec_def_ids in self.group_to_sec_map.values():
if security_id in sec_def_ids:
sec_def_ids.remove(security_id)
# ################################################################################################################################
def _create_basic_auth(
self,
security_id:'int',
username:'str',
password:'str'
) -> 'None':
# Build a business object containing all the data needed in runtime ..
item = _BasicAuthSecDef()
item.security_id = security_id
item.username = username
item.password = password
# .. and add the business object to our container ..
self.basic_auth_credentials[username] = item
# ################################################################################################################################
def _create_apikey(
self,
security_id:'int',
header_value:'str',
) -> 'None':
# .. build a business object containing all the data needed in runtime ..
item = _APIKeySecDef()
item.security_id = security_id
item.header_value = header_value
# .. add the business object to our container.
self.apikey_credentials[item.header_value] = item
# ################################################################################################################################
def set_basic_auth(self, security_id:'int', username:'str', password:'str') -> 'None':
if self._delete_basic_auth(security_id):
self._create_basic_auth(security_id, username, password)
# ################################################################################################################################
def edit_apikey(self, security_id:'int', header_value:'str') -> 'None':
if self._delete_apikey(security_id):
self._create_apikey(security_id, header_value)
# ################################################################################################################################
def _delete_basic_auth(self, security_id:'int') -> 'boolnone':
# Continue only if we recognize such a Basic Auth definition ..
if sec_info := self._get_basic_auth_by_security_id(security_id):
# .. delete the definition itself ..
_ = self.basic_auth_credentials.pop(sec_info.username, None)
# .. remove it from maps too ..
self._after_auth_deleted(security_id)
# .. and indicate to our caller that we are done.
return True
# ################################################################################################################################
def _delete_apikey(self, security_id:'int') -> 'boolnone':
# Continue only if we recognize such an API key definition ..
if sec_info := self._get_apikey_by_security_id(security_id):
# .. delete the definition itself ..
_ = self.apikey_credentials.pop(sec_info.header_value, None)
# .. remove it from maps too ..
self._after_auth_deleted(security_id)
# .. and indicate to our caller that we are done.
return True
# ################################################################################################################################
def delete_basic_auth(self, security_id:'int') -> 'None':
_ = self._delete_basic_auth(security_id)
# ################################################################################################################################
def delete_apikey(self, security_id:'int') -> 'None':
_ = self._delete_apikey(security_id)
# ################################################################################################################################
def check_security_apikey(self, cid:'str', channel_name:'str', header_value:'str') -> 'intnone':
if sec_info := self.apikey_credentials.get(header_value):
return sec_info.security_id
else:
logger.info(f'Invalid API key; channel={channel_name}; cid={cid}')
# ################################################################################################################################
def _on_basic_auth_created(
self,
group_id:'int',
security_id:'int',
username:'str',
password:'str'
) -> 'None':
# Create the base object ..
self._create_basic_auth(security_id, username, password)
# .. and populate common containers.
self._after_auth_created(group_id, security_id)
# ################################################################################################################################
def on_basic_auth_created(
self,
group_id:'int',
security_id:'int',
username:'str',
password:'str'
) -> 'None':
with self._lock:
self._on_basic_auth_created(group_id, security_id, username, password)
# ################################################################################################################################
def set_current_basic_auth(self, security_id:'int', current_username:'str', password:'str') -> 'None':
with self._lock:
self.set_basic_auth(security_id, current_username, password)
# ################################################################################################################################
def _on_basic_auth_deleted(self, security_id:'int') -> 'None':
self.delete_basic_auth(security_id)
# ################################################################################################################################
def on_basic_auth_deleted(self, security_id:'int') -> 'None':
with self._lock:
self.delete_basic_auth(security_id)
# ################################################################################################################################
def _on_apikey_created(
self,
group_id:'int',
security_id:'int',
header_value:'str',
) -> 'None':
# Create the base object ..
self._create_apikey(security_id, header_value)
# .. and populate common containers.
self._after_auth_created(group_id, security_id)
# ################################################################################################################################
def on_apikey_created(
self,
group_id:'int',
security_id:'int',
header_value:'str',
) -> 'None':
with self._lock:
self._on_apikey_created(group_id, security_id, header_value)
# ################################################################################################################################
def set_current_apikey(self, security_id:'int', header_value:'str') -> 'None':
with self._lock:
self.edit_apikey(security_id, header_value)
# ################################################################################################################################
def _on_apikey_deleted(self, security_id:'int') -> 'None':
_ = self._delete_apikey(security_id)
# ################################################################################################################################
def on_apikey_deleted(self, security_id:'int') -> 'None':
with self._lock:
_ = self._delete_apikey(security_id)
# ################################################################################################################################
def on_group_deleted(self, group_id:'int') -> 'None':
# A list of all the Basic Auth usernames we are going to delete
basic_auth_list:'strlist' = []
# A list of all the API key header values we are going to delete
apikey_list:'strlist' = []
with self._lock:
# Continue only if this group has been previously assigned to our context object ..
if not group_id in self.security_groups:
return
# If we are here, it means that we really have a group to delete
# Find all security IDs related to this group
sec_id_list = self.group_to_sec_map.pop(group_id, [])
# .. turn security IDs into their names (Basic Auth) ..
for username, item in self.basic_auth_credentials.items():
if item.security_id in sec_id_list:
basic_auth_list.append(username)
# .. turn security IDs into their header values (API keys) ..
for header_value, item in self.apikey_credentials.items():
if item.security_id in sec_id_list:
apikey_list.append(header_value)
# .. remove security definitions (Basic Auth) ..
for item in basic_auth_list:
_ = self.basic_auth_credentials.pop(item, None)
# .. remove security definitions (API keys) ..
for item in apikey_list:
_ = self.apikey_credentials.pop(item, None)
# .. and remove the group itself.
try:
_ = self.security_groups.remove(group_id)
except KeyError:
pass
# ################################################################################################################################
def _get_sec_def_by_id(self, security_id:'int') -> 'anydict':
# Let's try Basic Auth definitions first ..
if not (sec_def := self.server.worker_store.basic_auth_get_by_id(security_id)):
# .. if we do not have anything, it must be an API key definition then ..
sec_def = self.server.worker_store.apikey_get_by_id(security_id)
# If we do not have anything, we can only report an error
if not sec_def:
raise Exception(f'Security ID is neither Basic Auth nor API key -> {security_id}')
# .. otherwise, we can return the definition to our caller.
else:
return sec_def
# ################################################################################################################################
def _get_sec_def_type_by_id(self, security_id:'int') -> 'str':
sec_def = self._get_sec_def_by_id(security_id)
sec_def_type = sec_def['sec_type']
return sec_def_type
# ################################################################################################################################
def has_members(self) -> 'bool':
return bool(self.basic_auth_credentials) or bool(self.apikey_credentials)
# ################################################################################################################################
def on_member_added_to_group(self, group_id:'int', security_id:'int') -> 'None':
with self._lock:
# Continue only if this group has been previously assigned to our context object ..
if not group_id in self.security_groups:
return
sec_def = self._get_sec_def_by_id(security_id)
sec_def_type = sec_def['sec_type']
# If we are here, we know we have everything to populate all the runtime containers
if sec_def_type == Sec_Def_Type.BASIC_AUTH:
self._on_basic_auth_created(group_id, security_id, sec_def['username'], sec_def['password'])
else:
self._on_apikey_created(group_id, security_id, sec_def['password'])
# ################################################################################################################################
def _on_member_removed_from_group(self, group_id:'int', security_id:'int') -> 'None':
# Continue only if this group has been previously assigned to our context object ..
if not group_id in self.security_groups:
return
# First, remove the security ID from the input group ..
self._after_auth_deleted(security_id)
# .. now, check if the security definition belongs to other groups as well ..
# .. and if not, delete the security definition altogether because ..
# .. it must have been the last one group to have contained it ..
for sec_def_ids in self.group_to_sec_map.values():
if security_id in sec_def_ids:
break
else:
# .. if we are here, it means that there was no break above ..
# .. which means that the security ID is not in any group, ..
# .. in which case we need to delete this definition now ..
sec_def_type = self._get_sec_def_type_by_id(security_id)
# .. do delete the definition from the correct container.
if sec_def_type == Sec_Def_Type.BASIC_AUTH:
self._on_basic_auth_deleted(security_id)
else:
self._on_apikey_deleted(security_id)
# ################################################################################################################################
def on_member_removed_from_group(self, group_id:'int', security_id:'int') -> 'None':
with self._lock:
self._on_member_removed_from_group(group_id, security_id)
# ################################################################################################################################
def on_group_assigned_to_channel(self, group_id:'int', members:'list_[Member]') -> 'None':
# .. now, go through each of the members found ..
for member in members:
# .. and add it to a container corresponding to its security type ..
if member.sec_type == Sec_Def_Type.BASIC_AUTH:
# .. get the member's security definition ..
sec_def = self.server.worker_store.basic_auth_get_by_id(member.security_id)
# .. populate the correct container ..
self.on_basic_auth_created(
group_id,
sec_def['id'],
sec_def['username'],
sec_def.get('password') or 'Zato-Not-Provided-Basic-Auth-' + uuid4().hex,
)
elif member.sec_type == Sec_Def_Type.APIKEY:
# .. get the member's security definition ..
sec_def = self.server.worker_store.apikey_get_by_id(member.security_id)
# .. populate the correct container ..
self.on_apikey_created(
group_id,
sec_def['id'],
sec_def.get('password') or 'Zato-Not-Provided-API-Key-' + uuid4().hex,
)
# ################################################################################################################################
def on_group_unassigned_from_channel(self, group_id:'int') -> 'None':
    """ Removes the credentials that belonged to the group and then drops the group itself.
    """
    with self._lock:

        # Remove the group-to-security mapping, obtaining all the definitions that were assigned to the group ..
        assigned_sec_ids = self.group_to_sec_map.pop(group_id, [])

        # .. remove each definition unless another group still makes use of it ..
        for security_id in assigned_sec_ids:
            self._on_member_removed_from_group(group_id, security_id)

        # .. and lastly, delete the top-level container for groups.
        try:
            _ = self.security_groups.remove(group_id)
        except KeyError:
            pass
# ################################################################################################################################
# ################################################################################################################################
class SecurityGroupsCtxBuilder:
    """ Builds SecurityGroupsCtx objects out of the server's group membership information.
    """

    # All the group members known to this builder - filled in by populate_members
    members: 'list_[Member]'

    def __init__(self, server:'ParallelServer') -> 'None':
        self.server = server

# ################################################################################################################################

    def populate_members(self) -> 'None':
        """ Loads from the server all the members of API client groups.
        """
        self.members = self.server.groups_manager.get_member_list(Groups.Type.API_Clients)

# ################################################################################################################################

    def _get_members_by_group_id(self, group_id:'int') -> 'list_[Member]':
        """ Returns all the known members that belong to the input group.
        """
        return [item for item in self.members if item.group_id == group_id]

# ################################################################################################################################

    def build_ctx(self, channel_id:'int', security_groups: 'intlist') -> 'SecurityGroupsCtx':
        """ Creates a new context object for the channel and assigns to it all the input groups.
        """
        # Build a basic object ..
        ctx = SecurityGroupsCtx(self.server)

        # .. populate it with the core data ..
        ctx.channel_id = channel_id

        # .. add the credentials of every member of every input group ..
        for group_id in security_groups:
            group_members = self._get_members_by_group_id(group_id)
            ctx.on_group_assigned_to_channel(group_id, group_members)

        # .. and return the business object to our caller.
        return ctx
# ################################################################################################################################
# ################################################################################################################################
| 23,653
|
Python
|
.py
| 397
| 50.423174
| 130
| 0.431407
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,412
|
base.py
|
zatosource_zato/code/zato-server/src/zato/server/groups/base.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2024, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
from contextlib import closing
# SQLAlchemy
from sqlalchemy import and_, func, select
# Zato
from zato.common.api import GENERIC, Groups, SEC_DEF_TYPE
from zato.common.groups import Member
from zato.common.odb.model import GenericObject as ModelGenericObject
from zato.common.odb.query.generic import GroupsWrapper
# ################################################################################################################################
# ################################################################################################################################
if 0:
from zato.common.typing_ import any_, anydict, anylist, intnone, list_, strlist
from zato.server.base.parallel import ParallelServer
# ################################################################################################################################
# ################################################################################################################################
_generic_attr_name = GENERIC.ATTR_NAME
ModelGenericObjectTable:'any_' = ModelGenericObject.__table__
# ################################################################################################################################
# ################################################################################################################################
class GroupsManager:
    """ Manages groups of API clients - their creation, updates, deletion and membership.

    Each public method opens its own SQL transaction through a GroupsWrapper object
    that encapsulates access to the underlying generic-object tables.
    """

    def __init__(self, server:'ParallelServer') -> 'None':
        self.server = server
        self.cluster_id = self.server.cluster_id

# ################################################################################################################################

    def create_group(self, group_type:'str', group_name:'str') -> 'str':
        """ Creates a new group of the given type and returns the ID of the newly created group.
        """
        # Work in a new SQL transaction ..
        with closing(self.server.odb.session()) as session:

            # .. build an object that will wrap access to the SQL database ..
            wrapper = GroupsWrapper(session, self.cluster_id)
            wrapper.type_ = Groups.Type.Group_Parent
            wrapper.subtype = group_type

            # .. do create the group now ..
            insert = wrapper.create(group_name, '')

            # .. commit the changes ..
            session.execute(insert)
            session.commit()

            # .. get the newly added group now ..
            group = wrapper.get(group_name)

            # .. and return its ID to our caller.
            return group['id']

# ################################################################################################################################

    def edit_group(self, group_id:'int', group_type:'str', group_name:'str') -> 'None':
        """ Renames an existing group. Its opaque attributes are left untouched.
        """
        # Work in a new SQL transaction ..
        with closing(self.server.odb.session()) as session:

            # .. build an object that will wrap access to the SQL database ..
            wrapper = GroupsWrapper(session, self.cluster_id)
            wrapper.type_ = Groups.Type.Group_Parent
            wrapper.subtype = group_type

            # .. do edit the group's name (but not its opaque attributes) ..
            update = wrapper.update(group_name, id=group_id)

            # .. and commit the changes now.
            session.execute(update)
            session.commit()

# ################################################################################################################################

    def delete_group(self, group_id:'int') -> 'None':
        """ Deletes a group along with all of its member assignments.
        """
        # Work in a new SQL transaction ..
        with closing(self.server.odb.session()) as session:

            # .. build an object that will wrap access to the SQL database ..
            wrapper = GroupsWrapper(session, self.cluster_id)

            # .. delete the group ..
            delete_group = wrapper.delete_by_id(group_id)
            session.execute(delete_group)

            # .. remove its members in bulk ..
            remove_members = wrapper.delete_by_parent_object_id(group_id)
            session.execute(remove_members)

            # .. and commit the changes now. Note that this commit was previously
            # .. commented out, which meant the deletion was never persisted.
            session.commit()

# ################################################################################################################################

    def get_group_list(self, group_type:'str') -> 'anylist':
        """ Returns a list of all the groups of the given type.
        """
        # Our response to produce
        out:'anylist' = []

        # Work in a new SQL transaction ..
        with closing(self.server.odb.session()) as session:

            # .. build an object that will wrap access to the SQL database ..
            wrapper = GroupsWrapper(session, self.cluster_id)
            wrapper.type_ = Groups.Type.Group_Parent
            wrapper.subtype = group_type

            # .. get all the results ..
            results = wrapper.get_list()

            # .. populate our response ..
            out[:] = results

        # .. and return the output to our caller.
        return out

# ################################################################################################################################

    def get_member_list(self, group_type:'str', group_id:'intnone'=None) -> 'list_[Member]':
        """ Returns members of the given group type, optionally of one specific group only.

        Raises Exception if a member's stored security type is neither Basic Auth nor API key.
        """
        # Our response to produce
        out:'list_[Member]' = []

        # Work in a new SQL transaction ..
        with closing(self.server.odb.session()) as session:

            # .. build an object that will wrap access to the SQL database ..
            wrapper = GroupsWrapper(session, self.cluster_id)

            # .. get all the results ..
            results = wrapper.get_list(Groups.Type.Group_Member, group_type, parent_object_id=group_id)

            # .. extract security information for each item ..
            for item in results:

                # Stored names are composite: "<sec_type>-<security_id>-<group_id>"
                sec_info = item['name']
                sec_info = sec_info.split('-')

                sec_type, security_id, _ignored_sql_group_id = sec_info
                security_id = int(security_id)

                # Choose the lookup function that matches this member's security type
                if sec_type == SEC_DEF_TYPE.BASIC_AUTH:
                    get_sec_func = self.server.worker_store.basic_auth_get_by_id
                elif sec_type == SEC_DEF_TYPE.APIKEY:
                    get_sec_func = self.server.worker_store.apikey_get_by_id
                else:
                    raise Exception(f'Unrecognized sec_type: {sec_type}')

                # Replace the composite name with the definition's actual name
                sec_config = get_sec_func(security_id)
                item['name'] = sec_config['name']
                item['security_id'] = sec_config['id']
                item['sec_type'] = sec_type

                # .. build a new business object for the member ..
                member = Member.from_dict(item)

                # .. populate our response list ..
                out.append(member)

        # .. and return the output to our caller.
        return out

# ################################################################################################################################

    def get_member_count(self, group_type:'str') -> 'anydict':
        """ Returns a dict mapping each group ID of the given type to its member count.
        Groups without members are included with a count of zero.
        """
        # Our response to produce
        out:'anydict' = {}

        # By default, assume that there are no members in any group
        group_list = self.get_group_list(group_type)
        for item in group_list:
            group_id = item['id']
            out[group_id] = 0

        # Work in a new SQL transaction ..
        with closing(self.server.odb.session()) as session:

            # Count members per group directly in the database
            q = select([
                ModelGenericObjectTable.c.parent_object_id,
                func.count(ModelGenericObjectTable.c.parent_object_id),
                ]).\
                where(and_(
                    ModelGenericObjectTable.c.type_ == Groups.Type.Group_Member,
                    ModelGenericObjectTable.c.subtype == group_type,
                )).\
                group_by(ModelGenericObjectTable.c.parent_object_id)

            result:'any_' = session.execute(q).fetchall()

            # Overwrite the zero defaults with the actual counts found
            for item in result:
                group_id, member_count = item
                out[group_id] = member_count

        return out

# ################################################################################################################################

    def add_members_to_group(self, group_id:'int', member_id_list:'strlist') -> 'None':
        """ Adds all the input members to the group in one bulk insert.
        """
        # Local variables
        member_list = []

        # Process each input member ..
        for member_id in member_id_list:

            # .. each one needs a composite name because each such name has to be unique in the database
            name = f'{member_id}-{group_id}'

            # .. append it for later use
            member_list.append({
                'name': name,
                _generic_attr_name: '',
            })

        # Work in a new SQL transaction ..
        with closing(self.server.odb.session()) as session:

            # .. build an object that will wrap access to the SQL database ..
            wrapper = GroupsWrapper(session, self.cluster_id)

            # .. do add the members to the group now ..
            insert = wrapper.create_many(
                member_list,
                Groups.Type.Group_Member,
                Groups.Type.API_Clients,
                parent_object_id=group_id
            )

            # .. run the query ..
            session.execute(insert)

            # .. and commit the changes.
            session.commit()

# ################################################################################################################################

    def remove_members_from_group(self, group_id:'str', member_id_list:'strlist') -> 'None':
        """ Removes all the input members from the group, one delete statement per member.
        """
        # Work in a new SQL transaction ..
        with closing(self.server.odb.session()) as session:

            # .. build an object that will wrap access to the SQL database ..
            wrapper = GroupsWrapper(session, self.cluster_id)

            # .. delete members from the group now ..
            for member_id in member_id_list:

                # This is a composite name because each such name has to be unique in the database
                name = f'{member_id}-{group_id}'
                delete = wrapper.delete_by_name(name, parent_object_id=group_id)
                session.execute(delete)

            # .. and commit the changes.
            session.commit()
# ################################################################################################################################
# ################################################################################################################################
| 10,521
|
Python
|
.py
| 190
| 44.852632
| 130
| 0.470232
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,413
|
setup.py
|
zatosource_zato/code/zato-scheduler/setup.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# flake8: noqa
from setuptools import setup, find_packages
version = '3.2'

# All the package metadata, gathered in one place for readability
_setup_config = {
    'name': 'zato-scheduler',
    'version': version,
    'author': 'Zato Source s.r.o.',
    'author_email': 'info@zato.io',
    'url': 'https://zato.io',
    'package_dir': {'':'src'},
    'packages': find_packages('src'),
    'namespace_packages': ['zato'],
    'zip_safe': False,
}

setup(**_setup_config)
| 538
|
Python
|
.py
| 19
| 24
| 64
| 0.619608
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,414
|
__init__.py
|
zatosource_zato/code/zato-scheduler/test/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
| 154
|
Python
|
.py
| 5
| 29.4
| 64
| 0.687075
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,415
|
test_invoke_scheduler_rest.py
|
zatosource_zato/code/zato-scheduler/test/zato/test_invoke_scheduler_rest.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
from unittest import main
# Zato
from zato.common.test.config import TestConfig
from zato.common.test.rest_client import RESTClientTestCase
# ################################################################################################################################
# ################################################################################################################################
class InvokeSchedulerRESTTestCase(RESTClientTestCase):
    """ Invokes the scheduler's REST API directly, the way a server would.
    """

    def setUp(self) -> None:
        super().setUp()
        self.rest_client.base_address = TestConfig.scheduler_address

# ################################################################################################################################

    def test_rest_invoke_server_to_scheduler(self):

        # The scheduler should reply with the expected metadata
        _ = self.post('/')

# ################################################################################################################################

    def test_rest_invoke_server_to_scheduler_invalid_request(self):

        # Send a request the scheduler cannot handle ..
        response = self.post('/', {'invalid-key':'invalid-value'}, expect_ok=False)

        # .. and confirm the error metadata in the reply.
        correlation_id = response['cid'] # type: str

        self.assertTrue(correlation_id.startswith('zsch'))
        self.assertGreaterEqual(len(correlation_id), 20)
        self.assertEqual(response['status'], 'error')
# ################################################################################################################################
# ################################################################################################################################
# Run the test suite when this module is executed directly
if __name__ == '__main__':
    _ = main()
# ################################################################################################################################
# ################################################################################################################################
| 2,173
|
Python
|
.py
| 34
| 59.558824
| 130
| 0.355964
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,416
|
test_invoke_scheduler_client.py
|
zatosource_zato/code/zato-scheduler/test/zato/test_invoke_scheduler_client.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import os
from unittest import main, TestCase
# Zato
from zato.broker.client import BrokerClient
from zato.common.broker_message import SCHEDULER
from zato.common.test.config import TestConfig
# ################################################################################################################################
# ################################################################################################################################
# Reusable test configuration - note that the API credentials
# (scheduler_api_username/password) are added at runtime by the tests below.
scheduler_config = {
    'scheduler_host': TestConfig.scheduler_host,
    'scheduler_port': TestConfig.scheduler_port,
    'scheduler_use_tls': False,
}
# ################################################################################################################################
# ################################################################################################################################
class InvokeSchedulerClientTestCase(TestCase):
    """ Invokes the scheduler through a BrokerClient, the way servers do it.
    """

    def _build_client(self):
        """ Returns a BrokerClient configured with API credentials taken from the environment,
        or None if the credentials are not available, in which case the test should be skipped.
        """
        username = os.environ.get('Zato_Scheduler_API_Client_For_Server_Username')
        password = os.environ.get('Zato_Scheduler_API_Client_For_Server_Password')

        if not (username and password):
            return None

        # If we are here, it means that we have the credentials
        scheduler_config['scheduler_api_username'] = username
        scheduler_config['scheduler_api_password'] = password

        # Client that invokes the scheduler from servers
        return BrokerClient(scheduler_config=scheduler_config)

# ################################################################################################################################

    def test_client_invoke_server_to_scheduler_message_valid(self):

        client = self._build_client()
        if not client:
            return

        # Build a valid test message
        msg = {
            'action': SCHEDULER.EXECUTE.value,
            'name': 'zato.outgoing.sql.auto-ping'
        }

        response = client.invoke_sync(msg)

        cid = response['cid'] # type: str

        self.assertTrue(cid.startswith('zsch'))
        self.assertGreaterEqual(len(cid), 20)
        self.assertEqual(response['status'], 'ok')

# ################################################################################################################################

    def xtest_client_invoke_server_to_scheduler_invalid_request(self):

        client = self._build_client()
        if not client:
            return

        # Build an invalid test message
        msg = {
            'action': SCHEDULER.EXECUTE.value,
            'invalid-key':'invalid-value'
        }

        # This will check whether the scheduler replies with the expected metadata
        response = client.invoke_sync(msg)

        cid = response['cid'] # type: str

        self.assertTrue(cid.startswith('zsch'))
        self.assertGreaterEqual(len(cid), 20)
        self.assertEqual(response['status'], 'error')
# ################################################################################################################################
# ################################################################################################################################
# Run the test suite when this module is executed directly
if __name__ == '__main__':
    _ = main()
# ################################################################################################################################
# ################################################################################################################################
| 3,907
|
Python
|
.py
| 71
| 48.422535
| 130
| 0.466894
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,417
|
__init__.py
|
zatosource_zato/code/zato-scheduler/test/zato/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
| 154
|
Python
|
.py
| 5
| 29.4
| 64
| 0.687075
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,418
|
test_cleanup.py
|
zatosource_zato/code/zato-scheduler/test/zato/test_cleanup.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# This needs to be done as soon as possible
from gevent.monkey import patch_all
_ = patch_all()
# stdlib
import os
from datetime import datetime
from unittest import main
# gevent
from gevent import sleep
# Zato
from zato.common import PUBSUB
from zato.common.test import CommandLineTestCase, PubSubConfig
from zato.common.test.unittest_ import BasePubSubRestTestCase, PubSubAPIRestImpl
from zato.common.typing_ import cast_
from zato.scheduler.cleanup.core import run_cleanup
# ################################################################################################################################
# ################################################################################################################################
if 0:
from zato.common.typing_ import anydict, intnone
anydict = anydict
# ################################################################################################################################
# ################################################################################################################################
# Default pub/sub configuration
_default = PUBSUB.DEFAULT

# Test security definition and user that the REST client will use
sec_name = _default.TEST_SECDEF_NAME
username = _default.TEST_USERNAME

# Prefix shared by all the topics created by tests
test_topic_prefix = PubSubConfig.TestTopicPrefix

# Name of the environment variable carrying the cleanup delta, in seconds
delta_environ_key = 'ZATO_SCHED_DELTA'
# ################################################################################################################################
# ################################################################################################################################
class PubSubCleanupTestCase(CommandLineTestCase, BasePubSubRestTestCase):
should_init_rest_client = False
def setUp(self) -> 'None':
    """ Prepares a REST client and a clean pub/sub environment for each test.

    Runs only when the ZATO_TEST_SCHEDULER environment variable is set;
    otherwise each test returns early as well.
    """
    if not os.environ.get('ZATO_TEST_SCHEDULER'):
        return

    # Set up a test client before each test ..
    self.rest_client.init(username=username, sec_name=sec_name)
    self.api_impl = PubSubAPIRestImpl(self, self.rest_client)

    # .. clean up any left over topics as well ..
    self.delete_pubsub_topics_by_pattern(test_topic_prefix)

    # A path to the scheduler that the tests will use
    self.scheduler_path = os.environ['ZATO_SCHEDULER_BASE_DIR']

    # .. and call our parent
    super().setUp()
# ################################################################################################################################
def _run_cleanup_old_pubsub_one_sub_key(
    self,
    topic_name:'str',
    *,
    env_delta:'intnone',
    limit_retention:'intnone',
    limit_message_expiry:'intnone',
    limit_sub_inactivity:'intnone',
    clean_up_subscriptions: 'bool',
    clean_up_topics_without_subscribers: 'bool',
    clean_up_topics_with_max_retention_reached: 'bool',
    clean_up_queues_with_expired_messages: 'bool',
) -> 'None':
    """ Publishes messages to a topic with a single subscriber, sleeps until the relevant
    cleanup criterion (env delta, retention, message expiry or subscription inactivity)
    is exceeded, runs the cleanup procedure and asserts its results for whichever
    cleanup task was requested.
    """

    # Filter our warnings coming from requests
    import warnings
    warnings.filterwarnings(action='ignore', message='unclosed', category=ResourceWarning)

    # Assume we are not going to sleep after publishing
    after_publish_sleep_base = 0

    # All the messages published during this test
    messages_published = []

    # Before subscribing, make sure we are not currently subscribed
    _ = self._unsubscribe(topic_name)

    # Subscribe to the topic
    response_initial = self.rest_client.post(PubSubConfig.PathSubscribe + topic_name)

    # Wait a moment to make sure the subscription data is created
    sleep(2)

    sub_key:'str' = response_initial['sub_key']

    data = 'abc'
    len_messages = 2

    for _ in range(len_messages):
        msg = self._publish(topic_name, data)
        messages_published.append(msg['msg_id'])

    # Sort all the message IDs published for later use
    messages_published.sort()

    # Because each publication is synchronous, we now know that all of them are in the subscriber's queue
    # which means that we can delete them already.

    # Use the delta value from the environment to override any per-topic cleanup time configuration.
    # Such a delta indicates after a passage of how many seconds we will consider a subscribers as gone,
    # that is, after how many seconds since its last interaction time it will be deleted.
    if env_delta:

        # Export a variable with delta as required by the underlying cleanup implementation
        os.environ[delta_environ_key] = str(env_delta)

        # Our sleep time is based on the delta environment variable
        after_publish_sleep_base = env_delta

    elif limit_retention:

        # We are going to sleep based on the topic's max. retention time
        after_publish_sleep_base = limit_retention

    elif limit_message_expiry:

        # We are going to sleep based on each of the message's expiration time
        # Note that, because we are not assigning any explicit expiration time to messages,
        # that value was taken from the topic's default expiration time when a message was published above.
        after_publish_sleep_base = limit_message_expiry

    elif limit_sub_inactivity:

        # We need to sleep based on the topic's subscription inactivity limit.
        after_publish_sleep_base = limit_sub_inactivity

    # If requested to, sleep a little bit longer to make sure that we actually exceed the delta or retention time
    if after_publish_sleep_base:
        sleep_extra = after_publish_sleep_base * 0.1
        sleep(after_publish_sleep_base + sleep_extra)

    # Run the cleanup procedure now
    cleanup_result = run_cleanup(
        clean_up_subscriptions,
        clean_up_topics_without_subscribers,
        clean_up_topics_with_max_retention_reached,
        clean_up_queues_with_expired_messages,
        scheduler_path=self.scheduler_path,
    )

    # We enter and check the assertions here only if we were to clean up subscriptions
    # as otherwise the subscription will be still around.
    if clean_up_subscriptions:

        self.assertEqual(cleanup_result.found_total_queue_messages, len_messages)
        self.assertListEqual(cleanup_result.found_sk_list, [sub_key])

        # The cleanup procedure invoked the server which in turn deleted our subscription,
        # which means that we can sleep for a moment now to make sure that it is actually
        # deleted and then we can try to get message for the now-already-deleted sub_key.
        # We expect that it will result in a permissioned denied, as though this sub_key never existed.

        # Wait a moment ..
        sleep(0.1)

        receive_result = cast_('anydict', self._receive(topic_name, expect_ok=False))

        cid = receive_result['cid']

        self.assertIsInstance(cid, str)
        self.assertTrue(len(cid) >= 20)

        self.assertEqual(receive_result['result'], 'Error')
        self.assertEqual(receive_result['details'], f'You are not subscribed to topic `{topic_name}`')

        # This topic was cleaned up based on the fact that it had no subscribers,
        # which left no messages in the topic at all. That means that there must have been
        # no messages with a max. retention time left which in turn means that we do not
        # expect this topic to be among ones that still contained such messages
        # when the cleanup procedure went on to clean up topics based on max. retention time.
        for item in cleanup_result.topics_with_max_retention_reached:
            if item.name == topic_name:
                self.fail('Topic `{}` should not be among `{}` (topics_cleaned_up)'.format(
                    topic_name, cleanup_result.topics_with_max_retention_reached))

    elif clean_up_topics_with_max_retention_reached:

        # Confirm that the environment variable was not used
        self.assertFalse(cleanup_result.has_env_delta)

        # We expect for no other tasks to have been performed in this
        self.assertListEqual(cleanup_result.topics_without_subscribers, [])
        self.assertListEqual(cleanup_result.topics_with_expired_messages, [])

        # Because its name is unique, there should be only one topic that was cleaned up
        self.assertEqual(len(cleanup_result.topics_cleaned_up), 1)
        self.assertEqual(len(cleanup_result.topics_with_max_retention_reached), 1)

        # This is our topic that was cleaned up
        topic_from_cleaned_up_list = cleanup_result.topics_cleaned_up[0]
        topic_based_on_max_retention_reached = cleanup_result.topics_with_max_retention_reached[0]

        # These two objects should be the same
        self.assertIs(topic_from_cleaned_up_list, topic_based_on_max_retention_reached)

        # Let's use a shorter name
        topic_ctx = cleanup_result.topics_cleaned_up[0]

        # These must be equal
        self.assertTrue(topic_ctx.name, topic_name)
        self.assertEqual(topic_ctx.limit_retention, limit_retention)
        self.assertEqual(topic_ctx.len_messages, len(messages_published))

        # Messages received are going to be a list of Bunch objects in an unspecified order.
        # We need to convert them to a simple list of sorted message IDs.
        cleaned_up_msg_id_list = [elem['pub_msg_id'] for elem in topic_ctx.messages]
        cleaned_up_msg_id_list.sort()

        self.assertListEqual(messages_published, cleaned_up_msg_id_list)

    elif clean_up_queues_with_expired_messages:

        # Confirm that the environment variable was not used
        self.assertFalse(cleanup_result.has_env_delta)

        # We expect for no other tasks to have been performed in this
        self.assertListEqual(cleanup_result.topics_without_subscribers, [])
        self.assertListEqual(cleanup_result.topics_with_max_retention_reached, [])

        # Because its name is unique, there should be only one topic that was cleaned up
        self.assertEqual(len(cleanup_result.topics_cleaned_up), 1)
        self.assertEqual(len(cleanup_result.topics_with_expired_messages), 1)

        # This is our topic that was cleaned up
        topic_from_cleaned_up_list = cleanup_result.topics_cleaned_up[0]
        topic_based_on_expired_messages = cleanup_result.topics_with_expired_messages[0]

        # These two objects should be the same
        self.assertIs(topic_from_cleaned_up_list, topic_based_on_expired_messages)

        # Let's use a shorter name
        topic_ctx = cleanup_result.topics_cleaned_up[0]

        # These must be equal
        self.assertTrue(topic_ctx.name, topic_name)
        self.assertEqual(topic_ctx.limit_message_expiry, limit_message_expiry)
        self.assertEqual(topic_ctx.len_messages, len(messages_published))

        # Messages received are going to be a list of Bunch objects in an unspecified order.
        # We need to convert them to a simple list of sorted message IDs.
        cleaned_up_msg_id_list_from_topic = [elem['pub_msg_id'] for elem in topic_ctx.messages]
        cleaned_up_msg_id_list_from_topic.sort()

        self.assertListEqual(messages_published, cleaned_up_msg_id_list_from_topic)

        # We have another list in the same format as well and we need to check it too.
        # Note that the original code compared cleaned_up_msg_id_list_from_topic here again
        # (a copy-paste bug), which left cleaned_up_msg_id_list_from_expired_messages unused.
        cleaned_up_msg_id_list_from_expired_messages = [elem['pub_msg_id'] for elem in cleanup_result.expired_messages]
        cleaned_up_msg_id_list_from_expired_messages.sort()

        self.assertListEqual(messages_published, cleaned_up_msg_id_list_from_expired_messages)
# ################################################################################################################################
def test_cleanup_old_subscriptions_no_sub_keys(self) -> 'None':
    """ Confirms that a topic with no subscribers at all is not reported as cleaned up
    and that no sub_keys are found when nothing interacted with pub/sub during the delta.
    """
    if not os.environ.get('ZATO_TEST_SCHEDULER'):
        return

    # Indicate after a passage of how many seconds we will consider a subscribers as gone,
    # that is, after how many seconds since its last interaction time it will be deleted.
    env_delta = 1

    # Create a new topic for this test with a unique prefix,
    # which will ensure that there are no other subscriptions for it.
    now = datetime.utcnow().isoformat()
    prefix = f'{test_topic_prefix}{now}/'
    out = self.create_pubsub_topic(topic_prefix=prefix)
    topic_name = out['name']

    # Export a variable with delta as required by the underlying cleanup implementation
    os.environ[delta_environ_key] = str(env_delta)

    # Sleep for that many seconds to make sure that we are the only pub/sub participant
    # currently using the system. This will allow us to reliably check below
    # that there were no sub_keys used during the delta time since we created the topic
    # and when the cleanup procedure ran.
    sleep_extra = env_delta * 0.1
    sleep(env_delta + sleep_extra)

    # Run the cleanup procedure now
    cleanup_result = run_cleanup(
        clean_up_subscriptions = True,
        clean_up_topics_without_subscribers = True,
        clean_up_topics_with_max_retention_reached = True,
        clean_up_queues_with_expired_messages = True,
        scheduler_path=self.scheduler_path,
    )

    # We expect for the environment variable to have been taken into account
    self.assertEqual(cleanup_result.max_limit_sub_inactivity, env_delta)

    # We do not know how topics will have been cleaned up
    # because this test may be part of a bigger test suite
    # with other topics, subscribers and message publications.
    # However, because we do not have any subscription to that very topic,
    # it means that we do not expect to find it in the list of topics cleaned up
    # and this is what we are testing below, i.e. that it was not cleaned up.
    for item in cleanup_result.topics_cleaned_up:
        if item.name == topic_name:
            self.fail('Topic `{}` should not be among `{}` (topics_cleaned_up)'.format(
                topic_name, cleanup_result.topics_cleaned_up))

    for item in cleanup_result.topics_without_subscribers:
        if item.name == topic_name:
            self.fail('Topic `{}` should not be among `{}` (topics_cleaned_up)'.format(
                topic_name, cleanup_result.topics_without_subscribers))

    # We are sure that have been the only potential user of the pub/sub system
    # while the test was running and, because we did not publish anything,
    # nor did we subscribe to anything, we can be sure that there have been
    # no subscriptions found when the cleanup procedure ran.
    self.assertEqual(len(cleanup_result.found_sk_list), 0)
    self.assertListEqual(cleanup_result.found_sk_list, [])
# ################################################################################################################################
def test_cleanup_old_subscriptions_one_sub_key_with_env_delta_default_topic(self):

    # Skip unless scheduler tests were explicitly enabled
    if not os.environ.get('ZATO_TEST_SCHEDULER'):
        return

    # An explicit delta, in seconds, to clean up messages by
    env_delta = 1

    # A new topic is created for the purposes of this test alone
    topic_name = self.create_pubsub_topic()['name']

    # Invoke the shared test implementation now
    self._run_cleanup_old_pubsub_one_sub_key(
        topic_name,
        env_delta=env_delta,
        limit_retention=None,
        limit_message_expiry=None,
        limit_sub_inactivity=None,
        clean_up_subscriptions = True,
        clean_up_topics_without_subscribers = True,
        clean_up_topics_with_max_retention_reached = True,
        clean_up_queues_with_expired_messages = True,
    )
# ################################################################################################################################
def test_cleanup_old_subscriptions_one_sub_key_with_env_delta_new_topic(self):

    # Skip unless scheduler tests were explicitly enabled
    if not os.environ.get('ZATO_TEST_SCHEDULER'):
        return

    # An explicit delta, in seconds, to clean up messages by.
    # I.e. even though a new test topic is created below, the delta is still given on input.
    env_delta = 1

    # A new topic is created for the purposes of this test alone
    topic_name = self.create_pubsub_topic()['name']

    # Invoke the shared test implementation now
    self._run_cleanup_old_pubsub_one_sub_key(
        topic_name,
        env_delta=env_delta,
        limit_retention=None,
        limit_message_expiry=None,
        limit_sub_inactivity=None,
        clean_up_subscriptions = True,
        clean_up_topics_without_subscribers = True,
        clean_up_topics_with_max_retention_reached = True,
        clean_up_queues_with_expired_messages = True,
    )
# ################################################################################################################################
def test_cleanup_old_subscriptions_one_sub_key_no_env_delta(self):

    # Skip unless scheduler tests were explicitly enabled
    if not os.environ.get('ZATO_TEST_SCHEDULER'):
        return

    # After how many seconds of inactivity subscriptions should be deleted
    limit_sub_inactivity = 1

    # A new topic is created for the purposes of this test alone
    topic_name = self.create_pubsub_topic()['name']

    # Invoke the shared test implementation now
    self._run_cleanup_old_pubsub_one_sub_key(
        topic_name,
        env_delta=None,
        limit_retention=None,
        limit_message_expiry=None,
        limit_sub_inactivity=limit_sub_inactivity,
        clean_up_subscriptions = True,
        clean_up_topics_without_subscribers = True,
        clean_up_topics_with_max_retention_reached = True,
        clean_up_queues_with_expired_messages = True,
    )
# ################################################################################################################################
def test_cleanup_old_subscriptions_one_sub_key_env_delta_overrides_topic_delta(self):

    # Skip unless scheduler tests were explicitly enabled
    if not os.environ.get('ZATO_TEST_SCHEDULER'):
        return

    # A short, environment-provided delta which is expected to take precedence
    # over the explicit inactivity limit configured below. In other words,
    # the environment variable has priority over the topic's own configuration.
    env_delta = 1

    # A deliberately huge inactivity limit that the environment delta should override
    limit_sub_inactivity = 123456789

    # A new topic is created for the purposes of this test alone
    topic_name = self.create_pubsub_topic()['name']

    # Invoke the shared test implementation now
    self._run_cleanup_old_pubsub_one_sub_key(
        topic_name,
        env_delta=env_delta,
        limit_retention=None,
        limit_message_expiry=None,
        limit_sub_inactivity=limit_sub_inactivity,
        clean_up_subscriptions = True,
        clean_up_topics_without_subscribers = True,
        clean_up_topics_with_max_retention_reached = True,
        clean_up_queues_with_expired_messages = True,
    )
# ################################################################################################################################
def test_cleanup_max_topic_retention_exceeded(self) -> 'None':

    # Skip unless scheduler tests were explicitly enabled
    if not os.environ.get('ZATO_TEST_SCHEDULER'):
        return

    # After how many seconds subscriber-less messages become eligible for deletion from topics
    limit_retention = 1

    # Build a unique, timestamped prefix for the topic,
    # which guarantees that no other subscriptions exist for it.
    now = datetime.utcnow().isoformat()
    prefix = f'{test_topic_prefix}retention/{now}/'

    out = self.create_pubsub_topic(topic_prefix=prefix, limit_retention=limit_retention)
    topic_name = out['name']

    # Invoke the shared test implementation now - only the retention-based cleanup is enabled
    self._run_cleanup_old_pubsub_one_sub_key(
        topic_name,
        env_delta=None,
        limit_retention=limit_retention,
        limit_message_expiry=None,
        limit_sub_inactivity=None,
        clean_up_subscriptions = False,
        clean_up_topics_without_subscribers = False,
        clean_up_topics_with_max_retention_reached = True,
        clean_up_queues_with_expired_messages = False,
    )
# ################################################################################################################################
def test_cleanup_messages_already_expired(self) -> 'None':

    # Skip unless scheduler tests were explicitly enabled
    if not os.environ.get('ZATO_TEST_SCHEDULER'):
        return

    # After how many seconds messages are considered expired
    limit_message_expiry = 1

    # Build a unique, timestamped prefix for the topic,
    # which guarantees that no other subscriptions exist for it.
    now = datetime.utcnow().isoformat()
    prefix = f'{test_topic_prefix}expiration/{now}/'

    out = self.create_pubsub_topic(topic_prefix=prefix, limit_message_expiry=limit_message_expiry)
    topic_name = out['name']

    # Invoke the shared test implementation now - only the expiration-based cleanup is enabled
    self._run_cleanup_old_pubsub_one_sub_key(
        topic_name,
        env_delta=None,
        limit_retention=None,
        limit_message_expiry=limit_message_expiry,
        limit_sub_inactivity=None,
        clean_up_subscriptions = False,
        clean_up_topics_without_subscribers = False,
        clean_up_topics_with_max_retention_reached = False,
        clean_up_queues_with_expired_messages = True,
    )
# ################################################################################################################################
# ################################################################################################################################
# Run the test suite when this module is executed as a script
if __name__ == '__main__':
    _ = main()
# ################################################################################################################################
# ################################################################################################################################
| 22,858
|
Python
|
.py
| 392
| 48.201531
| 130
| 0.588075
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,419
|
__init__.py
|
zatosource_zato/code/zato-scheduler/src/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
| 154
|
Python
|
.py
| 5
| 29.4
| 64
| 0.687075
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,420
|
__init__.py
|
zatosource_zato/code/zato-scheduler/src/zato/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# pkgutil-style namespace package - merge this package's __path__ across
# all installed distributions that provide the same top-level package.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
# Legacy setuptools namespace declaration, kept for pre-PEP 420 installs
__import__('pkg_resources').declare_namespace(__name__)
| 287
|
Python
|
.py
| 8
| 34.375
| 64
| 0.683636
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,421
|
util.py
|
zatosource_zato/code/zato-scheduler/src/zato/scheduler/util.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# ################################################################################################################################
# ################################################################################################################################
if 0:
from zato.client import AnyServiceInvoker, ZatoClient
from zato.common.typing_ import anydict
AnyServiceInvoker = AnyServiceInvoker
# ################################################################################################################################
# ################################################################################################################################
def _set_up_zato_client_by_server_path(server_path:'str') -> 'AnyServiceInvoker':
    """ Builds a Zato client based on a server's local filesystem path. """

    # Zato
    from zato.common.util.api import get_client_from_server_conf

    # require_server=False presumably means the server does not have to be running
    # when the client is built - TODO confirm against get_client_from_server_conf
    return get_client_from_server_conf(server_path, require_server=False)
# ################################################################################################################################
def _set_up_zato_client_by_remote_details(
    server_use_tls: 'bool',
    server_host: 'str',
    server_port: 'int',
    server_username: 'str',
    server_password: 'str'
) -> 'ZatoClient':
    """ Builds a Zato client out of explicit remote connection details. """

    # Zato
    from zato.client import get_client_from_credentials

    # Combine the host and port into a single address ..
    address = f'{server_host}:{server_port}'

    # .. pair up the credentials ..
    credentials = (server_username, server_password)

    # .. and build the client now.
    return get_client_from_credentials(server_use_tls, address, credentials)
# ################################################################################################################################
def set_up_zato_client(config:'anydict') -> 'AnyServiceInvoker':
    """ Returns a client for invoking a Zato server, based on the scheduler's configuration. """

    # New in 3.2, hence optional
    server_config = config.get('server')

    # No server configuration at all -> assume this is a default quickstart cluster ..
    if not server_config:

        # This is what quickstart environments use by default
        return _set_up_zato_client_by_server_path('/opt/zato/env/qs-1')

    # .. a local server path, if given, takes precedence ..
    if server_config.get('server_path'):
        return _set_up_zato_client_by_server_path(server_config.server_path)

    # .. otherwise, connect to a remote server using explicit credentials.
    return _set_up_zato_client_by_remote_details(
        server_config.get('server_use_tls'),
        server_config['server_host'],
        server_config['server_port'],
        server_config['server_username'],
        server_config['server_password']
    ) # type: ignore
# ################################################################################################################################
| 3,070
|
Python
|
.py
| 57
| 47.105263
| 130
| 0.458069
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,422
|
backend.py
|
zatosource_zato/code/zato-scheduler/src/zato/scheduler/backend.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import datetime
from logging import getLogger
from traceback import format_exc
# datetime
from dateutil.rrule import rrule, SECONDLY
# gevent
import gevent # Imported directly so it can be mocked out in tests
from gevent import lock, sleep
# paodate
from paodate import Delta
# Python 2/3 compatibility
from zato.common.ext.future.utils import iterkeys, itervalues
# Zato
from zato.common.api import FILE_TRANSFER, SCHEDULER
from zato.common.util.api import asbool, make_repr, new_cid, spawn_greenlet
from zato.common.util.scheduler import load_scheduler_jobs_by_api, load_scheduler_jobs_by_odb, add_startup_jobs_to_odb_by_api, \
add_startup_jobs_to_odb_by_odb
from zato.scheduler.cleanup.cli import start_cleanup
# ################################################################################################################################
# ################################################################################################################################
if 0:
from zato.common.typing_ import stranydict
from zato.scheduler.server import SchedulerServerConfig, SchedulerAPI
# ################################################################################################################################
# ################################################################################################################################
logger = getLogger(__name__)
# ################################################################################################################################
# ################################################################################################################################
initial_sleep = 0.1
# ################################################################################################################################
# ################################################################################################################################
class Interval:
    """ Describes how often a job runs, expressed either as a combination of
    days/hours/minutes/seconds or directly as a total number of seconds.
    """
    def __init__(self, days:'int'=0, hours:'int'=0, minutes:'int'=0, seconds:'int'=0, in_seconds:'int'=0) -> 'None':

        # Each unit is coerced to an integer unless it is falsy,
        # in which case the original value is stored as given.
        self.days = int(days) if days else days
        self.hours = int(hours) if hours else hours
        self.minutes = int(minutes) if minutes else minutes
        self.seconds = int(seconds) if seconds else seconds

        # Use the pre-computed total if one was given, otherwise compute it from the units
        if in_seconds:
            self.in_seconds = in_seconds
        else:
            self.in_seconds = self.get_in_seconds()

    def __str__(self):
        return make_repr(self)

    __repr__ = __str__

    def get_in_seconds(self):
        """ Returns the total length of this interval in seconds. """
        total = Delta(days=self.days, hours=self.hours, minutes=self.minutes, seconds=self.seconds)
        return total.total_seconds
# ################################################################################################################################
# ################################################################################################################################
class Job:
    """ A single scheduler job - holds its configuration, computes when it should run
    next and invokes its callback in a loop until max_repeats is reached or it is stopped.
    """
    def __init__(self, id, name, type, interval, start_time=None, callback=None, cb_kwargs=None, max_repeats=None,
        on_max_repeats_reached_cb=None, is_active=True, clone_start_time=False, cron_definition=None, service=None,
        extra=None, old_name=None):
        self.id = id
        self.name = name
        self.type = type
        self.interval = interval
        self.callback = callback
        self.cb_kwargs = cb_kwargs or {}
        self.max_repeats = max_repeats
        self.on_max_repeats_reached_cb = on_max_repeats_reached_cb
        self.is_active = is_active
        self.cron_definition = cron_definition
        self.service = service
        self.extra = extra

        # This is used by the edit action to be able to discern if an edit did not include a rename
        self.old_name = old_name

        # Run-time state, reset each time the scheduler process is started
        self.current_run = 0 # Starts over each time scheduler is started
        self.max_repeats_reached = False
        self.max_repeats_reached_at = None
        self.keep_running = True

        # Reuse the given start time as-is when cloning, compute the next fire time
        # for cron-style jobs, otherwise derive it from the interval configuration.
        if clone_start_time:
            self.start_time = start_time

        elif self.type == SCHEDULER.JOB_TYPE.CRON_STYLE:
            now = datetime.datetime.utcnow()
            self.start_time = now + datetime.timedelta(seconds=(self.get_sleep_time(now)))

        else:
            self.start_time = self.get_start_time(start_time if start_time is not None else datetime.datetime.utcnow())

        # How long to sleep between checks while waiting for start_time to arrive,
        # plus an optional per-iteration callback invoked while waiting.
        self.wait_sleep_time = 1
        self.wait_iter_cb = None
        self.wait_iter_cb_args = ()

# ################################################################################################################################

    def __str__(self):
        return make_repr(self)

    # Jobs are hashed, compared and sorted by name so they can be kept in dicts and sorted containers
    def __hash__(self):
        return hash(self.name)

    def __eq__(self, other):
        return self.name == other.name

    def __lt__(self, other):
        return self.name < other.name

# ################################################################################################################################

    def clone(self, name=None, is_active=None):
        """ Returns a copy of this job, optionally with a new is_active flag.
        The copy reuses this job's start_time as-is (clone_start_time=True).
        """
        # It will not be None if an edit changed it from True to False or the other way around
        is_active = is_active if is_active is not None else self.is_active

        return Job(self.id, self.name, self.type, self.interval, self.start_time, self.callback, self.cb_kwargs,
            self.max_repeats, self.on_max_repeats_reached_cb, is_active, True, self.cron_definition, self.service,
            self.extra)

# ################################################################################################################################

    def get_start_time(self, start_time):
        """ Converts initial start time to the time the job should be invoked next.

        For instance, assume the scheduler has just been started. Given this job config ..

        - start_time: 2019-11-23 13:15:17
        - interval: 90 seconds
        - now: 2019-11-23 17:32:44

        .. a basic approach is to add 90 seconds to now and schedule the job. This would even
        work for jobs that have very short intervals when no one usually cares that much if a job
        is 15 seconds off or not.

        However, consider this series of events ..

        - start_time: 2019-11-23 13:00:00
        - interval: 86400 seconds (1 day)
        - the job is started
        - at 2019-11-23 21:15:00 the scheduler is stopped and started again

        .. now we don't want for the scheduler to start the job at 21:15:00 with an interval of one day,
        the job should rather wait till the next day so that the computed start_time should in fact be 2019-11-24 13:00:00.
        """
        # We have several scenarios to handle assuming that first_run_time = start_time + interval
        #
        # 1) first_run_time > now
        # 2a) first_run_time <= now and first_run_time + interval_in_seconds > now
        # 2b) first_run_time <= now and first_run_time + interval_in_seconds <= now
        #
        # 1) is quick - start_time simply becomes first_run_time
        # 2a) means we already seen some executions of this job and there's still at least one in the future
        # 2b) means we already seen some executions of this job and it won't be run in the future anymore

        now = datetime.datetime.utcnow()
        interval = datetime.timedelta(seconds=self.interval.in_seconds)

        if start_time > now:
            return start_time

        first_run_time = start_time + interval

        if first_run_time > now:
            return first_run_time

        else:
            # Enumerate the job's would-be run times to find the most recent one before now
            runs = rrule(SECONDLY, interval=int(self.interval.in_seconds), dtstart=start_time, count=self.max_repeats)
            last_run_time = runs.before(now)
            next_run_time = last_run_time + interval

            if next_run_time >= now:
                return next_run_time

            # The assumption here is that all one-time jobs are always active at the instant we evaluate them here.
            elif next_run_time < now and self.type == SCHEDULER.JOB_TYPE.ONE_TIME and self.is_active:

                # The delay is 100% arbitrary
                return now + datetime.timedelta(seconds=10)

            else:
                # We must have already run out of iterations
                self.max_repeats_reached = True
                self.max_repeats_reached_at = next_run_time
                self.keep_running = False

                # Note that no start_time is returned in this branch
                logger.info(
                    'Cannot compute start_time. Job `%s` max repeats reached at `%s` (UTC)',
                    self.name, self.max_repeats_reached_at)

# ################################################################################################################################

    def get_context(self):
        """ Returns a dict of context data for the job's callback, with a fresh correlation ID.
        """
        ctx = {
            'cid':new_cid(),
            'start_time': self.start_time.isoformat(),
            'cb_kwargs': self.cb_kwargs
        }

        # Cron-style jobs carry their cron definition, all other jobs their interval
        if self.type == SCHEDULER.JOB_TYPE.CRON_STYLE:
            ctx['cron_definition'] = self.cron_definition
        else:
            ctx['interval_in_seconds'] = self.interval.in_seconds

        for name in 'id', 'name', 'current_run', 'max_repeats_reached', 'max_repeats', 'type':
            ctx[name] = getattr(self, name)

        return ctx

# ################################################################################################################################

    def get_sleep_time(self, now):
        """ Returns a number of seconds the job should sleep for before the next run.
        For interval-based jobs this is a constant value pre-computed well ahead by self.interval
        but for cron-style jobs the value is obtained each time it's needed.
        """
        if self.type == SCHEDULER.JOB_TYPE.INTERVAL_BASED:
            return self.interval.in_seconds
        elif self.type == SCHEDULER.JOB_TYPE.CRON_STYLE:
            return self.interval.next(now)
        else:
            raise ValueError('Unsupported job type `{}` ({})'.format(self.type, self.name))

# ################################################################################################################################

    def _spawn(self, *args, **kwargs):
        """ A thin wrapper so that it is easier to mock this method out in unit-tests.
        """
        return spawn_greenlet(*args, **kwargs)

    def main_loop(self):
        """ The job's main loop - invokes the callback once per iteration until
        keep_running becomes False or max_repeats is reached. Returns True on exit.
        """
        logger.info('Job entering main loop `%s`', self)

        _sleep = gevent.sleep

        try:
            while self.keep_running:
                try:
                    self.current_run += 1

                    # Perhaps we've already been executed enough times
                    if self.max_repeats and self.current_run == self.max_repeats:
                        self.keep_running = False
                        self.max_repeats_reached = True
                        self.max_repeats_reached_at = datetime.datetime.utcnow()

                        if self.on_max_repeats_reached_cb:
                            self.on_max_repeats_reached_cb(self)

                    # Invoke callback in a new greenlet so it doesn't block the current one.
                    self._spawn(self.callback, **{'ctx':self.get_context()})

                except Exception:
                    logger.warning(format_exc())

                finally:

                    # pylint: disable=lost-exception

                    # Pause the greenlet for however long is needed if it is not a one-off job
                    if self.type == SCHEDULER.JOB_TYPE.ONE_TIME:
                        return True
                    else:
                        _sleep(self.get_sleep_time(datetime.datetime.utcnow()))

            logger.info('Job leaving main loop `%s` after %d iterations', self, self.current_run)

        except Exception:
            logger.warning(format_exc())

        return True

# ################################################################################################################################

    def run(self):
        """ Entry point for the job's greenlet - waits until start_time arrives
        and then hands control over to main_loop.
        """
        # OK, we're ready
        try:

            # If we are a job that triggers file transfer channels we do not start
            # unless our extra data is filled in. Otherwise, we would not trigger any transfer anyway.
            if self.service == FILE_TRANSFER.SCHEDULER_SERVICE and (not self.extra):
                logger.warning('Skipped file transfer job `%s` without extra set `%s` (%s)', self.name, self.extra, self.service)
                return

            if not self.start_time:
                logger.warning('Job `%s` cannot start without start_time set', self.name)
                return

            logger.info('Job starting `%s`', self)

            _utcnow = datetime.datetime.utcnow
            _sleep = gevent.sleep

            # If the job has a start time in the future, sleep until it's ready to go.
            now = _utcnow()

            while self.start_time > now:
                _sleep(self.wait_sleep_time)

                # Optional per-iteration callback while waiting (used in tests)
                if self.wait_iter_cb:
                    self.wait_iter_cb(self.start_time, now, *self.wait_iter_cb_args)

                now = _utcnow()

            self.main_loop()

        except Exception:
            logger.warning(format_exc())
# ################################################################################################################################
# ################################################################################################################################
class Scheduler:
    """ The scheduler proper - keeps track of all the jobs defined, spawns greenlets
    to run them in and offers an API to create, edit, execute and unschedule jobs.
    """
    def __init__(self, config:'SchedulerServerConfig', api:'SchedulerAPI') -> 'None':
        self.config = config
        self.api = api
        self.on_job_executed_cb = config.on_job_executed_cb
        self.current_status = config.current_status
        self.startup_jobs = config.startup_jobs
        self.odb = config.odb
        self.jobs = {}          # Maps job names to Job objects
        self.job_greenlets = {} # Maps job names to the greenlets the jobs run in
        self.keep_running = True
        self.lock = lock.RLock()
        self.sleep_time = 0.1
        self.iter_cb = None
        self.iter_cb_args = ()
        self.ready = False
        self._add_startup_jobs = config._add_startup_jobs
        self._add_scheduler_jobs = config._add_scheduler_jobs
        self.job_log = getattr(logger, config.job_log_level)
        self.initial_sleep_time = self.config.main.get('misc', {}).get('initial_sleep_time') or SCHEDULER.InitialSleepTime

        # We set it to True for backward compatibility with pre-3.2
        self.prefer_odb_config = self.config.raw_config.server.get('server_prefer_odb_config', True)

# ################################################################################################################################

    def on_max_repeats_reached(self, job):
        """ Invoked by a job once it has run its maximum number of times - deactivates it.
        """
        with self.lock:
            job.is_active = False

# ################################################################################################################################

    def _create(self, job, spawn=True):
        """ Actually creates a job. Must be called with self.lock held.
        """
        try:
            self.jobs[job.name] = job
            if job.is_active:
                if spawn:
                    self.spawn_job(job)
                    self.job_log('Job scheduled `%s` (%s, start: %s UTC)', job.name, job.type, job.start_time)
            else:
                logger.info('Skipping inactive job `%s`', job)
        except Exception:
            logger.warning(format_exc())

# ################################################################################################################################

    def create(self, *args, **kwargs):
        """ Creates a job under self.lock.
        """
        with self.lock:
            self._create(*args, **kwargs)

# ################################################################################################################################

    def edit(self, job):
        """ Edits a job - this means an already existing job is unscheduled and created again,
        i.e. it's not an in-place update.
        """
        with self.lock:
            self._unschedule_stop(job, '(src:edit)')

            # Pass is_active by keyword - clone's first positional parameter is `name`,
            # which clone does not actually use, so a positional call would silently
            # pass the flag into the wrong parameter.
            self._create(job.clone(is_active=job.is_active), True)

# ################################################################################################################################

    def _unschedule(self, job):
        """ Actually unschedules a job, returning True if it was found. Must be called with self.lock held.
        """
        # The job could have been renamed so we need to unschedule it by the previous name, if there is one
        name = job.old_name if job.old_name else job.name
        found = False
        job.keep_running = False

        if name in iterkeys(self.jobs):
            del self.jobs[name]
            found = True

        if name in iterkeys(self.job_greenlets):
            self.job_greenlets[name].kill(block=False, timeout=2.0)
            del self.job_greenlets[name]
            found = True

        return found

# ################################################################################################################################

    def _unschedule_stop(self, job, message):
        """ API for job deletion and stopping. Must be called with a self.lock held.
        """
        if self._unschedule(job):
            name = job.old_name if job.old_name else job.name
            logger.info('Unscheduled %s job %s `%s`', job.type, name, message)
        else:
            logger.info('Job not found `%s`', job)

# ################################################################################################################################

    def unschedule(self, job):
        """ Deletes a job.
        """
        with self.lock:
            self._unschedule_stop(job, '(src:unschedule)')

# ################################################################################################################################

    def unschedule_by_name(self, name):
        """ Deletes a job by its name.
        """
        _job = None

        with self.lock:
            for job in itervalues(self.jobs):
                if job.name == name:
                    _job = job
                    break

        # We can't do it with self.lock because deleting changes the set = RuntimeError.
        # Note that we unschedule the job found above (_job), not the loop variable.
        if _job:
            self.unschedule(_job)

# ################################################################################################################################

    def stop_job(self, job):
        """ Stops a job by deleting it.
        """
        with self.lock:
            self._unschedule_stop(job, 'stopped')

# ################################################################################################################################

    def stop(self):
        """ Stops all jobs and the scheduler itself.
        """
        with self.lock:

            # Iterate over Job objects rather than over the dict itself - previously,
            # sorted(self.jobs) yielded job *names* (the dict's keys), on which .clone()
            # does not exist, so this method used to raise AttributeError.
            jobs = sorted(itervalues(self.jobs))

            for job in jobs:
                self._unschedule_stop(job.clone(), 'stopped')

# ################################################################################################################################

    def sleep(self, value):
        """ A method introduced so the class is easier to mock out in tests.
        """
        gevent.sleep(value)

# ################################################################################################################################

    def is_scheduler_active(self) -> 'bool':
        """ Returns True only if the scheduler's current status is Active.
        """
        out = self.current_status == SCHEDULER.Status.Active
        return out

# ################################################################################################################################

    def execute(self, name):
        """ If the scheduler is active, executes a job no matter if it's active or not. One-time job are not unscheduled afterwards.
        """
        # Do not execute any jobs if we are not active
        if not self.is_scheduler_active():
            return

        with self.lock:
            for job in itervalues(self.jobs):
                if job.name == name:
                    self.on_job_executed(job.get_context(), False)
                    break
            else:
                logger.warning('No such job `%s` in `%s`', name, [elem.get_context() for elem in itervalues(self.jobs)])

# ################################################################################################################################

    def on_job_executed(self, ctx:'stranydict', unschedule_one_time:'bool'=True) -> 'None':
        """ Invoked each time a job fires - dispatches the job's context to the configured callback.
        """
        # Do not execute any jobs if we are not active
        if not self.is_scheduler_active():
            return

        # If this is a special, pub/sub cleanup job, run its underlying command in background ..
        if ctx['name'] == SCHEDULER.PubSubCleanupJob:
            start_cleanup(self.config.component_dir)

        # .. otherwise, this is a job that runs in a server.
        else:
            logger.debug('Executing `%s`, `%s`', ctx['name'], ctx)
            self.on_job_executed_cb(ctx)
            self.job_log('Job executed `%s`, `%s`', ctx['name'], ctx)

            # One-time jobs are deleted once they have fired
            if ctx['type'] == SCHEDULER.JOB_TYPE.ONE_TIME and unschedule_one_time:
                self.unschedule_by_name(ctx['name'])

# ################################################################################################################################

    def _spawn(self, *args, **kwargs):
        """ As in the Job class, this is a thin wrapper so that it is easier to mock this method out in unit-tests.
        """
        return spawn_greenlet(*args, **kwargs)

# ################################################################################################################################

    def spawn_job(self, job):
        """ Spawns a job's greenlet. Must be called with self.lock held.
        """
        job.callback = self.on_job_executed
        job.on_max_repeats_reached_cb = self.on_max_repeats_reached
        self.job_greenlets[job.name] = self._spawn(job.run)

# ################################################################################################################################

    def _init_jobs_by_odb(self):
        """ Adds startup jobs to the ODB and loads all jobs from it.
        """
        cluster_conf = self.config.main.cluster
        add_startup_jobs_to_odb_by_odb(cluster_conf.id, self.odb, self.startup_jobs, asbool(cluster_conf.stats_enabled))

        # Actually start jobs now, including any added above
        if self._add_scheduler_jobs:
            load_scheduler_jobs_by_odb(self.api, self.odb, self.config.main.cluster.id, spawn=False)

# ################################################################################################################################

    def _init_jobs_by_api(self):
        """ Adds startup jobs and loads all jobs via API calls to a remote server.
        """
        cluster_conf = self.config.main.cluster
        add_startup_jobs_to_odb_by_api(self.api, self.startup_jobs, asbool(cluster_conf.stats_enabled))

        # Actually start jobs now, including any added above
        if self._add_scheduler_jobs:
            load_scheduler_jobs_by_api(self.api, spawn=False)

# ################################################################################################################################

    def init_jobs(self):
        """ Initializes all jobs, either through the ODB or via remote API calls.
        """
        # Sleep to make sure that at least one server is running if the environment was started from quickstart scripts
        sleep(self.initial_sleep_time)

        # If we have ODB configuration, we will be initializing jobs in the ODB ..
        if self.prefer_odb_config:
            self._init_jobs_by_odb()

        # .. otherwise, we are initializing jobs via API calls to a remote server.
        else:
            spawn_greenlet(self._init_jobs_by_api)

# ################################################################################################################################

    def run(self):
        """ The scheduler's main loop - spawns all jobs and then iterates until told to stop,
        optionally invoking iter_cb on each iteration (used in tests).
        """
        try:
            logger.info('Scheduler will start to execute jobs in %s seconds', self.initial_sleep_time)

            # Add default jobs to the ODB and start all of them, the default and user-defined ones
            self.init_jobs()

            _sleep = self.sleep
            _sleep_time = self.sleep_time

            with self.lock:
                for job in sorted(itervalues(self.jobs)):

                    # Ignore pre-3.2 Redis-based jobs
                    if job.name.startswith('zato.stats'):
                        continue

                    if job.max_repeats_reached:
                        logger.info('Job `%s` already reached max runs count (%s UTC)', job.name, job.max_repeats_reached_at)
                    else:
                        self.spawn_job(job)

            # Ok, we're good now.
            self.ready = True

            logger.info('Scheduler started')

            while self.keep_running:
                _sleep(_sleep_time)

                if self.iter_cb:
                    self.iter_cb(*self.iter_cb_args)

        except Exception:
            logger.warning(format_exc())
# ################################################################################################################################
# ################################################################################################################################
| 25,148
|
Python
|
.py
| 450
| 45.842222
| 132
| 0.473729
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,423
|
api.py
|
zatosource_zato/code/zato-scheduler/src/zato/scheduler/api.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import logging
from traceback import format_exc
# ciso8601
try:
from zato.common.util.api import parse_datetime
except ImportError:
from dateutil.parser import parse as parse_datetime
# crontab
from crontab import CronTab
# gevent
from gevent import sleep
# Zato
from zato.common.api import MISC, SCHEDULER, ZATO_NONE
from zato.common.broker_message import SCHEDULER as SCHEDULER_MSG
from zato.common.util.api import new_cid, spawn_greenlet
from zato.common.util.config import parse_url_address
from zato.scheduler.backend import Interval, Job, Scheduler as _Scheduler
# ################################################################################################################################
# ################################################################################################################################
if 0:
from zato.broker.client import BrokerClient
from zato.common.typing_ import strdict, strdictnone
from zato.scheduler.server import Config
# ################################################################################################################################
# ################################################################################################################################
logger = logging.getLogger('zato_scheduler')
_has_debug = logger.isEnabledFor(logging.DEBUG)
# ################################################################################################################################
# ################################################################################################################################
def _start_date(job_data):
if isinstance(job_data.start_date, str):
# Remove timezone information as we assume that all times are in UTC
start_date = job_data.start_date.split('+')
start_date = start_date[0]
return parse_datetime(start_date)
return job_data.start_date
# ################################################################################################################################
# ################################################################################################################################
class SchedulerAPI:
    """ The job scheduler server. All of the operations assume the data was already validated
    by relevant Zato public API services.
    """
    def __init__(self, config:'Config'=None, run:'bool'=False):

        # NOTE(review): config defaults to None yet is dereferenced unconditionally
        # below, so callers must always provide one - confirm the default is needed.
        self.config = config
        self.broker_client = None # type: BrokerClient
        self.config.on_job_executed_cb = self.on_job_executed
        self.scheduler = _Scheduler(self.config, self)

        # Optionally, begin serving requests as soon as the object is built
        if run:
            self.serve_forever()

# ################################################################################################################################

    def serve_forever(self):
        """ Starts the underlying scheduler in its own greenlet and blocks until
        it reports readiness. All exceptions are logged, never propagated.
        """
        try:
            try:
                spawn_greenlet(self.scheduler.run)
            except Exception:
                logger.warning(format_exc())

            # Wait until the scheduler confirms that it is up and running
            while not self.scheduler.ready:
                sleep(0.1)

        except Exception:
            logger.warning(format_exc())

# ################################################################################################################################

    def invoke_service(self, name:'str', request:'strdictnone'=None) -> 'strdict':
        """ Invokes a service on a remote Zato server, retrying until a response arrives,
        and returns the response's data.
        """
        # Make sure we have a request to send
        request = request or {}

        # Assume there is no response until we have one
        response = None

        # Enrich the business data ..
        request['cluster_id'] = MISC.Default_Cluster_ID

        # .. keep looping until we have a response ..
        while not response:
            try:
                # .. log what we are about to do ..
                logger.info(f'Invoking service `{name}` with `{request}`')

                # .. invoke the server ..
                response = self.broker_client.zato_client.invoke(name, request, timeout=0.5)
            except Exception as e:
                logger.info(f'Service invocation error -> `{name}` -> {e}')
            finally:
                # NOTE(review): this finally clause also runs after a successful
                # invocation, so every call pays an extra one-second sleep and logs
                # a 'Waiting' line even when a response is already in - confirm intended.

                # .. if there is still none, wait a bit longer ..
                logger.info(f'Waiting for response from service `{name}`')

                # .. do wait now ..
                sleep(1)

        # .. if we are here, we have a response to return.
        logger.info(f'Returning response from service {name}')
        return response.data

# ################################################################################################################################

    def on_job_executed(self, ctx, extra_data_format=ZATO_NONE):
        """ Invoked by the underlying scheduler when a job is executed. Sends the actual execution request to the broker
        so it can be picked up by one of the parallel server's broker clients.
        """
        name = ctx['name']

        # The job's extra payload may arrive as bytes - decode it to str if so
        payload = ctx['cb_kwargs']['extra']
        if isinstance(payload, bytes):
            payload = payload.decode('utf8')

        msg = {
            'action': SCHEDULER_MSG.JOB_EXECUTED.value,
            'name':name,
            'service': ctx['cb_kwargs']['service'],
            'payload':payload,
            'cid':ctx['cid'],
            'job_type': ctx['type']
        }

        # Attach the data format only if one was given on input
        if extra_data_format != ZATO_NONE:
            msg['data_format'] = extra_data_format

        self.broker_client.invoke_async(msg, from_scheduler=True)

        if _has_debug:
            msg = 'Sent a job execution request, name [{}], service [{}], extra [{}]'.format(
                name, ctx['cb_kwargs']['service'], ctx['cb_kwargs']['extra'])
            logger.debug(msg)

        # Now, if it was a one-time job, it needs to be deactivated.
        if ctx['type'] == SCHEDULER.JOB_TYPE.ONE_TIME:
            msg = {
                'action': SCHEDULER_MSG.DELETE.value,
                'service': 'zato.scheduler.job.delete',
                'payload': {
                    'id':ctx['id'],
                },
                'cid': new_cid(),
            }
            self.broker_client.publish(msg, from_scheduler=True)

# ################################################################################################################################

    def create_edit(self, action, job_data, **kwargs):
        """ Invokes a handler appropriate for the given action and job_data.job_type.
        """
        # Resolve a handler method name such as create_one_time or edit_cron_style
        handler = '{0}_{1}'.format(action, job_data.job_type)
        handler = getattr(self, handler)

        try:
            handler(job_data, **kwargs)
        except Exception:
            logger.error('Caught exception `%s`', format_exc())

# ################################################################################################################################

    def create_edit_job(self, id, name, old_name, start_time, job_type, service, is_create=True, max_repeats=1, days=0, hours=0,
        minutes=0, seconds=0, extra=None, cron_definition=None, is_active=None, **kwargs):
        """ A base method for scheduling of jobs.
        """
        cb_kwargs = {
            'service': service,
            'extra': extra,
        }

        # Cron-style jobs use a crontab expression, all the other types use a fixed interval
        if job_type == SCHEDULER.JOB_TYPE.CRON_STYLE:
            interval = CronTab(cron_definition)
        else:
            interval = Interval(days=days, hours=hours, minutes=minutes, seconds=seconds)

        job = Job(id, name, job_type, interval, start_time, cb_kwargs=cb_kwargs, max_repeats=max_repeats,
            is_active=is_active, cron_definition=cron_definition, service=service, extra=extra, old_name=old_name)

        # Create a new job or edit an existing one, depending on the flag given
        func = self.scheduler.create if is_create else self.scheduler.edit
        func(job, **kwargs)

# ################################################################################################################################

    def create_edit_one_time(self, job_data, is_create=True, **kwargs):
        """ Re-/schedules the execution of a one-time job.
        """
        self.create_edit_job(job_data.id, job_data.name, job_data.get('old_name'), _start_date(job_data),
            SCHEDULER.JOB_TYPE.ONE_TIME, job_data.service, is_create, extra=job_data.extra,
            is_active=job_data.is_active, **kwargs)

    def create_one_time(self, job_data, **kwargs):
        """ Schedules the execution of a one-time job.
        """
        self.create_edit_one_time(job_data, **kwargs)

    def edit_one_time(self, job_data, **kwargs):
        """ First unschedules a one-time job and then schedules its execution.
        The operations aren't parts of an atomic transaction.
        """
        self.create_edit_one_time(job_data, False, **kwargs)

# ################################################################################################################################

    def create_edit_interval_based(self, job_data, is_create=True, **kwargs):
        """ Re-/schedules the execution of an interval-based job.
        """
        start_date = _start_date(job_data)

        # Each interval component is optional on input and defaults to zero,
        # except for repeats whose absence means 'repeat indefinitely'
        weeks = job_data.weeks if job_data.get('weeks') else 0
        days = job_data.days if job_data.get('days') else 0
        hours = job_data.hours if job_data.get('hours') else 0
        minutes = job_data.minutes if job_data.get('minutes') else 0
        seconds = job_data.seconds if job_data.get('seconds') else 0
        max_repeats = job_data.repeats if job_data.get('repeats') else None

        # Normalize the components to integers
        weeks = int(weeks)
        days = int(days)
        hours = int(hours)
        minutes = int(minutes)
        seconds = int(seconds)
        max_repeats = int(max_repeats) if max_repeats is not None else max_repeats

        # Note that weeks are folded into days below because the base method accepts days only
        self.create_edit_job(job_data.id, job_data.name, job_data.get('old_name'), start_date, SCHEDULER.JOB_TYPE.INTERVAL_BASED,
            job_data.service, is_create, max_repeats, days+weeks*7, hours, minutes, seconds, job_data.extra,
            is_active=job_data.is_active, **kwargs)

    def create_interval_based(self, job_data, **kwargs):
        """ Schedules the execution of an interval-based job.
        """
        self.create_edit_interval_based(job_data, **kwargs)

    def edit_interval_based(self, job_data, **kwargs):
        """ First unschedules an interval-based job and then schedules its execution.
        The operations aren't parts of an atomic transaction.
        """
        self.create_edit_interval_based(job_data, False, **kwargs)

# ################################################################################################################################

    def create_edit_cron_style(self, job_data, is_create=True, **kwargs):
        """ Re-/schedules the execution of a cron-style job.
        """
        start_date = _start_date(job_data)
        self.create_edit_job(job_data.id, job_data.name, job_data.get('old_name'), start_date, SCHEDULER.JOB_TYPE.CRON_STYLE,
            job_data.service, is_create, max_repeats=None, extra=job_data.extra, is_active=job_data.is_active,
            cron_definition=job_data.cron_definition, **kwargs)

    def create_cron_style(self, job_data, **kwargs):
        """ Schedules the execution of a cron-style job.
        """
        self.create_edit_cron_style(job_data, **kwargs)

    def edit_cron_style(self, job_data, **kwargs):
        """ First unschedules a cron-style job and then schedules its execution.
        The operations aren't parts of an atomic transaction.
        """
        self.create_edit_cron_style(job_data, False, **kwargs)

# ################################################################################################################################

    def delete(self, job_data, **kwargs):
        """ Deletes the job from the scheduler.
        """
        # Prefer the old name if one is given - this covers jobs that were renamed
        old_name = job_data.get('old_name')
        name = job_data.old_name if old_name else job_data.name
        logger.info('Deleting job %s (old_name:%s)', name, old_name)
        self.scheduler.unschedule_by_name(name, **kwargs)

# ################################################################################################################################

    def execute(self, job_data):
        # Runs the job immediately, outside of its normal schedule
        self.scheduler.execute(job_data.name)

# ################################################################################################################################

    def stop(self):
        # Stops the underlying scheduler
        self.scheduler.stop()

# ################################################################################################################################

    def filter(self, *ignored):
        """ Accept broker messages destined to our client.
        """
        return True

# ################################################################################################################################

    def on_broker_msg_SCHEDULER_CREATE(self, msg, *ignored_args):
        # Broker callback - creates a new job
        self.create_edit('create', msg)

# ################################################################################################################################

    def on_broker_msg_SCHEDULER_EDIT(self, msg, *ignored_args):
        # Broker callback - edits an existing job
        self.create_edit('edit', msg)

# ################################################################################################################################

    def on_broker_msg_SCHEDULER_DELETE(self, msg, *ignored_args):
        # Broker callback - deletes a job
        self.delete(msg)

# ################################################################################################################################

    def on_broker_msg_SCHEDULER_EXECUTE(self, msg, *ignored_args):
        # Broker callback - executes a job immediately
        self.execute(msg)

# ################################################################################################################################

    def on_broker_msg_SCHEDULER_SET_SERVER_ADDRESS(self, msg, *ignored_args):
        # Broker callback - points our broker client at a new server address,
        # filling in the default port if the incoming address does not have one.
        url = parse_url_address(msg.address, SCHEDULER.Default_Server_Port)
        self.broker_client.set_zato_client_address(url)
        logger.info('Set server address to -> %s', url)
# ################################################################################################################################
# ################################################################################################################################
| 14,336
|
Python
|
.py
| 253
| 48.367589
| 130
| 0.470458
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,424
|
__init__.py
|
zatosource_zato/code/zato-scheduler/src/zato/scheduler/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
| 238
|
Python
|
.py
| 6
| 38.166667
| 82
| 0.729258
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,425
|
main.py
|
zatosource_zato/code/zato-scheduler/src/zato/scheduler/main.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# First thing in the process
from gevent import monkey
_ = monkey.patch_all()
# stdlib
# ConcurrentLogHandler - updates stlidb's logging config on import so this needs to stay
try:
import cloghandler # type: ignore
except ImportError:
pass
else:
cloghandler = cloghandler # For pyflakes
# Zato
from zato.scheduler.server import SchedulerServer
# ################################################################################################################################
# ################################################################################################################################
def main():
    """ Entry point - delegates to SchedulerServer.start.
    """
    SchedulerServer.start()
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':
_ = main()
# ################################################################################################################################
# ################################################################################################################################
| 1,387
|
Python
|
.py
| 28
| 47.428571
| 130
| 0.307122
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,426
|
server.py
|
zatosource_zato/code/zato-scheduler/src/zato/scheduler/server.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import os
from logging import captureWarnings, getLogger
# Zato
from zato.broker.client import BrokerClient
from zato.common.api import SCHEDULER
from zato.common.aux_server.base import AuxServer, AuxServerConfig
from zato.common.crypto.api import SchedulerCryptoManager
from zato.common.typing_ import cast_
from zato.common.util.api import get_config, store_pidfile
from zato.scheduler.api import SchedulerAPI
from zato.scheduler.util import set_up_zato_client
# ################################################################################################################################
# ################################################################################################################################
if 0:
from zato.common.typing_ import callable_, type_
# ################################################################################################################################
# ################################################################################################################################
logger = getLogger(__name__)
# ################################################################################################################################
# ################################################################################################################################
class SchedulerServerConfig(AuxServerConfig):
    """ Configuration of a scheduler server, extending the base auxiliary-server config
    with scheduler-specific defaults and environment variable keys.
    """

    # Our current status, e.g. active or paused
    current_status:'str' = SCHEDULER.Status.Active

    # Configuration via environment variables
    env_key_status = SCHEDULER.Env.Status
    env_key_username = SCHEDULER.Env.Server_Username
    env_key_password = SCHEDULER.Env.Server_Password
    env_key_auth_required = SCHEDULER.Env.Server_Auth_Required

    def __init__(self) -> 'None':
        super().__init__()

        # Jobs to be added on startup - populated in SchedulerServer.after_config_hook
        self.startup_jobs = []

        # Callback invoked when a job executes - assigned by SchedulerAPI.__init__
        self.on_job_executed_cb = None

        # Log level for job-related messages
        self.job_log_level = 'debug'

        # Internal flags controlling which job categories are added on startup
        self._add_startup_jobs = True
        self._add_scheduler_jobs = True
# ################################################################################################################################
# ################################################################################################################################
class SchedulerServer(AuxServer):
    """ Main class spawning scheduler-related tasks and listening for HTTP API requests.
    """
    needs_logging_setup = True
    cid_prefix = 'zsch'
    server_type = 'Scheduler'
    conf_file_name = 'scheduler.conf'
    config_class = SchedulerServerConfig
    crypto_manager_class = SchedulerCryptoManager

    def __init__(self, config:'AuxServerConfig') -> 'None':
        super().__init__(config)

        # Configures a client to Zato servers
        self.zato_client = set_up_zato_client(config.main)

        # SchedulerAPI
        self.scheduler_api = SchedulerAPI(self.config)
        self.scheduler_api.broker_client = BrokerClient(zato_client=self.zato_client, server_rpc=None, scheduler_config=None)

# ################################################################################################################################

    def should_check_credentials(self) -> 'bool':
        """ Returns True if API credentials should be checked, based on configuration.
        """
        # Look up the API clients section under its current name, falling back
        # to the legacy `api_users` key, and then to an empty dict.
        api_clients = self.config.main.get('api_clients')
        api_clients = api_clients or self.config.main.get('api_users')
        api_clients = api_clients or {}

        # NOTE(review): the value is returned as read from configuration,
        # so it may be truthy-but-not-bool (e.g. a string) - confirm callers only test truthiness.
        auth_required = api_clients.get('auth_required')
        auth_required = auth_required or False
        return auth_required

# ################################################################################################################################

    @classmethod
    def before_config_hook(class_:'type_[AuxServer]') -> 'None':
        # Honour an explicit base directory if one is set in the environment
        if 'ZATO_SCHEDULER_BASE_DIR' in os.environ:
            os.chdir(os.environ['ZATO_SCHEDULER_BASE_DIR'])

        # Always attempt to store the PID file first
        store_pidfile(os.path.abspath('.'))

        # Capture warnings to log files
        captureWarnings(True)

# ################################################################################################################################

    @classmethod
    def after_config_hook(
        class_, # type: type_[AuxServer]
        config, # type: AuxServerConfig
        repo_location, # type: str
    ) -> 'None':

        super().after_config_hook(config, repo_location)

        # Reusable
        startup_jobs_config_file = 'startup_jobs.conf'

        # Fix up configuration so it uses the format that internal utilities expect
        startup_jobs_config = get_config(repo_location, startup_jobs_config_file, needs_user_config=False)

        for name, job_config in startup_jobs_config.items(): # type: ignore

            # Ignore jobs that have been removed
            if name in SCHEDULER.JobsToIgnore:
                logger.info('Ignoring job `%s (%s)`', name, startup_jobs_config_file)
                continue

            job_config['name'] = name # type: ignore
            cast_('SchedulerServerConfig', config).startup_jobs.append(job_config)

# ################################################################################################################################

    def get_action_func_impl(self, action_name:'str') -> 'callable_':
        """ Maps an incoming action name to its handler method on SchedulerAPI.
        """
        func_name = 'on_broker_msg_{}'.format(action_name)
        func = getattr(self.scheduler_api, func_name)
        return func

# ################################################################################################################################

    def serve_forever(self) -> 'None':
        # Start the scheduler first, then the base server's own main loop
        self.scheduler_api.serve_forever()
        super().serve_forever()
# ################################################################################################################################
# ################################################################################################################################
| 6,064
|
Python
|
.py
| 106
| 51.169811
| 130
| 0.458756
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,427
|
cli.py
|
zatosource_zato/code/zato-scheduler/src/zato/scheduler/cleanup/cli.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
from logging import getLogger
# ################################################################################################################################
# ################################################################################################################################
logger = getLogger(__name__)
# ################################################################################################################################
# ################################################################################################################################
def start_cleanup(path:'str') -> 'None':
    """ Invokes the pub/sub cleanup CLI command for the scheduler installation under `path`,
    logging the command's output. This is a no-op on Windows.
    """

    # Zato
    from zato.common.util.platform_ import is_windows

    if is_windows:
        return

    # sh
    from sh import ErrorReturnCode # type: ignore

    # Zato
    from zato.common.typing_ import cast_
    from zato.common.util.cli import CommandLineInvoker

    # Build the base invoker object
    invoker = CommandLineInvoker()

    # Our cleanup command to execute
    cli_params = [
        'pubsub',
        'cleanup',
        '--path',
        path,
        '--verbose',
    ]

    try:
        # We are ready to invoke it now
        out = invoker.invoke_cli(cli_params)
    except ErrorReturnCode as e:

        # Decode the subprocess output, replacing any undecodable bytes
        stdout = cast_('bytes', e.stdout)
        stdout = stdout.decode('utf8', errors='replace')

        stderr = cast_('bytes', e.stderr)
        stderr = stderr.decode('utf8', errors='replace')

        if stdout:
            logger.info('Cleanup return stdout -> `\n%s`', stdout)

        if stderr:
            # Use logger.warning - logger.warn is a deprecated alias (removed in Python 3.13)
            logger.warning('Cleanup return stderr -> `\n%s`', stderr)
    else:
        # Success - log the details of the finished process
        logger.info('Cleanup out.exit_code -> %s', out.exit_code)
        logger.info('Cleanup out.stderr -> %s', out.stderr)
        logger.info('Cleanup out.process.pid -> %s', out.process.pid if out.process else '(No PID)')
        logger.info('Cleanup out.cmd -> %s', cast_('str', out.cmd))
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':

    # stdlib
    import os

    # Zato
    from zato.common.util.platform_ import is_windows

    # We do not run on Windows
    if not is_windows:

        # Look up the path through an environment variable ..
        # (raises KeyError if ZATO_SCHEDULER_BASE_DIR is not set)
        path = os.environ['ZATO_SCHEDULER_BASE_DIR']

        # .. and run the cleanup job.
        start_cleanup(path)
# ################################################################################################################################
# ################################################################################################################################
| 2,984
|
Python
|
.py
| 64
| 40.671875
| 130
| 0.396403
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,428
|
__init__.py
|
zatosource_zato/code/zato-scheduler/src/zato/scheduler/cleanup/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
| 154
|
Python
|
.py
| 5
| 29.4
| 64
| 0.687075
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,429
|
core.py
|
zatosource_zato/code/zato-scheduler/src/zato/scheduler/cleanup/core.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# ConcurrentLogHandler - updates stlidb's logging config on import so this needs to stay
try:
import cloghandler # type: ignore
except ImportError:
pass
else:
cloghandler = cloghandler # For pyflakes
# stdlib
import os
from contextlib import closing
from dataclasses import dataclass
from datetime import datetime, timedelta
from json import loads
from logging import captureWarnings, getLogger
# gevent
from gevent import sleep
# Zato
from zato.broker.client import BrokerClient
from zato.common.api import PUBSUB
from zato.common.broker_message import SCHEDULER
from zato.common.marshal_.api import Model
from zato.common.odb.query.cleanup import delete_queue_messages, delete_topic_messages, \
get_topic_messages_already_expired, get_topic_messages_with_max_retention_reached, \
get_topic_messages_without_subscribers, get_subscriptions
from zato.common.odb.query.pubsub.delivery import get_sql_msg_ids_by_sub_key
from zato.common.odb.query.pubsub.topic import get_topics_basic_data
from zato.common.typing_ import cast_, list_
from zato.common.util.api import grouper, set_up_logging, tabulate_dictlist
from zato.common.util.time_ import datetime_from_ms, datetime_to_sec
from zato.scheduler.util import set_up_zato_client
# ################################################################################################################################
# ################################################################################################################################
if 0:
from logging import Logger
from sqlalchemy.orm.session import Session as SASession
from zato.common.typing_ import any_, anylist, callable_, dictlist, dtnone, floatnone, stranydict, strlist, \
strlistdict
from zato.scheduler.server import Config
SASession = SASession
# ################################################################################################################################
# ################################################################################################################################
_default_pubsub = PUBSUB.DEFAULT
topic_ctx_list = list_['TopicCtx']
# ################################################################################################################################
# ################################################################################################################################
@dataclass(init=False)
class GroupsCtx:
    """ Describes how a list of messages was split into fixed-size groups. """

    # The message groups themselves, as produced by CleanupManager._build_groups
    items: 'anylist'

    # NOTE(review): _build_groups assigns the number of groups to both of these
    # counters - confirm whether len_items was meant to hold the message count instead.
    len_items: 'int'
    len_groups: 'int'
# ################################################################################################################################
# ################################################################################################################################
@dataclass(init=False)
class TopicCtx:
    """ Per-topic state used during a cleanup run. """

    # Basic topic metadata
    id: 'int'
    name: 'str'

    # Messages found in the topic, along with their count
    messages: 'dictlist'
    len_messages: 'int'

    # Retention limit in its raw, datetime and float forms
    limit_retention: 'int'
    limit_retention_dt: 'datetime'
    limit_retention_float: 'float'

    limit_message_expiry: 'int'

    # How long, in seconds, a subscription may stay inactive before it is cleaned up
    limit_sub_inactivity: 'int'

    # Batches of messages to be deleted
    groups_ctx: 'GroupsCtx'
# ################################################################################################################################
# ################################################################################################################################
class CleanupConfig:
    """ Default limits and batch sizes for the cleanup procedure. """

    # One day in seconds; 60 seconds * 60 minutes * 24 hours = 86_400 seconds
    TopicRetentionTime = 86_400

    # (As above)
    DeltaNotInteracted = 86_400

    # How many messages to delete from a queue or topic in one batch
    MsgDeleteBatchSize = 5000

    # How long to sleep after deleting messages from a single group (no matter if queue, topic or subscriber)
    DeleteSleepTime = 0.02 # In seconds
# ################################################################################################################################
# ################################################################################################################################
@dataclass(init=False)
class CleanupCtx:
    """ Runtime state shared by all the steps of a single cleanup task. """

    # Identifier of this cleanup run
    run_id: 'str'

    # Aggregate counters collected while the task runs
    found_all_topics: 'int'
    found_sk_list: 'strlist'
    found_total_queue_messages: 'int'
    found_total_expired_messages: 'int'

    # The task's current time, as both a UNIX timestamp and a datetime object
    now: 'float'
    now_dt: 'datetime'

    # This is a list of all topics in the system that will be processed. The list is built upfront, when the task starts.
    all_topics: 'topic_ctx_list'

    # All topics that will have been cleaned up, no matter why
    topics_cleaned_up: 'topic_ctx_list'

    # Topics cleaned up because they had no subscribers
    topics_without_subscribers: 'topic_ctx_list'

    # Topics cleaned up because their messages reached max. retention time allowed
    topics_with_max_retention_reached: 'topic_ctx_list'

    # Topics cleaned up because they contained messages that were expired
    topics_with_expired_messages: 'topic_ctx_list'

    # A list of messages that were found to have expired
    expired_messages: 'dictlist'

    # Whether the subscription-inactivity limit was overridden through an environment variable
    has_env_delta: 'bool'
    max_limit_sub_inactivity: 'int'
    max_limit_sub_inactivity_dt: 'datetime'
    max_last_interaction_time: 'float'
    max_last_interaction_time_dt: 'datetime'

    # Flags indicating which parts of the cleanup procedure should run
    clean_up_subscriptions: 'bool'
    clean_up_topics_without_subscribers: 'bool'
    clean_up_topics_with_max_retention_reached: 'bool'
    clean_up_queues_with_expired_messages: 'bool'
# ################################################################################################################################
# ################################################################################################################################
@dataclass(init=False)
class CleanupPartsEnabled(Model):
    """ Flags indicating which parts of the cleanup procedure are enabled. """

    # Whether subscriptions should be cleaned up
    subscriptions: 'bool'

    # Whether topics without any subscribers should be cleaned up
    topics_without_subscribers: 'bool'

    # Whether topics with max. retention reached should be cleaned up
    topics_with_max_retention_reached: 'bool'

    # Whether queues containing expired messages should be cleaned up
    queues_with_expired_messages: 'bool'
# ################################################################################################################################
# ################################################################################################################################
class CleanupManager:
logger: 'Logger'
config: 'Config'
repo_location: 'str'
broker_client: 'BrokerClient'
parts_enabled: 'CleanupPartsEnabled'
    def __init__(self, repo_location:'str', parts_enabled:'CleanupPartsEnabled') -> 'None':

        # Where the scheduler's configuration repository is on disk
        self.repo_location = repo_location

        # Which parts of the cleanup procedure are enabled
        self.parts_enabled = parts_enabled
# ################################################################################################################################
    def init(self):
        """ One-time initialisation - builds the configuration object, the clients
        to Zato servers and sets up logging. Note that it changes the process's
        current working directory as a side effect.
        """
        # Zato
        from zato.common.crypto.api import SchedulerCryptoManager
        from zato.scheduler.server import SchedulerServer, SchedulerServerConfig

        # Make sure we are in the same directory that the scheduler is in
        base_dir = os.path.join(self.repo_location, '..', '..')
        base_dir = os.path.abspath(base_dir)
        os.chdir(base_dir)

        # Build our main configuration object
        self.config = SchedulerServerConfig.from_repo_location(
            SchedulerServer.server_type,
            self.repo_location,
            SchedulerServer.conf_file_name,
            SchedulerCryptoManager
        )

        # Configures a client to Zato servers
        self.zato_client = set_up_zato_client(self.config.main)
        self.broker_client = BrokerClient(zato_client=self.zato_client, server_rpc=None, scheduler_config=None)

        # Capture warnings to log files
        captureWarnings(True)

        # Logging configuration
        set_up_logging(self.repo_location)

        # Build our logger
        self.logger = getLogger(__name__)
# ################################################################################################################################
def _get_suffix(self, items:'any_', needs_space:'bool'=True) -> 'str':
len_out = len(items)
suffix = '' if len_out == 1 else 's'
if needs_space:
suffix += ' '
return suffix
# ################################################################################################################################
    def _build_groups(self, task_id:'str', label:'str', msg_list:'dictlist', group_size:'int', ctx_id:'str') -> 'GroupsCtx':
        """ Splits msg_list into groups of up to group_size elements each and returns
        a GroupsCtx describing them, logging a summary along the way.
        """
        out = GroupsCtx()

        # Chunk the input messages into groups of the requested size
        groups = grouper(group_size, msg_list)
        groups = list(groups)
        len_groups = len(groups)

        out.items = groups

        # NOTE(review): both counters receive the group count - confirm whether
        # len_items was meant to be the number of messages instead.
        out.len_items = len_groups
        out.len_groups = len_groups

        # Log a human-friendly summary of what was built
        suffix = self._get_suffix(groups, needs_space=False)
        self.logger.info('%s: %s %s turned into %s group%s, group_size:%s (%s)',
            task_id, len(msg_list), label, len_groups, suffix, group_size, ctx_id)

        return out
# ################################################################################################################################
def _get_subscriptions(self, task_id:'str', topic_ctx:'TopicCtx', cleanup_ctx:'CleanupCtx') -> 'anylist':
    """ Returns a list of dicts describing subscriptions for the input topic
    that had no interaction, or whose last interaction is older than the allowed
    inactivity limit (taken either per-topic or from the environment).
    """
    # Each topic has its own limit_sub_inactivity delta which means that we compute
    # the max_last_interaction time for each of them separately.
    # However, it is always possible to override the per-topic configuration
    # with an environment variable which is why we need to check that too.
    if cleanup_ctx.has_env_delta:
        topic_max_last_interaction_time_source = 'env'
        limit_sub_inactivity = cleanup_ctx.max_limit_sub_inactivity
        topic_max_last_interaction_time_dt = cleanup_ctx.max_last_interaction_time_dt
        topic_max_last_interaction_time = cleanup_ctx.max_last_interaction_time
    else:
        topic_max_last_interaction_time_source = 'topic'
        limit_sub_inactivity = topic_ctx.limit_sub_inactivity
        topic_max_last_interaction_time_dt = cleanup_ctx.now_dt - timedelta(seconds=limit_sub_inactivity)
        topic_max_last_interaction_time = datetime_to_sec(topic_max_last_interaction_time_dt)

    # Always create a new session so as not to block the database
    with closing(self.config.odb.session()) as session: # type: ignore
        result = get_subscriptions(task_id, session, topic_ctx.id, topic_ctx.name,
            topic_max_last_interaction_time, topic_max_last_interaction_time_dt, topic_max_last_interaction_time_source)

    # Convert SQL results to a dict that we can easily work with
    result = [elem._asdict() for elem in result]

    # This is what we will return. We need a new dict because we are adding
    # new columns and we want to preserve the insertion order.
    out = []

    for idx, elem in enumerate(result, 1):

        # Attach a 1-based ordinal so that log tables are easier to follow
        out_elem = {
            'idx': idx,
        }

        for key, value in elem.items():

            # Normalize values for display - empty ones become a placeholder
            # and interaction timestamps are converted to a readable form.
            if key in ('last_interaction_time', 'ext_client_id'):
                if not value:
                    value = '---'
                else:
                    if key == 'last_interaction_time':
                        value = datetime_from_ms(value * 1000) # type: ignore
                elem[key] = value # type: ignore

        out_elem.update(elem)
        out.append(out_elem)

    suffix = self._get_suffix(out)

    msg = '%s: Returning %s subscription%sfor topic %s '
    msg += 'with no interaction or last interaction time older than `%s` (s:%s; delta: %s; topic-delta:%s)'

    self.logger.info(msg, task_id, len(out), suffix, topic_ctx.name, topic_max_last_interaction_time_dt,
        topic_max_last_interaction_time_source,
        limit_sub_inactivity,
        topic_ctx.limit_sub_inactivity,
    )

    # Log the details only when there is anything to clean up
    if out:
        table = tabulate_dictlist(out, skip_keys='topic_opaque')
        self.logger.info('%s: ** Subscriptions to clean up **\n%s', task_id, table)

    return out
# ################################################################################################################################
def _cleanup_queue_msg_list(
    self,
    task_id:'str',
    cleanup_ctx:'CleanupCtx',
    ctx_label:'str',
    ctx_id:'str',
    msg_list:'dictlist'
) -> 'None':
    """ Deletes all the input queue messages in fixed-size batches, committing after
    each batch and updating the running message total in cleanup_ctx.
    """
    self.logger.info('%s: Building queue message groups for %s -> %s', task_id, ctx_label, ctx_id)
    groups_ctx = self._build_groups(task_id, 'queue message(s)', msg_list, CleanupConfig.MsgDeleteBatchSize, ctx_id)

    # Note that messages for each subscriber are deleted under a new session
    with closing(self.config.odb.session()) as session: # type: ignore

        # Iterate over groups to delete each of them ..
        for idx, group in enumerate(groups_ctx.items, 1):

            # .. extract message IDs from each group ..
            # .. (the `if elem` filter skips any fill-in elements the grouping may have added) ..
            msg_id_list = [elem['pub_msg_id'] for elem in group if elem]

            # .. log what we are about to do ..
            self.logger.info('%s: Deleting queue message group %s/%s (%s -> %s)',
                task_id, idx, groups_ctx.len_items, ctx_label, ctx_id)

            # .. delete the group ..
            delete_queue_messages(session, msg_id_list)

            # .. make sure to commit the progress of the transaction ..
            session.commit()

            # .. store for later use ..
            cleanup_ctx.found_total_queue_messages += len(msg_id_list)

            # .. confirm that we did it ..
            self.logger.info('%s: Deleted group %s/%s (%s -> %s)',
                task_id, idx, groups_ctx.len_items, ctx_label, ctx_id)

            # .. and sleep for a moment so as not to overwhelm the database.
            sleep(CleanupConfig.DeleteSleepTime)
# ################################################################################################################################
def _cleanup_sub(self, task_id:'str', cleanup_ctx:'CleanupCtx', sub:'stranydict') -> 'strlist':
    """ Cleans up an individual subscription. First it deletes old queue messages, then it notifies servers
    that a subscription object should be deleted as well.

    Returns the list of message dicts that were enqueued for the subscription's sub_key.
    """
    # Local aliases
    sub_key = sub['sub_key']

    self.logger.info('%s: ---------------', task_id)
    self.logger.info('%s: Looking up queue messages for %s', task_id, sub_key)

    # We look up all the messages in the database which is why the last_sql_run
    # needs to be set to a value that will be always matched
    # and the start of UNIX time, as expressed as a float, will always do.
    last_sql_run = 0.0

    # We look up messages up to this point in time, which is equal to the start of our job
    pub_time_max = cleanup_ctx.now

    # We assume there is always one cluster in the database and we can skip its ID
    cluster_id = None

    # Always create a new session so as not to block the database
    with closing(self.config.odb.session()) as session: # type: ignore
        sk_queue_msg_list = get_sql_msg_ids_by_sub_key(
            session, cluster_id, sub_key, last_sql_run, pub_time_max, include_unexpired_only=False, needs_result=True)

    # Convert SQL results to a dict that we can easily work with
    sk_queue_msg_list = [elem._asdict() for elem in sk_queue_msg_list]

    suffix = self._get_suffix(sk_queue_msg_list)
    self.logger.info('%s: Found %s message%sfor sub_key `%s` (ext: %s)',
        task_id, len(sk_queue_msg_list), suffix, sub_key, sub['ext_client_id'])

    # If any messages were found, log their details and delete them in batches
    if sk_queue_msg_list:
        table = tabulate_dictlist(sk_queue_msg_list)
        self.logger.debug('%s: ** Messages to clean up for sub_key `%s` **\n%s', task_id, sub_key, table)
        self._cleanup_queue_msg_list(task_id, cleanup_ctx, 'sub-cleanup', sub_key, sk_queue_msg_list)

    # At this point, we have already deleted all the enqueued messages for all the subscribers
    # that we have not seen in DeltaNotInteracted hours. It means that we can proceed now
    # to delete each subscriber too because we know that it will not cascade to any of its
    # now-already-deleted messages. However, we do not do it using SQL queries
    # because there may still exist references to such subscribers among self.pubsub and tasks in servers.
    # Thus, we invoke our server, telling it that a given subscriber can be deleted, and the server
    # will know what to delete and clean up next. Again, note that at this point there are no outstanding
    # messages for that subscribers because we have just deleted them (or, possibly, a few more will have been enqueued
    # by the time the server receives our request) which means that the server's action will not cascade
    # to many rows in the queue table which in turn means that the database will not block for a long time.
    self.logger.info('%s: Notifying server to delete sub_key `%s`', task_id, sub_key)
    self.broker_client.invoke_async({
        'service': 'zato.pubsub.endpoint.delete-endpoint-queue',
        'action': SCHEDULER.DELETE_PUBSUB_SUBSCRIBER.value,
        'payload': {
            'sub_key': sub_key,
        }
    }, from_scheduler=True)

    # Now, we can append the sub_key to the list of what has been processed
    cleanup_ctx.found_sk_list.append(sub_key)

    # Finally, we can return all the IDs of messages enqueued for that sub_key
    return sk_queue_msg_list
# ################################################################################################################################
def _cleanup_subscriptions(
    self,
    task_id:'str',
    cleanup_ctx:'CleanupCtx'
) -> 'CleanupCtx':
    """ Cleans up all subscriptions - all the old queue messages as well as their subscribers.
    """
    # For each topic we already know it exists ..
    for topic_ctx in cleanup_ctx.all_topics:

        # This is a list of all the pub_msg_id objects that we are going to remove from subscription queues.
        # It does not contain messages residing in topics that do not have any subscribers.
        # Currently, we populate this list but we do not use it for anything.
        queue_msg_list = []

        # Find all subscribers in the database ..
        subs = self._get_subscriptions(task_id, topic_ctx, cleanup_ctx)
        len_subs = len(subs)

        # .. and clean up their queues, if any were found ..
        for sub in subs:
            sub_key = sub['sub_key']
            endpoint_name = sub['endpoint_name']
            self.logger.info('%s: Cleaning up subscription %s/%s; %s -> %s (%s)',
                task_id, sub['idx'], len_subs, sub_key, endpoint_name, topic_ctx.name)

            # Clean up this sub_key and get the list of message IDs found for it ..
            sk_queue_msg_list = self._cleanup_sub(task_id, cleanup_ctx, sub)

            # .. append the per-sub_key message to the overall list of messages found for subscribers.
            queue_msg_list.extend(sk_queue_msg_list)

        # Use lazy %-style arguments consistently here, instead of the previous mix
        # of an f-string with %-placeholders in the same logging call.
        self.logger.info('%s: Cleaned up %d pub/sub queue message(s) from sk_list: %s (%s)',
            task_id, cleanup_ctx.found_total_queue_messages, cleanup_ctx.found_sk_list, topic_ctx.name)

        # .. and sleep for a moment so as not to overwhelm the database.
        sleep(CleanupConfig.DeleteSleepTime)

    return cleanup_ctx
# ################################################################################################################################
def _get_topics(self, task_id:'str', cleanup_ctx:'CleanupCtx') -> topic_ctx_list:
    """ Returns a list of TopicCtx objects, one for each topic found in the database,
    each populated with the topic's retention, expiry and inactivity limits.
    """
    # Our response to produce
    out = []

    # Topics found represented as dicts, not TopicCtx objects.
    # We use it for logging purposes because otherwise TopicCtx objects
    # would log their still empty information about messages, which could be misleading,
    # as though there were no messages for topics.
    topics_found = []

    # Always create a new session so as not to block the database
    with closing(self.config.odb.session()) as session: # type: ignore
        result = get_topics_basic_data(session)

    # Convert SQL results to a dict that we can easily work with
    result = [elem._asdict() for elem in result]

    # Extract minimum retention time from each topic. If it is not found, use the default one.
    for topic_dict in result:

        # This is always a dict
        topic_dict = cast_('dict', topic_dict)

        # Remove all the opaque attributes because we need only the minimum retention time
        opaque = topic_dict.pop('opaque1')
        if opaque:
            opaque = loads(opaque)
        else:
            opaque = opaque or {}

        # Not all topics will have the minimum retention time and related data configured,
        # in which case we use the relevant value from our configuration object.
        # Observe that these are limits, expressed as integers, not timestamps based on these limits.
        limit_retention = opaque.get('limit_retention') or _default_pubsub.LimitTopicRetention
        limit_message_expiry = opaque.get('limit_message_expiry') or _default_pubsub.LimitMessageExpiry
        limit_sub_inactivity = opaque.get('limit_sub_inactivity') or _default_pubsub.LimitSubInactivity

        # Timestamps are computed here
        limit_retention_dt = cleanup_ctx.now_dt - timedelta(seconds=limit_retention)
        limit_retention_float = datetime_to_sec(limit_retention_dt)

        # Add the now ready topic dict to a list that the logger will use later
        topics_found.append(topic_dict)

        # Populate the result for our caller's benefit
        topic_ctx = TopicCtx()
        topic_ctx.id = topic_dict['id']
        topic_ctx.name = topic_dict['name']
        topic_ctx.limit_retention = limit_retention
        topic_ctx.limit_retention_dt = limit_retention_dt
        topic_ctx.limit_retention_float = limit_retention_float
        topic_ctx.limit_message_expiry = limit_message_expiry
        topic_ctx.limit_sub_inactivity = limit_sub_inactivity
        topic_ctx.messages = []
        topic_ctx.len_messages = 0

        out.append(topic_ctx)

    self.logger.info('%s: Topics found -> %s', task_id, topics_found)

    return out
# ################################################################################################################################
def _delete_topic_messages_from_group(self, task_id:'str', groups_ctx:'GroupsCtx', topic_name:'str') -> 'None':
    """ Deletes from the database, one group at a time, all the topic messages
    described by the input groups context.
    """
    # A single new session is used for all the groups so as not to block the database
    with closing(self.config.odb.session()) as session: # type: ignore

        for group_idx, message_group in enumerate(groups_ctx.items, 1):

            # .. announce which group is being processed ..
            self.logger.info('%s: Deleting topic message group %s/%s (%s)',
                task_id, group_idx, groups_ctx.len_groups, topic_name)

            # .. collect the message IDs of this group, skipping any fill-in elements ..
            pub_msg_id_list = [item['pub_msg_id'] for item in message_group if item]

            # .. delete them and commit the transaction's progress ..
            delete_topic_messages(session, pub_msg_id_list)
            session.commit()

            # .. pause briefly so as not to overwhelm the database.
            sleep(CleanupConfig.DeleteSleepTime)
# ################################################################################################################################
def _delete_topic_messages(
    self,
    task_id:'str',
    topics_to_clean_up: 'topic_ctx_list',
    messages_to_delete: 'strlistdict',
    message_type_label: 'str',
) -> 'None':
    """ For each input topic, batches up its to-be-deleted messages
    and removes them from the database.
    """
    for topic_ctx in topics_to_clean_up:

        # Messages to be deleted for this particular topic
        topic_messages = messages_to_delete[topic_ctx.name]

        self.logger.info('%s: Cleaning up %s message(s) %s -> %s',
            task_id, len(topic_messages), message_type_label, topic_ctx.name)

        # Split this topic's messages into deletion batches ..
        topic_groups_ctx = self._build_groups(
            task_id, 'message(s)', topic_messages, CleanupConfig.MsgDeleteBatchSize, topic_ctx.name)

        # .. and delete them batch by batch.
        self._delete_topic_messages_from_group(task_id, topic_groups_ctx, topic_ctx.name)
# ################################################################################################################################
def _cleanup_topic_messages(
    self,
    task_id:'str',
    cleanup_ctx:'CleanupCtx',
    query:'callable_',
    message_type_label:'str',
    *,
    use_as_dict:'bool',
    use_topic_retention_time:'bool',
    max_time_dt: 'dtnone' = None,
    max_time_float: 'floatnone' = None,
) -> 'topic_ctx_list':
    """ Generic topic-message cleanup - runs the input query against each topic to find
    messages to delete, then deletes them, returning the TopicCtx list of topics
    that had any messages removed.

    If use_topic_retention_time is True, the max. publication time is computed per topic
    from its retention limit; otherwise max_time_dt / max_time_float are used for all topics.
    """
    # A dictionary mapping all the topics that have any messages to be deleted
    topics_to_clean_up = [] # type: topic_ctx_list

    # A dictionary mapping topic names to messages that need to be deleted from them
    messages_to_delete = {} # type: strlistdict

    # Always create a new session so as not to block the database
    with closing(self.config.odb.session()) as session: # type: ignore

        for topic_ctx in cleanup_ctx.all_topics:

            # We enter here if we check the max. allowed publication time
            # for each topic separately, based on its max. allowed retention time.
            # In other words, we are interested in topics that contain messages
            # whose retention time has been reached ..
            if use_topic_retention_time:
                per_topic_max_time_dt = topic_ctx.limit_retention_dt
                per_topic_max_time_float = topic_ctx.limit_retention_float

            # .. we enter here if simply want to find messages published
            # at any point in the past as long as it was before our task started.
            else:
                per_topic_max_time_dt = max_time_dt
                per_topic_max_time_float = max_time_float

            # Run our input query to look up messages to delete
            messages_for_topic = query(
                task_id,
                session,
                topic_ctx.id,
                topic_ctx.name,
                per_topic_max_time_dt,
                per_topic_max_time_float,
            )

            # .. convert the messages to dicts so as not to keep references to database objects ..
            # .. What method to choose depends on whether the underlying query uses SQLAlchemy ORM (with _asdict)
            # .. or whether it is a regular select statement, in which case dict() is used over a series of tuples.
            if use_as_dict:
                messages_for_topic = [elem._asdict() for elem in messages_for_topic]
            else:
                messages_for_topic = [dict(elem) for elem in messages_for_topic]

            # .. populate the context object with the newest information ..
            topic_ctx.len_messages = len(messages_for_topic)

            # .. populate the map of messages that need to be deleted
            per_topic_messages_to_delete_list = messages_to_delete.setdefault(topic_ctx.name, [])
            per_topic_messages_to_delete_list.extend(messages_for_topic)

            self.logger.info('%s: Found %d message(s) %s for topic %s',
                task_id, topic_ctx.len_messages, message_type_label, topic_ctx.name)

            # Save for later use if there are any messages that can be deleted for that topic
            if topic_ctx.len_messages:
                topic_ctx.messages.extend(per_topic_messages_to_delete_list)
                topics_to_clean_up.append(topic_ctx)

            # .. sleep for a moment so as not to overwhelm the database ..
            sleep(CleanupConfig.DeleteSleepTime)

    # Remove messages from all the topics found ..
    self._delete_topic_messages(task_id, topics_to_clean_up, messages_to_delete, message_type_label)

    # .. and return all the processed topics to our caller.
    return topics_to_clean_up
# ################################################################################################################################
def _cleanup_topic_messages_without_subscribers(
    self,
    task_id:'str',
    cleanup_ctx:'CleanupCtx'
) -> 'topic_ctx_list':
    """ Deletes topic messages that have no subscribers and were published
    before this task started.

    The before-task-start condition means messages published after the task began
    are left alone - a future run may pick them up if they never gain subscribers
    or once their retention limit is reached.

    Compare with self._clean_up_queues_with_expired_messages whose query checks
    expiration of messages that do have subscribers, whereas here only the absence
    of subscribers matters, not expiration.
    """
    return self._cleanup_topic_messages(
        task_id,
        cleanup_ctx,
        get_topic_messages_without_subscribers,
        'without subscribers',
        use_as_dict=True,
        use_topic_retention_time=False,
        max_time_dt=cleanup_ctx.now_dt,
        max_time_float=cleanup_ctx.now,
    )
# ################################################################################################################################
def _cleanup_topic_messages_with_max_retention_reached(
    self,
    task_id:'str',
    cleanup_ctx:'CleanupCtx'
) -> 'topic_ctx_list':
    """ Deletes messages still residing in topics whose max. retention time
    has already been reached.

    No explicit max. publication time is passed here - with use_topic_retention_time=True,
    the underlying cleanup method computes it per topic from each topic's own
    retention limit.
    """
    return self._cleanup_topic_messages(
        task_id,
        cleanup_ctx,
        get_topic_messages_with_max_retention_reached,
        'with max. retention reached',
        use_as_dict=True,
        use_topic_retention_time=True,
        max_time_dt=None,
        max_time_float=None,
    )
# ################################################################################################################################
def _clean_up_queues_with_expired_messages(
    self,
    task_id:'str',
    cleanup_ctx:'CleanupCtx'
) -> 'topic_ctx_list':
    """ Deletes messages that have already expired, limited to ones published
    before this task started.

    Compare with self._cleanup_topic_messages_without_subscribers whose query
    looks for messages with no subscribers regardless of expiration, whereas
    this one targets expired messages.
    """
    return self._cleanup_topic_messages(
        task_id,
        cleanup_ctx,
        get_topic_messages_already_expired,
        'already expired',
        use_as_dict=False,
        use_topic_retention_time=False,
        max_time_dt=cleanup_ctx.now_dt,
        max_time_float=cleanup_ctx.now,
    )
# ################################################################################################################################
def cleanup_pub_sub(
    self,
    task_id:'str',
    cleanup_ctx:'CleanupCtx'
) -> 'CleanupCtx':
    """ Runs each enabled pub/sub cleanup part in sequence, accumulating
    information about what was cleaned up in cleanup_ctx, which is returned.
    """
    # First, clean up all the old messages from subscription queues
    # as well as subscribers that have not used the system in the last delta seconds.
    if cleanup_ctx.clean_up_subscriptions:
        self.logger.info('Entering _cleanup_subscriptions')
        self._cleanup_subscriptions(task_id, cleanup_ctx)

    # Clean up topics that contain messages without subscribers
    if cleanup_ctx.clean_up_topics_without_subscribers:
        self.logger.info('Entering _cleanup_topic_messages_without_subscribers')
        topics_cleaned_up = self._cleanup_topic_messages_without_subscribers(task_id, cleanup_ctx)
        cleanup_ctx.topics_cleaned_up.extend(topics_cleaned_up)
        cleanup_ctx.topics_without_subscribers.extend(topics_cleaned_up)

    # Clean up topics that contain messages whose max. retention time has been reached
    if cleanup_ctx.clean_up_topics_with_max_retention_reached:
        self.logger.info('Entering _cleanup_topic_messages_with_max_retention_reached')
        topics_cleaned_up = self._cleanup_topic_messages_with_max_retention_reached(task_id, cleanup_ctx)
        cleanup_ctx.topics_cleaned_up.extend(topics_cleaned_up)
        cleanup_ctx.topics_with_max_retention_reached.extend(topics_cleaned_up)

    # Clean up queues that contain messages which were already expired
    if cleanup_ctx.clean_up_queues_with_expired_messages:
        self.logger.info('Entering _clean_up_queues_with_expired_messages')
        topics_cleaned_up = self._clean_up_queues_with_expired_messages(task_id, cleanup_ctx)
        cleanup_ctx.topics_cleaned_up.extend(topics_cleaned_up)
        cleanup_ctx.topics_with_expired_messages.extend(topics_cleaned_up)

        # Keep track of the individual expired messages and their total count
        for topic_ctx in topics_cleaned_up:
            cleanup_ctx.expired_messages.extend(topic_ctx.messages)
            cleanup_ctx.found_total_expired_messages += len(topic_ctx.messages)

    return cleanup_ctx
# ################################################################################################################################
def run(self) -> 'CleanupCtx':
    """ Entry point for the whole cleanup procedure - builds the overall context,
    finds all topics in the database and invokes each enabled cleanup part,
    returning the populated context object.
    """
    # Local aliases
    now_dt = datetime.utcnow()

    # This uniquely identifies our run. Month and day are zero-padded, the same
    # as the time part already was, so that IDs are unambiguous and sort
    # lexicographically - e.g. 20240101-010203 rather than 202411-010203,
    # where 202411 could mean either Jan 11th or Nov 1st.
    run_id = f'{now_dt.year}{now_dt.month:02}{now_dt.day:02}-{now_dt.hour:02}{now_dt.minute:02}{now_dt.second:02}'

    # IDs for our task
    task_id = f'CleanUp-{run_id}'

    # Log what parts of the cleanup procedure are enabled
    parts_enabled_log_msg = tabulate_dictlist([self.parts_enabled.to_dict()])
    self.logger.info('%s: Parts enabled: \n%s', task_id, parts_enabled_log_msg)

    # Overall context of the procedure, including a response to produce
    cleanup_ctx = CleanupCtx()
    cleanup_ctx.run_id = run_id
    cleanup_ctx.found_all_topics = 0
    cleanup_ctx.found_sk_list = []
    cleanup_ctx.found_total_queue_messages = 0
    cleanup_ctx.found_total_expired_messages = 0
    cleanup_ctx.topics_cleaned_up = []
    cleanup_ctx.topics_without_subscribers = []
    cleanup_ctx.topics_with_max_retention_reached = []
    cleanup_ctx.topics_with_expired_messages = []
    cleanup_ctx.expired_messages = []

    # What cleanup parts to run
    cleanup_ctx.clean_up_subscriptions = self.parts_enabled.subscriptions
    cleanup_ctx.clean_up_topics_without_subscribers = self.parts_enabled.topics_without_subscribers
    cleanup_ctx.clean_up_topics_with_max_retention_reached = self.parts_enabled.topics_with_max_retention_reached
    cleanup_ctx.clean_up_queues_with_expired_messages = self.parts_enabled.queues_with_expired_messages

    cleanup_ctx.now_dt = now_dt
    cleanup_ctx.now = datetime_to_sec(cleanup_ctx.now_dt)

    # Assign a list of topics found in the database
    cleanup_ctx.all_topics = self._get_topics(task_id, cleanup_ctx)

    # Note that this limit is in seconds. The default= guard keeps an installation
    # with no topics from raising ValueError out of an empty max().
    cleanup_ctx.max_limit_sub_inactivity = max(
        (item.limit_sub_inactivity for item in cleanup_ctx.all_topics),
        default=_default_pubsub.LimitSubInactivity)

    # Max. inactivity allowed can be always overridden through this environment variable
    env_max_limit_sub_inactivity = int(os.environ.get('ZATO_SCHED_DELTA') or 0)

    if env_max_limit_sub_inactivity:
        cleanup_ctx.has_env_delta = True
        cleanup_ctx.max_limit_sub_inactivity = env_max_limit_sub_inactivity
        max_last_interaction_time_dt = cleanup_ctx.now_dt - timedelta(seconds=env_max_limit_sub_inactivity)
        max_last_interaction_time = datetime_to_sec(max_last_interaction_time_dt)
        cleanup_ctx.max_last_interaction_time = max_last_interaction_time
        cleanup_ctx.max_last_interaction_time_dt = max_last_interaction_time_dt
    else:
        cleanup_ctx.has_env_delta = False

    self.logger.info('Starting cleanup tasks: %s', task_id)

    # Clean up old pub/sub objects
    cleanup_ctx = self.cleanup_pub_sub(task_id, cleanup_ctx)

    return cleanup_ctx
# ################################################################################################################################
# ################################################################################################################################
def run_cleanup(
    clean_up_subscriptions: 'bool',
    clean_up_topics_without_subscribers: 'bool',
    clean_up_topics_with_max_retention_reached: 'bool',
    clean_up_queues_with_expired_messages: 'bool',
    scheduler_path:'str',
) -> 'CleanupCtx':
    """ Builds a CleanupManager for the scheduler installed under scheduler_path
    and runs all the requested cleanup parts, returning the resulting context object.

    Raises an Exception if scheduler_path does not point to an existing directory.
    """
    # Always work with absolute paths
    scheduler_path = os.path.abspath(scheduler_path)

    # Make sure that the path exists
    # (note the closing backtick - the previous message left the backtick pair unbalanced)
    if not os.path.exists(scheduler_path):
        raise Exception(f'Scheduler path not found: `{scheduler_path}`')

    repo_location = os.path.join(scheduler_path, 'config', 'repo')
    repo_location = os.path.expanduser(repo_location)

    # Information about what cleanup parts / tasks are enabled
    parts_enabled = CleanupPartsEnabled()
    parts_enabled.subscriptions = clean_up_subscriptions
    parts_enabled.topics_without_subscribers = clean_up_topics_without_subscribers
    parts_enabled.topics_with_max_retention_reached = clean_up_topics_with_max_retention_reached
    parts_enabled.queues_with_expired_messages = clean_up_queues_with_expired_messages

    # Build and initialize object responsible for cleanup tasks ..
    cleanup_manager = CleanupManager(repo_location, parts_enabled)
    cleanup_manager.init()

    # .. if we are here, it means that we can start our work ..
    result = cleanup_manager.run()

    return result
# ################################################################################################################################
# ################################################################################################################################
# Script entry point - reads the scheduler's base directory from the environment
# and runs every part of the cleanup procedure.
if __name__ == '__main__':

    # Where our scheduler is installed
    scheduler_path = os.environ['ZATO_SCHEDULER_BASE_DIR']

    _ = run_cleanup(
        clean_up_subscriptions = True,
        clean_up_topics_without_subscribers = True,
        clean_up_topics_with_max_retention_reached = True,
        clean_up_queues_with_expired_messages = True,
        scheduler_path=scheduler_path
    )
# ################################################################################################################################
# ################################################################################################################################
| 41,231
|
Python
|
.py
| 689
| 50.240929
| 130
| 0.569622
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,430
|
setup.py
|
zatosource_zato/code/zato-cli/setup.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# flake8: noqa
from setuptools import setup, find_packages
version = '3.2'
setup(
name = 'zato-cli',
version = version,
author = 'Zato Source s.r.o.',
author_email = 'info@zato.io',
url = 'https://zato.io',
package_dir = {'':'src'},
packages = find_packages('src'),
namespace_packages = ['zato'],
zip_safe = False
)
| 531
|
Python
|
.py
| 19
| 23.631579
| 64
| 0.616302
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,431
|
__init__.py
|
zatosource_zato/code/zato-cli/test/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
| 238
|
Python
|
.py
| 6
| 38.166667
| 82
| 0.729258
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,432
|
test_service_invoke.py
|
zatosource_zato/code/zato-cli/test/zato/test_service_invoke.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
from unittest import main, TestCase
# ################################################################################################################################
# ################################################################################################################################
class CommandLineServiceInvokeTest(TestCase):
def test_wsx_services(self) -> 'None':
# Zato
from zato.common.util.cli import CommandLineServiceInvoker
service = 'zato.ping'
expected_stdout = b"{'pong': 'zato'}\n"
invoker = CommandLineServiceInvoker(expected_stdout)
invoker.invoke_and_test(service)
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':
_ = main()
# ################################################################################################################################
# ################################################################################################################################
| 1,388
|
Python
|
.py
| 23
| 56.391304
| 130
| 0.285292
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,433
|
__init__.py
|
zatosource_zato/code/zato-cli/test/zato/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2021, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
| 154
|
Python
|
.py
| 5
| 29.4
| 64
| 0.687075
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,434
|
test_openapi.py
|
zatosource_zato/code/zato-cli/test/zato/test_openapi.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import os
from http.client import BAD_REQUEST
from logging import basicConfig, getLogger, WARN
from tempfile import gettempdir
from traceback import format_exc
from unittest import main, TestCase
# openapi-spec-validator
from openapi_spec_validator import validate_spec
from openapi_spec_validator.readers import read_from_filename
# requests-openapi
import requests_openapi
# Zato
from zato.common.test.apispec_ import run_common_apispec_assertions
from zato.common.test.config import TestConfig
from zato.common.test import rand_string, rand_unicode
from zato.common.util.open_ import open_r
# ################################################################################################################################
# ################################################################################################################################
basicConfig(level=WARN, format='%(asctime)s - %(message)s')
logger = getLogger(__name__)
# ################################################################################################################################
# ################################################################################################################################
if 0:
from requests import Response
from sh import RunningCommand
from zato.common.typing_ import any_, anydict
# ################################################################################################################################
# ################################################################################################################################
class APISpecTestCase(TestCase):
    """ Tests the `zato openapi` CLI command and validates the specification it generates. """

# ################################################################################################################################

    def _warn_on_error(self, stdout:'any_', stderr:'any_', has_exception:'bool'=True) -> 'None':

        # Log the traceback only when the caller indicates that an exception is active
        if has_exception:
            logger.warning(format_exc())

        logger.warning('stdout -> %r', stdout)
        logger.warning('stderr -> %r', stderr)

# ################################################################################################################################

    def _assert_command_line_result(self, out:'RunningCommand', file_path:'str') -> 'None':
        """ Confirms that the CLI command exited cleanly and told the user where the output was saved. """

        self.assertEqual(out.exit_code, 0)

        # This is the information returned to user
        expected = 'Output saved to '+ file_path + '\n'

        # Decode what the command wrote to its standard streams
        cmd_stdout = out.stdout.decode('utf8')
        cmd_stderr = out.stderr.decode('utf8')

        # Make sure the expected information is in stdout
        if expected not in cmd_stdout:
            self._warn_on_error(cmd_stdout, cmd_stderr, has_exception=False)
            self.fail('Could not find {!r} in {!r}'.format(expected, cmd_stdout))

# ################################################################################################################################

    def _invoke_command(self, file_path:'str', require_ok:'bool'=True) -> 'RunningCommand':
        """ Runs `zato openapi` against the test server, optionally asserting that it succeeded. """

        # Zato
        from zato.common.util.cli import get_zato_sh_command

        # A callable wrapper around the zato command-line program
        command = get_zato_sh_command() # type: ignore

        # Generate an OpenAPI definition for a single test service ..
        out:'any_' = command('openapi', TestConfig.server_location,
            '--exclude', '""', '--include', 'helpers.dataclass-service',
            '--file', file_path,
            '--verbose')

        # .. if told to, make sure there was no error in stdout/stderr ..
        if require_ok:
            self._assert_command_line_result(out, file_path)

        return out

# ################################################################################################################################

    def test_openapi(self) -> 'None':

        # Run only if explicitly enabled through the environment
        if not os.environ.get('ZATO_TEST_OPENAPI'):
            return

        # sh
        from sh import ErrorReturnCode

        # Where the generated specification will be saved
        spec_dir = gettempdir()
        test_suffix = rand_unicode() + '.' + rand_string()
        spec_path = os.path.join(spec_dir, 'zato-test-' + test_suffix + '.yaml')

        try:
            # Invoke openapi to create a definition ..
            _ = self._invoke_command(spec_path)

            # .. read it back ..
            spec_file = open_r(spec_path)
            spec_data = spec_file.read()
            spec_file.close()

            # .. run our assertions ..
            run_common_apispec_assertions(self, spec_data, with_all_paths=False)

            # .. validate it once more using an external library ..
            spec_dict, _ = read_from_filename(spec_path)
            validate_spec(spec_dict)

            # .. and triple-check now by invoking the endpoint based on the spec generated ..
            client = requests_openapi.Client()
            client.load_spec_from_file(spec_path)

            # Note that we provide no request here
            response = client.post__zato_api_invoke_helpers_dataclass_service() # type: Response
            response_json = response.json() # type: anydict

            # The response and JSON result will point to a 400 error because
            # the underlying client that we use does not accept JSON request messages.
            # Yet, it still a useful test because we know that the operation did exist
            # and the server did correctly reject a call without a correct input.
            self.assertEqual(response.status_code, BAD_REQUEST)
            self.assertEqual(response_json['result'], 'Error')
            self.assertEqual(response_json['details'], 'Invalid input')

            self.assertIsInstance(response_json['cid'], str)
            self.assertGreaterEqual(len(response_json['cid']), 20)

        except ErrorReturnCode as e:
            stdout = e.stdout # type: ignore
            stdout = stdout.decode('utf8') # type: ignore
            stderr = e.stderr # type: ignore

            self._warn_on_error(stdout, stderr)
            self.fail('Caught an exception while invoking openapi')
# ################################################################################################################################
# ################################################################################################################################
# Run the test suite when this module is executed directly
if __name__ == '__main__':
    _ = main()
# ################################################################################################################################
| 6,565
|
Python
|
.py
| 117
| 48.324786
| 130
| 0.469365
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,435
|
__init__.py
|
zatosource_zato/code/zato-cli/test/zato/pubsub/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
| 154
|
Python
|
.py
| 5
| 29.4
| 64
| 0.687075
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,436
|
test_topic.py
|
zatosource_zato/code/zato-cli/test/zato/pubsub/test_topic.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
from datetime import datetime
from unittest import main
# gevent
from gevent import sleep
# Zato
from zato.cli.pubsub.topic import Config as CLITopicConfig
from zato.common.api import PUBSUB
from zato.common.test import CommandLineTestCase
# ################################################################################################################################
# ################################################################################################################################
if 0:
from zato.common.typing_ import any_, anydict
# ################################################################################################################################
# ################################################################################################################################
_default = PUBSUB.DEFAULT
# ################################################################################################################################
# ################################################################################################################################
class PubSubTopicTestCase(CommandLineTestCase):
    """ Tests the `zato pubsub create-topic`, `get-topic` and `get-topics` CLI commands. """

    def _confirm_topic_created(self, out:'anydict', expected_prefix:'str') -> 'None':
        """ Asserts that a create-topic response describes a newly created topic. """

        # We expect only for two keys to exist - id and name
        self.assertEqual(len(out), 2)

        topic_id = out['id'] # type: int
        topic_name = out['name'] # type: str

        self.assertIsInstance(topic_id, int)
        self.assertIsInstance(topic_name, str)
        self.assertTrue(topic_name.startswith(expected_prefix))
        self.assertTrue(len(topic_name) >= 30)

# ################################################################################################################################

    def test_create_topic_does_not_exist(self) -> 'None':
        """ Creating a topic without a name auto-generates one with a well-known prefix. """

        # Test data
        expected_prefix = '/auto/topic.2' # E.g. /auto/topic.2022_01_31T12_28_42_280577

        # Command to invoke ..
        cli_params = ['pubsub', 'create-topic']

        # .. get its response as a dict ..
        out = self.run_zato_cli_json_command(cli_params) # type: anydict

        # .. and confirm that the topic was created.
        self._confirm_topic_created(out, expected_prefix)

# ################################################################################################################################

    def test_create_topic_already_exists(self) -> 'None':
        """ Creating the same topic twice results in an error response the second time. """

        # Test data
        prefix = 'test.already-exists.'
        topic_name = prefix + datetime.utcnow().isoformat()

        # Command to invoke ..
        cli_params = ['pubsub', 'create-topic', '--name', topic_name]

        # Create the topic once ..
        out = self.run_zato_cli_json_command(cli_params) # type: anydict

        # .. there should be no error yet
        self._confirm_topic_created(out, prefix)

        # .. create it once more ..
        out = self.run_zato_cli_json_command(cli_params) # type: anydict

        # now, we expect for three keys to exist - cid, result, and details
        self.assertEqual(len(out), 3)

        cid = out['cid'] # type: str
        result = out['result'] # type: str
        details = out['details'] # type: str

        expected_details_message = f'A pub/sub topic `{topic_name}` already exists in this cluster'

        self.assertTrue(len(cid) >= 20)
        self.assertEqual(result, 'Error')
        self.assertEqual(details, expected_details_message)

# ################################################################################################################################

    def _run_get_topic_no_such_topic(self, command:'str') -> 'None':
        """ Looking up a topic that does not exist returns an empty list. """

        # There will be no such topic
        topic_name = '/no/such/topic/' + datetime.utcnow().isoformat()

        # Command to invoke ..
        get_cli_params = ['pubsub', command, '--name', topic_name]

        # Get the result - there should be none
        out = self.run_zato_cli_json_command(get_cli_params) # type: any_
        self.assertListEqual(out, [])

# ################################################################################################################################

    def _run_get_topic_test_one_topic(self, command:'str') -> 'None':
        """ A newly created topic can be retrieved by name and carries no publication metadata. """

        # Test data
        prefix = '/test/'
        topic_name = prefix + datetime.utcnow().isoformat()

        # Command to invoke ..
        create_cli_params = ['pubsub', 'create-topic', '--name', topic_name]

        # Create one topic ..
        create_out = self.run_zato_cli_json_command(create_cli_params) # type: anydict

        # Command to get the topic back with ..
        get_cli_params = ['pubsub', command, '--name', topic_name]

        # Now, we expect to get that one topic back
        out_get = self.run_zato_cli_json_command(get_cli_params) # type: any_

        # There must be one topic on output. Note that this was previously
        # `self.assertTrue(len(out_get), 1)` - assertTrue treats its second argument
        # as the failure message, so the length was never actually compared.
        self.assertEqual(len(out_get), 1)

        # Extract it now ..
        out = out_get[0] # type: anydict

        # .. and run our assertions.
        self.assertEqual(out['id'], create_out['id'])
        self.assertEqual(out['name'], create_out['name'])
        self.assertEqual(out['name'], topic_name)

        # This is a new topic and we do not expect any sort of publication-related information associated with it.
        self.assertEqual(out['current_depth_gd'], 0)
        self.assertIsNone(out['last_pub_time'])
        self.assertIsNone(out['last_pub_msg_id'])
        self.assertIsNone(out['last_endpoint_name'])
        self.assertIsNone(out['last_pub_server_name'])
        self.assertIsNone(out['last_pub_server_pid'])
        self.assertIsNone(out['last_pub_has_gd'])

# ################################################################################################################################

    def _run_get_topic_test_multiple_topics(self, command:'str') -> 'None':
        """ Topics sharing a prefix are all returned when looked up by that prefix. """

        # Note that the prefix itself is unique to ensure that it will not repeat
        prefix = '/test/' + datetime.utcnow().isoformat() + '/'

        # Sleep for a moment to make sure that prefix and the rest are different
        sleep(0.05)

        # Both
        topic_name0 = prefix + datetime.utcnow().isoformat()

        # Again, ensure the names are different
        sleep(0.05)

        topic_name1 = prefix + datetime.utcnow().isoformat()

        # Create both topics now
        create_cli_params0 = ['pubsub', 'create-topic', '--name', topic_name0]
        create_cli_params1 = ['pubsub', 'create-topic', '--name', topic_name1]

        create_out0 = self.run_zato_cli_json_command(create_cli_params0) # type: anydict
        create_out1 = self.run_zato_cli_json_command(create_cli_params1) # type: anydict

        # Command to get the two topics with - note that it uses the prefix to ensure that both are returned ..
        get_cli_params = ['pubsub', command, '--name', prefix]

        # Now, we expect to get that one topic back
        out_get = self.run_zato_cli_json_command(get_cli_params) # type: any_

        # Both topics must be on output. Previously `self.assertTrue(len(out_get), 2)`,
        # which never compared the length (the 2 was only the failure message).
        self.assertEqual(len(out_get), 2)

        # Extract them now ..
        out0 = out_get[0] # type: anydict
        out1 = out_get[1] # type: anydict

        # .. and run our assertions ..
        self.assertEqual(out0['id'], create_out0['id'])
        self.assertEqual(out0['name'], create_out0['name'])
        self.assertEqual(out0['name'], topic_name0)

        self.assertEqual(out1['id'], create_out1['id'])
        self.assertEqual(out1['name'], create_out1['name'])
        self.assertEqual(out1['name'], topic_name1)

        # .. these are new topics and we do not expect any sort of publication-related information associated with it.
        self.assertEqual(out0['current_depth_gd'], 0)
        self.assertIsNone(out0['last_pub_time'])
        self.assertIsNone(out0['last_pub_msg_id'])
        self.assertIsNone(out0['last_endpoint_name'])
        self.assertIsNone(out0['last_pub_server_name'])
        self.assertIsNone(out0['last_pub_server_pid'])
        self.assertIsNone(out0['last_pub_has_gd'])

        self.assertEqual(out1['current_depth_gd'], 0)
        self.assertIsNone(out1['last_pub_time'])
        self.assertIsNone(out1['last_pub_msg_id'])
        self.assertIsNone(out1['last_endpoint_name'])
        self.assertIsNone(out1['last_pub_server_name'])
        self.assertIsNone(out1['last_pub_server_pid'])
        self.assertIsNone(out1['last_pub_has_gd'])

# ################################################################################################################################

    def _run_get_topic_default_keys(self, command:'str') -> 'None':
        """ By default, only the keys from CLITopicConfig.DefaultTopicKeys are returned. """

        # Test data
        prefix = '/test/'
        topic_name = prefix + datetime.utcnow().isoformat()

        # Command to invoke ..
        create_cli_params = ['pubsub', 'create-topic', '--name', topic_name]

        # Create one topic ..
        _ = self.run_zato_cli_json_command(create_cli_params) # type: anydict

        # Command to get the topic back with ..
        get_cli_params = ['pubsub', command, '--name', topic_name]

        # Now, we expect to get that one topic back
        out_get = self.run_zato_cli_json_command(get_cli_params) # type: any_

        # There must be one topic on output (assertEqual - see the note in _run_get_topic_test_one_topic)
        self.assertEqual(len(out_get), 1)

        # Extract it now ..
        out = out_get[0] # type: anydict

        # .. and confirm that all the default keys, and only the default ones, are returned.
        default_keys = CLITopicConfig.DefaultTopicKeys

        len_out = len(out)
        len_default_keys = len(default_keys)

        self.assertEqual(len_out, len_default_keys)

        for key in default_keys:
            self.assertIn(key, out)

# ################################################################################################################################

    def _run_get_topic_all_keys(self, command:'str') -> 'None':
        """ With --keys all, the full set of topic attributes is returned, defaults included. """

        # Test data
        prefix = '/test/'
        topic_name = prefix + datetime.utcnow().isoformat()

        # Command to invoke ..
        create_cli_params = ['pubsub', 'create-topic', '--name', topic_name]

        # Create one topic ..
        create_out = self.run_zato_cli_json_command(create_cli_params) # type: anydict

        # Command to get the topic back with - note that we request for all the keys to be returned ..
        get_cli_params = ['pubsub', command, '--name', topic_name, '--keys', 'all']

        # Now, we expect to get that one topic back
        out_get = self.run_zato_cli_json_command(get_cli_params) # type: any_

        # There must be one topic on output (assertEqual - see the note in _run_get_topic_test_one_topic)
        self.assertEqual(len(out_get), 1)

        # Extract it now ..
        out = out_get[0] # type: anydict

        # .. make sure that the default keys are still returned if all of them are requested ..
        default_keys = CLITopicConfig.DefaultTopicKeys
        len_out = len(out)

        self.assertEqual(len_out, 25)

        # .. each default key is expected to be returned ..
        for key in default_keys:
            self.assertIn(key, out)

        # This, we can compare based on the response from create-topic
        self.assertEqual(out['id'], create_out['id'])
        self.assertEqual(out['name'], create_out['name'])
        self.assertEqual(out['name'], topic_name)

        # These are default keys
        self.assertEqual(out['current_depth_gd'], 0)
        self.assertIsNone(out['last_pub_time'])
        self.assertIsNone(out['last_pub_msg_id'])
        self.assertIsNone(out['last_endpoint_name'])
        self.assertIsNone(out['last_pub_server_name'])
        self.assertIsNone(out['last_pub_server_pid'])
        self.assertIsNone(out['last_pub_has_gd'])

        # And this is the rest of the keys
        self.assertEqual(out['max_depth_gd'], _default.TOPIC_MAX_DEPTH_GD)
        self.assertEqual(out['limit_retention'], _default.LimitTopicRetention)
        self.assertEqual(out['depth_check_freq'], _default.DEPTH_CHECK_FREQ)
        self.assertEqual(out['max_depth_non_gd'], _default.TOPIC_MAX_DEPTH_NON_GD)
        self.assertEqual(out['task_sync_interval'], _default.TASK_SYNC_INTERVAL)
        self.assertEqual(out['task_delivery_interval'], _default.TASK_DELIVERY_INTERVAL)
        self.assertEqual(out['pub_buffer_size_gd'], 0)
        self.assertEqual(out['limit_sub_inactivity'], _default.LimitSubInactivity)
        self.assertEqual(out['limit_message_expiry'], _default.LimitMessageExpiry)

        self.assertTrue(out['is_active'])
        self.assertFalse(out['has_gd'])
        self.assertFalse(out['is_api_sub_allowed'])
        self.assertFalse(out['is_internal'])

        self.assertIsNone(out['hook_service_id'])
        self.assertIsNone(out['hook_service_name'])
        self.assertIsNone(out['on_no_subs_pub'])

# ################################################################################################################################

    def _run_get_topic_only_selected_keys(self, command:'str') -> 'None':
        """ With an explicit --keys list, only the requested keys are returned. """

        # Test data
        prefix = '/test/'
        topic_name = prefix + datetime.utcnow().isoformat()

        # Command to invoke ..
        create_cli_params = ['pubsub', 'create-topic', '--name', topic_name]

        # Create one topic ..
        create_out = self.run_zato_cli_json_command(create_cli_params) # type: anydict

        # Command to get the topic back with - note that we request for all the keys to be returned ..
        get_cli_params = ['pubsub', command, '--name', topic_name, '--keys', 'id, is_active, is_internal']

        # Now, we expect to get that one topic back
        out_get = self.run_zato_cli_json_command(get_cli_params) # type: any_

        # There must be one topic on output (assertEqual - see the note in _run_get_topic_test_one_topic)
        self.assertEqual(len(out_get), 1)

        # Extract it now ..
        out = out_get[0] # type: anydict

        # We expect to find only three keys here
        len_out = len(out)
        self.assertEqual(len_out, 3)

        # Run our assertions name
        self.assertEqual(out['id'], create_out['id'])
        self.assertTrue(out['is_active'])
        self.assertFalse(out['is_internal'])

# ################################################################################################################################

    def test_get_topics(self) -> 'None':
        """ Runs the whole suite of lookup scenarios against the plural `get-topics` command. """

        # Pub/sub command to run
        command = 'get-topics'

        # Run all tests
        self._run_get_topic_no_such_topic(command)
        self._run_get_topic_test_one_topic(command)
        self._run_get_topic_test_multiple_topics(command)
        self._run_get_topic_default_keys(command)
        self._run_get_topic_all_keys(command)
        self._run_get_topic_only_selected_keys(command)

# ################################################################################################################################

    def test_get_topic(self) -> 'None':
        """ Runs the whole suite of lookup scenarios against the singular `get-topic` command. """

        # Pub/sub command to run
        command = 'get-topic'

        # Run all tests
        self._run_get_topic_no_such_topic(command)
        self._run_get_topic_test_one_topic(command)
        self._run_get_topic_test_multiple_topics(command)
        self._run_get_topic_default_keys(command)
        self._run_get_topic_all_keys(command)
        self._run_get_topic_only_selected_keys(command)
# ################################################################################################################################
# ################################################################################################################################
# Run the test suite when this module is executed directly
if __name__ == '__main__':
    _ = main()
# ################################################################################################################################
# ################################################################################################################################
| 16,021
|
Python
|
.py
| 279
| 49.225806
| 130
| 0.526828
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,437
|
test_enmasse.py
|
zatosource_zato/code/zato-cli/test/zato/enmasse_/test_enmasse.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import os
from datetime import datetime
from logging import basicConfig, getLogger, WARN
from tempfile import gettempdir
from unittest import main
# Bunch
from bunch import Bunch
# sh
from sh import RunningCommand
# Zato
from zato.common.test import rand_string, rand_unicode
from zato.common.test.config import TestConfig
from zato.common.test.enmasse_.base import BaseEnmasseTestCase
from zato.common.test.enmasse_._template_complex_01 import template_complex_01
from zato.common.test.enmasse_._template_complex_02 import template_complex_02
from zato.common.test.enmasse_._template_complex_03 import template_complex_03
from zato.common.test.enmasse_._template_complex_04 import template_complex_04
from zato.common.test.enmasse_._template_complex_05 import template_complex_05
from zato.common.test.enmasse_._template_simple_01 import template_simple_01
from zato.common.test.enmasse_._template_simple_02 import template_simple_02
from zato.common.test.enmasse_._template_simple_03 import template_simple_03
from zato.common.test.enmasse_._template_simple_04 import template_simple_04
from zato.common.test.enmasse_._template_simple_05 import template_simple_05
from zato.common.test.enmasse_._template_simple_06 import template_simple_06
from zato.common.util.open_ import open_w
# ################################################################################################################################
# ################################################################################################################################
basicConfig(level=WARN, format='%(asctime)s - %(message)s')
logger = getLogger(__name__)
# ################################################################################################################################
# ################################################################################################################################
class EnmasseTestCase(BaseEnmasseTestCase):
    """ Tests the enmasse CLI command against a range of simple and complex configuration templates. """

    def get_smtp_config(self) -> 'Bunch':
        """ Reads SMTP connection details for the enmasse templates from environment variables. """
        out = Bunch()

        out.name = os.environ.get('Zato_Test_Enmasse_SMTP_Name')
        out.host = os.environ.get('Zato_Test_Enmasse_SMTP_Host')
        out.username = os.environ.get('Zato_Test_Enmasse_SMTP_Username')
        out.password = os.environ.get('Zato_Test_Enmasse_SMTP_Password')
        out.ping_address = os.environ.get('Zato_Test_Enmasse_SMTP_Ping_Address')

        return out

# ################################################################################################################################

    def _cleanup(self, test_suffix:'str') -> 'None':
        """ Deletes the outgoing WSX connection that a test created, based on its unique suffix. """

        # Zato
        from zato.common.util.cli import get_zato_sh_command

        # A shortcut
        command = get_zato_sh_command()

        # Build the name of the connection to delete
        conn_name = f'test.enmasse.{test_suffix}'

        # Invoke the delete command ..
        out:'RunningCommand' = command(
            'delete-wsx-outconn',
            '--path', TestConfig.server_location,
            '--name', conn_name
        )

        # .. and make sure there was no error in stdout/stderr ..
        self._assert_command_line_result(out)

# ################################################################################################################################

    def _test_enmasse_ok(self, test_name:'str', template:'str') -> 'None':
        """ Renders a template to a config file, then runs enmasse twice - once to create
        the objects and once to edit them in place - cleaning up afterwards.
        Note that test_name is currently unused and kept for descriptive purposes only.
        """

        # sh
        from sh import ErrorReturnCode

        tmp_dir = gettempdir()
        test_suffix = rand_unicode() + '.' + rand_string()

        file_name = 'zato-enmasse-' + test_suffix + '.yaml'
        config_path = os.path.join(tmp_dir, file_name)

        smtp_config = self.get_smtp_config()

        data = template.format(test_suffix=test_suffix, smtp_config=smtp_config)

        f = open_w(config_path)
        _ = f.write(data)
        f.close()

        try:
            # Invoke enmasse to create objects ..
            _ = self.invoke_enmasse(config_path)

            # .. now invoke it again to edit them in place.
            _ = self.invoke_enmasse(config_path)

        except ErrorReturnCode as e:

            # Decode both streams - previously stderr was passed on as raw bytes
            # despite being annotated as 'str'.
            stdout = e.stdout.decode('utf8') # type: str
            stderr = e.stderr.decode('utf8') # type: str

            self._warn_on_error(stdout, stderr)
            self.fail(f'Caught an exception while invoking enmasse; stdout -> {stdout}')

        finally:
            self._cleanup(test_suffix)

# ################################################################################################################################

    def test_enmasse_complex_ok_01(self) -> 'None':
        self._test_enmasse_ok('complex_ok_01', template_complex_01)

# ################################################################################################################################

    def test_enmasse_complex_ok_02(self) -> 'None':
        self._test_enmasse_ok('complex_ok_02', template_complex_02)

# ################################################################################################################################

    def test_enmasse_complex_ok_03(self) -> 'None':
        self._test_enmasse_ok('complex_ok_03', template_complex_03)

# ################################################################################################################################

    def test_enmasse_complex_ok_04(self) -> 'None':
        self._test_enmasse_ok('complex_ok_04', template_complex_04)

# ################################################################################################################################

    def test_enmasse_complex_ok_05(self) -> 'None':
        self._test_enmasse_ok('complex_ok_05', template_complex_05)

# ################################################################################################################################

    def test_enmasse_simple_ok_01(self) -> 'None':
        self._test_enmasse_ok('simple_ok_01', template_simple_01)

# ################################################################################################################################

    def test_enmasse_simple_ok_02(self) -> 'None':
        self._test_enmasse_ok('simple_ok_02', template_simple_02)

# ################################################################################################################################

    def test_enmasse_simple_ok_03(self) -> 'None':
        self._test_enmasse_ok('simple_ok_03', template_simple_03)

# ################################################################################################################################

    def test_enmasse_simple_ok_04(self) -> 'None':
        self._test_enmasse_ok('simple_ok_04', template_simple_04)

# ################################################################################################################################

    def test_enmasse_simple_ok_05(self) -> 'None':
        self._test_enmasse_ok('simple_ok_05', template_simple_05)

# ################################################################################################################################

    def test_enmasse_simple_ok_06(self) -> 'None':
        self._test_enmasse_ok('simple_ok_06', template_simple_06)

# ################################################################################################################################

    # NOTE(review): the method name contains a typo ('exit' vs. 'exist') - it is kept
    # as is to preserve the public test name that may be referenced elsewhere.
    def test_enmasse_service_does_not_exit(self) -> 'None':
        """ Enmasse should block for the configured wait time when a referenced service is missing. """

        # We are going to wait that many seconds for enmasse to complete
        start = datetime.utcnow()
        missing_wait_time = 3

        tmp_dir = gettempdir()
        test_suffix = rand_unicode() + '.' + rand_string()

        file_name = 'zato-enmasse-' + test_suffix + '.yaml'
        config_path = os.path.join(tmp_dir, file_name)

        smtp_config = self.get_smtp_config()

        # Note that we replace pub.zato.ping with a service that certainly does not exist
        data = template_complex_01.replace('pub.zato.ping', 'zato-enmasse-service-does-not-exit')
        data = data.format(test_suffix=test_suffix, smtp_config=smtp_config)

        f = open_w(config_path)
        _ = f.write(data)
        f.close()

        # Invoke enmasse to create objects (which will block for missing_wait_time seconds) ..
        _ = self.invoke_enmasse(config_path, require_ok=False, missing_wait_time=missing_wait_time)

        # .. now, make sure that we actually had to wait that many seconds ..
        now = datetime.utcnow()
        delta = now - start

        # .. the whole test should have taken longer than what we waited for in enmasse .
        if not delta.total_seconds() > missing_wait_time:
            msg = f'Total time should be bigger than {missing_wait_time} (missing_wait_time) instead of {delta}'
            self.fail(msg)
# ################################################################################################################################
# ################################################################################################################################
# Run the test suite when this module is executed directly
if __name__ == '__main__':
    _ = main()
# ################################################################################################################################
| 9,358
|
Python
|
.py
| 151
| 55.503311
| 130
| 0.467331
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,438
|
__init__.py
|
zatosource_zato/code/zato-cli/src/__init__.py
|
# -*- coding: utf-8 -*-

# stdlib
from pkgutil import extend_path

# Extend __path__ so that this package can be split across multiple directories
# (pkgutil-style namespace package)
__path__ = extend_path(__path__, __name__)
| 100
|
Python
|
.py
| 3
| 32
| 42
| 0.614583
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,439
|
__init__.py
|
zatosource_zato/code/zato-cli/src/zato/__init__.py
|
# -*- coding: utf-8 -*-

"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io

Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""

# stdlib
from pkgutil import extend_path

# Declare a namespace package in both the pkgutil and setuptools styles
# so that multiple distributions can install modules under the zato package
__path__ = extend_path(__path__, __name__)
__import__('pkg_resources').declare_namespace(__name__)
| 287
|
Python
|
.py
| 8
| 34.375
| 64
| 0.683636
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,440
|
ca_create_server.py
|
zatosource_zato/code/zato-cli/src/zato/cli/ca_create_server.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# Zato
from zato.cli import CACreateCommand, common_ca_create_opts
class Create(CACreateCommand):
    """ Creates crypto material for a Zato server.
    """
    # Command-specific options, followed by the ones shared by all CA create commands
    opts = [
        {'name':'cluster_name', 'help':'Cluster name'},
        {'name':'server_name', 'help':'Server name'},
        {'name':'--organizational-unit', 'help':'Organizational unit name (defaults to cluster_name:server_name)'},
    ] + common_ca_create_opts

    def get_file_prefix(self, file_args):
        # File names combine the cluster and server names
        return '{}-{}'.format(file_args['cluster_name'], file_args['server_name'])

    def get_organizational_unit(self, args):
        # The default OU is derived from the cluster and server names
        return ':'.join((args.cluster_name, args.server_name))

    def execute(self, args, show_output=True):
        # Generate material using the combined client/server extension profile
        self._execute(args, 'v3_client_server', show_output)
| 998
|
Python
|
.py
| 23
| 38.565217
| 115
| 0.67425
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,441
|
component_version.py
|
zatosource_zato/code/zato-cli/src/zato/cli/component_version.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# Zato
from zato.cli import ZatoCommand
from zato.common.api import ZATO_INFO_FILE
from zato.common.util.open_ import open_r
class ComponentVersion(ZatoCommand):

    # The command requires a Zato component info file under the given path
    file_needed = ZATO_INFO_FILE

    def execute(self, args):

        # stdlib
        import os

        # Zato
        from zato.common.json_internal import load

        # Read the component's info file and report the version it contains
        info_path = os.path.join(args.path, self.file_needed)
        info = load(open_r(info_path)) # noqa
        self.logger.info(info['version'])
| 689
|
Python
|
.py
| 19
| 31.842105
| 82
| 0.706505
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,442
|
ca_create_lb_agent.py
|
zatosource_zato/code/zato-cli/src/zato/cli/ca_create_lb_agent.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# Zato
from zato.cli import CACreateCommand, common_ca_create_opts
class Create(CACreateCommand):
    """ Creates crypto material for a Zato load-balancer agent
    """
    opts = [
        {'name':'organizational-unit', 'help':'Organizational unit name'},
    ]
    opts += common_ca_create_opts

    def get_file_prefix(self, file_args):
        # The load-balancer agent always uses a fixed file-name prefix
        return 'lb-agent'

    def get_organizational_unit(self, args):
        # A fixed OU, regardless of command-line arguments
        return 'zato-lb-agent'

    def execute(self, args, show_output=True):
        # 'v3_server' names the certificate extension profile to generate the material with
        # (presumably handled by the parent class - confirm against CACreateCommand._execute)
        self._execute(args, 'v3_server', show_output)
| 785
|
Python
|
.py
| 21
| 32.952381
| 82
| 0.690476
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,443
|
util.py
|
zatosource_zato/code/zato-cli/src/zato/cli/util.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# ################################################################################################################################
# ################################################################################################################################
if 0:
from argparse import Namespace
from zato.common.typing_ import any_, anydict
Namespace = Namespace
# ################################################################################################################################
def get_totp_info_from_args(args, default_key_label=None): # type: ignore
    """ Returns a key and its label extracted from command line arguments
    or auto-generates a new pair if they are missing in args.
    """
    # PyOTP
    import pyotp

    # Zato
    from zato.common.crypto.totp_ import TOTPManager
    from zato.common.api import TOTP

    # Fall back on the stock label if the caller did not provide one.
    default_key_label = default_key_label or TOTP.default_label # type: ignore

    if args.key:
        # Validate the key given on input - .now() raises if it cannot be used,
        # which reports the error to the caller.
        _ = pyotp.TOTP(args.key).now()

        # The key turned out to be valid, so we can keep it.
        key = args.key # type: ignore
    else:
        # No key on input - generate a fresh one.
        key = TOTPManager.generate_totp_key()

    label = args.key_label or default_key_label
    return key, label # type: ignore
# ################################################################################################################################
def run_cli_command(command_class:'any_', config:'anydict', path:'any_') -> 'None':
    """ Builds a Bunch of common CLI arguments, overlays the given config on it
    and runs an instance of command_class against it.
    """
    # stdlib
    import os

    # Bunch
    from bunch import Bunch

    cli_args = Bunch()
    cli_args.verbose = True
    cli_args.store_log = False
    cli_args.store_config = False

    # Fall back on the environment variable if no explicit path was given.
    cli_args.path = path or os.environ['ZATO_SERVER_BASE_DIR']
    cli_args.password = None
    cli_args.skip_stdout = False

    # Caller-provided configuration overrides the defaults above.
    cli_args.update(config)

    command_class(cli_args).execute(cli_args)
# ################################################################################################################################
| 2,254
|
Python
|
.py
| 49
| 41.244898
| 130
| 0.477803
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,444
|
hl7_.py
|
zatosource_zato/code/zato-cli/src/zato/cli/hl7_.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import os
import sys
# Zato
from zato.cli import ZatoCommand
from zato.hl7.mllp.client import send_data as send_mllp_data
# ################################################################################################################################
class MLLPSend(ZatoCommand):
    """ Sends an HL7 v2 file to an MLLP endpoint.
    """
    opts = [
        {'name':'--file', 'help':'File with HL7 v2 data to send', 'required':True},
        {'name':'--address', 'help':'Address of an MLLP server', 'required':True},
    ]

# ################################################################################################################################

    def execute(self, args):
        """ Validates the input parameters, reads the input file as bytes
        and sends its contents to the remote MLLP server, logging the response.
        """
        # Both parameters are required - exit with an error code if either is missing.
        for name in ('file', 'address'):
            value = getattr(args, name, None)
            if not value:
                self.logger.warning('Missing required parameter --%s', name)
                sys.exit(self.SYS_ERROR.PARAMETER_MISSING)

        # Resolve the file path relative to the directory the command was invoked from.
        file_path = os.path.join(self.original_dir, args.file)
        file_path = os.path.abspath(file_path)

        if not os.path.exists(file_path):
            self.logger.warning('File path not found `%s`', file_path)
            sys.exit(self.SYS_ERROR.FILE_MISSING)

        if not os.path.isfile(file_path):
            self.logger.warning('Path is not a file `%s`', file_path)
            sys.exit(self.SYS_ERROR.PATH_NOT_A_FILE)

        # Now, read the file as bytes, using a context manager so the handle
        # is closed deterministically (it was previously left to the GC) ..
        with open(file_path, 'rb') as f:
            data = f.read()

        # .. send it to the remote end ..
        response = send_mllp_data(args.address, data) # type: bytes

        # .. and print the response back.
        self.logger.info('Response: `%s`', response)
# ################################################################################################################################
| 2,049
|
Python
|
.py
| 42
| 41.952381
| 130
| 0.50201
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,445
|
quickstart.py
|
zatosource_zato/code/zato-cli/src/zato/cli/quickstart.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2024, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import os
from copy import deepcopy
# Zato
from zato.cli import common_odb_opts, common_scheduler_server_api_client_opts, common_scheduler_server_address_opts, ZatoCommand
from zato.common.typing_ import cast_
from zato.common.util.config import get_scheduler_api_client_for_server_password, get_scheduler_api_client_for_server_username
from zato.common.util.platform_ import is_windows, is_non_windows
from zato.common.util.open_ import open_w
# ################################################################################################################################
# ################################################################################################################################
if 0:
from bunch import Bunch
from zato.common.typing_ import any_
# ################################################################################################################################
# ################################################################################################################################
DEFAULT_NO_SERVERS=1
vscode_launch_json = """
{
"version": "0.2.0",
"configurations": [
{
"name": "Remote Zato Main",
"type": "python",
"request": "launch",
"program": "/opt/zato/current/zato-server/src/zato/server/main.py",
"console": "integratedTerminal",
"justMyCode": false,
"env": {
"GEVENT_SUPPORT":"true",
"ZATO_SERVER_BASE_DIR": "/opt/zato/env/qs-1/server1",
"ZATO_SCHEDULER_BASE_DIR": "/opt/zato/env/qs-1/scheduler"
}
}
]
}
"""
vscode_settings_json = """
{
"python.defaultInterpreterPath": "/opt/zato/current/bin/python"
}
"""
# ################################################################################################################################
# ################################################################################################################################
windows_qs_start_template = """
@echo off
set zato_cmd=zato
set env_dir="{env_dir}"
start /b %zato_cmd% start %env_dir%\\server1
start /b %zato_cmd% start %env_dir%\\web-admin
start /b %zato_cmd% start %env_dir%\\scheduler
echo:
echo *** Starting Zato in %env_dir% ***
echo:
""".strip() # noqa: W605
# ################################################################################################################################
# ################################################################################################################################
# Taken from http://stackoverflow.com/a/246128
script_dir = """SOURCE="${BASH_SOURCE[0]}"
BASE_DIR="$( dirname "$SOURCE" )"
while [ -h "$SOURCE" ]
do
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$BASE_DIR/$SOURCE"
BASE_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
done
BASE_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
"""
# ################################################################################################################################
# ################################################################################################################################
check_config_template = """$ZATO_BIN check-config $BASE_DIR/{server_name}"""
# ################################################################################################################################
# ################################################################################################################################
start_servers_template = """
$ZATO_BIN start $BASE_DIR/{server_name} --verbose --env-file /opt/hot-deploy/enmasse/env.ini
$ZATO_BIN wait --path $BASE_DIR/{server_name}
echo [{step_number}/$STEPS] {server_name} started
"""
# ################################################################################################################################
# ################################################################################################################################
zato_qs_start_head_template = """#!/bin/bash
set -e
export ZATO_CLI_DONT_SHOW_OUTPUT=1
{preamble_script}
{script_dir}
ZATO_BIN={zato_bin}
STEPS={start_steps}
CLUSTER={cluster_name}
{cluster_starting}
echo Checking configuration
"""
# ################################################################################################################################
# ################################################################################################################################
zato_qs_start_body_template = """
{check_config}
{check_config_extra}
# Make sure TCP ports are available
echo [{check_config_step_number}/$STEPS] Checking TCP ports availability
ZATO_BIN_PATH=`which zato`
ZATO_BIN_DIR=`python -c "import os; print(os.path.dirname('$ZATO_BIN_PATH'))"`
UTIL_DIR=`python -c "import os; print(os.path.join('$ZATO_BIN_DIR', '..', 'util'))"`
$ZATO_BIN_DIR/py $UTIL_DIR/check_tcp_ports.py {check_tcp_ports_suffix}
# .. load-balancer ..
{start_lb}
# .. scheduler ..
{start_scheduler}
# .. servers ..
{start_servers}
"""
# ################################################################################################################################
# ################################################################################################################################
zato_qs_check_config_extra = """
echo [1/$STEPS] Redis connection OK
echo [2/$STEPS] SQL ODB connection OK
"""
# ################################################################################################################################
# ################################################################################################################################
zato_qs_start_lb_windows = 'echo "[4/%STEPS%] (Skipped starting load balancer)"'
zato_qs_start_lb_non_windows = """
# Start the load balancer first ..
$ZATO_BIN start $BASE_DIR/load-balancer --verbose
echo [4/$STEPS] Load-balancer started
"""
# ################################################################################################################################
# ################################################################################################################################
zato_qs_start_dashboard = """
# .. Dashboard comes as the last one because it may ask Django-related questions.
$ZATO_BIN start $BASE_DIR/web-admin --verbose
echo [$STEPS/$STEPS] Dashboard started
"""
# ################################################################################################################################
# ################################################################################################################################
zato_qs_cluster_starting = """
echo Starting Zato cluster $CLUSTER
"""
zato_qs_cluster_started = """
echo Zato cluster $CLUSTER started
"""
zato_qs_cluster_stopping = """
echo Stopping Zato cluster $CLUSTER
"""
zato_qs_cluster_stopped = """
echo Zato cluster $CLUSTER stopped
"""
# ################################################################################################################################
# ################################################################################################################################
zato_qs_start_tail_template = """
{start_dashboard}
cd $BASE_DIR
{cluster_started}
echo Visit https://zato.io/support for more information and support options
exit 0
"""
stop_servers_template = """
$ZATO_BIN stop $BASE_DIR/{server_name}
echo [{step_number}/$STEPS] {server_name} stopped
"""
# ################################################################################################################################
# ################################################################################################################################
zato_qs_start_scheduler = """
$ZATO_BIN start $BASE_DIR/scheduler --verbose
echo [{scheduler_step_count}/$STEPS] Scheduler started
"""
zato_qs_stop_scheduler = """
$ZATO_BIN stop $BASE_DIR/scheduler
echo [$STEPS/$STEPS] Scheduler stopped
"""
# ################################################################################################################################
# ################################################################################################################################
zato_qs_stop_template = """#!/bin/bash
export ZATO_CLI_DONT_SHOW_OUTPUT=1
{script_dir}
if [[ "$1" = "--delete-pidfiles" ]]
then
echo Deleting PID files
rm -f $BASE_DIR/load-balancer/pidfile
rm -f $BASE_DIR/load-balancer/zato-lb-agent.pid
rm -f $BASE_DIR/server1/pidfile
rm -f $BASE_DIR/server2/pidfile
rm -f $BASE_DIR/web-admin/pidfile
rm -f $BASE_DIR/scheduler/pidfile
echo PID files deleted
fi
ZATO_BIN={zato_bin}
STEPS={stop_steps}
CLUSTER={cluster_name}
{cluster_stopping}
# Start the load balancer first ..
$ZATO_BIN stop $BASE_DIR/load-balancer
echo [1/$STEPS] Load-balancer stopped
# .. servers ..
{stop_servers}
$ZATO_BIN stop $BASE_DIR/web-admin
echo [{web_admin_step_count}/$STEPS] Dashboard stopped
# .. scheduler ..
{stop_scheduler}
cd $BASE_DIR
{cluster_stopped}
"""
# ################################################################################################################################
# ################################################################################################################################
zato_qs_restart = """#!/bin/bash
{script_dir}
cd $BASE_DIR
$BASE_DIR/zato-qs-stop.sh
$BASE_DIR/zato-qs-start.sh
"""
# ################################################################################################################################
# ################################################################################################################################
class CryptoMaterialLocation:
    """ Locates and remembers location of various crypto material for Zato components.
    """
    def __init__(self, ca_dir:'str', component_pattern:'str') -> 'None':
        self.ca_dir = ca_dir
        self.component_pattern = component_pattern

        # The CA certificate always lives in a fixed location under ca_dir.
        self.ca_certs_path = os.path.join(self.ca_dir, 'ca-material', 'ca-cert.pem')

        # Filled in by self.locate below, one attribute per kind of material.
        self.cert_path = None
        self.pub_path = None
        self.priv_path = None

        self.locate()

    def locate(self) -> 'None':
        # For each kind of material, scan its out-* directory for a file
        # whose path contains '<component_pattern>-<kind>' and remember it.
        for kind in 'cert', 'priv', 'pub':
            out_dir = os.path.join(self.ca_dir, 'out-{}'.format(kind))
            needle = '{}-{}'.format(self.component_pattern, kind)
            for entry in os.listdir(out_dir):
                candidate = os.path.join(out_dir, entry)
                if needle in candidate:
                    setattr(self, '{}_path'.format(kind), candidate)
# ################################################################################################################################
class Create(ZatoCommand):
    """ Quickly creates a working cluster
    """
    # Presumably enforced by the base command before execution - the target
    # directory must be empty; confirm against ZatoCommand.
    needs_empty_dir = True

    # Start from the shared ODB connection options and extend them
    # with quickstart-specific ones.
    opts:'any_' = deepcopy(common_odb_opts)
    opts.append({'name':'--cluster-name', 'help':'Name to be given to the new cluster'})
    opts.append({'name':'--servers', 'help':'How many servers to create', 'default':1}) # type: ignore
    opts.append({'name':'--threads-per-server', 'help':'How many main threads to use per server', 'default':1}) # type: ignore
    opts.append({'name':'--secret-key', 'help':'Main secret key the server(s) will use'})
    opts.append({'name':'--jwt-secret-key', 'help':'Secret key for JWT (JSON Web Tokens)'})
    opts.append({'name':'--no-scheduler', 'help':'Create all the components but not a scheduler', 'action':'store_true'})
    opts.append({'name':'--scheduler-only', 'help':'Only create a scheduler, without other components', 'action':'store_true'})
    opts.append({'name':'--preamble-script', 'help':'Extra script to add to startup scripts'})

    # Shared scheduler/server address and API-client credential options.
    opts += deepcopy(common_scheduler_server_address_opts)
    opts += deepcopy(common_scheduler_server_api_client_opts)
def _bunch_from_args(self, args:'any_', admin_invoke_password:'str', cluster_name:'str'='') -> 'Bunch':
    """ Returns a Bunch of common arguments, copied from the CLI args given on input,
    in the shape the individual create-* commands invoked by quickstart expect.
    """
    # Bunch
    from bunch import Bunch

    out = Bunch()
    out.path = args.path
    out.verbose = args.verbose
    out.store_log = args.store_log
    out.store_config = args.store_config
    out.odb_type = args.odb_type
    out.odb_host = args.odb_host
    out.odb_port = args.odb_port
    out.odb_user = args.odb_user
    out.odb_db_name = args.odb_db_name
    out.kvdb_host = self.get_arg('kvdb_host')
    out.kvdb_port = self.get_arg('kvdb_port')

    # These may be legitimately absent depending on the ODB type chosen.
    out.sqlite_path = getattr(args, 'sqlite_path', None)
    out.postgresql_schema = getattr(args, 'postgresql_schema', None)
    out.odb_password = args.odb_password
    out.kvdb_password = self.get_arg('kvdb_password')
    out.cluster_name = cluster_name
    out.scheduler_name = 'scheduler1'
    out.scheduler_address_for_server = getattr(args, 'scheduler_address_for_server', '')
    out.server_address_for_scheduler = getattr(args, 'server_address_for_scheduler', '')

    # The same password is stored under several names - presumably because
    # different downstream commands look it up under different keys;
    # confirm against the create-* commands before changing any of them.
    out['admin-invoke-password'] = admin_invoke_password
    out.admin_invoke_password = admin_invoke_password
    out.server_password = admin_invoke_password
    out.server_api_client_for_scheduler_password = admin_invoke_password

    return out
# ################################################################################################################################
def allow_empty_secrets(self) -> 'bool':
    """ Quickstart may always proceed with empty secrets on input.
    """
    return True
# ################################################################################################################################
def _set_pubsub_server(self, args:'any_', server_id:'int', cluster_name:'str', topic_name:'str') -> 'None':
    """ Makes the given server the delivery server for all pub/sub subscriptions
    to topic_name in the named cluster.
    """
    # Zato
    from zato.common.odb.model import Cluster, PubSubSubscription, PubSubTopic

    engine = self._get_engine(args) # type: ignore
    session = self._get_session(engine) # type: ignore

    # All subscriptions to the given topic in the given cluster.
    sub_list:'any_' = session.query(PubSubSubscription).\
        filter(PubSubTopic.id==PubSubSubscription.topic_id).\
        filter(PubSubTopic.name==topic_name).\
        filter(PubSubTopic.cluster_id==Cluster.id).\
        filter(Cluster.name==cluster_name).\
        all()

    for sub in sub_list: # type: ignore

        # Set publishing server for that subscription
        sub.server_id = server_id
        session.add(sub)

    session.commit()
    # NOTE(review): the session is not closed here - presumably acceptable
    # for a one-shot CLI process; confirm.
# ################################################################################################################################
def execute(self, args:'any_') -> 'None':
    """ Quickly creates Zato components
    1) CA and crypto material
    2) ODB
    3) ODB initial data
    4) Servers
    5) Load-balancer
    6) Dashboard
    7) Scheduler
    8) Scripts
    """
    # stdlib
    import os
    import random
    import stat
    from collections import OrderedDict
    from contextlib import closing
    from copy import deepcopy
    from itertools import count
    from uuid import uuid4

    # Cryptography
    from cryptography.fernet import Fernet

    # These are shared by all servers
    secret_key = getattr(args, 'secret_key', None) or Fernet.generate_key()
    jwt_secret = getattr(args, 'jwt_secret_key', None) or Fernet.generate_key()

    # Zato
    from zato.cli import ca_create_ca, ca_create_lb_agent, ca_create_scheduler, ca_create_server, \
         ca_create_web_admin, create_cluster, create_lb, create_odb, create_scheduler, create_server, create_web_admin
    from zato.common.crypto.api import CryptoManager
    from zato.common.defaults import http_plain_server_port
    from zato.common.odb.model import Cluster
    from zato.common.util.api import get_engine, get_session

    random.seed()

    # Possibly used by startup scripts
    preamble_script = self.get_arg('preamble_script') or '# No preamble script'

    # We handle both ..
    admin_invoke_password = self.get_arg('admin_invoke_password')
    server_api_client_for_scheduler_password = self.get_arg('server_api_client_for_scheduler_password')

    # .. but we prefer the latter ..
    admin_invoke_password = admin_invoke_password or server_api_client_for_scheduler_password

    # .. and we build it ourselves if it is not given.
    admin_invoke_password = admin_invoke_password or 'admin.invoke.' + uuid4().hex

    scheduler_api_client_for_server_auth_required = getattr(args, 'scheduler_api_client_for_server_auth_required', None)
    scheduler_api_client_for_server_username = get_scheduler_api_client_for_server_username(args)
    scheduler_api_client_for_server_password = get_scheduler_api_client_for_server_password(
        args,
        cast_('CryptoManager', None),
        initial_password=cast_('str', CryptoManager.generate_password(to_str=True)),
        needs_encrypt=False
    )

    # Make sure we always work with absolute paths
    args_path = os.path.abspath(args.path)

    if args.odb_type == 'sqlite':
        args.sqlite_path = os.path.join(args_path, 'zato.db')

    next_step = count(1)
    next_port = count(http_plain_server_port)

    # NOTE(review): .zfill(7) is a no-op here - the formatted string is already
    # longer than seven characters; left as-is so generated names stay stable.
    cluster_name = getattr(args, 'cluster_name', None) or 'quickstart-{}'.format(random.getrandbits(20)).zfill(7)
    servers = int(getattr(args, 'servers', 0) or DEFAULT_NO_SERVERS)

    server_names = OrderedDict() # type: ignore
    for idx in range(1, servers+1):
        server_names['{}'.format(idx)] = 'server{}'.format(idx)

    try:
        threads_per_server = int(args.threads_per_server)
    except Exception:
        threads_per_server = 1

    lb_host = '127.0.0.1'
    lb_port = 11223
    lb_agent_port = 20151

    # This could've been set to True by user in the command-line so we'd want
    # to unset it so that individual commands quickstart invokes don't attempt
    # to store their own configs.
    args.store_config = False

    # We use TLS only on systems other than Windows
    has_tls = is_non_windows

    # This will be True if the scheduler does not have to be created
    no_scheduler:'bool' = self.get_arg('no_scheduler', False)

    # This will be True if we create only the scheduler, without any other components
    scheduler_only:'bool' = self.get_arg('scheduler_only', False)

    # Shortcuts for later use
    should_create_scheduler = not no_scheduler
    create_components_other_than_scheduler = not scheduler_only

    # Under Windows, even if the load balancer is created, we do not log this information.
    total_non_servers_steps = 5 if is_windows else 7
    total_steps = total_non_servers_steps + servers

    # Take the scheduler into account
    if no_scheduler:
        total_steps -= 1
    elif scheduler_only:
        # 1 for servers
        # 1 for Dashboard
        # 1 for the load-balancer
        total_steps -= 3

# ################################################################################################################################

    #
    # 1) CA
    #
    if has_tls:

        ca_path = os.path.join(args_path, 'ca')
        os.mkdir(ca_path)

        ca_args = self._bunch_from_args(args, admin_invoke_password, cluster_name)
        ca_args.path = ca_path

        ca_create_ca.Create(ca_args).execute(ca_args, False)
        ca_create_lb_agent.Create(ca_args).execute(ca_args, False)
        ca_create_web_admin.Create(ca_args).execute(ca_args, False)
        ca_create_scheduler.Create(ca_args).execute(ca_args, False)

        server_crypto_loc = {}
        for name in server_names: # type: ignore
            ca_args_server = deepcopy(ca_args)
            ca_args_server.server_name = server_names[name]
            ca_create_server.Create(ca_args_server).execute(ca_args_server, False)
            server_crypto_loc[name] = CryptoMaterialLocation(ca_path, '{}-{}'.format(cluster_name, server_names[name]))

        lb_agent_crypto_loc = CryptoMaterialLocation(ca_path, 'lb-agent')
        web_admin_crypto_loc = CryptoMaterialLocation(ca_path, 'web-admin')
        scheduler_crypto_loc = CryptoMaterialLocation(ca_path, 'scheduler1')

        self.logger.info('[{}/{}] Certificate authority created'.format(next(next_step), total_steps))

# ################################################################################################################################

    #
    # 2) ODB
    #
    if create_odb.Create(args).execute(args, False) == self.SYS_ERROR.ODB_EXISTS:
        self.logger.info('[{}/{}] ODB schema already exists'.format(next(next_step), total_steps))
    else:
        self.logger.info('[{}/{}] ODB schema created'.format(next(next_step), total_steps))

# ################################################################################################################################

    #
    # 3) ODB initial data
    #
    create_cluster_args = self._bunch_from_args(args, admin_invoke_password, cluster_name)
    create_cluster_args.lb_host = lb_host
    create_cluster_args.lb_port = lb_port
    create_cluster_args.lb_agent_port = lb_agent_port
    create_cluster_args.secret_key = secret_key
    create_cluster.Create(create_cluster_args).execute(create_cluster_args, False) # type: ignore

    self.logger.info('[{}/{}] ODB initial data created'.format(next(next_step), total_steps))

# ################################################################################################################################

    #
    # 4) servers
    #

    # This is populated below in order for the scheduler to use it.
    first_server_path = ''

    if create_components_other_than_scheduler:

        for idx, name in enumerate(server_names): # type: ignore

            server_path = os.path.join(args_path, server_names[name])
            os.mkdir(server_path)

            create_server_args = self._bunch_from_args(args, admin_invoke_password, cluster_name)
            create_server_args.server_name = server_names[name]
            create_server_args.path = server_path
            create_server_args.jwt_secret = jwt_secret
            create_server_args.secret_key = secret_key
            create_server_args.threads = threads_per_server
            create_server_args.scheduler_api_client_for_server_auth_required = scheduler_api_client_for_server_auth_required
            create_server_args.scheduler_api_client_for_server_username = scheduler_api_client_for_server_username
            create_server_args.scheduler_api_client_for_server_password = scheduler_api_client_for_server_password

            if has_tls:
                create_server_args.cert_path = server_crypto_loc[name].cert_path # type: ignore
                create_server_args.pub_key_path = server_crypto_loc[name].pub_path # type: ignore
                create_server_args.priv_key_path = server_crypto_loc[name].priv_path # type: ignore
                create_server_args.ca_certs_path = server_crypto_loc[name].ca_certs_path # type: ignore

            server_id:'int' = create_server.Create(
                create_server_args).execute(create_server_args, next(next_port), False, True) # type: ignore

            # We special case the first server ..
            if idx == 0:

                # .. make it a delivery server for sample pub/sub topics ..
                self._set_pubsub_server(args, server_id, cluster_name, '/zato/demo/sample') # type: ignore

                # .. make the scheduler use it.
                first_server_path = server_path

            self.logger.info('[{}/{}] server{} created'.format(next(next_step), total_steps, name))

# ################################################################################################################################

    #
    # 5) load-balancer
    #
    if create_components_other_than_scheduler:

        lb_path = os.path.join(args_path, 'load-balancer')
        os.mkdir(lb_path)

        create_lb_args = self._bunch_from_args(args, admin_invoke_password, cluster_name)
        create_lb_args.path = lb_path

        if has_tls:
            create_lb_args.cert_path = lb_agent_crypto_loc.cert_path # type: ignore
            create_lb_args.pub_key_path = lb_agent_crypto_loc.pub_path # type: ignore
            create_lb_args.priv_key_path = lb_agent_crypto_loc.priv_path # type: ignore
            create_lb_args.ca_certs_path = lb_agent_crypto_loc.ca_certs_path # type: ignore

        # Need to substract 1 because we've already called .next() twice
        # when creating servers above.
        servers_port = next(next_port) - 1

        create_lb.Create(create_lb_args).execute(create_lb_args, True, servers_port, False)

        # Under Windows, we create the directory for the load-balancer
        # but we do not advertise it because we do not start it.
        if is_non_windows:
            self.logger.info('[{}/{}] Load-balancer created'.format(next(next_step), total_steps))

# ################################################################################################################################

    #
    # 6) Dashboard
    #
    if create_components_other_than_scheduler:

        web_admin_path = os.path.join(args_path, 'web-admin')
        os.mkdir(web_admin_path)

        create_web_admin_args = self._bunch_from_args(args, admin_invoke_password, cluster_name)
        create_web_admin_args.path = web_admin_path
        create_web_admin_args.admin_invoke_password = admin_invoke_password

        if has_tls:
            create_web_admin_args.cert_path = web_admin_crypto_loc.cert_path # type: ignore
            create_web_admin_args.pub_key_path = web_admin_crypto_loc.pub_path # type: ignore
            create_web_admin_args.priv_key_path = web_admin_crypto_loc.priv_path # type: ignore
            create_web_admin_args.ca_certs_path = web_admin_crypto_loc.ca_certs_path # type: ignore

        web_admin_password:'bytes' = CryptoManager.generate_password() # type: ignore
        admin_created = create_web_admin.Create(create_web_admin_args).execute(
            create_web_admin_args, False, web_admin_password, True)

        # Need to reset the logger here because executing the create_web_admin command
        # loads the Dashboard's logger which doesn't like that of ours.
        self.reset_logger(args, True)
        self.logger.info('[{}/{}] Dashboard created'.format(next(next_step), total_steps))
    else:
        admin_created = False

# ################################################################################################################################

    #
    # 7) Scheduler
    #

    # Creation of a scheduler is optional
    if should_create_scheduler:

        scheduler_path = os.path.join(args_path, 'scheduler')
        os.mkdir(scheduler_path)

        session = get_session(get_engine(args)) # type: ignore

        with closing(session):
            cluster_id:'int' = session.query(Cluster.id).\
                filter(Cluster.name==cluster_name).\
                one()[0]

        create_scheduler_args = self._bunch_from_args(args, admin_invoke_password, cluster_name)
        create_scheduler_args.path = scheduler_path
        create_scheduler_args.cluster_id = cluster_id
        create_scheduler_args.server_path = first_server_path
        create_scheduler_args.scheduler_api_client_for_server_auth_required = scheduler_api_client_for_server_auth_required
        create_scheduler_args.scheduler_api_client_for_server_username = scheduler_api_client_for_server_username
        create_scheduler_args.scheduler_api_client_for_server_password = scheduler_api_client_for_server_password

        if has_tls:
            create_scheduler_args.cert_path = scheduler_crypto_loc.cert_path # type: ignore
            create_scheduler_args.pub_key_path = scheduler_crypto_loc.pub_path # type: ignore
            create_scheduler_args.priv_key_path = scheduler_crypto_loc.priv_path # type: ignore
            create_scheduler_args.ca_certs_path = scheduler_crypto_loc.ca_certs_path # type: ignore

        _ = create_scheduler.Create(create_scheduler_args).execute(create_scheduler_args, False, True) # type: ignore
        self.logger.info('[{}/{}] Scheduler created'.format(next(next_step), total_steps))

# ################################################################################################################################

    #
    # 8) Scripts
    #
    zato_bin = 'zato.bat' if is_windows else 'zato'

    # Visual Studio integration
    vscode_dir = os.path.join(args_path, '.vscode')
    vscode_launch_json_path = os.path.join(vscode_dir, 'launch.json')
    vscode_settings_json_path = os.path.join(vscode_dir, 'settings.json')

    os.mkdir(vscode_dir)

    _ = open_w(vscode_launch_json_path).write(vscode_launch_json)
    _ = open_w(vscode_settings_json_path).write(vscode_settings_json)

    # This will exist for Windows and other systems
    zato_qs_start_path = 'zato-qs-start.bat' if is_windows else 'zato-qs-start.sh'
    zato_qs_start_path = os.path.join(args_path, zato_qs_start_path)

    # These commands are generated for non-Windows systems only
    zato_qs_stop_path = os.path.join(args_path, 'zato-qs-stop.sh')
    zato_qs_restart_path = os.path.join(args_path, 'zato-qs-restart.sh')

    check_config = []
    start_servers = []
    stop_servers = []

    if create_components_other_than_scheduler:
        for name in server_names: # type: ignore
            check_config.append(check_config_template.format(server_name=server_names[name]))
            start_servers.append(start_servers_template.format(server_name=server_names[name], step_number=int(name)+5))
            stop_servers.append(stop_servers_template.format(server_name=server_names[name], step_number=int(name)+1))

    check_config = '\n'.join(check_config)
    start_servers = '\n'.join(start_servers)
    stop_servers = '\n'.join(stop_servers)

    if scheduler_only:
        start_servers = '# No servers to start'
        start_lb = '# No load-balancer to start'
        check_config_extra = ''
        check_tcp_ports_suffix = 'scheduler-only'
        cluster_starting = ''
        cluster_started = ''
        cluster_stopping = ''
        cluster_stopped = ''
        check_config_step_number = 1
        scheduler_step_count = 2
        start_steps = 2
        stop_steps = 3
        start_scheduler = zato_qs_start_scheduler.format(scheduler_step_count=scheduler_step_count)
        stop_scheduler = zato_qs_stop_scheduler
    else:
        start_lb = zato_qs_start_lb_windows if is_windows else zato_qs_start_lb_non_windows
        check_config_extra = zato_qs_check_config_extra
        check_tcp_ports_suffix = ''

        # Fixed: this previously assigned zato_qs_cluster_started, which made the
        # generated start script announce the cluster as already started at the
        # very beginning of startup; zato_qs_cluster_starting is the intended,
        # otherwise-unused 'Starting Zato cluster' message.
        cluster_starting = zato_qs_cluster_starting

        cluster_started = zato_qs_cluster_started
        cluster_stopping = zato_qs_cluster_stopping
        cluster_stopped = zato_qs_cluster_stopped
        check_config_step_number = 3
        start_steps = 6 + servers
        stop_steps = 3 + servers
        scheduler_step_count = start_steps - 2

        if no_scheduler:
            start_steps -= 1
            stop_steps -= 1
            start_scheduler = '# No scheduler to start'
            stop_scheduler = '# No scheduler to stop'
        else:
            start_scheduler = zato_qs_start_scheduler.format(scheduler_step_count=scheduler_step_count)
            stop_scheduler = zato_qs_stop_scheduler

    web_admin_step_count = stop_steps
    if create_components_other_than_scheduler and should_create_scheduler:
        web_admin_step_count -= 1

    zato_qs_start_head = zato_qs_start_head_template.format(
        preamble_script=preamble_script,
        zato_bin=zato_bin,
        script_dir=script_dir,
        cluster_name=cluster_name,
        start_steps=start_steps,
        cluster_starting=cluster_starting,
    )
    zato_qs_start_body = zato_qs_start_body_template.format(
        check_config=check_config,
        check_config_extra=check_config_extra,
        check_tcp_ports_suffix=check_tcp_ports_suffix,
        start_lb=start_lb,
        scheduler_step_count=scheduler_step_count,
        start_servers=start_servers,
        check_config_step_number=check_config_step_number,
        start_scheduler=start_scheduler,
    )

    if scheduler_only:
        start_dashboard = ''
    else:
        start_dashboard = zato_qs_start_dashboard

    zato_qs_start_tail = zato_qs_start_tail_template.format(
        start_dashboard=start_dashboard,
        cluster_started=cluster_started,
    )

    zato_qs_start = zato_qs_start_head + zato_qs_start_body + zato_qs_start_tail

    zato_qs_stop = zato_qs_stop_template.format(
        zato_bin=zato_bin,
        script_dir=script_dir,
        cluster_name=cluster_name,
        web_admin_step_count=web_admin_step_count,
        stop_steps=stop_steps,
        stop_servers=stop_servers,
        cluster_stopping=cluster_stopping,
        cluster_stopped=cluster_stopped,
        stop_scheduler=stop_scheduler,
    )

    if is_windows:
        windows_qs_start = windows_qs_start_template.format(env_dir=args_path)
        _ = open_w(zato_qs_start_path).write(windows_qs_start)
    else:
        _ = open_w(zato_qs_start_path).write(zato_qs_start)
        _ = open_w(zato_qs_stop_path).write(zato_qs_stop)
        _ = open_w(zato_qs_restart_path).write(zato_qs_restart.format(script_dir=script_dir, cluster_name=cluster_name))

        # Owner rwx plus group read - the startup scripts need to be executable.
        file_mod = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP

        os.chmod(zato_qs_start_path, file_mod)
        os.chmod(zato_qs_stop_path, file_mod)
        os.chmod(zato_qs_restart_path, file_mod)

    self.logger.info('[{}/{}] Management scripts created'.format(next(next_step), total_steps))
    self.logger.info('Quickstart cluster {} created'.format(cluster_name))

    if admin_created:
        self.logger.info('Dashboard user:[admin], password:[%s]', web_admin_password.decode('utf8')) # type: ignore
    else:
        self.logger.info('User [admin] already exists in the ODB')

    self.logger.info('Start the cluster by issuing this command: %s', zato_qs_start_path)
    self.logger.info('Visit https://zato.io/support for more information and support options')
# ################################################################################################################################
# ################################################################################################################################
| 35,882
|
Python
|
.py
| 663
| 45.758673
| 130
| 0.523629
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,446
|
enmasse.py
|
zatosource_zato/code/zato-cli/src/zato/cli/enmasse.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2024, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import os
from collections import namedtuple
from copy import deepcopy
from dataclasses import dataclass
from datetime import datetime, timedelta
from itertools import chain
from time import sleep
from uuid import uuid4
# Zato
from zato.cli import ManageCommand
from zato.common.api import All_Sec_Def_Types, Data_Format, GENERIC as COMMON_GENERIC, Groups as Common_Groups, \
LDAP as COMMON_LDAP, NotGiven, PUBSUB as Common_PubSub, Sec_Def_Type, TLS as COMMON_TLS, Zato_No_Security, Zato_None
from zato.common.const import ServiceConst
from zato.common.typing_ import cast_
# ################################################################################################################################
# ################################################################################################################################
# Imported for type hints only - the "if 0" guard means this block never executes at runtime
if 0:
    from logging import Logger
    from zato.client import APIClient
    from zato.common.typing_ import any_, anydict, anylist, dictlist, list_, stranydict, strdict, strdictnone, strdictdict, \
        strstrdict, strlist, strlistdict, strnone

    # Self-assignments keep linters from flagging these names as unused imports
    APIClient = APIClient
    Logger = Logger
    strdictdict = strdictdict
    strlistdict = strlistdict
# ################################################################################################################################
# ################################################################################################################################
# Prefixes marking values in enmasse files that are to be resolved through environment variables
# (two spellings are accepted - presumably for backward compatibility; confirm against the resolver)
zato_enmasse_env1 = 'ZatoEnmasseEnv.'
zato_enmasse_env2 = 'Zato_Enmasse_Env.'

# Prefix of environment variable names carrying enmasse values
zato_enmasse_env_value_prefix = 'Zato_Enmasse_Env_'

# Default column widths used when printing out results in tabular form
DEFAULT_COLS_WIDTH = '15,100'
# ################################################################################################################################
class _NoValue:
    """ A sentinel type signalling that no value was given at all, as distinct from an explicit None. """
    pass

# Module-level sentinel instances compared by identity
_no_value1 = _NoValue()
_no_value2 = _NoValue()
# ################################################################################################################################
# A (symbol, description) pair identifying one kind of warning or error reported by enmasse
Code = namedtuple('Code', ('symbol', 'desc')) # type: ignore

WARNING_ALREADY_EXISTS_IN_ODB = Code('W01', 'already exists in ODB')
WARNING_MISSING_DEF = Code('W02', 'definition missing')
WARNING_MISSING_DEF_INCL_ODB = Code('W04', 'missing def incl. ODB')
ERROR_ITEM_INCLUDED_MULTIPLE_TIMES = Code('E01', 'item included multiple')
ERROR_INCLUDE_COULD_NOT_BE_PARSED = Code('E03', 'include parsing error')
ERROR_INVALID_INPUT = Code('E05', 'invalid input')
ERROR_UNKNOWN_ELEM = Code('E06', 'unrecognized import element')
ERROR_KEYS_MISSING = Code('E08', 'missing keys')
ERROR_INVALID_SEC_DEF_TYPE = Code('E09', 'invalid sec def type')
ERROR_INVALID_KEY = Code('E10', 'invalid key')
ERROR_SERVICE_NAME_MISSING = Code('E11', 'service name missing')
ERROR_SERVICE_MISSING = Code('E12', 'service missing')
ERROR_MISSING_DEP = Code('E13', 'dependency missing')
# NOTE(review): symbol 'E13' is also used by ERROR_MISSING_DEP above - looks like a copy/paste
# oversight; codes are user-visible, so confirm before renumbering
ERROR_COULD_NOT_IMPORT_OBJECT = Code('E13', 'could not import object')
ERROR_TYPE_MISSING = Code('E04', 'type missing')
Error_Include_Not_Found = Code('E14', 'include not found')
# ################################################################################################################################
# Generic connection subtypes that enmasse handles explicitly
outconn_wsx = COMMON_GENERIC.CONNECTION.TYPE.OUTCONN_WSX
outconn_ldap = COMMON_GENERIC.CONNECTION.TYPE.OUTCONN_LDAP

# Generic connections are keyed in enmasse files as 'zato_generic_connection_<subtype>'
_prefix_generic = 'zato_generic_connection'
_attr_outconn_wsx = f'{_prefix_generic}_{outconn_wsx}'
_attr_outconn_ldap = f'{_prefix_generic}_{outconn_ldap}'

# We need to have our own version because type "bearer_token" exists in enmasse only.
_All_Sec_Def_Types = All_Sec_Def_Types + ['bearer_token']

# Shortcut to pub/sub default values
_pubsub_default = Common_PubSub.DEFAULT

# Name prefixes that identify objects internal to Zato itself
zato_name_prefix = (
    'admin.',
    'admin.invoke',
    'ide_publisher',
    'pub.zato',
    'pubsub.demo',
    'pubsub.test',
    'zato.',
    '/zato',
    'zato.pubsub',
)
# ################################################################################################################################
def has_name_zato_prefix(name:'str') -> 'bool':
    """ Returns True if the given name begins with any of the reserved, Zato-internal prefixes. """
    # str.startswith accepts a tuple of prefixes, checking each one in turn;
    # a None or empty name is normalized to '' first, which never matches.
    return (name or '').startswith(zato_name_prefix)
# ################################################################################################################################
class ModuleCtx:
    """ Module-level constants and lookup tables driving enmasse's import and export logic.
    The Enmasse_* tables are populated right after the class definition, further below in this module.
    """

    class Include_Type:
        # Names of object categories that can be selected for inclusion
        All = 'all'
        Cache = 'cache'
        LDAP = 'ldap'
        Microsoft_365 = 'cloud-microsoft-365'
        SQL = 'sql'
        PubSub = 'pubsub'
        REST = 'rest'
        Scheduler = 'scheduler'
        Security = 'security'
        WebSockets = 'websockets'

    # An indicator that this is an include directive
    Item_Type_Include = 'include'

    # Maps enmasse definition types to include types
    Enmasse_Type = cast_('strdict', None)

    # Maps enmasse definition types to their attributes that are to be included during an export
    Enmasse_Attr_List_Include = cast_('strlistdict', None)

    # Maps enmasse definition types to their attributes that are to be excluded during an export
    Enmasse_Attr_List_Exclude = cast_('strlistdict', None)

    # Maps enmasse definition types to their attributes that need to be renamed during an export
    Enmasse_Attr_List_Rename = cast_('strdictdict', None)

    # Maps enmasse values that need to be renamed during an export
    Enmasse_Attr_List_Value_Rename = cast_('strdictdict', None)

    # Maps enmasse values that need to be renamed during an import
    Enmasse_Attr_List_Value_Rename_Reverse = cast_('strdictdict', None)

    # Maps enmasse definition types to their attributes that need to be converted to a list during an export
    Enmasse_Attr_List_As_List = cast_('strlistdict', None)

    # Maps enmasse definition types to their attributes that will be always skipped during an export
    Enmasse_Attr_List_Skip_Always = cast_('strlistdict', None)

    # Maps enmasse definition types to their attributes that will be skipped during an export if they are empty
    Enmasse_Attr_List_Skip_If_Empty = cast_('strlistdict', None)

    # Maps enmasse definition types to their attributes that will be skipped during an export if they are True
    Enmasse_Attr_List_Skip_If_True = cast_('strlistdict', None)

    # Maps enmasse definition types to their attributes that will be skipped during an export if they are False
    Enmasse_Attr_List_Skip_If_False = cast_('strlistdict', None)

    # Maps enmasse definition types to their attributes that will be skipped during an export if their value matches configuration
    Enmasse_Attr_List_Skip_If_Value_Matches = cast_('strdictdict', None)

    # Maps enmasse definition types to their attributes that will be skipped during an export if other values match configuration
    Enmasse_Attr_List_Skip_If_Other_Value_Matches = cast_('strdict', None)

    # Maps enmasse definition types to their attributes that will be turned into multiline strings
    Enmasse_Attr_List_As_Multiline = cast_('strlistdict', None)

    # Maps enmasse types to default values of their attributes
    Enmasse_Attr_List_Default_By_Type = cast_('strdictdict', None)

    # Maps pre-3.2 item types to the 3.2+ ones
    Enmasse_Item_Type_Name_Map = cast_('strdict', None)

    # As above, in the reverse direction
    Enmasse_Item_Type_Name_Map_Reverse = cast_('strdict', None)

    # As above, in the reverse direction, but only for specific types
    Enmasse_Item_Type_Name_Map_Reverse_By_Type = cast_('strdict', None)

    # How to sort attributes of a given object during an export
    Enmasse_Attr_List_Sort_Order = cast_('strlistdict', None)

    # How many seconds to wait for servers to start up
    Initial_Wait_Time = 60 * 60 * 12 # In seconds = 12 hours

    # How many seconds to wait for missing objects
    Missing_Wait_Time = 120

    # Extra security types that exist only in enmasse, such as bearer_token in lieu of oauth
    Extra_Security_Types = ['bearer_token']
# ################################################################################################################################
# Full enmasse item type for Microsoft 365 cloud connections
zato_generic_connection_microsoft_365 = 'zato_generic_connection_' + ModuleCtx.Include_Type.Microsoft_365

# Include types that the umbrella 'zato_generic_connection' item type maps to
_enmasse_type_generic = (
    ModuleCtx.Include_Type.LDAP,
    ModuleCtx.Include_Type.Microsoft_365,
    ModuleCtx.Include_Type.WebSockets,
)
# Maps each enmasse definition type to the include type(s) it belongs to
# (note that 'zato_generic_connection' maps to a tuple of include types)
ModuleCtx.Enmasse_Type = {

    # REST connections
    'channel_plain_http': ModuleCtx.Include_Type.REST,
    'outconn_plain_http': ModuleCtx.Include_Type.REST,
    'zato_cache_builtin': ModuleCtx.Include_Type.Cache,
    'zato_generic_rest_wrapper': ModuleCtx.Include_Type.REST,
    'zato_generic_connection': _enmasse_type_generic,

    # Security definitions
    'def_sec': ModuleCtx.Include_Type.Security,

    # Security groups
    'security_groups': ModuleCtx.Include_Type.Security,

    # SQL Connections
    'outconn_sql':ModuleCtx.Include_Type.SQL,

    # Scheduler
    'scheduler':ModuleCtx.Include_Type.Scheduler,

    # Pub/sub
    'pubsub_topic':ModuleCtx.Include_Type.PubSub,
    'pubsub_endpoint':ModuleCtx.Include_Type.PubSub,
    'pubsub_sub':ModuleCtx.Include_Type.PubSub,
    'pubsub_subscription':ModuleCtx.Include_Type.PubSub,
}
# ################################################################################################################################
# For each definition type, the attributes that are to be included during an export;
# types not listed here export all of their attributes subject to the other tables below.
ModuleCtx.Enmasse_Attr_List_Include = {

    # Security definitions
    'def_sec': [
        'type',
        'name',
        'username',
        'realm',
        'auth_server_url',
        'client_id_field',
        'client_secret_field',
        'grant_type',
        'scopes',
        'extra_fields',
    ],

    # REST connections - Channels
    'channel_plain_http': [
        'name',
        'service',
        'url_path',
        'security_name',
        'security_groups',
        'is_active',
        'data_format',
        'connection',
        'transport',
    ],

    # REST connections - outgoing connections
    'outconn_plain_http': [
        'name',
        'host',
        'url_path',
        'security_name',
        'is_active',
        'data_format',
        'connection',
        'transport',
    ],

    # Scheduled tasks
    'scheduler': [
        'name',
        'service',
        'job_type',
        'start_date',
        'weeks',
        'days',
        'hours',
        'minutes',
        'seconds',
        'cron_definition',
        'repeats',
        'extra',
    ],

    # LDAP outgoing connections
    _attr_outconn_ldap: [
        'type_',
        'name',
        'username',
        'auth_type',
        'server_list',
    ],

    # Outgoing WSX connections
    _attr_outconn_wsx: [
        'name',
        'address',
        'data_format',
        'has_auto_reconnect',
        'on_connect_service_name',
        'on_message_service_name',
        'on_close_service_name',
        'subscription_list',
    ],

    # Pub/sub - Endpoints
    'pubsub_endpoint': [
        'name',
        'endpoint_type',
        'service_name',
        'topic_patterns',
        'sec_name',
    ],

    # Pub/sub - Topics
    'pubsub_topic': [
        'name',
        'has_gd',
        'hook_service_name',
    ],

    # Generic connections - Cloud Microsoft 365
    zato_generic_connection_microsoft_365: [
        'client_id',
        'name',
        'scopes',
        'tenant_id',
        'type_',
    ]
}
# ################################################################################################################################
# For each definition type, the attributes that are to be excluded during an export
ModuleCtx.Enmasse_Attr_List_Exclude = {

    # Cache definitions
    'zato_cache_builtin': [
        'cache_id',
        'current_size',
        'opaque1',
    ],

    # REST connections - Channels
    'channel_plain_http': [
        'connection',
        'service_name',
        'transport',
    ],

    # REST connections - outgoing connections
    'outconn_plain_http': [
        'connection',
        'transport',
    ],
}
# ################################################################################################################################
#
# This is used during export - maps server-side attribute names to the names used in enmasse files
#
ModuleCtx.Enmasse_Attr_List_Rename = {

    # Security definitions
    'def_sec': {
        'auth_server_url':'auth_endpoint'
    },

    # Pub/sub endpoints
    'pubsub_endpoint': {
        'sec_name':'security_name'
    },
}
# ################################################################################################################################
#
# This is used during export - maps server-side attribute values to the values used in enmasse files
#
ModuleCtx.Enmasse_Attr_List_Value_Rename = {

    # Security definitions
    'def_sec': {
        'type': [{'oauth':'bearer_token'}]
    },

    # Pub/sub endpoints
    'pubsub_endpoint': {
        'endpoint_type': [{'srv':'service'}]
    },

    # Pub/sub subscriptions
    'pubsub_subscription': {
        'endpoint_type': [{'srv':'service'}]
    },
}
# ################################################################################################################################
#
# This is used during import - the reverse of Enmasse_Attr_List_Value_Rename above
#
ModuleCtx.Enmasse_Attr_List_Value_Rename_Reverse = {

    # Pub/sub endpoints
    'pubsub_endpoint': {
        'endpoint_type': [{'service':'srv'}]
    },

    # Pub/sub subscriptions
    'pubsub_subscription': {
        'endpoint_type': [{'service':'srv'}]
    },

    # Security - OAuth & Bearer Token
    'security': {
        'type': [{'bearer_token':'oauth'}]
    },
}
# ################################################################################################################################
# Attributes that need to be converted to a list during an export
ModuleCtx.Enmasse_Attr_List_As_List = {

    # Security definitions
    'def_sec': ['scopes', 'extra_fields'],
}
# ################################################################################################################################
# Attributes that are always skipped during an export, regardless of their values
ModuleCtx.Enmasse_Attr_List_Skip_Always = {

    # Security groups
    'security_groups': ['description', 'group_id', 'member_count', 'type'],
}
# ################################################################################################################################
# Attributes that are skipped during an export if they are empty
ModuleCtx.Enmasse_Attr_List_Skip_If_Empty = {

    # Scheduled tasks
    'scheduler': ['weeks', 'days', 'hours', 'minutes', 'seconds', 'cron_definition', 'repeats', 'extra'],

    # REST channels
    'channel_plain_http': ['data_format'],

    # Outgoing WSX connections
    _attr_outconn_wsx: ['data_format', 'subscription_list'],

    # Pub/sub - Topics
    'pubsub_topic': [
        'hook_service_name',
    ],

    # Pub/sub - Subscriptions
    'pubsub_subscription': [
        'rest_connection',
        'service',
        'service_name',
    ],
}
# ################################################################################################################################
# Attributes that are skipped during an export if they are True
ModuleCtx.Enmasse_Attr_List_Skip_If_True = {

    # Outgoing WSX connections
    _attr_outconn_wsx: ['has_auto_reconnect'],
}
# ################################################################################################################################
# Attributes that are skipped during an export if they are False
ModuleCtx.Enmasse_Attr_List_Skip_If_False = {

    # Pub/sub - Topics
    'pubsub_topic': [
        'has_gd',
    ],
}
# ################################################################################################################################
# Attributes that are skipped during an export if their value matches the one configured below
ModuleCtx.Enmasse_Attr_List_Skip_If_Value_Matches = {

    # E-Mail IMAP
    'email_imap': {'get_criteria':'UNSEEN', 'timeout':10},

    # Pub/sub - Endpoints
    'pubsub_endpoint': {'security_name':Zato_No_Security},

    # Pub/sub - Subscriptions
    'pubsub_subscription': {'delivery_server':'server1'},
}
# ################################################################################################################################
# Attributes ('attrs') that are skipped during an export if other attributes match the given 'criteria'
ModuleCtx.Enmasse_Attr_List_Skip_If_Other_Value_Matches = {

    # Security definitions
    'def_sec': [
        {'criteria':[{'type':'apikey'}], 'attrs':['username']},
    ],

    # Pub/sub subscriptions
    'pubsub_subscription': [
        {'criteria':[{'delivery_method':'pull'}], 'attrs':['rest_method', 'rest_connection']},
        {'criteria':[{'endpoint_type':'service'}], 'attrs':['rest_method', 'rest_connection']},
    ],
}
# ################################################################################################################################
# Attributes that are turned into multiline strings during an export
ModuleCtx.Enmasse_Attr_List_As_Multiline = {

    # Scheduled tasks
    'scheduler': ['extra'],

    # Pub/sub endpoints
    'pubsub_endpoint': ['topic_patterns'],
}
# ################################################################################################################################
# Default values assigned to attributes of each definition type when they are not given explicitly
ModuleCtx.Enmasse_Attr_List_Default_By_Type = {

    'pubsub_endpoint': {
        'is_internal': False,
        'role': Common_PubSub.ROLE.PUBLISHER_SUBSCRIBER.id,

        # Note that it is not security_name because it has been already re-mapped to sec_name
        # by the time this check is taking place.
        'sec_name': Zato_No_Security,
    },

    'pubsub_topic': {
        'has_gd': False,
        'is_api_sub_allowed': True,
        'max_depth_gd': _pubsub_default.TOPIC_MAX_DEPTH_GD,
        'max_depth_non_gd': _pubsub_default.TOPIC_MAX_DEPTH_NON_GD,
        'depth_check_freq': _pubsub_default.DEPTH_CHECK_FREQ,
        'pub_buffer_size_gd': _pubsub_default.PUB_BUFFER_SIZE_GD,
        'task_sync_interval': _pubsub_default.TASK_SYNC_INTERVAL,
        'task_delivery_interval': _pubsub_default.TASK_DELIVERY_INTERVAL,
    },

    'pubsub_subscription': {
        'should_ignore_if_sub_exists': True,
        'should_delete_all': True,
    },

    'channel_rest': {
        'security_name': Zato_No_Security,
        'merge_url_params_req': True,
    },

    'outgoing_rest': {
        'security_name': Zato_No_Security,
        'merge_url_params_req': True,
    },

    # Generic connections - Cloud Microsoft 365
    zato_generic_connection_microsoft_365: {
        'is_channel': False,
        'is_internal': False,
        'is_outconn': True,
        'pool_size': 20,
        'sec_use_rbac': False,
    }
}
# ################################################################################################################################
# Maps pre-3.2 item type names to the 3.2+ ones
ModuleCtx.Enmasse_Item_Type_Name_Map = {
    'def_sec': 'security',
    'channel_plain_http': 'channel_rest',
    'outconn_plain_http': 'outgoing_rest',
    'zato_generic_connection_outconn-ldap': 'outgoing_ldap',
    'zato_generic_connection_outconn-wsx': 'outgoing_wsx',
}
# ################################################################################################################################
# Build the reverse mapping, from 3.2+ item type names back to the pre-3.2 ones
ModuleCtx.Enmasse_Item_Type_Name_Map_Reverse = {
    new_name: old_name for old_name, new_name in ModuleCtx.Enmasse_Item_Type_Name_Map.items()
}
# ################################################################################################################################
# Per-type attribute renames applied in the reverse (import) direction
ModuleCtx.Enmasse_Item_Type_Name_Map_Reverse_By_Type = {
    'pubsub_endpoint': [{'security_name':'sec_name'}],
    'pubsub_subscription': [{'topic_list':'topic_list_json'}],
}
# ################################################################################################################################
# For each definition type, the order in which its attributes appear in an export file
ModuleCtx.Enmasse_Attr_List_Sort_Order = {

    # REST connections - Channels
    'channel_plain_http': [
        'name',
        'service',
        'url_path',
        'security_name',
    ],

    # REST connections - outgoing connections
    'outconn_plain_http': [
        'name',
        'host',
        'url_path',
        'security_name',
        'is_active',
        'data_format',
    ],

    # Security definitions
    'def_sec': [
        'name',
        'username',
        'password',
        'type',
        'realm',
        'auth_endpoint',
        'client_id_field',
        'client_secret_field',
        'grant_type',
        'scopes',
        'extra_fields',
    ],

    # Security groups
    'security_groups': [
        'name',
        'members',
    ],

    # Scheduled tasks
    'scheduler': [
        'name',
        'service',
        'job_type',
        'start_date',
        'weeks',
        'days',
        'hours',
        'minutes',
        'seconds',
        'cron_definition',
        'repeats',
        'extra',
    ],

    # Outgoing WSX connections
    f'zato_generic_connection_{outconn_wsx}': [
        'name',
        'address',
        'data_format',
        'has_auto_reconnect',
        'on_connect_service_name',
        'on_message_service_name',
        'on_close_service_name',
        'subscription_list',
    ],

    # Pub/sub - Endpoints
    'pubsub_endpoint': [
        'name',
        'endpoint_type',
        'security_name',
        'service_name',
        'topic_patterns',
    ],

    # Pub/sub - Topics
    'pubsub_topic': [
        'name',
        'has_gd',
        'hook_service_name',
    ],

    # Pub/sub - Subscription
    'pubsub_subscription': [
        'name',
        'endpoint_name',
        'endpoint_type',
        'delivery_method',
        'rest_connection',
        'rest_method',
        'delivery_server',
        'topic_list',
    ],

    # Generic connections - Cloud Microsoft 365
    zato_generic_connection_microsoft_365: [
        'name',
        'type_',
        'tenant_id',
        'client_id',
        'scopes',
    ]
}
# ################################################################################################################################
# ################################################################################################################################
@dataclass(init=False)
class EnvKeyData:
    """ Holds the parts of a single environment-variable-based enmasse value.
    Presumably parsed out of Zato_Enmasse_Env.* keys - confirm against the code that populates it.
    """
    # Enmasse definition type the value applies to
    def_type: 'str'
    # Name of the object the value applies to
    name: 'str'
    # Attribute of the object to assign to
    attr_name: 'str'
    # The value to assign
    attr_value: 'str'
# ################################################################################################################################
# ################################################################################################################################
def _replace_item_type(is_import:'bool', item_type:'str') -> 'str':
# Certain item types need to be replaced because they exist only in enmasse
if is_import:
if item_type == 'bearer_token':
return 'oauth'
else:
return item_type
else:
if item_type == 'oauth':
return 'bearer_token'
else:
return item_type
# ################################################################################################################################
def find_first(it, pred): # type: ignore
    """ Given any iterable, return the first element matching `pred(elem)`, or None if there is no match.
    """
    return next((elem for elem in it if pred(elem)), None) # type: ignore
# ################################################################################################################################
def dict_match(item_type, item, fields): # type: ignore
    """ Returns True if the input item has all the fields matching.
    Note that `item_type` is accepted for signature compatibility but is not consulted here.
    """
    # Every expected key must be present with exactly the expected value
    return all(item.get(name) == expected for name, expected in fields.items()) # type: ignore
# ################################################################################################################################
#: Set of zato service prefixes we explicitly don't support during import/export.
IGNORE_PREFIXES = {
    'zato.kvdb.data-dict.dictionary',
    'zato.kvdb.data-dict.translation',
}
# ################################################################################################################################
def try_keys(item:'strdict', keys:'strlist') -> 'any_':
    """ Returns the value of the first key out of `keys` that is present in `item`,
    raising an exception if none of them is found.

    Key presence is checked via an identity sentinel rather than truthiness, so keys
    whose values are falsy (0, '', False, None) are still returned correctly.
    Previously, a falsy value made the key look absent, which could raise even
    though the key existed in the input dict.
    """
    # A local sentinel distinguishes "key missing" from "key present with a falsy value"
    _missing = object()

    for key in keys:
        value = item.get(key, _missing)
        if value is not _missing:
            return value

    # None of the keys was found
    raise Exception(f'Keys were not found -> {keys} -> {item}')
# ################################################################################################################################
def populate_services_from_apispec(client, logger): # type: ignore
    """ Request a list of services from the APISpec service, and merge the results into SERVICE_BY_PREFIX,
    creating new ServiceInfo instances to represent previously unknown services as appropriate.
    """
    # Python 2/3 compatibility
    from zato.common.ext.future.utils import iteritems

    response:'any_' = client.invoke('zato.apispec.get-api-spec', {
        'return_internal': True,
        'include': '*',
        'needs_sphinx': False
    })

    # A best-effort operation - log the error and give up if the spec could not be obtained
    if not response.ok:
        logger.error('Could not fetch service list -> %s', response.inner.text)
        return

    by_prefix = {} # { "zato.apispec": {"get-api-spec": { .. } } }

    # Group services by their common prefix, keyed further by the last component of each name
    for service in response.data: # type: ignore
        prefix, _, name = service['name'].rpartition('.') # type: ignore
        methods = by_prefix.setdefault(prefix, {}) # type: ignore
        methods[name] = service

    # Services belonging here may not have all the CRUD methods and it is expected that they do not
    allow_incomplete_methods = [
        'zato.outgoing.redis',
        'zato.pubsub.subscription',
        'zato.security',
        'zato.security.rbac.client-role'
    ]

    for prefix, methods in iteritems(by_prefix): # type: ignore

        # Ignore prefixes lacking 'get-list', 'create' and 'edit' methods.
        if not all(n in methods for n in ('get-list', 'create', 'edit')):

            # RBAC client roles cannot be edited so it is fine that they lack the 'edit' method.
            if prefix not in allow_incomplete_methods:
                continue

        if prefix in IGNORE_PREFIXES:
            continue

        # Register a new ServiceInfo for prefixes not seen before, updating all the lookup structures
        service_info = SERVICE_BY_PREFIX.get(prefix)
        if service_info is None:
            service_info = ServiceInfo(prefix=prefix, name=make_service_name(prefix))
            SERVICE_BY_PREFIX[prefix] = service_info
            SERVICE_BY_NAME[service_info.name] = service_info
            SERVICES.append(service_info)

        service_info.methods = methods
# The common prefix for a set of services is tested against the first element in this list using startswith().
# If it matches, that prefix is replaced by the second element. The prefixes must match exactly if the first element
# does not end in a period.
SHORTNAME_BY_PREFIX:'anylist' = [
    ('zato.pubsub.', 'pubsub'),
    ('zato.definition.', 'def'),
    ('zato.email.', 'email'),
    ('zato.message.namespace', 'def_namespace'),
    ('zato.cloud.aws.s3', 'cloud_aws_s3'),
    ('zato.message.json-pointer', 'json_pointer'),
    ('zato.notif.', 'notif'),
    ('zato.outgoing.', 'outconn'),
    ('zato.scheduler.job', 'scheduler'),
    ('zato.search.', 'search'),
    ('zato.security.tls.channel', 'tls_channel_sec'),
    ('zato.security.', ''),
    ('zato.channel.', ''),
]
# ################################################################################################################################
def make_service_name(prefix): # type: ignore
    """ Derives a short enmasse name from a service prefix, e.g. 'zato.outgoing.sql' -> 'outconn_sql',
    using the SHORTNAME_BY_PREFIX mapping.
    """
    # Security groups can be recognized immediately, without consulting the prefix map
    if 'groups' in prefix:
        return 'security_groups'

    # stdlib
    import re

    # Both dots and dashes become underscores in short names
    escaped = re.sub('[.-]', '_', prefix)

    for module_prefix, name_prefix in SHORTNAME_BY_PREFIX:

        # A module prefix ending in a period matches via startswith, otherwise it must match exactly
        if module_prefix.endswith('.') and prefix.startswith(module_prefix):
            tail = escaped[len(module_prefix):]
            return '{}_{}'.format(name_prefix, tail) if name_prefix else tail

        elif prefix == module_prefix:
            return name_prefix

    # No mapping matched - fall back to the escaped prefix itself
    return escaped
# ################################################################################################################################
def normalize_service_name(item): # type: ignore
    """ Given an item originating from the API or from an import file, if the item contains either the 'service'
    or 'service_name' keys, ensure the other key is set. Either the dict contains neither key, or both keys set
    to the same value."""
    has_service = 'service' in item
    has_service_name = 'service_name' in item

    # Mirror whichever of the two keys is present onto the missing one
    if has_service and not has_service_name:
        item['service_name'] = item['service']
    elif has_service_name and not has_service:
        item['service'] = item['service_name']
# ################################################################################################################################
def test_item(item, condition): # type: ignore
""" Given a dictionary `cond` containing some conditions to test an item for, return True if those conditions match.
Currently only supports testing whether a field has a particular value. Returns ``True`` if `cond` is ``None``."""
if condition is not None:
condition = condition if isinstance(condition, list) else [condition] # type: ignore
for condition_config in condition: # type: ignore
only_if_field:'any_' = condition_config.get('only_if_field')
only_if_value:'any_' = condition_config.get('only_if_value')
if not isinstance(only_if_value, (list, tuple)):
only_if_value = [only_if_value]
if only_if_field and item.get(only_if_field) not in only_if_value:
return False
return True
# ################################################################################################################################
# Note that the order of items in this list matters - the first field found in an item wins
_security_fields = ['security', 'sec_def', 'security_name']

# All keys under which a security name can be found
_security_keys = ['security_name', 'sec_name', 'sec_def']
# ################################################################################################################################
def resolve_security_field_name(item:'strdict') -> 'str':
    """ Returns the name of the key under which the item stores its security definition,
    checking _security_fields in order and falling back to 'security' if none is present."""
    return next((field for field in _security_fields if field in item), 'security')
# ################################################################################################################################
# ################################################################################################################################
class ServiceInfo:
    """ Describes one type of object that enmasse can export and import, together with its
    dependencies on other objects and on services."""

    def __init__(self, prefix=None, name=None, object_dependencies=None, service_dependencies=None, export_filter=None): # type: ignore

        # At least one of the two identifiers is required
        assert name or prefix

        # Short service name as appears in export data.
        self.name = cast_('str', name or prefix)

        # Optional name of the object enumeration/retrieval service.
        self.prefix = prefix

        # Maps method names, e.g. 'create', to their definitions.
        # Overwritten by populate_services_from_apispec().
        self.methods = {}

        # Object dependencies, given as:
        # field_name: {"dependent_type": "shortname", "dependent_field":
        # "fieldname", "empty_value": None, or e.g. Zato_No_Security}
        self.object_dependencies = object_dependencies or {}

        # Service dependencies - each field's value contains the name of a service that must exist:
        # field_name: {"only_if_field": "field_name" or None, "only_if_value": "value" or None}
        self.service_dependencies = service_dependencies or {}

        # Field/value specifications that should be ignored during export:
        # field_name: value
        self.export_filter = export_filter or {}

# ################################################################################################################################

    @property
    def is_security(self): # type: ignore
        """ If True, indicates the service is source of authentication credentials for use in another service.
        """
        prefix = self.prefix
        return prefix and prefix.startswith('zato.security.') # type: ignore

# ################################################################################################################################

    def get_service_name(self, method): # type: ignore
        """ Returns the full name of the service implementing the given method, or None if there is none. """
        return self.methods.get(method, {}).get('name') # type: ignore

# ################################################################################################################################

    def get_required_keys(self): # type: ignore
        """ Return the set of keys required to create a new instance.
        """
        create_sig:'any_' = self.methods.get('create')
        if create_sig is None:
            return set() # type: ignore

        input_required:'any_' = create_sig.get('input_required', [])
        required = {elem['name'] for elem in input_required}

        # The cluster ID is supplied implicitly, never through enmasse files
        required.discard('cluster_id')
        return required

# ################################################################################################################################

    def __repr__(self):
        """ Debug representation, e.g. <ServiceInfo for zato.http-soap>. """
        return '<ServiceInfo for {}>'.format(self.prefix)
# ServiceInfo templates for services that have additional semantics not yet described by apispec.
# To be replaced by introspection later.
SERVICES = [

    # AMQP channels depend on an AMQP definition and on the service they invoke
    ServiceInfo(
        name='channel_amqp',
        prefix='zato.channel.amqp',
        object_dependencies={
            'def_name': {
                'dependent_type': 'def_amqp',
                'dependent_field': 'name',
            },
        },
        service_dependencies={
            'service_name': {}
        },
    ),

    # WebSocket channels may point to a security definition and must point to a service
    ServiceInfo(
        name='web_socket',
        prefix='zato.channel.web-socket',
        object_dependencies={
            'sec_def': {
                'dependent_type': 'def_sec',
                'dependent_field': 'name',
                'empty_value': Zato_No_Security,
                'id_field': 'security_id',
            },
        },
        service_dependencies={
            'service': {}
        },
    ),

    # Pub/sub endpoints depend on a WSX channel or a Basic Auth definition, depending on their type
    ServiceInfo(
        name='pubsub_endpoint',
        prefix='zato.pubsub.endpoint',
        object_dependencies={
            'ws_channel_name': {
                'dependent_type': 'web_socket',
                'dependent_field': 'name',
                'condition': {
                    'only_if_field': 'endpoint_type',
                    'only_if_value': 'wsx',
                },
                'id_field': 'ws_channel_id',
            },
            'sec_name': {
                'dependent_type': 'basic_auth',
                'dependent_field': 'name',
                'empty_value': Zato_No_Security,
                'condition': {
                    'only_if_field': 'endpoint_type',
                    'only_if_value': ['soap', 'rest'],
                },
                'id_field': 'security_id',
            }
        },
    ),

    # Pub/sub subscriptions depend on their endpoint and, for push deliveries, on a REST connection
    ServiceInfo(
        name='pubsub_subscription',
        prefix='zato.pubsub.subscription',
        object_dependencies={
            'endpoint_name': {
                'dependent_type': 'pubsub_endpoint',
                'dependent_field': 'name',
            },
            'rest_connection': {
                'dependent_type': 'http_soap',
                'dependent_field': 'name',
                'condition': [
                    {
                        'only_if_field': 'endpoint_type',
                        'only_if_value': ['soap', 'rest'],
                    },
                    {
                        'only_if_field': 'delivery_method',
                        'only_if_value': ['push'],
                    }
                ],
            },
        },
    ),

    # IBM MQ channels depend on their connection definition and on the service they invoke
    ServiceInfo(
        name='channel_jms_wmq',
        prefix='zato.channel.jms-wmq',
        object_dependencies={
            'def_name': {
                'dependent_type': 'def_jms_wmq',
                'dependent_field': 'name',
            },
        },
        service_dependencies={
            'service_name': {}
        },
    ),

    ServiceInfo(
        name='channel_zmq',
        prefix='zato.channel.zmq',
        service_dependencies={
            'service_name': {}
        },
    ),

    ServiceInfo(
        name='def_sec',
        prefix='zato.security',
    ),

    ServiceInfo(
        name='http_soap',
        prefix='zato.http-soap',

        # Covers outconn_plain_http, outconn_soap, channel_plain_http, channel_soap
        object_dependencies={
            'security': {
                'dependent_type': 'def_sec',
                'dependent_field': 'name',
                'empty_value': Zato_No_Security,
                'id_field': 'security_id',
            },
            'sec_def': {
                'dependent_type': 'def_sec',
                'dependent_field': 'name',
                'empty_value': Zato_No_Security,
                'id_field': 'security_id',
            },
        },

        # Only channels require a service
        service_dependencies={
            'service_name': {
                'id_field': 'service_id',
                'condition': {
                    'only_if_field': 'connection',
                    'only_if_value': 'channel',
                },
            }
        },

        # Internal objects are not exported
        export_filter={
            'is_internal': True,
        }
    ),

    ServiceInfo(
        name='scheduler',
        service_dependencies={
            'service_name': {
                'id_field': 'service_id',
            }
        },
    ),

    ServiceInfo(
        name='notif_sql',
        prefix='zato.notif.sql',
        object_dependencies={
            'def_name': {
                'dependent_type': 'outconn_sql',
                'dependent_field': 'name',
            },
        },
    ),

    ServiceInfo(
        name='outconn_amqp',
        prefix='zato.outgoing.amqp',
        object_dependencies={
            'def_name': {
                'dependent_type': 'def_amqp',
                'dependent_field': 'name',
            },
        },
    ),

    ServiceInfo(
        name='outconn_jms_wmq',
        prefix='zato.outgoing.jms-wmq',
        object_dependencies={
            'def_name': {
                'dependent_type': 'def_jms_wmq',
                'dependent_field': 'name',
            },
        },
    ),

    ServiceInfo(
        name='outconn_redis',
        prefix='zato.outgoing.redis',
    ),

    ServiceInfo(
        name='query_cassandra',
        prefix='zato.query.cassandra',
        object_dependencies={
            'def_name': {
                'dependent_type': 'def_cassandra',
                'dependent_field': 'name',
                'empty_value': Zato_No_Security,
            },
        },
    ),
]
# Fast lookup of the ServiceInfo templates above, by short name and by service prefix
SERVICE_BY_NAME = {info.name: info for info in SERVICES}
SERVICE_BY_PREFIX = {info.prefix: info for info in SERVICES}

# The (item_type, connection, transport) triples that http_soap objects are split into
HTTP_SOAP_KINDS = (
    # item_type connection transport
    ('channel_soap', 'channel', 'soap'),
    ('channel_plain_http', 'channel', 'plain_http'),
    ('outconn_soap', 'outgoing', 'soap'),
    ('outconn_plain_http', 'outgoing', 'plain_http')
)

# All enmasse item types that are stored server-side as http_soap objects
HTTP_SOAP_ITEM_TYPES = {elem[0] for elem in HTTP_SOAP_KINDS}
# ################################################################################################################################
# ################################################################################################################################
class _DummyLink: # type: ignore
""" Pip requires URLs to have a .url attribute.
"""
def __init__(self, url): # type: ignore
self.url = url
# ################################################################################################################################
# ################################################################################################################################
class Notice:
    """ A single validation message - carries the raw value it concerns,
    a human-readable value and a code identifying the condition.
    """
    def __init__(self, value_raw, value, code):
        # type: (object, object, int) -> None
        self.value_raw = value_raw
        self.value = value
        self.code = code

    def __repr__(self):
        template = "<{} at {} value_raw:'{}' value:'{}' code:'{}'>"
        return template.format(
            self.__class__.__name__, hex(id(self)), self.value_raw, self.value, self.code)
# ################################################################################################################################
# ################################################################################################################################
class Results:
    """ Collects warnings and errors produced while validating or importing objects.
    """
    def __init__(self, warnings=None, errors=None, service=None):
        # type: (list, list, object) -> None

        # List of Notice instances describing non-fatal issues
        self.warnings = warnings or []

        # List of Notice instances describing fatal issues
        self.errors = errors or []

        # Name of the service these results relate to, if any
        self.service_name = service.get_name() if service else None

# ################################################################################################################################

    def add_error(self, raw, code, msg, *args):
        # type: (object, int, str, *object) -> None
        text = msg.format(*args) if args else msg
        self.errors.append(Notice(raw, text, code))

# ################################################################################################################################

    def add_warning(self, raw, code, msg, *args):
        # type: (object, int, str, *object) -> None
        text = msg.format(*args) if args else msg
        self.warnings.append(Notice(raw, text, code))

# ################################################################################################################################

    @property
    def ok(self):
        """ True only if there are neither warnings nor errors. """
        return not (self.warnings or self.errors)
# ################################################################################################################################
# ################################################################################################################################
class InputValidator:
    """ Validates input JSON - each top-level key must be a known item type
    and each item must carry a non-None value for all of its required keys.
    """
    def __init__(self, json): # type: ignore

        #: Validation result.
        self.results = Results()

        #: Input JSON to validate.
        self.json = json

# ################################################################################################################################

    def validate(self):
        # type: () -> Results

        # Python 2/3 compatibility
        from zato.common.ext.future.utils import iteritems

        for item_type, items in iteritems(self.json): # type: ignore
            for item in items: # type: ignore
                self.validate_one(item_type, item)

        return self.results

# ################################################################################################################################

    def validate_one(self, item_type:'str', item:'strdict') -> 'None':

        # Reject unknown item types outright.
        if item_type not in SERVICE_BY_NAME:
            raw = (item_type, sorted(SERVICE_BY_NAME))
            self.results.add_error(raw, ERROR_INVALID_KEY, "Invalid key '{}', must be one of '{}'", item_type, sorted(SERVICE_BY_NAME))
            return

        item_dict = dict(item)
        required = SERVICE_BY_NAME[item_type].get_required_keys()

        # The keys may be present, but each must also have a non-None value
        # (0 or '' can be correct values).
        for key in required:
            if item.get(key) is None:
                raw = (key, required, item_dict, item_type)
                self.results.add_error(raw, ERROR_KEYS_MISSING, "Key '{}' must exist in {}: {}", key, item_type, item_dict)
# ################################################################################################################################
# ################################################################################################################################
class DependencyScanner:
    """ Scans all the items in the input JSON for their dependencies on other objects,
    recording every dependency that cannot be found in self.missing.
    """
    def __init__(self, json:'strdict', is_import:'bool', is_export:'bool', ignore_missing:'bool'=False) -> 'None':

        # Input JSON, mapping item types to lists of items
        self.json = json

        self.is_import = is_import
        self.is_export = is_export

        # If True, missing dependencies are recorded but not reported as warnings
        self.ignore_missing = ignore_missing

        # Maps (dependency type, dependency name) -> list of item names that need it
        self.missing = {}

# ################################################################################################################################

    def find_sec(self, fields:'strdict') -> 'strdictnone':
        """ Returns the first item matching the input fields among any of the security-related
        item types, or None if there is no match.
        """
        for service in SERVICES:
            if service.is_security:
                service_name = service.name # _replace_item_type(False, service.name)
                item = self.find(service_name, fields)
                if item is not None:
                    return item

# ################################################################################################################################

    def find(self, item_type:'str', fields:'strdict') -> 'strdictnone':
        """ Returns the first item of the given type matching all the input fields, or None.
        A 'def_sec' lookup is delegated to find_sec which checks all security types.
        """
        if item_type in ['def_sec']:
            return self.find_sec(fields)

        items = self.json.get(item_type, ())
        for item in items:
            if dict_match(item_type, item, fields):
                return item

# ################################################################################################################################

    def scan_item(self, item_type:'str', item:'Bunch', results:'Results') -> 'None':
        """ Scan the data of a single item for required dependencies, recording any that are missing in self.missing.
        """
        missing_item:'any_'

        #
        # Preprocess item type
        #
        if item_type == 'bearer_token':
            item_type = 'oauth'

        service_info = SERVICE_BY_NAME[item_type] # type: ServiceInfo

        for dep_key, dep_info in service_info.object_dependencies.items(): # type: ignore

            # Skip dependencies whose condition does not hold for this item.
            if not test_item(item, dep_info.get('condition')):
                continue

            # RBAC-secured items do not point to concrete security definitions.
            if item.get('security_id') == 'ZATO_SEC_USE_RBAC':
                continue

            # Special-case HTTP connections
            if item_type in ('http_soap', 'web_socket'): # type: ignore
                dep_key = resolve_security_field_name(item)

            if dep_key not in item:
                results.add_error(
                    (dep_key, dep_info), ERROR_MISSING_DEP, '{} lacks required `{}` field: {}', item_type, dep_key, item)

            value:'any_' = item.get(dep_key)
            if value != dep_info.get('empty_value'):
                dep_type:'any_' = dep_info['dependent_type']
                dep_field:'any_' = dep_info['dependent_field']
                dep = self.find(dep_type, {dep_field: value})
                if dep is None:
                    key = (dep_type, value)
                    name:'str' = item.get('name') or ''

                    # Do not report internal objects that have not been exported
                    if has_name_zato_prefix(name):
                        continue

                    # Same here
                    if has_name_zato_prefix(value):
                        continue

                    if name:
                        missing_item = name
                    else:
                        missing_item = [item_type, key]
                    missing_items:'any_' = self.missing.setdefault(key, [])
                    missing_items.append(missing_item)

# ################################################################################################################################

    def scan(self) -> 'Results':
        """ Scans every item of every type in the input JSON, returning a Results instance
        with one warning per dependency that is needed but was not found.
        """
        results = Results()

        for item_type, items in self.json.items():

            #
            # Preprocess item type
            #
            item_type = _replace_item_type(True, item_type)

            for item in items:
                self.scan_item(item_type, item, results)

        if not self.ignore_missing:
            for (missing_type, missing_name), dep_names in sorted(self.missing.items()): # type: ignore
                existing = sorted(item.name for item in self.json.get(missing_type, []))
                raw:'any_' = (missing_type, missing_name, dep_names, existing)
                results.add_warning(
                    raw, WARNING_MISSING_DEF, "'{}' is needed by '{}' but was not among '{}'",
                    missing_name, sorted(dep_names), existing)

        return results
# ################################################################################################################################
# ################################################################################################################################
class ObjectImporter:
    """ Imports objects into a Zato cluster based on previously parsed JSON definitions,
    either creating new objects or editing ones that already exist in ODB.
    """
    def __init__(
        self,
        client, # type: APIClient
        logger, # type: Logger
        object_mgr, # type: ObjectManager
        json, # type: strdict
        is_import, # type: bool
        is_export, # type: bool
        ignore_missing, # type: bool
        args # type: any_
    ) -> 'None':

        # Bunch
        from bunch import bunchify

        # Zato client.
        self.client:'any_' = client
        self.logger = logger

        # Validation result.
        self.results = Results()

        # ObjectManager instance.
        self.object_mgr = object_mgr

        # JSON to import.
        self.json = bunchify(json)

        # Command-line arguments
        self.args = args

        self.is_import = is_import
        self.is_export = is_export
        self.ignore_missing = ignore_missing

# ################################################################################################################################

    def validate_service_required(self, item_type, item): # type: ignore
        """ Checks that each service-dependency field of the item names a service
        that exists in ODB, adding errors to self.results otherwise.
        """
        # Python 2/3 compatibility
        from zato.common.ext.future.utils import iteritems

        service_info = SERVICE_BY_NAME[item_type]
        item_dict:'any_' = dict(item)

        for dep_field, dep_info in iteritems(service_info.service_dependencies): # type: ignore

            # Skip dependencies whose condition does not hold for this item.
            if not test_item(item, dep_info.get('condition')):
                continue

            service_name:'any_' = item.get(dep_field)
            raw:'any_' = (service_name, item_dict, item_type)

            if not service_name:
                self.results.add_error(raw, ERROR_SERVICE_NAME_MISSING,
                    'No {} service key defined type {}: {}', dep_field, item_type, item_dict)
            elif service_name not in self.object_mgr.services:
                self.results.add_error(raw, ERROR_SERVICE_MISSING,
                    'Service `{}` from `{}` missing in ODB ({})', service_name, item_dict, item_type)

# ################################################################################################################################

    def validate_import_data(self):
        """ Validates the input JSON before import - first scanning it for missing
        dependencies and then checking all required services, returning a Results instance.
        """
        results = Results()
        dep_scanner = DependencyScanner(
            self.json,
            self.is_import,
            self.is_export,
            ignore_missing=self.ignore_missing
        )
        scan_results = dep_scanner.scan()

        # Quit early if the dependency scan itself found anything.
        if not scan_results.ok:
            return scan_results

        # For each dependency missing from the input JSON, check whether it exists in ODB.
        for warning in scan_results.warnings: # type: ignore
            missing_type, missing_name, dep_names, existing = warning.value_raw # type: ignore
            if not self.object_mgr.find(missing_type, {'name': missing_name}):
                raw:'any_' = (missing_type, missing_name)
                results.add_warning(raw, WARNING_MISSING_DEF_INCL_ODB, "Definition '{}' not found in JSON/ODB ({}), needed by '{}'",
                    missing_name, missing_type, dep_names)

        for item_type, items in self.json.items(): # type: ignore

            #
            # Preprocess item type
            #
            item_type = _replace_item_type(True, item_type)

            for item in items: # type: ignore
                self.validate_service_required(item_type, item)

        return results

# ################################################################################################################################

    def remove_from_import_list(self, item_type, name): # type: ignore
        """ Removes from the input JSON an item of the given type and name,
        raising KeyError if there is no such item.
        """
        #
        # Preprocess item type
        #
        list_:'any_' = self.json.get(item_type, [])
        item = find_first(list_, lambda item: item.name == name) # type: ignore
        if item:
            _:'any_' = list_.remove(item)
        else:
            raise KeyError('Tried to remove missing %r named %r' % (item_type, name))

# ################################################################################################################################

    def should_skip_item(self, item_type, attrs, is_edit): # type: ignore
        """ Returns True for items that must never be imported directly,
        e.g. auto-created JSON-RPC channels or built-in RBAC objects.
        """
        # Plain HTTP channels cannot create JSON-RPC ones
        if item_type == 'http_soap' and attrs.name.startswith('json.rpc.channel'):
            return True

        # Root RBAC role cannot be edited
        elif item_type == 'rbac_role' and attrs.name == 'Root':
            return True

        # RBAC client roles cannot be edited
        elif item_type == 'rbac_client_role' and is_edit:
            return True

# ################################################################################################################################

    def _set_generic_connection_secret(self, name, type_, secret): # type: ignore
        """ Explicitly sets the secret (password) of an existing generic connection.
        """
        response:'any_' = self.client.invoke('zato.generic.connection.change-password', {
            'name': name,
            'type_': type_,
            'password1': secret,
            'password2': secret
        })
        if not response.ok:
            raise Exception('Unexpected response; e:{}'.format(response))
        else:
            self.logger.info('Set password for generic connection `%s` (%s)', name, type_)

# ################################################################################################################################

    def _needs_change_password(self, item_type, attrs, is_edit): # type: ignore
        """ Returns True if the change-password service should be invoked after importing this item.
        """
        # By default, assume that we do need to change a given password.
        out = True

        if is_edit and item_type == 'rbac_role_permission':
            out = False

        if item_type == 'zato_generic_connection' and attrs.get('type_') == COMMON_GENERIC.CONNECTION.TYPE.OUTCONN_WSX:
            out = False

        if item_type == 'pubsub_subscription':
            out = False

        return out

# ################################################################################################################################

    def _resolve_attrs(self, item_type:'str', attrs:'any_') -> 'any_':
        """ Resolves attribute values that point to environment variables,
        i.e. string values prefixed with one of the recognized env-prefix markers.
        """
        for key, orig_value in attrs.items():

            # .. preprocess values only if they are strings ..
            if isinstance(orig_value, str):

                if orig_value.startswith(zato_enmasse_env1):
                    _prefix = zato_enmasse_env1
                elif orig_value.startswith(zato_enmasse_env2):
                    _prefix = zato_enmasse_env2
                else:
                    _prefix = None

                if _prefix:
                    # Everything after the prefix is the name of an environment variable.
                    value = orig_value.split(_prefix)
                    value = value[1]
                    if not value:
                        raise Exception('Could not build a value from `{}` in `{}`'.format(orig_value, item_type))
                    else:
                        value = os.environ.get(value, NotGiven)
                        if value is NotGiven:
                            # Boolean-like flags fall back to None, anything else gets
                            # a unique marker value so that the absence is visible.
                            if key.startswith(('is_', 'should_', 'needs_')):
                                value = None
                            else:
                                value = 'Env-Value-Not-Found-' + orig_value + '.' + uuid4().hex
                    attrs[key] = value

        return attrs

# ################################################################################################################################

    def _import(self, item_type:'str', attrs:'any_', is_edit:'bool') -> 'None':
        """ Imports a single object - invoking its create or edit service, optionally changing
        its password afterwards. Returns self.results on the first error encountered.
        """
        # First, resolve values pointing to parameter placeholders and environment variables ..
        attrs = self._resolve_attrs(item_type, attrs)

        #
        # Preprocess the data to be imported
        #
        attrs_dict = dict(attrs)

        # Generic connections cannot import their IDs during edits
        if item_type == 'zato_generic_connection' and is_edit:
            _= attrs_dict.pop('id', None)

        # We handle security groups only
        elif item_type == 'security_groups':
            attrs['group_type'] = Common_Groups.Type.API_Clients

        # RBAC objects cannot refer to other objects by their IDs
        elif item_type == 'rbac_role_permission':
            _= attrs_dict.pop('id', None)
            _= attrs_dict.pop('perm_id', None)
            _= attrs_dict.pop('role_id', None)
            _= attrs_dict.pop('service_id', None)

        elif item_type == 'rbac_client_role':
            _= attrs_dict.pop('id', None)
            _= attrs_dict.pop('role_id', None)

        elif item_type == 'rbac_role':
            _= attrs_dict.pop('id', None)
            _= attrs_dict.pop('parent_id', None)

        elif item_type == 'oauth':

            # Fill in OAuth defaults for fields that were not given on input.
            if not 'data_format' in attrs:
                attrs['data_format'] = Data_Format.JSON

            if not 'client_id_field' in attrs:
                attrs['client_id_field'] = 'client_id'

            if not 'client_secret_field' in attrs:
                attrs['client_secret_field'] = 'client_secret'

            if not 'grant_type' in attrs:
                attrs['grant_type'] = 'client_credentials'

            # 'auth_endpoint' is accepted on input as an alias for 'auth_server_url'.
            if auth_endpoint := attrs.pop('auth_endpoint', None):
                attrs['auth_server_url'] = auth_endpoint

            # Scopes and extra fields may be lists - flatten them to newline-separated strings.
            if scopes := attrs.get('scopes'):
                if isinstance(scopes, list):
                    scopes = '\n'.join(scopes)
                attrs['scopes'] = scopes

            if extra_fields := attrs.get('extra_fields'):
                if isinstance(extra_fields, list):
                    extra_fields = '\n'.join(extra_fields)
                attrs['extra_fields'] = extra_fields

        attrs.cluster_id = self.client.cluster_id
        attrs.is_source_external = True
        response = self._import_object(item_type, attrs, is_edit)

        if response and response.ok:
            if self._needs_change_password(item_type, attrs, is_edit):
                object_id = response.data['id']
                response = self._maybe_change_password(object_id, item_type, attrs)

        # We quit on first error encountered
        if response and not response.ok:
            raw = (item_type, attrs_dict, response.details)
            self.results.add_error(raw, ERROR_COULD_NOT_IMPORT_OBJECT,
                "Could not import (is_edit {}) '{}' with '{}', response from '{}' was '{}'",
                is_edit, attrs.name, attrs_dict, item_type, response.details)
            return self.results # type: ignore

        # It's been just imported so we don't want to create it in next steps
        # (this in fact would result in an error as the object already exists).
        if is_edit:
            self.remove_from_import_list(item_type, attrs.name)

        # If this is a generic connection and it has a secret set (e.g. MongoDB password),
        # we need to explicitly set it for the connection we are editing.
        if item_type == 'zato_generic_connection' and attrs_dict.get('secret'):
            if self._needs_change_password(item_type, attrs, is_edit):
                self._set_generic_connection_secret(attrs_dict['name'], attrs_dict['type_'], attrs_dict['secret'])

        # We'll see how expensive this call is. Seems to be but let's see in practice if it's a burden.
        self.object_mgr.populate_objects_by_type(item_type)

# ################################################################################################################################

    def add_warning(self, results, item_type, value_dict, item): # type: ignore
        """ Adds to results a warning saying that the input object already exists in ODB.
        """
        raw:'any_' = (item_type, value_dict)
        results.add_warning(
            raw, WARNING_ALREADY_EXISTS_IN_ODB, '{} already exists in ODB {} ({})', dict(value_dict), dict(item), item_type)

# ################################################################################################################################

    def find_already_existing_odb_objects(self):
        """ Goes through the input JSON and returns a Results instance with a warning
        for each item that already exists in ODB (and an error for items without a name).
        """
        results = Results()
        for item_type, items in self.json.items(): # type: ignore

            #
            # Preprocess item type
            #
            item_type = _replace_item_type(True, item_type)

            for item in items: # type: ignore
                name:'any_' = item.get('name')
                if not name:
                    raw:'any_' = (item_type, item)
                    results.add_error(raw, ERROR_KEYS_MISSING, '{} has no `name` key ({})', dict(item), item_type)

                # HTTP/SOAP objects are unique by (connection, transport, name), not by name alone.
                if item_type == 'http_soap':
                    connection:'any_' = item.get('connection')
                    transport:'any_' = item.get('transport')
                    existing:'any_' = find_first(self.object_mgr.objects.http_soap,
                        lambda item: connection == item.connection and transport == item.transport and name == item.name) # type: ignore
                    if existing is not None:
                        self.add_warning(results, item_type, item, existing)
                else:
                    existing = self.object_mgr.find(item_type, {'name': name})
                    if existing is not None:
                        self.add_warning(results, item_type, item, existing)

        return results

# ################################################################################################################################

    def may_be_dependency(self, item_type): # type: ignore
        """ Returns True if input item_type may be possibly a dependency, for instance,
        a security definition may be potentially a dependency of channels or a web socket
        object may be a dependency of pub/sub endpoints.
        """
        service_by_name = SERVICE_BY_NAME[item_type]
        if service_by_name.is_security:
            return True
        elif 'def' in item_type:
            return True
        elif item_type in {'web_socket', 'pubsub_endpoint', 'http_soap'}:
            return True
        else:
            return False

# ################################################################################################################################

    def _import_basic_auth(self, data:'dictlist', *, is_edit:'bool') -> 'None':
        """ Imports all the Basic Auth definitions in one call to the import service.
        """
        # Local variables
        service_name = 'zato.common.import-objects'
        import_type = 'edit' if is_edit else 'create'

        # Build a request for the service
        imports = {
            'basic_auth': data
        }

        # .. details of how many objects we are importing ..
        len_imports = {
            'basic_auth': len(imports['basic_auth']),
        }

        # .. log what we are about to do ..
        self.logger.info(f'Invoking -> import security ({import_type}) -> {service_name} -> {len_imports}')

        _ = self.client.invoke(service_name, imports)

# ################################################################################################################################

    def _import_pubsub_objects(self, data:'strlistdict') -> 'None':
        """ Imports all the pub/sub topics, endpoints and subscriptions in one call to the import service.
        """
        # Local variables
        service_name = 'zato.common.import-objects'

        # Resolve all values first ..
        for item_type, values in data.items():
            for idx, value in enumerate(values):
                value = dict(value)
                value = self._resolve_attrs(item_type, value)
                values[idx] = value

        # .. details of how many objects we are importing ..
        len_imports = {
            'topics': len(data['pubsub_topic']),
            'endpoints': len(data['pubsub_endpoint']),
            'subs': len(data['pubsub_subscription']),
        }

        # .. log what we are about to do ..
        self.logger.info(f'Invoking -> import pub/sub -> {service_name} -> {len_imports}')

        _ = self.client.invoke(service_name, data)

# ################################################################################################################################

    def _trigger_sync_server_objects(self, *, sync_security:'bool'=True, sync_pubsub:'bool'=True):
        """ Asks the server to synchronize its security and/or pub/sub objects across the cluster.
        """
        # Local variables
        service_name = 'pub.zato.common.sync-objects'

        # Request to send to the server
        request = {
            'security': sync_security,
            'pubsub': sync_pubsub,
        }

        self.logger.info(f'Invoking -> trigger sync -> {service_name}')

        _ = self.client.invoke(service_name, request)

# ################################################################################################################################

    def _build_existing_objects_to_edit_during_import(self, already_existing:'any_') -> 'any_':
        """ Partitions the already-existing warnings so that definitions come first,
        then RBAC objects, then everything else - this is the order edits will run in.
        """
        existing_defs = []
        existing_rbac_role = []
        existing_rbac_role_permission = []
        existing_rbac_client_role = []
        existing_other = []

        for w in already_existing.warnings: # type: ignore
            item_type, value = w.value_raw # type: ignore
            value = value

            if 'def' in item_type:
                existing = existing_defs
            elif item_type == 'rbac_role':
                existing = existing_rbac_role
            elif item_type == 'rbac_role_permission':
                existing = existing_rbac_role_permission
            elif item_type == 'rbac_client_role':
                existing = existing_rbac_client_role
            else:
                existing = existing_other

            existing.append(w)

        existing_combined:'any_' = existing_defs + existing_rbac_role + existing_rbac_role_permission + \
            existing_rbac_client_role + existing_other

        return existing_combined

# ################################################################################################################################

    def _build_new_objects_to_create_during_import(self, existing_combined:'any_') -> 'any_':
        """ Builds the list of objects to create, ordered dependencies-first, with any object
        that already exists (i.e. will be edited) filtered out of the result.
        """
        # stdlib
        from collections import OrderedDict

        new_defs = []
        new_rbac_role = []
        new_rbac_role_permission = []
        new_rbac_client_role = []
        new_other = []

        # Use an ordered dict to iterate over the data with dependencies first
        self_json = deepcopy(self.json)
        self_json_ordered:'any_' = OrderedDict()

        # All the potential dependencies will be handled in this specific order
        dep_order = [
            'def_sec',
            'basic_auth',
            'apikey',
            'ntlm',
            'oauth',
            'jwt',
            'aws',
            'tls_key_cert',
            'tls_channel_sec',
            'security_groups',

            # 'wss',
            # 'openstack',
            # 'xpath_sec',
            # 'vault_conn_sec',

            'http_soap',
            'web_socket',
            'pubsub_topic',
            'pubsub_endpoint',
        ]

        # Do populate the dependencies first ..
        for dep_name in dep_order:
            self_json_ordered[dep_name] = self_json.get(dep_name, [])

        # .. now, populate everything that is not a dependency.
        for key, value in self_json.items(): # type: ignore
            if key not in dep_order:
                self_json_ordered[key] = value

        for item_type, items in self_json_ordered.items(): # type: ignore

            #
            # Preprocess item type
            #
            item_type = _replace_item_type(True, item_type)

            if self.may_be_dependency(item_type):
                if item_type == 'rbac_role':
                    append_to = new_rbac_role
                elif item_type == 'rbac_role_permission':
                    append_to = new_rbac_role_permission
                elif item_type == 'rbac_client_role':
                    append_to = new_rbac_client_role
                else:
                    append_to = new_defs
            else:
                append_to = new_other

            append_to.append({item_type: items})

        # This is everything new that we know about ..
        new_combined:'any_' = new_defs + new_rbac_role + new_rbac_role_permission + new_rbac_client_role + new_other

        # .. now, go through it once more and filter out elements that we know should be actually edited, not created ..
        to_remove = []

        for new_elem in new_combined:
            for item_type, value_list in new_elem.items():
                for value_dict in value_list:
                    value_dict = value_dict.toDict()
                    for existing_elem in existing_combined:
                        existing_item_type, existing_item = existing_elem.value_raw
                        if item_type == existing_item_type:
                            if value_dict.get('name', _no_value1) == existing_item.get('name', _no_value2):
                                to_remove.append({
                                    'item_type': item_type,
                                    'item': value_dict,
                                })
                                break

        # NOTE(review): value_list.pop(idx) inside an enumerate loop skips the element that follows
        # the popped one - this looks safe only if there is at most one match per list; confirm.
        for elem in to_remove: # type: ignore
            item_type = elem['item_type'] # type: ignore
            item = elem['item'] # type: ignore
            for new_elem in new_combined:
                for new_item_type, value_list in new_elem.items():
                    for idx, value_dict in enumerate(value_list):
                        value_dict = value_dict.toDict()
                        if new_item_type == item_type:
                            if value_dict['name'] == item['name']:
                                value_list.pop(idx)

        return new_combined

# ################################################################################################################################

    def import_objects(self, already_existing) -> 'Results': # type: ignore
        """ The main entry point - edits objects that already exist in ODB, then creates new ones,
        handling Basic Auth and pub/sub objects in bulk and triggering cluster-wide synchronization.
        """
        # stdlib
        from time import sleep

        rbac_sleep = getattr(self.args, 'rbac_sleep', 1)
        rbac_sleep = float(rbac_sleep)

        existing_combined = self._build_existing_objects_to_edit_during_import(already_existing)
        new_combined = self._build_new_objects_to_create_during_import(existing_combined)

        # Extract and load Basic Auth definitions as a whole, before any other updates (edit)
        basic_auth_edit = self._extract_basic_auth(existing_combined, is_edit=True)
        self._import_basic_auth(basic_auth_edit, is_edit=True)

        # Extract and load Basic Auth definitions as a whole, before any other updates (create)
        basic_auth_create = self._extract_basic_auth(new_combined, is_edit=False)
        self._import_basic_auth(basic_auth_create, is_edit=False)

        self._trigger_sync_server_objects(sync_pubsub=False)
        self.object_mgr.refresh_objects()

        for w in existing_combined:
            item_type, attrs = w.value_raw

            if self.should_skip_item(item_type, attrs, True):
                continue

            # Basic Auth definitions have been already handled above (edit)
            if item_type == Sec_Def_Type.BASIC_AUTH:
                continue

            # Skip pub/sub objects because they are handled separately (edit)
            if item_type.startswith('pubsub'):
                continue

            results = self._import(item_type, attrs, True)
            if 'rbac' in item_type:
                sleep(rbac_sleep)
            if results:
                return results

        #
        # Create new objects, again, definitions come first ..
        #

        # A container for pub/sub objects to be handled separately
        pubsub_objects:'strlistdict' = {
            'pubsub_endpoint': [],
            'pubsub_topic': [],
            'pubsub_subscription': [],
        }

        # Extract and load Basic Auth definitions as a whole, before any other updates (create)
        self._trigger_sync_server_objects(sync_pubsub=False)
        self.object_mgr.refresh_objects()

        for elem in new_combined:
            for item_type, attr_list in elem.items():
                for attrs in attr_list:

                    if self.should_skip_item(item_type, attrs, False):
                        continue

                    # Basic Auth definitions have been already handled above (create)
                    if item_type == Sec_Def_Type.BASIC_AUTH:
                        continue

                    # Pub/sub objects are handled separately at the end of this function (create)
                    if item_type.startswith('pubsub'):
                        container = pubsub_objects[item_type]
                        container.append(attrs)
                        continue

                    results = self._import(item_type, attrs, False)
                    if 'rbac' in item_type:
                        sleep(rbac_sleep)
                    if results:
                        return results

        # Handle pub/sub objects as a whole here
        self._import_pubsub_objects(pubsub_objects)

        # Now, having imported all the objects, we can trigger their synchronization among the members of the cluster
        self._trigger_sync_server_objects(sync_security=False)

        return self.results

# ################################################################################################################################

    def _extract_basic_auth(self, data:'any_', *, is_edit:'bool') -> 'dictlist':
        """ Extracts all the Basic Auth definitions from either a list of already-existing warnings
        (is_edit=True) or a list of new-object dicts (is_edit=False), resolving their values.
        """
        out:'dictlist' = []

        if is_edit:
            for item in data:
                value_raw = item.value_raw
                item_type, attrs = value_raw
                if item_type == Sec_Def_Type.BASIC_AUTH:
                    attrs = dict(attrs)
                    attrs = self._resolve_attrs('basic_auth', attrs)
                    out.append(attrs)
        else:
            for item in data:
                if basic_auth := item.get(Sec_Def_Type.BASIC_AUTH):
                    for elem in basic_auth:
                        attrs = dict(elem)
                        attrs = self._resolve_attrs('basic_auth', attrs)
                        out.append(attrs)

        return out

# ################################################################################################################################

    def _swap_service_name(self, required, attrs, first, second): # type: ignore
        """ Copies attrs[second] to attrs[first] if the first key is required and the second is present -
        'service' and 'service_name' are interchangeable on input.
        """
        if first in required and second in attrs:
            attrs[first] = attrs[second]

# ################################################################################################################################

    def _import_object(self, def_type, item, is_edit): # type: ignore
        """ Invokes the create or edit service for a single object, first resolving
        its ID (for edits) and the IDs of any objects it depends on.
        """
        # Python 2/3 compatibility
        from zato.common.ext.future.utils import iteritems

        service_info = SERVICE_BY_NAME[def_type]

        if is_edit:
            service_name:'any_' = service_info.get_service_name('edit')
        else:
            service_name:'any_' = service_info.get_service_name('create')

        # service and service_name are interchangeable
        required:'any_' = service_info.get_required_keys()
        self._swap_service_name(required, item, 'service', 'service_name')
        self._swap_service_name(required, item, 'service_name', 'service')

        # Fetch an item from a cache of ODB objects and assign its ID to item so that the Edit service knows what to update.
        if is_edit:
            lookup_config:'any_' = {'name': item.name}
            if def_type == 'http_soap':
                lookup_config['connection'] = item.connection
                lookup_config['transport'] = item.transport
            odb_item:'any_' = self.object_mgr.find(def_type, lookup_config)
            item.id = odb_item.id

        for field_name, info in iteritems(service_info.object_dependencies): # type: ignore

            # RBAC-secured items do not point to concrete security definitions.
            if item.get('security_id') == 'ZATO_SEC_USE_RBAC':
                continue

            if field_name in _security_fields:
                field_name = resolve_security_field_name(item)

            item_type:'any_' = info['dependent_type']
            dependent_field:'any_' = info['dependent_field']

            if item.get(field_name) != info.get('empty_value') and 'id_field' in info:
                id_field:'any_' = info['id_field']

                if field_name in _security_keys:
                    field_value = try_keys(item, _security_keys)
                else:
                    field_value:'any_' = item[field_name]

                # Ignore explicit indicators of the absence of a security definition
                if field_value != Zato_No_Security:
                    criteria:'any_' = {dependent_field: field_value}
                    dep_obj:'any_' = self.object_mgr.find(item_type, criteria)
                    item[id_field] = dep_obj.id

        if service_name and service_info.name != 'def_sec':

            self.logger.info(f'Invoking -> import -> {service_name} for {service_info.name} ({def_type})')
            response = self.client.invoke(service_name, item)

            if response.ok:
                verb = 'Updated' if is_edit else 'Created'
                self.logger.info('%s object `%s` with %s', verb, item.name, service_name)

            return response

# ################################################################################################################################

    def _maybe_change_password(self, object_id, item_type, attrs): # type: ignore
        """ Invokes the change-password service for the object if one exists for its type
        and a password was given on input, returning the response or None if nothing was done.
        """
        # stdlib
        from time import sleep

        service_info = SERVICE_BY_NAME[item_type]
        service_name:'any_' = service_info.get_service_name('change-password')
        if service_name is None or 'password' not in attrs:
            return None

        response = self.client.invoke(service_name, {
            'id': object_id,
            'password1': attrs.password,
            'password2': attrs.password,
        })

        if response.ok:
            self.logger.info("Updated password for '{}' ({})".format(attrs.name, service_name))

        # Wait for a moment before continuing to let AMQP connectors change their passwords.
        # This is needed because we may want to create channels right after the password
        # has been changed and this requires valid credentials, including the very
        # which is being changed here.
        if item_type == 'def_amqp':
            sleep(5)

        return response
class ObjectManager:
    def __init__(self, client, logger): # type: ignore
        """ Maintains local caches of services and ODB objects on behalf of the importer.
        """
        self.client = client # type: any_
        self.logger = logger # type: Logger
# ################################################################################################################################
def find(self, item_type, fields, *, check_sec=True): # type: ignore
if check_sec:
if item_type == 'def_sec' or item_type in _All_Sec_Def_Types:
return self.find_sec(fields)
# This probably isn't necessary any more:
item_type:'any_' = item_type.replace('-', '_')
objects_by_type:'any_' = self.objects.get(item_type, ())
return find_first(objects_by_type, lambda item: dict_match(item_type, item, fields)) # type: ignore
# ################################################################################################################################
def find_sec(self, fields): # type: ignore
""" Find any security definition with the given name.
"""
for service in SERVICES:
if service.is_security:
item:'any_' = self.find(service.name, fields, check_sec=False)
if item is not None:
return item
# ################################################################################################################################
    def refresh(self):
        """ Refreshes the local caches of both services and ODB objects.
        """
        self.refresh_services()
        self.refresh_objects()
# ################################################################################################################################
def refresh_services(self):
# Bunch
from bunch import Bunch
response:'any_' = self.client.invoke('zato.service.get-list', {
'cluster_id': self.client.cluster_id, # type: ignore
'name_filter': '*'
})
if not response.ok:
raise Exception('Unexpected response; e:{}'.format(response))
if response.has_data:
# Make sure we access the correct part of the response,
# because it may be wrapped in a pagination structure.
data = self.get_data_from_response_data(response.data)
self.services = {service['name']: Bunch(service) for service in data}
# ################################################################################################################################
def fix_up_odb_object(self, item_type, item): # type: ignore
""" For each ODB object, ensure fields that specify a dependency have their associated name field updated
to match the dependent object. Otherwise, ensure the field is set to the corresponding empty value
(either None or Zato_No_Security).
"""
# Python 2/3 compatibility
from zato.common.ext.future.utils import iteritems
normalize_service_name(item)
service_info = SERVICE_BY_NAME[item_type]
if item_type in ('json_rpc', 'http_soap'):
if item['sec_use_rbac'] is True:
item['security_id'] = 'ZATO_SEC_USE_RBAC'
elif item_type == 'json_rpc' and item['security_id'] is None:
item['security_id'] = 'ZATO_NONE'
for field_name, info in iteritems(service_info.object_dependencies): # type: ignore
if 'id_field' not in info:
continue
if not test_item(item, info.get('condition')):
# If the field's condition is false, then just set empty values and stop.
item[field_name] = info.get('empty_value')
item[info['id_field']] = None
continue
dep_id:'any_' = item.get(info['id_field'])
if dep_id is None:
item[field_name] = info.get('empty_value')
continue
dep = self.find(info['dependent_type'], {'id': dep_id})
if (dep_id != 'ZATO_SEC_USE_RBAC') and (field_name != 'sec_name' and dep is None):
if not dep:
msg = 'Dependency not found, name:`{}`, field_name:`{}`, type:`{}`, dep_id:`{}`, dep:`{}`, item:`{}`'
raise Exception(msg.format(service_info.name, field_name, info['dependent_type'], dep_id, dep,
item.toDict()))
else:
item[field_name] = dep[info['dependent_field']]
# JSON-RPC channels cannot have empty security definitions on exports
if item_type == 'http_soap' and item['name'].startswith('json.rpc.channel'):
if not item['security_id']:
item['security_id'] = 'ZATO_NONE'
return item # type: ignore
# ################################################################################################################################
ignored_names = (
ServiceConst.API_Admin_Invoke_Username,
'pubapi',
)
def is_ignored_name(self, item_type, item, is_sec_def): # type: ignore
if 'name' not in item:
return False
name:'any_' = item.name.lower()
# Special-case scheduler jobs that can be overridden by users
if name.startswith('zato.wsx.cleanup'):
return False
if item_type not in {'pubsub_subscription', 'rbac_role_permission'}:
if name in self.ignored_names:
return True
elif 'zato' in name and (not 'unittest' in name):
if is_sec_def:
return False
else:
return True
# ################################################################################################################################
def delete(self, item_type, item): # type: ignore
service_info = SERVICE_BY_NAME[item_type]
service_name:'any_' = service_info.get_service_name('delete')
if service_name is None:
self.logger.error('Prefix {} has no delete service'.format(item_type))
return
response = self.client.invoke(service_name, {
'cluster_id': self.client.cluster_id,
'id': item.id,
})
if response.ok:
self.logger.info('Deleted {} ID {}'.format(item_type, item.id))
else:
self.logger.error('Could not delete {} ID {}: {}'.format(item_type, item.id, response))
# ################################################################################################################################
def delete_all(self):
# Python 2/3 compatibility
from zato.common.ext.future.utils import iteritems
count = 0
for item_type, items in iteritems(self.objects): # type: ignore
for item in items: # type: ignore
self.delete(item_type, item)
count += 1
return count
# ################################################################################################################################
def get_data_from_response_data(self, response_data): # type: ignore
# Generic connections' GetList includes metadata in responses so we need to dig into actual data
if '_meta' in response_data:
keys:'any_' = list(response_data)
keys.remove('_meta')
response_key:'any_' = keys[0]
data:'any_' = response_data[response_key]
else:
data:'any_' = response_data
return data
# ################################################################################################################################
def populate_objects_by_type(self, item_type:'str') -> 'None':
# Ignore artificial objects
if item_type in {'def_sec'}:
return
# Bunch
from bunch import Bunch
# Zato
from zato.common.const import SECRETS
# Python 2/3 compatibility
from zato.common.ext.future.utils import iteritems
from zato.common.py23_.past.builtins import basestring # type: ignore
service_info = SERVICE_BY_NAME[item_type]
# Temporarily preserve function of the old enmasse.
service_name:'any_' = service_info.get_service_name('get-list')
if service_name is None:
self.logger.info('Type `%s` has no `get-list` service (%s)', service_info, item_type)
return
self.logger.debug('Invoking -> getter -> %s for %s (%s)', service_name, service_info.name, item_type)
request = {
'cluster_id': self.client.cluster_id,
}
if service_name == 'zato.http-soap.get-list':
request['needs_security_group_names'] = True
elif service_name == 'zato.groups.get-list':
request['group_type'] = Common_Groups.Type.API_Clients
request['needs_members'] = True
request['needs_short_members'] = True
response = self.client.invoke(service_name, request)
if not response.ok:
self.logger.warning('Could not fetch objects of type {}: {}'.format(service_info.name, response.details))
return
self.objects[service_info.name] = []
if response.has_data:
data = self.get_data_from_response_data(response.data)
# A flag indicating if this service is related to security definitions
is_sec_def = 'zato.security' in service_name
for item in map(Bunch, data):
if self.is_ignored_name(item_type, item, is_sec_def):
continue
# Passwords are always exported in an encrypted form so we need to decrypt them ourselves
for key, value in iteritems(item): # type: ignore
if isinstance(value, basestring):
if value.startswith(SECRETS.PREFIX):
item[key] = None # Enmasse does not export secrets such as passwords or other auth information
self.objects[service_info.name].append(item)
# ################################################################################################################################
def refresh_security_objects(self):
self.refresh_objects(sec_only=True)
# ################################################################################################################################
def refresh_objects(self, *, sec_only:'bool'=False):
# stdlib
from operator import attrgetter
# Bunch
from bunch import Bunch
self.objects = Bunch()
for service_info in sorted(SERVICES, key=attrgetter('name')):
if sec_only:
if not service_info.is_security:
continue
self.populate_objects_by_type(service_info.name)
for item_type, items in self.objects.items(): # type: ignore
for item in items: # type: ignore
self.fix_up_odb_object(item_type, item)
# ################################################################################################################################
# ################################################################################################################################
class JsonCodec:
    """ Reads and writes enmasse data serialized as JSON. """

    # File extension that this codec handles
    extension = '.json'

    def load(self, file_, results): # type: ignore
        """ Deserializes the whole contents of an open JSON file; 'results' is unused here. """
        # Zato
        from zato.common.json_internal import loads
        contents = file_.read()
        return loads(contents)

    def dump(self, file_, object_): # type: ignore
        """ Serializes object_ to JSON and writes it out to an open file. """
        # Zato
        from zato.common.json_internal import dumps
        serialized = dumps(object_, indent=1, sort_keys=True)
        file_.write(serialized)
# ################################################################################################################################
# ################################################################################################################################
class YamlCodec:
    """ Reads and writes enmasse data serialized as YAML, resolving environment-variable
    placeholders embedded in the input on the way in.
    """

    # File extension that this codec handles
    extension = '.yaml'

    def load(self, file_:'any_', results:'any_') -> 'strdict':
        """ Reads an open YAML file, replaces each recognized placeholder with the value
        of the environment variable it names (or a 'Missing_Value_*' marker if the
        variable is not set) and returns the parsed document as a dict.
        """
        # Local imports
        import yaml
        from zato.common.util.config import extract_param_placeholders

        # Read the whole file in as a string
        raw = file_.read()

        # Resolve each placeholder that refers to an environment variable
        for placeholder in extract_param_placeholders(raw):

            # Skip placeholders that are not environment-variable ones
            if zato_enmasse_env2 not in placeholder:
                continue

            # Strip the env prefix and the surrounding delimiter characters
            # to arrive at the environment variable's name.
            var_name = placeholder.replace(zato_enmasse_env2, '')
            var_name = var_name[1:-1]

            # Look the variable up or fall back to a well-known default
            var_value = os.environ.get(var_name, 'Missing_Value_' + var_name)

            # Substitute the value into the document
            raw = raw.replace(placeholder, var_value)

        # Parse and return the resulting document as a dict
        return yaml.load(raw, yaml.FullLoader)

    def dump(self, file_, object_): # type: ignore
        """ Serializes object_ to YAML and writes it out to an open file. """
        # pyaml
        import pyaml
        file_.write(pyaml.dump(object_, vspacing=True))
# ################################################################################################################################
# ################################################################################################################################
class InputParser:
    """ Parses an enmasse input file (YAML or JSON) into self.json, resolving include
    directives, expanding def_sec entries, renaming attributes between new and old
    formats and applying values from ZATO_ENMASSE_* environment variables.
    """
    def __init__(
        self,
        path:'str',
        logger:'Logger',
        codec:'YamlCodec | JsonCodec',
        ignore_missing_includes:'bool',
    ) -> 'None':

        # stdlib
        import os

        # Absolute path to the main input file
        self.path = os.path.abspath(path)
        self.logger = logger
        # Codec matching the input file's serialization format
        self.codec = codec
        # If True, include files that do not exist are skipped instead of reported as errors
        self.ignore_missing_includes = ignore_missing_includes

# ################################################################################################################################

    def _load_file(self, path:'str') -> 'strdict':
        """ Reads and deserializes a single file. YAML syntax errors propagate;
        any other exception is logged and an empty dict is returned instead.
        """
        try:
            with open(path) as f:
                data:'strdict' = self.codec.load(f, None) # type: ignore
        except Exception as e:
            from yaml.error import YAMLError
            if isinstance(e, YAMLError):
                raise
            else:
                self.logger.info('Caught an exception -> %s', e)
                data = {}

        return data

# ################################################################################################################################

    def _parse_file(self, path:'str', results:'Results') -> 'strdict':
        """ Loads the file under 'path' and recursively merges in any files referenced
        by include directives, reporting self-includes and missing includes as errors.
        """

        # First, open the main file ..
        data:'strdict' = self._load_file(path) or {}

        # .. go through all the files that we potentially need to include ..
        for item_type, values in deepcopy(data).items():
            for item in values:

                # .. only include files will be taken into account ..
                if self.is_include(item_type, item):

                    # .. build a full path to the file to be included ..
                    include_path = self._get_full_path(item)

                    if path == include_path:
                        raw = (include_path,)
                        results.add_error(raw, ERROR_INVALID_INPUT, f'Include cannot include itself `{include_path}`', item)
                        continue

                    if not os.path.exists(include_path):
                        if not self.ignore_missing_includes:
                            raw = (include_path,)
                            results.add_error(raw, Error_Include_Not_Found, f'Include not found `{include_path}`', item)
                        continue

                    # .. load the actual contents to be included ..
                    data_to_include = self._parse_file(include_path, results)

                    # .. go through each of the items that the file to be included defines ..
                    for item_type_to_include, values_to_include in data_to_include.items():

                        # .. make sure we append the new data to what we potentially already have ..
                        if item_type_to_include in data:
                            data[item_type_to_include].extend(values_to_include)

                        # .. otherwise, we create a new key for it ..
                        else:
                            data[item_type_to_include] = values_to_include

        # .. remove any potential include section from further processing ..
        _ = data.pop(ModuleCtx.Item_Type_Include, None)

        # .. now, we are ready to return the whole data set to our caller.
        return data

# ################################################################################################################################

    def _get_full_path(self, include_path:'str') -> 'str':
        """ Resolves an include path (possibly with a file:// prefix) relative
        to the directory of the main input file.
        """

        # stdlib
        import os

        curdir = os.path.dirname(self.path)
        joined = os.path.join(curdir, include_path.replace('file://', '')) # type: ignore
        return os.path.abspath(joined)

# ################################################################################################################################

    def is_include(self, item_type:'str', item:'str | strdict') -> 'bool':
        # An include is a plain string under the dedicated include item type
        return item_type == ModuleCtx.Item_Type_Include and isinstance(item, str)

# ################################################################################################################################

    def parse_def_sec(self, item:'strdict', results:'Results') -> 'None':
        """ Expands an old-style def_sec entry into its concrete security type,
        appending it both under that type and under 'def_sec' in self.json.
        """

        # Bunch
        from bunch import Bunch

        # While reading old enmasse files, expand def_sec entries out to their original service type.
        sec_type = item.pop('type', None)
        if sec_type is None:
            raw = ('def_sec', item)
            results.add_error(
                raw, ERROR_TYPE_MISSING, "security definition '{}' has no required 'type' key (def_sec)", item)
            return

        service_names = [elem.name for elem in SERVICES if elem.is_security]
        service_names.extend(ModuleCtx.Extra_Security_Types)

        if sec_type not in service_names:
            raw = (sec_type, service_names, item)
            results.add_error(raw, ERROR_INVALID_SEC_DEF_TYPE,
                "Invalid type '{}', must be one of '{}' (def_sec)", sec_type, service_names)
            return

        self.json.setdefault(sec_type, []).append(Bunch(item))
        self.json.setdefault('def_sec', []).append(Bunch(item))

# ################################################################################################################################

    def parse_item(self, item_type:'str', item:'str | strdict', results:'Results') -> 'None':
        """ Stores one parsed definition under its type in self.json,
        dispatching def_sec entries to parse_def_sec.
        """

        # Bunch
        from bunch import Bunch

        if item_type == 'def_sec':
            self.parse_def_sec(cast_('strdict', item), results)
        else:
            items:'any_' = self.json.get(item_type) or []
            _:'any_' = items.append(Bunch(cast_('strdict', item)))
            self.json[item_type] = items

# ################################################################################################################################

    def _maybe_fixup_http_soap(self, original_item_type:'str', item:'strdict') -> 'str':
        """ Maps an http-soap subtype (e.g. channel/outgoing variants) onto the single
        'http_soap' type, populating the item's connection and transport attributes.
        Returns the (possibly unchanged) item type.
        """
        # Preserve old format by merging http-soap subtypes into one.
        for item_type, connection, transport in HTTP_SOAP_KINDS:
            if item_type == original_item_type:
                item['connection'] = connection
                item['transport'] = transport
                return 'http_soap'

        return original_item_type

# ################################################################################################################################

    def _is_item_type_recognized(self, item_type:'str') -> 'bool':
        """ Returns True if item_type is one of the known element types that enmasse can process. """
        if item_type == ModuleCtx.Item_Type_Include:
            return True
        elif item_type in ModuleCtx.Enmasse_Type:
            return True
        elif item_type in ModuleCtx.Enmasse_Item_Type_Name_Map_Reverse:
            return True
        elif item_type in SERVICE_BY_NAME:
            return True
        elif item_type in HTTP_SOAP_ITEM_TYPES:
            return True
        else:
            return False

# ################################################################################################################################

    def parse_items(self, data:'strdict', results:'Results') -> 'None':
        """ Walks the whole input dict, normalizing and storing each definition
        and reporting unknown element types as errors.
        """

        # Python 2/3 compatibility
        from zato.common.ext.future.utils import iteritems

        for item_type, items in iteritems(data):

            if not self._is_item_type_recognized(item_type):
                raw = (item_type,)
                results.add_error(raw, ERROR_UNKNOWN_ELEM, 'Ignoring unknown element type {} in the input.', item_type)
                continue

            for item in items:
                current_item_type = item_type
                if isinstance(item, dict):
                    current_item_type = self._maybe_fixup_http_soap(item_type, item)
                    normalize_service_name(item)
                self.parse_item(current_item_type, item, results)

# ################################################################################################################################

    def _parse_env_key(self, key:'str') -> 'EnvKeyData':
        """ Turns an environment variable's name into an EnvKeyData business object,
        i.e. a (definition type, object name, attribute name) triple.
        """

        # Our response to produce
        out = EnvKeyData()

        # .. remove non-business information first ..
        key = key.replace(zato_enmasse_env_value_prefix, '')

        # .. turn double underscores into dots that shells do not allow ..
        key = key.replace('__', '.')

        # .. now, we know that we have components separated by underscores ..
        key_split = key.split('_')

        # .. we expect for these three components to exist in this order ..
        def_type = key_split[0]
        name = key_split[1]
        attr_name = key_split[2]

        # .. populate the response ..
        out.def_type = def_type
        out.name = name
        out.attr_name = attr_name

        # .. now, we can return the result.
        return out

# ################################################################################################################################

    def _extract_config_from_env(self, env:'strstrdict') -> 'list_[EnvKeyData]':
        """ Converts a dict of enmasse-specific environment variables into a list
        of EnvKeyData objects, each carrying its variable's value.
        """

        # Our response to produce
        out:'list_[EnvKeyData]' = []

        # First pass, through environment variables as they were defined ..
        for key in env.keys():

            # . this is the value, to be used as it is ..
            value = env.pop(key)

            # .. the key needs to be transformed into a business object ..
            env_key_data = self._parse_env_key(key)

            # .. enrich the business object with the actual value ..
            env_key_data.attr_value = value

            # .. make use of it ..
            out.append(env_key_data)

        # .. now, we can return the result to our caller.
        return out

# ################################################################################################################################

    def _pre_process_input_before_import(self, data:'strdict') -> 'strdict':
        """ Normalizes the raw input dict before importing: converts simple wrapper
        definitions into generic connections, renames attributes and values between
        new and old formats, fills in type-specific defaults, and applies values
        read from ZATO_ENMASSE_* environment variables.
        """

        # Get all environment variables that we may potentially use ..
        env = deepcopy(os.environ)

        # .. remove any variables that are not ours ..
        for key in list(env):
            if not key.startswith(zato_enmasse_env_value_prefix):
                _ = env.pop(key)

        # .. turn it into a config dict ..
        env_config = self._extract_config_from_env(cast_('strdict', env))

        # .. this can be built upfront in case it is needed ..
        if not 'zato_generic_connection' in data:
            data['zato_generic_connection'] = []

        # .. turn out simple definitions into generic ones if this is applicable ..
        for new_name, old_name in ModuleCtx.Enmasse_Item_Type_Name_Map_Reverse.items():

            # .. this should be a generic connection ..
            if old_name.startswith('zato_generic_connection'):

                # .. extract its type ..
                wrapper_type = old_name.replace('zato_generic_connection_', '')

                # .. pop a list of such connections to process ..
                value_list = data.pop(new_name, [])

                # .. go through each of them ..
                for value in value_list:

                    # .. populate the type ..
                    value['type_'] = wrapper_type

                    # .. populate wrapper type-specific attributes ..
                    if wrapper_type == outconn_wsx:
                        if not 'is_outconn' in value:
                            value['is_outconn'] = True
                        if not 'is_channel' in value:
                            value['is_channel'] = False
                        if not 'is_internal' in value:
                            value['is_internal'] = False
                        if not 'pool_size' in value:
                            value['pool_size'] = 1
                        if not 'sec_use_rbac' in value:
                            value['sec_use_rbac'] = False
                        if not 'is_zato' in value:
                            value['is_zato'] = False
                        if not 'data_format' in value:
                            value['data_format'] = Data_Format.JSON
                        if not 'has_auto_reconnect' in value:
                            value['has_auto_reconnect'] = True
                        if not 'security_def' in value:
                            value['security_def'] = Zato_None

                    elif wrapper_type == outconn_ldap:

                        # .. passwords are to be turned into secrets ..
                        if password := value.pop('password', None):
                            value['secret'] = password

                        value['is_outconn'] = True
                        value['is_channel'] = False

                        if not 'auto_bind' in value:
                            value['auto_bind'] = COMMON_LDAP.AUTO_BIND.DEFAULT.id
                        if not 'connect_timeout' in value:
                            value['connect_timeout'] = COMMON_LDAP.DEFAULT.CONNECT_TIMEOUT
                        if not 'get_info' in value:
                            value['get_info'] = COMMON_LDAP.GET_INFO.SCHEMA.id
                        if not 'ip_mode' in value:
                            value['ip_mode'] = COMMON_LDAP.IP_MODE.IP_SYSTEM_DEFAULT.id
                        if not 'is_internal' in value:
                            value['is_internal'] = False
                        if not 'is_read_only' in value:
                            value['is_read_only'] = False
                        if not 'is_stats_enabled' in value:
                            value['is_stats_enabled'] = False
                        if not 'is_tls_enabled' in value:
                            value['is_tls_enabled'] = False
                        if not 'pool_exhaust_timeout' in value:
                            value['pool_exhaust_timeout'] = COMMON_LDAP.DEFAULT.POOL_EXHAUST_TIMEOUT
                        if not 'pool_ha_strategy' in value:
                            value['pool_ha_strategy'] = COMMON_LDAP.POOL_HA_STRATEGY.ROUND_ROBIN.id
                        if not 'pool_keep_alive' in value:
                            value['pool_keep_alive'] = COMMON_LDAP.DEFAULT.POOL_KEEP_ALIVE
                        if not 'pool_lifetime' in value:
                            value['pool_lifetime'] = COMMON_LDAP.DEFAULT.POOL_LIFETIME
                        if not 'pool_max_cycles' in value:
                            value['pool_max_cycles'] = COMMON_LDAP.DEFAULT.POOL_MAX_CYCLES
                        if not 'pool_name' in value:
                            value['pool_name'] = ''
                        if not 'pool_size' in value:
                            value['pool_size'] = COMMON_LDAP.DEFAULT.POOL_SIZE
                        if not 'sasl_mechanism' in value:
                            value['sasl_mechanism'] = ''
                        if not 'sec_use_rbac' in value:
                            value['sec_use_rbac'] = False
                        if not 'should_check_names' in value:
                            value['should_check_names'] = False
                        if not 'should_log_messages' in value:
                            value['should_log_messages'] = False
                        if not 'should_return_empty_attrs' in value:
                            value['should_return_empty_attrs'] = True
                        if not 'tls_ciphers' in value:
                            value['tls_ciphers'] = COMMON_TLS.DEFAULT.CIPHERS
                        if not 'tls_private_key_file' in value:
                            value['tls_private_key_file'] = ''
                        if not 'tls_validate' in value:
                            value['tls_validate'] = COMMON_TLS.CERT_VALIDATE.CERT_REQUIRED.id
                        if not 'tls_version' in value:
                            value['tls_version'] = COMMON_TLS.DEFAULT.VERSION
                        if not 'use_auto_range' in value:
                            value['use_auto_range'] = True
                        if not 'use_tls' in value:
                            value['use_tls'] = False

                    # .. finally, we can append it for later use ..
                    _ = data['zato_generic_connection'].append(value)

        # Preprocess all items ..
        for item_type, items in data.items():

            # Remove IDs from all the generic connections ..
            if item_type == 'zato_generic_connection':
                for item in items:
                    _ = item.pop('id', None)

            # For values that need to be renamed ..
            attr_list_value_rename_reverse = ModuleCtx.Enmasse_Attr_List_Value_Rename_Reverse.get(item_type) or {}

            # .. optionally, rename selected values ..
            for attr_name, value_map_list in attr_list_value_rename_reverse.items():
                for value_map in value_map_list:
                    for item in items:
                        if value := item.get(attr_name, NotGiven):
                            if value is not NotGiven:
                                if value in value_map:
                                    new_value = value_map[value]
                                    item[attr_name] = new_value

        # .. add values for attributes that are optional ..
        for def_type, items in data.items():

            # .. reusable ..
            is_generic_connection = def_type == 'zato_generic_connection'

            # .. replace new names with old ones but only for specific types ..
            if item_type_name_map_reverse_by_type := ModuleCtx.Enmasse_Item_Type_Name_Map_Reverse_By_Type.get(def_type):

                # .. go through each item that will potentially have the old name ..
                for item in items:

                    # .. go through each of the names to be replaced ..
                    for name_dict in item_type_name_map_reverse_by_type:
                        for new_name, old_name in name_dict.items():

                            # .. check if the old name is given on input ..
                            if new_name_value := item.get(new_name, NotGiven):

                                # .. we enter here if the old name exists ..
                                if new_name_value is not NotGiven:

                                    # .. if we are here, we know we can swap the names ..
                                    item[old_name] = item.pop(new_name)

            # .. go through each definition ..
            for item in items:

                # .. this could be an include directive which we can skip here ..
                if not isinstance(item, dict):
                    continue

                # .. add type hints ..
                item = cast_('strdict', item)

                # .. what configuration to look up depends on whether it's a generic connection or not ..
                if is_generic_connection:
                    wrapper_type = item['type_']
                    by_type_key = f'{def_type}_{wrapper_type}'
                else:
                    by_type_key = def_type

                # .. for attributes that should be populated if they do not exist ..
                attr_list_default_by_type = ModuleCtx.Enmasse_Attr_List_Default_By_Type.get(by_type_key) or {}

                # .. everything is active unless it is configured not to be ..
                if not 'is_active' in item:
                    item['is_active'] = True

                # .. add default attributes if they do not exist ..
                for default_key, default_value in attr_list_default_by_type.items():
                    if default_key not in item:
                        item[default_key] = default_value

                # .. populate attributes based on environment variables ..
                for env_key_data in env_config:

                    # .. we need to match the type of the object ..
                    if def_type == env_key_data.def_type:

                        # .. as well as its name ..
                        if item.get('name') == env_key_data.name:

                            # .. if we do have a match, we can populate the value of an attribute ..
                            item[env_key_data.attr_name] = env_key_data.attr_value

        # .. potentially replace new names that are on input with what the server expects (old names) ..
        for new_name, old_name in ModuleCtx.Enmasse_Item_Type_Name_Map_Reverse.items():
            value = data.pop(new_name, None) or None
            if value is not None:
                data[old_name] = value

        return data

# ################################################################################################################################

    def parse(self):
        """ Top-level entry point - parses the input file, pre-processes its contents
        and populates self.json, returning a Results object with any warnings or errors.
        """

        # A business object representing the results of an import .
        results = Results()

        # .. this is where the actual data is kept ..
        self.json = {}

        # .. extract a basic dict ..
        data = self._parse_file(cast_('str', self.path), results) # type: ignore

        # .. pre-process its contents ..
        data = self._pre_process_input_before_import(data)

        if not results.ok:
            return results

        self.parse_items(data, results)
        return results
# ################################################################################################################################
# ################################################################################################################################
class Enmasse(ManageCommand):
""" Manages server objects en masse.
"""
opts:'dictlist' = [
{'name':'--server-url', 'help':'URL of the server that enmasse should talk to, provided in host[:port] format. Defaults to server.conf\'s \'gunicorn_bind\''}, # noqa: E501
{'name':'--export-local', 'help':'Export local file definitions into one file (can be used with --export)', 'action':'store_true'},
{'name':'--export', 'help':'Export server objects to a file (can be used with --export-local)', 'action':'store_true'},
{'name':'--export-odb', 'help':'Same as --export', 'action':'store_true'},
{'name':'--output', 'help':'Path to a file to export data to', 'action':'store'},
{'name':'--include-type', 'help':'A list of definition types to include in an export', 'action':'store', 'default':'all'},
{'name':'--include-name', 'help':'Only objects containing any of the names provided will be exported', 'action':'store', 'default':'all'},
{'name':'--import', 'help':'Import definitions from a local file (excludes --export-*)', 'action':'store_true'},
{'name':'--clean-odb', 'help':'Delete all ODB definitions before proceeding', 'action':'store_true'},
{'name':'--format', 'help':'Select output format ("json" or "yaml")', 'choices':('json', 'yaml'), 'default':'yaml'},
{'name':'--dump-format', 'help':'Same as --format', 'choices':('json', 'yaml'), 'default':'yaml'},
{'name':'--ignore-missing-defs', 'help':'Ignore missing definitions when exporting to file', 'action':'store_true'},
{'name':'--ignore-missing-includes', 'help':'Ignore include files that do not exist', 'action':'store_true'},
{'name':'--exit-on-missing-file', 'help':'If input file does not exist, exit with status code 0', 'action':'store_true'},
{'name':'--replace', 'help':'Force replacing already server objects during import', 'action':'store_true'},
{'name':'--replace-odb-objects', 'help':'Same as --replace', 'action':'store_true'},
{'name':'--input', 'help':'Path to input file with objects to import'},
{'name':'--initial-wait-time', 'help':'How many seconds to initially wait for a server', 'default':ModuleCtx.Initial_Wait_Time},
{'name':'--missing-wait-time', 'help':'How many seconds to wait for missing objects', 'default':ModuleCtx.Missing_Wait_Time},
{'name':'--env-file', 'help':'Path to an .ini file with environment variables'},
{'name':'--rbac-sleep', 'help':'How many seconds to sleep for after creating an RBAC object', 'default':'1'},
{'name':'--cols-width', 'help':'A list of columns width to use for the table output, default: {}'.format(DEFAULT_COLS_WIDTH), 'action':'store_true'},
]
CODEC_BY_EXTENSION:'strdict' = {
'json': JsonCodec,
'yaml': YamlCodec,
'yml': YamlCodec,
}
# ################################################################################################################################
    def _on_server(self, args:'any_') -> 'None':
        """ Main entry point invoked by the CLI framework - validates the options given
        on the command line, connects to the server and runs the requested combination
        of export, import and clean-up tasks.
        """

        # stdlib
        import os
        import sys
        from time import sleep

        # Bunch
        from bunch import Bunch

        # Zato
        from zato.cli.check_config import CheckConfig
        from zato.common.util.api import get_client_from_server_conf
        from zato.common.util.env import populate_environment_from_file

        # Local aliases
        input_path:'strnone' = None
        output_path:'strnone' = None

        # NOTE(review): this reads self.args before the reassignment below - presumably
        # self.args is set earlier by the base command class; confirm.
        exit_on_missing_file = getattr(self.args, 'exit_on_missing_file', True)

        self.args = args
        self.curdir = os.path.abspath(self.original_dir)
        self.json = {}

        # 'import' is a keyword so it cannot be read as an attribute directly
        has_import = getattr(args, 'import', False)

        # For type hints
        self.missing_wait_time:'int' = getattr(self.args, 'missing_wait_time', None) or ModuleCtx.Missing_Wait_Time
        self.missing_wait_time = int(self.missing_wait_time)

        # Assume False unless it is overridden later on
        self.is_import = False
        self.is_export = False

        # Whether we should include files that do not exist
        self.ignore_missing_includes = getattr(self.args, 'ignore_missing_includes', False)

        # Initialize environment variables ..
        env_path = self.normalize_path('env_file', exit_if_missing=False)
        _ = populate_environment_from_file(env_path)

        self.replace_objects:'bool' = True
        self.export_odb:'bool' = getattr(args, 'export', False) or getattr(args, 'export_odb', False)

        # .. make sure the input file path is correct ..
        if args.export_local or has_import:
            input_path = self.normalize_path('input', exit_if_missing=exit_on_missing_file, log_if_missing=True)

        # .. make sure the output file path is correct ..
        if args.output:
            output_path = self.normalize_path(
                'output',
                exit_if_missing=True,
                needs_parent_dir=True,
                log_if_missing=True,
            )

        # .. the output serialization format. Not used for input ..
        format:'str' = args.format or args.dump_format
        self.codec = self.CODEC_BY_EXTENSION[format]()

        #
        # Tasks and scenarios
        #
        # 1) Export all local JSON files into one (--export-local)
        # 2) Export all definitions from ODB (--export-odb)
        # 3) Export all local JSON files with ODB definitions merged into one (--export-local --export-odb):
        # -> 4) Import definitions from a local JSON file (--import)
        #    4a) bail out if local JSON overrides any from ODB (no --replace-odb-objects)
        #    4b) override whatever is found in ODB with values from JSON (--replace-odb-objects)
        #

        # Fall back to the default wait time if the option cannot be parsed as a float
        try:
            initial_wait_time = float(args.initial_wait_time)
        except Exception:
            initial_wait_time = ModuleCtx.Initial_Wait_Time

        # Get the client object, waiting until the server is started ..
        self.client = get_client_from_server_conf(self.component_dir, initial_wait_time=initial_wait_time) # type: ignore

        # .. just to be on the safe side, optionally wait a bit more
        initial_wait_time = os.environ.get('ZATO_ENMASSE_INITIAL_WAIT_TIME')
        if initial_wait_time:
            initial_wait_time = int(initial_wait_time)
            self.logger.warning('Sleeping for %s s', initial_wait_time)
            sleep(initial_wait_time)

        self.object_mgr = ObjectManager(self.client, self.logger)
        self.client.invoke('zato.ping')
        populate_services_from_apispec(self.client, self.logger)

        if True not in (args.export_local, self.export_odb, args.clean_odb, has_import):
            self.logger.error('At least one of --clean, --export-local, --export-odb or --import is required, stopping now')
            sys.exit(self.SYS_ERROR.NO_OPTIONS)

        # Populate the flags for our users
        if has_import:
            self.is_import = True

        if args.export_local or self.export_odb:
            self.is_export = True

        if args.clean_odb:
            self.object_mgr.refresh()
            count = self.object_mgr.delete_all()
            self.logger.info('Deleted {} items'.format(count))

        if self.export_odb or has_import:

            # Checks if connections to ODB/Redis are configured properly
            cc = CheckConfig(self.args)
            cc.show_output = False
            cc.execute(Bunch(path='.'))

            # Get back to the directory we started in so following commands start afresh as well
            os.chdir(self.curdir)

        # Imports and export are mutually excluding
        if has_import and (args.export_local or self.export_odb):
            self.logger.error('Cannot specify import and export options at the same time, stopping now')
            sys.exit(self.SYS_ERROR.CONFLICTING_OPTIONS)

        if args.export_local or has_import:
            self.load_input(input_path)

        # .. extract the include lists used to export objects ..
        include_type = getattr(args, 'include_type', '')
        include_name = getattr(args, 'include_name', '')

        include_type = self._extract_include(include_type)
        include_name = self._extract_include(include_name)

        # 3)
        if args.export_local and self.export_odb:
            _ = self.report_warnings_errors(self.export_local_odb())
            self.write_output(output_path, include_type, include_name)

        # 1)
        elif args.export_local:
            _ = self.report_warnings_errors(self.export())
            self.write_output(output_path, include_type, include_name)

        # 2)
        elif self.export_odb:
            if self.report_warnings_errors(self.run_odb_export()):
                self.write_output(output_path, include_type, include_name)

        # 4) a/b
        elif has_import:
            warnings_errors_list = self.run_import()
            _ = self.report_warnings_errors(warnings_errors_list)
# ################################################################################################################################
def load_input(self, input_path): # type: ignore
    """ Parses the input file found under input_path and stores the result in self.json.
    Exits the process if the file's extension is unrecognized or parsing fails.
    """
    # stdlib
    import sys

    # Pick a codec based on the input file's extension ..
    _, _, extension = self.args.input.rpartition('.')
    codec_class = self.CODEC_BY_EXTENSION.get(extension.lower())

    # .. reject extensions that we do not know ..
    if codec_class is None:
        known = ', '.join(sorted(self.CODEC_BY_EXTENSION))
        self.logger.error('Unrecognized file extension "{}": must be one of {}'.format(extension.lower(), known))
        sys.exit(self.SYS_ERROR.INVALID_INPUT)

    # .. parse the input, reporting any warnings or errors found ..
    parser = InputParser(input_path, self.logger, codec_class(), self.ignore_missing_includes)
    results = parser.parse()

    if not results.ok:
        self.logger.error('Input parsing failed')
        _ = self.report_warnings_errors([results])
        sys.exit(self.SYS_ERROR.INVALID_INPUT)

    # .. keep the parsed data for later processing.
    self.json = parser.json
# ################################################################################################################################
def normalize_path(
    self,
    arg_name, # type: str
    *,
    exit_if_missing, # type: bool
    needs_parent_dir=False, # type: bool
    log_if_missing=False, # type: bool
) -> 'str':
    """ Resolves the CLI argument named arg_name into an absolute path.

    The value is read from self.args, has '~' expanded and, when relative, is anchored
    at self.curdir. Depending on the flags, either the path itself or its parent
    directory must exist; if it does not and exit_if_missing is True, the process
    exits (optionally logging the missing path first).
    """
    # stdlib
    import sys

    # Local aliases
    path_to_check:'str' = ''

    # Read the underlying argument, defaulting to an empty string if it is missing
    arg_param = getattr(self.args, arg_name, None) or ''

    # Potentially, expand the path to our home directory ..
    # .. (kept under its own name - previously this clobbered the arg_name parameter
    # .. and the unexpanded arg_param was then used in the join below) ..
    arg_value = os.path.expanduser(arg_param)

    # Turn the name into a full path unless it already is one ..
    if os.path.isabs(arg_value):
        arg_path = arg_value
    else:
        arg_path = os.path.join(self.curdir, arg_value)
        arg_path = os.path.abspath(arg_path)

    # .. we need for a directory to exist ..
    if needs_parent_dir:
        path_to_check = os.path.join(arg_path, '..')
        path_to_check = os.path.abspath(path_to_check)

    # .. or for the actual file to exist ..
    else:
        path_to_check = arg_path

    # .. make sure that it does exist ..
    if not os.path.exists(path_to_check):

        # .. optionally, exit the process if it does not ..
        if exit_if_missing:
            if log_if_missing:
                self.logger.info(f'Path not found: `{path_to_check}`')
            sys.exit()

    # .. if we are here, it means that we have a valid, absolute path to return ..
    return arg_path
# ################################################################################################################################
def _extract_include(self, include_type:'str') -> 'strlist': # type: ignore
    """ Turns a comma-separated string of include types into a normalized list.

    Elements are stripped and lower-cased. Empty input, or input that contains
    the 'all' marker, yields [ModuleCtx.Include_Type.All].
    """
    # Split the input and normalize each element, discarding empty ones ..
    # .. empty elements must be discarded explicitly because ''.split(',') returns [''],
    # .. which previously made the empty-input guard below unreachable.
    out:'strlist' = [item.strip().lower() for item in include_type.split(',')]
    out = [item for item in out if item]

    # .. ignore explicit types if all types are to be returned ..
    if ModuleCtx.Include_Type.All in out:
        out = [ModuleCtx.Include_Type.All]

    # .. if we do not have anything, it means that we are including all types ..
    if not out:
        out = [ModuleCtx.Include_Type.All]

    # .. now, we are ready to return our response.
    return out
# ################################################################################################################################
def _should_write_type_to_output(
    self,
    item_type, # type: str
    item, # type: strdict
    include_type, # type: strlist
) -> 'bool':
    """ Returns True if item_type maps, via ModuleCtx.Enmasse_Type, to at least one
    include type that was requested on input, False otherwise.
    """
    # Get an include type that matches our item type ..
    enmasse_include_type = ModuleCtx.Enmasse_Type.get(item_type)

    # .. if there is no match, it means that we do not write it on output ..
    # .. (this must run before the list-wrapping below - otherwise a None lookup
    # .. result becomes the truthy [None] and this guard can never fire) ..
    if not enmasse_include_type:
        return False

    # .. make sure that we have a list to iterate over ..
    if not isinstance(enmasse_include_type, (list, tuple)):
        enmasse_include_type = [enmasse_include_type]

    # .. the item is written on output if any of its mapped types was requested on input.
    return any(elem in include_type for elem in enmasse_include_type)
# ################################################################################################################################
def _should_write_name_to_output(
    self,
    item_type, # type: str
    item_name, # type: str
    include_name, # type: strlist
) -> 'bool':
    """ Returns True if item_name matches any of the name patterns given on input,
    False if none of them matches (in which case the item is not exported).
    """
    # The item should be written to output if any input pattern
    # is a substring of this item's name.
    return any(pattern in item_name for pattern in include_name)
# ################################################################################################################################
def _preprocess_item_attrs_during_export(
    self,
    attr_key, # type: str
    item_type, # type: str
    item, # type: strdict
) -> 'strdict':
    """ Applies the per-type attribute rules from ModuleCtx to a single item being exported:
    include/exclude filtering, attribute and value renames, reshaping values into lists or
    multi-line strings, and dropping attributes whose values match the configured skip rules.
    The item is modified in place and returned.

    NOTE(review): several rules below use `if value := item.pop(...)` - when the popped value
    is falsy (0, '', False, []), the outer `if` is not entered and the attribute is silently
    dropped rather than renamed/reshaped/restored. Confirm this is intended.
    """
    # Check if there is an explicit list of include attributes to return for the type ..
    attr_list_include = ModuleCtx.Enmasse_Attr_List_Include.get(attr_key) or []

    # .. as above, for attributes that are explicitly configured to be excluded ..
    attr_list_exclude = ModuleCtx.Enmasse_Attr_List_Exclude.get(attr_key) or []

    # .. as above, for attributes that need to be renamed ..
    attr_list_rename = ModuleCtx.Enmasse_Attr_List_Rename.get(attr_key) or {}

    # .. as above, for values that need to be renamed ..
    attr_list_value_rename = ModuleCtx.Enmasse_Attr_List_Value_Rename.get(attr_key) or {}

    # .. as above, for attributes that need to be turned into a list ..
    attr_list_as_list = ModuleCtx.Enmasse_Attr_List_As_List.get(attr_key) or []

    # .. as above, for attributes that need to be turned into multi-line strings ..
    attr_list_as_multiline = ModuleCtx.Enmasse_Attr_List_As_Multiline.get(attr_key) or []

    # .. as above, for attributes that should be always skipped ..
    attr_list_skip_always = ModuleCtx.Enmasse_Attr_List_Skip_Always.get(attr_key) or []

    # .. as above, for attributes that should be skipped if they are empty ..
    attr_list_skip_if_empty = ModuleCtx.Enmasse_Attr_List_Skip_If_Empty.get(attr_key) or []

    # .. as above, for attributes that should be skipped if their value is True ..
    attr_list_skip_if_true = ModuleCtx.Enmasse_Attr_List_Skip_If_True.get(attr_key) or []

    # .. as above, for attributes that should be skipped if their value is False ..
    attr_list_skip_if_false = ModuleCtx.Enmasse_Attr_List_Skip_If_False.get(attr_key) or []

    # .. as above, for attributes that should be skipped if they have a specific value ..
    attr_list_skip_if_value_matches = ModuleCtx.Enmasse_Attr_List_Skip_If_Value_Matches.get(attr_key) or {}

    # .. as above, for attributes that should be skipped if other values match ..
    attr_list_skip_if_other_value_matches = ModuleCtx.Enmasse_Attr_List_Skip_If_Other_Value_Matches.get(attr_key) or {}

    # .. to make sure the dictionary does not change during iteration ..
    item_copy = deepcopy(item)

    # .. we enter here if there is anything to be explicitly processed ..
    if attr_list_include or attr_list_exclude:

        # .. go through everything that we have ..
        for attr in item_copy:

            # .. remove from the item that we are returning any attr that is not to be included
            if attr_list_include:
                if attr not in attr_list_include:
                    _ = item.pop(attr, None)

            # .. remove any attribute that is explictly configured to be excluded ..
            if attr_list_exclude:
                if attr in attr_list_exclude:
                    _ = item.pop(attr, None)

    # .. optionally, rename selected attributes ..
    # .. NOTE(review): a falsy value is popped here but never stored under new_name ..
    for old_name, new_name in attr_list_rename.items():
        if value := item.pop(old_name, NotGiven):
            if value is not NotGiven:
                item[new_name] = value

    # .. optionally, rename selected values ..
    for attr_name, value_map_list in attr_list_value_rename.items():
        for value_map in value_map_list:
            if value := item.get(attr_name, NotGiven):
                if value is not NotGiven:
                    if value in value_map:
                        new_value = value_map[value]
                        item[attr_name] = new_value

    # .. optionally, turn selected attributes into lists ..
    for attr in attr_list_as_list:
        if value := item.pop(attr, NotGiven):
            if value is not NotGiven:
                # A string becomes a list of its lines; lists are passed through
                if isinstance(value, str):
                    value = value.splitlines()
                value.sort()
                item[attr] = value

    # .. optionally, turn selected attributes into multi-line string objects ..
    for attr in attr_list_as_multiline:
        if value := item.pop(attr, NotGiven):
            if value is not NotGiven:
                if isinstance(value, str):
                    value = value.splitlines()
                value = '\n'.join(value)
                item[attr] = value

    # .. optionally, certain attributes will be always skipped ..
    for attr in attr_list_skip_always:
        _ = item.pop(attr, NotGiven)

    # .. optionally, skip empty attributes ..
    for attr in attr_list_skip_if_empty:
        if value := item.pop(attr, NotGiven):
            if value is not NotGiven:
                if value:
                    item[attr] = value

    # .. optionally, skip True attributes ..
    for attr in attr_list_skip_if_true:
        if value := item.pop(attr, NotGiven):
            if value is not True:
                if value:
                    item[attr] = value

    # .. optionally, skip False attributes ..
    for attr in attr_list_skip_if_false:
        if value := item.pop(attr, NotGiven):
            if value is not False:
                if value:
                    item[attr] = value

    # .. optionally, skip attributes that match configuration ..
    for pattern_key, pattern_value in attr_list_skip_if_value_matches.items():
        if value := item.pop(pattern_key, NotGiven): # type: ignore
            if value != pattern_value:
                item[pattern_key] = value

    # .. optionally, skip attributes if other attributes have a specific value ..
    if attr_list_skip_if_other_value_matches:
        for config_dict in attr_list_skip_if_other_value_matches:
            criteria = config_dict['criteria']
            attrs_to_skip = config_dict['attrs']
            for criterion in criteria:
                for criterion_key, criterion_value in criterion.items():
                    item_value = item.get(criterion_key, NotGiven)
                    if item_value is not NotGiven:
                        if item_value == criterion_value:
                            for attr in attrs_to_skip:
                                _ = item.pop(attr, None)

    # .. ID's are never returned ..
    _ = item.pop('id', None)

    # .. service ID's are never returned ..
    _ = item.pop('service_id', None)

    # .. the is_active flag is never returned if it has its default value, which is True ..
    if item.get('is_active') is True:
        _ = item.pop('is_active', None)

    # .. names of security definitions attached to an object are also skipped if they are the default ones ..
    if item.get('security_name') in ('', None):
        _ = item.pop('security_name', None)

    # .. the data format of REST objects defaults to JSON which is why we do not return it, unless it is different ..
    if item_type in {'channel_plain_http', 'outconn_plain_http', 'zato_generic_rest_wrapper'}:
        if item.get('data_format') == Data_Format.JSON:
            _ = item.pop('data_format', None)

    return item
# ################################################################################################################################
def _sort_item_attrs(
    self,
    attr_key, # type: str
    item, # type: strdict
) -> 'strdict':
    """ Reorders the item's attributes so that the ones preferred for this attr_key,
    per ModuleCtx.Enmasse_Attr_List_Sort_Order, appear first.
    """
    # stdlib
    from collections import OrderedDict

    # An OrderedDict lets us move attributes around
    ordered = OrderedDict(item)

    # Push each preferred attribute to the front; the preference list is reversed
    # because each move_to_end(last=False) pushes to the front, stack-style.
    preference = ModuleCtx.Enmasse_Attr_List_Sort_Order.get(attr_key) or []
    for attr in reversed(preference):
        if attr in ordered:
            ordered.move_to_end(attr, last=False)

    return ordered
# ################################################################################################################################
def _should_write_to_output(
    self,
    item_type, # type: str
    item, # type: strdict
    include_type, # type: strlist
    include_name, # type: strlist
) -> 'bool':
    """ Decides whether a given item is to be serialized to the export output,
    based on the requested include types and name patterns.
    """
    # Extract the name that filtering of internal objects is based on
    name:'str' = item['endpoint_name'] if item_type == 'pubsub_subscription' else item['name']

    # Certain internal objects should never be exported ..
    if item_type == 'def_sec':

        # .. do not write RBAC definitions ..
        if 'rbac' in item['type']:
            return False

        # .. do not write internal definitions ..
        if has_name_zato_prefix(name):
            return False

    # Explicit include filters apply only when we are not exporting everything
    has_all_types = ModuleCtx.Include_Type.All in include_type
    has_all_names = ModuleCtx.Include_Type.All in include_name

    has_type = not has_all_types
    has_name = not has_all_names

    # Match by type, if specific types were given ..
    out_by_type = self._should_write_type_to_output(item_type, item, include_type) if has_type else False

    # .. match by name, if specific names were given ..
    if has_name:
        item_name = (item.get('name') or '').lower()
        out_by_name = self._should_write_name_to_output(item_type, item_name, include_name)
    else:
        out_by_name = False

    # Both filters given -> and-join them; a single filter -> use it alone;
    # no filters at all -> the item is written by default.
    if has_type and has_name:
        return out_by_type and out_by_name
    if has_type:
        return out_by_type
    if has_name:
        return out_by_name
    return True
# ################################################################################################################################
def _rewrite_pubsub_subscriptions_during_export(self, subs:'dictlist') -> 'dictlist':
    """ Groups individual pub/sub subscriptions by their endpoint and delivery metadata,
    collapsing the topics of each group into a single, sorted topic_list.
    """
    # Bunch
    from bunch import Bunch, bunchify

    # Subscriptions grouped by their composite key
    grouped:'anydict' = {}

    #
    # Subscriptions are grouped by these keys:
    #
    # - Endpoint name
    # - Delivery method
    # - REST method
    # - REST connection
    # - Delivery server
    #
    for elem in subs:

        # .. for dot attribute access ..
        elem = Bunch(elem)

        # .. the composite key that grouping is based on ..
        group_key:'any_' = (elem.endpoint_name, elem.delivery_method, elem.rest_method, elem.rest_connection, elem.delivery_server)

        # .. start a new group if this key has not been seen yet ..
        if group_key not in grouped:
            grouped[group_key] = bunchify({
                'endpoint_name': elem.get('endpoint_name'),
                'endpoint_type': elem.get('endpoint_type'),
                'delivery_method': elem.get('delivery_method'),
                'rest_method': elem.get('rest_method'),
                'rest_connection': elem.get('rest_connection'),
                'service_name': elem.get('service_name'),
                'delivery_server': elem.get('delivery_server'),
                'topic_list': []
            })

        # .. accumulate this subscription's topic in its group ..
        grouped[group_key].topic_list.append(elem.topic_name)

    # Discard the keys, keeping the subscriptions alone, each with a generated name
    out:'dictlist' = []

    for idx, group in enumerate(grouped.values(), 1):

        # .. return topics sorted alphabetically ..
        group.topic_list.sort()

        # .. make sure we are returning a dict ..
        group = group.toDict()

        # .. each subscription needs a name ..
        group['name'] = 'Subscription.' + str(idx).zfill(9)

        # .. append it for our caller ..
        out.append(group)

    return out
# ################################################################################################################################
def write_output(
    self,
    output_path, # type: strnone
    include_type, # type: strlist
    include_name, # type: strlist
) -> 'None':
    """ Serializes the collected export data (self.json) to an output file.

    The data is filtered by the requested include types and names, item attributes are
    preprocessed and sorted, generic connections are unwrapped into their own top-level
    keys, and the result is written either to output_path or to an auto-named
    zato-export-* file in the current directory.
    """
    # stdlib
    import os
    import re
    from datetime import datetime
    from collections import OrderedDict

    # Bunch
    from zato.bunch import debunchify

    # Python 2/3 compatibility
    from zato.common.ext.future.utils import iteritems

    # Local aliases
    to_write:'strdict' = {}

    # Make a copy and remove Bunch; pyaml does not like Bunch instances.
    output:'strdict' = debunchify(self.json)
    output = deepcopy(output)

    # Preserve old format by splitting out particular types of http-soap.
    for item in output.pop('http_soap', []):
        for item_type, connection, transport in HTTP_SOAP_KINDS:
            if item['connection'] == connection and item['transport'] == transport:
                output.setdefault(item_type, []).append(item)

    # Preserve old format by wrapping security services into one key.
    output['def_sec'] = []
    for service_info in SERVICES:
        if service_info.is_security:
            output['def_sec'].extend(
                dict(item, type=service_info.name)
                for item in output.pop(service_info.name, [])
            )

    # .. rewrite pub/sub subscriptions in a way that is easier to handle ..
    if subs := output.get('pubsub_subscription'):
        output['pubsub_subscription'] = self._rewrite_pubsub_subscriptions_during_export(subs)

    # .. go through everything that we collected in earlier steps in the process ..
    for item_type, items in iteritems(output): # type: ignore

        # .. reusable ..
        is_generic_connection = item_type == 'zato_generic_connection'

        # .. add type hints ..
        items = cast_('dictlist', items)

        # .. this is a new list of items to write ..
        # .. based on the list from output ..
        to_write_items:'dictlist' = []

        # .. now, go through each item in the original output ..
        for item in items:

            # .. add type hints ..
            item = cast_('strdict', item)
            item = deepcopy(item)

            # .. normalize attributes ..
            normalize_service_name(item)

            # .. make sure we want to write this item on output ..
            if not self._should_write_to_output(item_type, item, include_type, include_name):
                continue

            # .. this is required because generic connections are differentiated ..
            # .. by their embedded 'type_' attribute, rather than by item_type itself ..
            if is_generic_connection:
                wrapper_type = item['type_']
                attr_key = f'{item_type}_{wrapper_type}'

                # .. make sure to filter out include types embedded in generic connections, ..
                # .. but first, confirm if we are not to return all the types ..
                if not ModuleCtx.Include_Type.All in include_type:

                    # .. if we are here, it means that we need to check the actual include type ..
                    # .. which will be equal to the generic connection's wrapper type ..
                    if not wrapper_type in include_type:
                        continue
            else:
                attr_key = item_type

            # .. this will rename or remove any attributes from this item that we do not need ..
            item = self._preprocess_item_attrs_during_export(attr_key, item_type, item)

            # .. sort the attributes in the order we want them to appear in the output file ..
            item = self._sort_item_attrs(attr_key, item)

            # .. if we are here, it means that we want to include this item on output ..
            to_write_items.append(item)

        # .. sort item lists to be written ..
        to_write_items.sort(key=lambda item: item.get('name', '').lower())

        # .. now, append this new list to what is to be written ..
        # .. but only if there is anything to be written for that type ..
        if to_write_items:
            to_write[item_type] = to_write_items

    # .. replace non-generic connection item type names ..
    for old_name, new_name in ModuleCtx.Enmasse_Item_Type_Name_Map.items():
        value = to_write.pop(old_name, None)
        if value:
            to_write[new_name] = value

    # .. now, replace generic connection types which are more involved ..
    new_names:'any_' = {
        'outgoing_ldap': [],
        'outgoing_wsx': [],
    }

    for old_name, value_list in to_write.items():
        value_list = cast_('anylist', value_list)
        if old_name == 'zato_generic_connection':

            # .. rebuild the list instead of popping by index while enumerating it, ..
            # .. which used to skip the element immediately following each extracted one ..
            remaining:'anylist' = []

            for value in value_list:
                if wrapper_type := value.get('type_'):
                    attr_key = f'{old_name}_{wrapper_type}'
                    if new_name := ModuleCtx.Enmasse_Item_Type_Name_Map.get(attr_key):
                        _ = value.pop('type_')
                        new_names[new_name].append(value)
                        continue
                remaining.append(value)

            # .. replace the contents in place so that to_write still sees the same list object ..
            value_list[:] = remaining

    # .. append the new names extracted from generic connections to what we need to write ..
    for new_name, value_list in new_names.items():
        if value_list:
            to_write[new_name] = value_list

    # .. if there are no generic connections left at this point, this key can be deleted ..
    if not to_write.get('zato_generic_connection'):
        _ = to_write.pop('zato_generic_connection', None)

    # .. this lets us move individual keys around ..
    to_write = OrderedDict(to_write)

    # .. certain keys should be stored in a specific order at the head of the output ..
    key_order = reversed([
        'security_groups',
        'security',
        'channel_rest',
        'outgoing_rest',
    ])

    # .. do move the keys now, in the order specified above ..
    for key in key_order:
        if key in to_write:
            to_write.move_to_end(key, last=False)

    # .. if we have the name of a file to use, do use it ..
    if output_path:
        name = output_path

    # .. otherwise, use a new file ..
    else:
        now = datetime.now().isoformat() # Not in UTC, we want to use user's TZ
        name = 'zato-export-{}{}'.format(re.sub('[.:]', '_', now), self.codec.extension)

    with open(os.path.join(self.curdir, name), 'w') as f:
        self.codec.dump(f, to_write)

    self.logger.info('Data exported to {}'.format(f.name))
# ################################################################################################################################
def get_warnings_errors(self, items): # type: ignore
    """ Flattens the warnings and errors of all result items into one dict,
    returning a tuple of (warn_err_dict, warning_count, error_count).
    """
    warn_err = {}
    warn_count = 0
    error_count = 0

    for item in items: # type: ignore

        # Warnings are numbered 1-based across all items
        for warning in item.warnings: # type: ignore
            warn_count += 1
            key = 'warn{:04}/{} {}'.format(warn_count, warning.code.symbol, warning.code.desc)
            warn_err[key] = warning.value

        # .. and so are errors, independently of warnings
        for error in item.errors: # type: ignore
            error_count += 1
            key = 'err{:04}/{} {}'.format(error_count, error.code.symbol, error.code.desc)
            warn_err[key] = error.value

    return warn_err, warn_count, error_count # type: ignore
# ################################################################################################################################
def report_warnings_errors(self, items): # type: ignore
    """ Logs a table of any warnings and errors found in items.
    Returns True if there were none, otherwise returns None after logging them.
    """
    # stdlib
    import logging

    warn_err, warn_no, error_no = self.get_warnings_errors(items)
    table = self.get_table(warn_err)

    # Nothing to report - signal success to the caller
    if not (warn_no or error_no):
        return True

    # Errors are logged at a higher severity than warnings alone
    level = logging.ERROR if error_no else logging.WARN

    warn_plural = '' if warn_no == 1 else 's' # type: ignore
    error_plural = '' if error_no == 1 else 's' # type: ignore

    prefix = '{} warning{} and {} error{} found:\n'.format(warn_no, warn_plural, error_no, error_plural)
    self.logger.log(level, prefix + table.draw()) # type: ignore
# ################################################################################################################################
def get_table(self, out): # type: ignore
    """ Builds a two-column Key/Value texttable out of the given dict, sorted by key. """
    # texttable
    import texttable

    # Python 2/3 compatibility
    from zato.common.ext.future.utils import iteritems

    # Column widths come from the command line or fall back to the defaults
    cols_width = self.args.cols_width if getattr(self.args, 'cols_width', None) else DEFAULT_COLS_WIDTH
    cols_width = [int(elem.strip()) for elem in cols_width.split(',')]

    table = texttable.Texttable()
    _ = table.set_cols_width(cols_width)

    # Use text ('t') instead of auto so that boolean values don't get converted into ints
    _ = table.set_cols_dtype(['t', 't'])

    # The header row followed by the data rows, sorted by key
    rows = [['Key', 'Value']]
    rows.extend(sorted(iteritems(out)))
    _ = table.add_rows(rows)

    return table
# ################################################################################################################################
def merge_odb_json(self):
    """ Merges self.json into a deep copy of the objects read from ODB, replacing
    ODB objects that share a name with their JSON counterparts.

    Returns a Results instance; self.json is replaced with the merged data only
    if no errors were reported.
    """
    # stdlib
    import copy

    # Python 2/3 compatibility
    from zato.common.ext.future.utils import iteritems

    results = Results()
    merged = copy.deepcopy(self.object_mgr.objects)

    for json_key, json_elems in iteritems(self.json): # type: ignore

        # All HTTP/SOAP-related JSON keys map to a single ODB key
        if 'http' in json_key or 'soap' in json_key:
            odb_key = 'http_soap'
        else:
            odb_key:'any_' = json_key

        if odb_key not in merged:
            sorted_merged:'any_' = sorted(merged)
            raw:'any_' = (json_key, odb_key, sorted_merged)
            results.add_error(raw, ERROR_INVALID_KEY, "JSON key '{}' not one of '{}'", odb_key, sorted_merged)
        else:
            for json_elem in json_elems: # type: ignore
                if 'http' in json_key or 'soap' in json_key:
                    connection, transport = json_key.split('_', 1)
                    connection = 'outgoing' if connection == 'outconn' else connection

                    # Iterate over a copy - removing from a list while iterating it
                    # directly would skip the element following each removed one.
                    for odb_elem in list(merged.http_soap): # type: ignore
                        if odb_elem.get('transport') == transport and odb_elem.get('connection') == connection:
                            if odb_elem.name == json_elem.name:
                                merged.http_soap.remove(odb_elem)
                else:
                    # As above, iterate over a copy so removals do not skip elements
                    for odb_elem in list(merged[odb_key]): # type: ignore
                        if odb_elem.name == json_elem.name:
                            merged[odb_key].remove(odb_elem)

                merged[odb_key].append(json_elem)

    if results.ok:
        self.json = merged
    return results
# ################################################################################################################################
def export(self):
    """ Validates self.json prior to export, returning a list of Results describing
    any problems found - an empty list means the validation succeeded.
    """
    # Find any definitions that are missing
    dep_scanner = DependencyScanner(self.json, self.is_import, self.is_export, ignore_missing=self.args.ignore_missing_defs)
    missing_defs = dep_scanner.scan()

    if not missing_defs.ok:
        self.logger.error('Failed to find all definitions needed')
        return [missing_defs]

    # Validate if every required input element has been specified.
    validation = InputValidator(self.json).validate()

    if not validation.ok:
        self.logger.error('Required elements missing')
        return [validation]

    # No issues found
    return [] # type: ignore
# ################################################################################################################################
def export_local_odb(self, needs_local=True): # type: ignore
    """ Refreshes objects from ODB, merges them with the local JSON data and runs
    the export validation. Returns a list of Results, empty on success.
    """
    self.object_mgr.refresh()
    self.logger.info('ODB objects read')

    merge_results = self.merge_odb_json()
    if not merge_results.ok:
        return [merge_results]

    self.logger.info('ODB objects merged in')
    return self.export()
# ################################################################################################################################
def run_odb_export(self):
    """ Exports objects from the ODB only, without merging in any local export file. """
    return self.export_local_odb(False)
# ################################################################################################################################
def _get_missing_objects(self, warnings_errors:'list_[Results]') -> 'strlist':
    """ Extracts from the given warnings/errors the names of services reported as
    missing, returned sorted alphabetically.
    """
    names:'strlist' = []

    for result in warnings_errors:
        for elem in chain(result.warnings, result.errors): # type: ignore
            elem = cast_('Notice', elem)

            # Only missing-service notices carry a service name to report
            if elem.code == ERROR_SERVICE_MISSING:
                enmasse_elem:'stranydict' = elem.value_raw[1]
                names.append(enmasse_elem['service'])

    # Return everything we have found to our caller, sorted alphabetically
    return sorted(names)
# ################################################################################################################################
def run_import(self) -> 'anylist':
    """ Runs the import, re-trying for up to self.missing_wait_time seconds for as long
    as the only problems reported are missing objects that may yet appear (e.g. services
    still being deployed). Returns the final list of warnings/errors, empty on success.
    """
    # When we are to stop waiting for missing objects
    deadline = datetime.utcnow() + timedelta(seconds=self.missing_wait_time)

    # Run the initial import ..
    warnings_errors = self._run_import()

    # .. keep re-trying for as long as there are problems that waiting can fix ..
    while warnings_errors:

        # .. if we have already waited enough, return what we have ..
        now = datetime.utcnow()
        if now > deadline:
            break

        # .. check whether the problems are missing objects worth waiting for ..
        missing = self._get_missing_objects(warnings_errors)

        # .. if nothing is missing, waiting will not help, so return what we have ..
        if not missing:
            break

        # .. report how much longer we are going to wait ..
        wait_delta = deadline - now
        msg = f'Enmasse waiting; timeout -> {wait_delta}; missing -> {missing}'
        self.logger.info(msg)

        # .. wait a moment and re-run the import ..
        sleep(2)
        warnings_errors = self._run_import()

    # Either we ran out of time or we have succeeded - in either case, we can return.
    return warnings_errors
# ################################################################################################################################
def _run_import(self) -> 'anylist':
    """ Executes a single import pass, returning a list of Results with any
    warnings/errors found - an empty list means the import succeeded.
    """
    # Make sure we have the latest state of information ..
    self.object_mgr.refresh()

    # .. build an object that will import the definitions ..
    importer = ObjectImporter(self.client, self.logger, self.object_mgr, self.json, # type: ignore
        self.is_import, self.is_export, ignore_missing=self.args.ignore_missing_defs, args=self.args)

    # .. reject input that references services which do not exist ..
    validation = importer.validate_import_data()
    if not validation.ok:
        return [validation]

    # .. check for objects that already exist in ODB ..
    already_existing = importer.find_already_existing_odb_objects()
    if not already_existing.ok and not self.replace_objects:
        return [already_existing]

    # .. run the actual import ..
    import_results = importer.import_objects(already_existing)
    if not import_results.ok:
        return [import_results]

    # .. success - nothing to report.
    return []
# ################################################################################################################################
if __name__ == '__main__':

    # This is a manual-testing entry point; regular usage goes through the zato CLI.

    # stdlib
    import sys

    # Bunch
    from bunch import Bunch

    # Minimal set of arguments that Enmasse expects
    args = Bunch()
    args.verbose = True
    args.store_log = False
    args.store_config = False
    args.format = 'yaml'
    args.export_local = False
    args.export_odb = False
    args.clean_odb = False
    args.ignore_missing_defs = False
    args.output = None
    args.rbac_sleep = 1

    # Uncomment to exercise the import/replace paths instead of export
    # args['replace'] = True
    # args['import'] = True
    args['export'] = True

    # Server path comes from the command line
    args.path = sys.argv[1]

    # NOTE(review): this membership test checks Bunch keys, so the input file argument
    # is read only when the 'import' key above is uncommented - confirm this is intended.
    args.input = sys.argv[2] if 'import' in args else ''

    enmasse = Enmasse(args)
    enmasse.run(args)
| 157,720
|
Python
|
.py
| 3,138
| 39.24283
| 180
| 0.503322
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,447
|
create_scheduler.py
|
zatosource_zato/code/zato-cli/src/zato/cli/create_scheduler.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2024, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import os
from copy import deepcopy
from dataclasses import dataclass
# Bunch
from bunch import Bunch
# Zato
from zato.cli import common_odb_opts, common_scheduler_server_address_opts, common_scheduler_server_api_client_opts, \
sql_conf_contents, ZatoCommand
from zato.common.api import SCHEDULER
from zato.common.const import ServiceConst
from zato.common.crypto.api import SchedulerCryptoManager
from zato.common.crypto.const import well_known_data
from zato.common.odb.model import Cluster
from zato.common.scheduler import startup_jobs
from zato.common.util.config import get_scheduler_api_client_for_server_auth_required, \
get_scheduler_api_client_for_server_password, get_scheduler_api_client_for_server_username
from zato.common.util.open_ import open_w
from zato.common.util.platform_ import is_linux
# ################################################################################################################################
# ################################################################################################################################
# Imports used only for type hints; the `if 0` guard keeps them out of runtime
if 0:
    from argparse import Namespace
    from zato.common.typing_ import any_, anydict, strdict
    Namespace = Namespace
# ################################################################################################################################
# Template of the scheduler's scheduler.conf file; the placeholders are filled in
# with values collected by the Create command below.
config_template = """[bind]
host={scheduler_bind_host}
port={scheduler_bind_port}
[cluster]
id=1
stats_enabled=False
[server]
server_path={server_path}
server_host={server_host}
server_port={server_port}
server_username={server_username}
server_password={server_password}
server_use_tls={server_use_tls}
server_tls_verify=False
server_prefer_odb_config={server_prefer_odb_config}
[misc]
initial_sleep_time={initial_sleep_time}
[odb]
engine={odb_engine}
db_name={odb_db_name}
host={odb_host}
port={odb_port}
username={odb_username}
password={odb_password}
pool_size=1
extra=
use_async_driver=True
is_active=True
[secret_keys]
key1={secret_key1}
[crypto]
well_known_data={well_known_data}
use_tls={tls_use}
tls_version={tls_version}
tls_ciphers={tls_ciphers}
tls_client_certs={tls_client_certs}
priv_key_location={tls_priv_key_location}
pub_key_location={tls_pub_key_location}
cert_location={tls_cert_location}
ca_certs_location={tls_ca_certs_location}
[api_clients]
auth_required={scheduler_api_client_for_server_auth_required}
{scheduler_api_client_for_server_username}={scheduler_api_client_for_server_password}
[command_pause]
[command_resume]
[command_set_server]
"""
# ################################################################################################################################
# ################################################################################################################################
@dataclass(init=False)
class ServerConfigForScheduler:
    """ Server-related configuration that a new scheduler instance is created with. """

    # Address and filesystem details of the server the scheduler talks to
    server_host: 'str'
    server_port: 'int'
    server_path: 'str'
    server_use_tls: 'bool'

    # Whether the server must authenticate itself when calling the scheduler
    is_auth_from_server_required: 'bool'

    # Nested namespaces with API credentials for each direction of communication
    class api_client:

        class from_server_to_scheduler:
            username: 'str'
            password: 'str'

        class from_scheduler_to_server:
            username: 'str' = ServiceConst.API_Admin_Invoke_Username
            password: 'str'
# ################################################################################################################################
class Create(ZatoCommand):
    """ Creates a new scheduler instance.
    """
    needs_empty_dir = True

    # Redis options are no longer used but they are kept here for pre-3.2 backward compatibility
    opts:'any_' = deepcopy(common_odb_opts)

    opts.append({'name':'--pub-key-path', 'help':'Path to scheduler\'s public key in PEM'})
    opts.append({'name':'--priv-key-path', 'help':'Path to scheduler\'s private key in PEM'})
    opts.append({'name':'--cert-path', 'help':'Path to the admin\'s certificate in PEM'})
    opts.append({'name':'--ca-certs-path', 'help':'Path to a bundle of CA certificates to be trusted'})
    opts.append({'name':'--cluster-name', 'help':'Name of the cluster this scheduler will belong to'})
    opts.append({'name':'--cluster-id', 'help':'ID of the cluster this scheduler will belong to'})
    opts.append({'name':'--secret-key', 'help':'Scheduler\'s secret crypto key'})
    opts.append({'name':'--server-path', 'help':'Local path to a Zato server'})
    opts.append({'name':'--server-host', 'help':'Deprecated. Use --server-address-for-scheduler instead.'})
    opts.append({'name':'--server-port', 'help':'Deprecated. Use --server-address-for-scheduler instead.'})
    opts.append({'name':'--server-username', 'help':'Deprecated. Use --server-api-client-for-scheduler-username'})
    opts.append({'name':'--server-password', 'help':'Deprecated. Use --server-api-client-for-scheduler-password'})
    opts.append({'name':'--bind-host', 'help':'Local address to start the scheduler on'})
    opts.append({'name':'--bind-port', 'help':'Local TCP port to start the scheduler on'})
    opts.append({'name':'--tls-enabled', 'help':'Whether the scheduler should use TLS'})
    opts.append({'name':'--tls-version', 'help':'What TLS version to use'})
    opts.append({'name':'--tls-ciphers', 'help':'What TLS ciphers to use'})
    opts.append({'name':'--tls-client-certs', 'help':'Whether TLS client certificates are required or optional'})
    opts.append({'name':'--tls-priv-key-location', 'help':'Scheduler\'s private key location'})
    opts.append({'name':'--tls-pub-key-location', 'help':'Scheduler\'s public key location'})
    opts.append({'name':'--tls-cert', 'help':'Scheduler\'s certificate location'})
    opts.append({'name':'--tls-ca-certs', 'help':'Scheduler\'s CA certificates location'})
    opts.append({'name':'--initial-sleep-time', 'help':'How many seconds to sleep initially when the scheduler starts'})

    # Shared options for pointing the scheduler to its server and for API credentials
    opts += deepcopy(common_scheduler_server_address_opts)
    opts += deepcopy(common_scheduler_server_api_client_opts)

# ################################################################################################################################

    def __init__(self, args:'any_') -> 'None':
        # Resolve the target path up front - later code calls os.chdir, so relative paths would break
        self.target_dir = os.path.abspath(args.path)
        super(Create, self).__init__(args)

# ################################################################################################################################

    def allow_empty_secrets(self):
        # Secrets may be left empty when creating a scheduler instance
        return True

# ################################################################################################################################

    def _get_cluster_id(self, args:'any_') -> 'any_':
        """ Returns the ID of the cluster defined in ODB - with multiple clusters, the lowest ID wins.
        """
        engine = self._get_engine(args)
        session = self._get_session(engine) # type: ignore

        cluster_id_list = session.query(Cluster.id).all() # type: ignore

        if not cluster_id_list:
            raise Exception('No cluster found in `{}`'.format(args))
        else:
            # Each element is a one-element row tuple, hence the [0][0] below
            cluster_id_list.sort()
            return cluster_id_list[0][0] # type: ignore

# ################################################################################################################################

    def _get_server_admin_invoke_credentials(self, cm:'SchedulerCryptoManager', odb_config:'anydict') -> 'any_':
        """ Looks up in ODB the username/password pair the scheduler should use to invoke its server.
        """
        # Zato
        from zato.common.util.api import get_server_client_auth

        # Build a minimal, Bunch-based configuration object in the shape get_server_client_auth expects
        _config = Bunch()
        _config_odb = Bunch()
        _config.odb = _config_odb

        _config_odb.engine = odb_config['odb_engine']
        _config_odb.username = odb_config['odb_username']
        _config_odb.password = odb_config['odb_password']
        _config_odb.host = odb_config['odb_host']
        _config_odb.port = odb_config['odb_port']
        _config_odb.db_name = odb_config['odb_db_name']

        server_username, server_password = get_server_client_auth(_config, None, cm, True)
        return server_username, server_password

# ################################################################################################################################

    def _get_server_config(self, args:'any_', cm:'SchedulerCryptoManager', odb_config:'strdict') -> 'ServerConfigForScheduler':
        """ Builds a ServerConfigForScheduler object out of CLI arguments, falling back to ODB for credentials.
        """
        # Our response to produce
        out = ServerConfigForScheduler()

        server_path = self.get_arg('server_path') or ''
        server_host = self.get_arg('server_host', '127.0.0.1')
        server_port = self.get_arg('server_port', 17010)

        # Credentials the server will use when it invokes the scheduler
        scheduler_api_client_for_server_auth_required = get_scheduler_api_client_for_server_auth_required(args)
        scheduler_api_client_for_server_username = get_scheduler_api_client_for_server_username(args)
        scheduler_api_client_for_server_password = get_scheduler_api_client_for_server_password(args, cm)

        out.server_path = server_path
        out.server_host = server_host
        out.server_port = server_port
        out.is_auth_from_server_required = scheduler_api_client_for_server_auth_required # type: ignore
        out.api_client.from_server_to_scheduler.username = scheduler_api_client_for_server_username
        out.api_client.from_server_to_scheduler.password = scheduler_api_client_for_server_password

        # Handle both ..
        server_password = self.get_arg('server_password')
        server_api_client_for_scheduler_password = self.get_arg('server_api_client_for_scheduler_password')

        # .. but prefer the latter ..
        server_api_client_for_scheduler_password = server_password or server_api_client_for_scheduler_password

        # .. it still may be empty ..
        if not server_api_client_for_scheduler_password:

            # .. in which case, we look it up in the database ..
            _, server_api_client_for_scheduler_password = self._get_server_admin_invoke_credentials(cm, odb_config)

        # .. note that the username is always the same and we only set the password
        out.api_client.from_scheduler_to_server.password = server_api_client_for_scheduler_password

        # Extract basic information about the scheduler the server will be invoking ..
        server_use_tls, server_host, server_port = self._extract_address_data(
            args,
            'server_address_for_scheduler',
            'server_host',
            'server_port',
            '127.0.0.1',
            17010,
        )

        out.server_use_tls = server_use_tls
        out.server_host = server_host
        out.server_port = server_port

        # .. finally, return the response to our caller.
        return out

# ################################################################################################################################

    def execute(self, args:'Namespace', show_output:'bool'=True, needs_created_flag:'bool'=False):
        """ Creates the scheduler's directory layout and writes out all of its configuration files.
        """
        # Zato
        from zato.common.util.logging_ import get_logging_conf_contents

        # Navigate to the directory that the component will be created in.
        os.chdir(self.target_dir)

        # Paths of the configuration files to be written out below
        repo_dir = os.path.join(self.target_dir, 'config', 'repo')
        conf_path = os.path.join(repo_dir, 'scheduler.conf')
        startup_jobs_conf_path = os.path.join(repo_dir, 'startup_jobs.conf')
        sql_conf_path = os.path.join(repo_dir, 'sql.conf')

        os.mkdir(os.path.join(self.target_dir, 'logs'))
        os.mkdir(os.path.join(self.target_dir, 'config'))
        os.mkdir(repo_dir)

        self.copy_scheduler_crypto(repo_dir, args)

        # Args may be dict-like or an argparse Namespace - support both
        if hasattr(args, 'get'):
            secret_key = args.get('secret_key')
        else:
            secret_key = args.secret_key

        # Generate a new secret key if none was given on input
        secret_key = secret_key or SchedulerCryptoManager.generate_key()
        cm = SchedulerCryptoManager.from_secret_key(secret_key)

        odb_engine=args.odb_type
        if odb_engine.startswith('postgresql'):
            odb_engine = 'postgresql+pg8000'

        # There will be always one cluster in the database.
        cluster_id = self._get_cluster_id(args)

        # We need to have a reference to it before we encrypt it later on.
        odb_password = args.odb_password or ''
        odb_password = odb_password.encode('utf8')
        odb_password = cm.encrypt(odb_password, needs_str=True)

        # Collect ODB configuration in one place as it will be reusable further below.
        odb_config:'strdict' = {
            'odb_engine': odb_engine,
            'odb_password': odb_password,
            'odb_db_name': args.odb_db_name or args.sqlite_path,
            'odb_host': args.odb_host or '',
            'odb_port': args.odb_port or '',
            'odb_username': args.odb_user or '',
        }

        server_config = self._get_server_config(args, cm, odb_config)

        initial_sleep_time = self.get_arg('initial_sleep_time', SCHEDULER.InitialSleepTime)
        scheduler_bind_host = self.get_arg('bind_host', SCHEDULER.DefaultBindHost)
        scheduler_bind_port = self.get_arg('bind_port', SCHEDULER.DefaultBindPort)

        # Encrypted well-known data lets the component verify its secret key later on
        zato_well_known_data = well_known_data.encode('utf8')
        zato_well_known_data = cm.encrypt(zato_well_known_data, needs_str=True)

        # TLS defaults differ per platform
        if is_linux:
            tls_version = SCHEDULER.TLS_Version_Default_Linux
            tls_ciphers = SCHEDULER.TLS_Ciphers_13
        else:
            tls_version = SCHEDULER.TLS_Version_Default_Windows
            tls_ciphers = SCHEDULER.TLS_Ciphers_12

        tls_use = self.get_arg('tls_enabled', SCHEDULER.TLS_Enabled)
        tls_client_certs = self.get_arg('tls_client_certs', SCHEDULER.TLS_Client_Certs)
        priv_key_location = self.get_arg('priv_key_location', SCHEDULER.TLS_Private_Key_Location)
        pub_key_location = self.get_arg('pub_key_location', SCHEDULER.TLS_Public_Key_Location)
        cert_location = self.get_arg('cert_location', SCHEDULER.TLS_Cert_Location)
        ca_certs_location = self.get_arg('ca_certs_location', SCHEDULER.TLS_CA_Certs_Key_Location)

        # CLI arguments may override the platform defaults computed above
        tls_version = self.get_arg('tls_version', tls_version)
        tls_ciphers = self.get_arg('tls_ciphers', tls_ciphers)

        # The secret key is written out to a text file, hence the decoding
        if isinstance(secret_key, (bytes, bytearray)):
            secret_key = secret_key.decode('utf8')

        # If a server address was provided on input, it means that we prefer direct communication ..
        if self.get_arg('server_address_for_scheduler'):
            server_prefer_odb_config = False

        # .. otherwise, we look up the server connection details in ODB.
        else:
            server_prefer_odb_config = False # Set it to True for pre-3.2 backward compatibility

        # All the values interpolated into config_template further below
        config:'strdict' = {
            'scheduler_api_client_for_server_auth_required': server_config.is_auth_from_server_required,
            'scheduler_api_client_for_server_username': server_config.api_client.from_server_to_scheduler.username,
            'scheduler_api_client_for_server_password': server_config.api_client.from_server_to_scheduler.password,
            'cluster_id': cluster_id,
            'secret_key1': secret_key,
            'well_known_data': zato_well_known_data,
            'server_path': server_config.server_path,
            'server_host': server_config.server_host,
            'server_port': server_config.server_port,
            'server_use_tls': server_config.server_use_tls,
            'server_username': server_config.api_client.from_scheduler_to_server.username,
            'server_password': server_config.api_client.from_scheduler_to_server.password,
            'server_prefer_odb_config': server_prefer_odb_config,
            'initial_sleep_time': initial_sleep_time,
            'scheduler_bind_host': scheduler_bind_host,
            'scheduler_bind_port': scheduler_bind_port,
            'tls_use': tls_use,
            'tls_version': tls_version,
            'tls_ciphers': tls_ciphers,
            'tls_client_certs': tls_client_certs,
            'tls_priv_key_location': priv_key_location,
            'tls_pub_key_location': pub_key_location,
            'tls_cert_location': cert_location,
            'tls_ca_certs_location': ca_certs_location,
        }
        config.update(odb_config)

        logging_conf_contents = get_logging_conf_contents()

        # Write out all the configuration files
        _ = open_w(os.path.join(repo_dir, 'logging.conf')).write(logging_conf_contents)
        _ = open_w(conf_path).write(config_template.format(**config))
        _ = open_w(startup_jobs_conf_path).write(startup_jobs)
        _ = open_w(sql_conf_path).write(sql_conf_contents)

        # Initial info
        self.store_initial_info(self.target_dir, self.COMPONENTS.SCHEDULER.code)

        if show_output:
            if self.verbose:
                msg = """Successfully created a scheduler instance.
You can start it with the 'zato start {path}' command.""".format(path=os.path.abspath(os.path.join(os.getcwd(), self.target_dir)))
                self.logger.debug(msg)
            else:
                self.logger.info('OK')

        # We return it only when told to explicitly so when the command runs from CLI
        # it doesn't return a non-zero exit code.
        if needs_created_flag:
            return True
# ################################################################################################################################
# ################################################################################################################################
| 17,656
|
Python
|
.py
| 314
| 48.968153
| 134
| 0.590569
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,448
|
start.py
|
zatosource_zato/code/zato-cli/src/zato/cli/start.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import os
from shutil import copy as shutil_copy
from zipfile import ZipFile
# Zato
from zato.cli import ManageCommand
from zato.common.util.file_system import get_tmp_path
# ################################################################################################################################
# ################################################################################################################################
# Imports needed only for type hints - the 'if 0' guard means they never execute at runtime
if 0:
    from zato.common.typing_ import any_, anydict, callnone, dictnone, strdict

# During development, it is convenient to configure it here to catch information that should be logged
# even prior to setting up main loggers in each of components.

# stdlib
import logging

log_level = logging.INFO
log_format = '%(asctime)s - %(levelname)s - %(process)d:%(threadName)s - %(name)s:%(lineno)d - %(message)s'
logging.basicConfig(level=log_level, format=log_format)

# ################################################################################################################################
# ################################################################################################################################

# Sleep intervals, in seconds, related to stderr handling in foreground (fg)
# and background (bg) mode - consumed elsewhere; confirm exact usage at call sites.
stderr_sleep_fg = 0.9
stderr_sleep_bg = 1.2
# ################################################################################################################################
# ################################################################################################################################
class ModuleCtx:
    """ Module-level constants for the start command. """

    # Directory names that are reserved for Zato's own deployment assets -
    # a top-level directory with one of these names is treated as internal.
    Deploy_Dirs = set((
        'code',
        'config-server',
        'config-user',
        'enmasse',
        'env',
        'lib',
        'pip',
    ))
# ################################################################################################################################
# ################################################################################################################################
class Start(ManageCommand):
    """Starts a Zato component installed in the 'path'. The same command is used for starting servers, load-balancer and web admin instances. 'path' must point to a directory into which the given component has been installed. # noqa: E501

Examples:
  - Assuming a Zato server has been installed in /opt/zato/server1, the command to start the server is 'zato start /opt/zato/server1'.
  - If a load-balancer has been installed in /home/zato/lb1, the command to start it is 'zato start /home/zato/lb1'."""

    opts = [
        {'name':'--fg', 'help':'If given, the component will run in foreground', 'action':'store_true'},
        {'name':'--deploy', 'help':'Resources to deploy', 'action':'store'},
        {'name':'--sync-internal', 'help':"Whether to synchronize component's internal state with ODB", 'action':'store_true'},
        {'name':'--secret-key', 'help':"Component's secret key", 'action':'store'},
        {'name':'--env-file', 'help':'Path to a file with environment variables to use', 'action':'store'},
        {'name':'--stop-after', 'help':'After how many seconds to stop all the Zato components in the system', 'action':'store'},
        {'name':'--stderr-path', 'help':'Where to redirect stderr', 'action':'store'}
    ]

# ################################################################################################################################

    def run_check_config(self) -> 'None':
        """ Runs the check-config command against this component's directory, raising on failure.
        """
        # Bunch
        from bunch import Bunch

        # Zato
        from zato.cli.check_config import CheckConfig

        cc = CheckConfig(self.args)
        cc.show_output = False

        cc.execute(Bunch({
            'path': '.',
            'ensure_no_pidfile': True,
            'check_server_port_available': True,
            'stdin_data': self.stdin_data,
            'secret_key': self.args.secret_key,
        }))

# ################################################################################################################################

    def delete_pidfile(self) -> 'None':
        """ Deletes this component's pidfile, logging (not raising) on any error.
        """
        # stdlib
        import os

        # Zato
        from zato.common.api import MISC

        # Local aliases
        path = None

        try:
            path = os.path.join(self.component_dir, MISC.PIDFILE)
            os.remove(path)
        except Exception as e:
            # Best-effort cleanup - a missing pidfile is not a fatal condition
            self.logger.info('Pidfile `%s` could not be deleted `%s`', path, e)

# ################################################################################################################################

    def check_pidfile(self, pidfile:'str'='') -> 'int':
        """ Returns a non-zero error code if the component's pidfile already exists, 0 otherwise.
        """
        # stdlib
        import os

        # Zato
        from zato.common.api import MISC

        pidfile = pidfile or os.path.join(self.config_dir, MISC.PIDFILE)

        # If we have a pidfile of that name then we already have a running
        # server, in which case we refrain from starting new processes now.
        if os.path.exists(pidfile):
            msg = 'Error - found pidfile `{}`'.format(pidfile)
            self.logger.info(msg)
            return self.SYS_ERROR.COMPONENT_ALREADY_RUNNING

        # Returning None would have sufficed but let's be explicit.
        return 0

# ################################################################################################################################

    def start_component(
        self,
        py_path:'str',
        name:'str',
        program_dir:'str',
        on_keyboard_interrupt:'callnone'=None,
        *,
        extra_options: 'dictnone' = None,
    ) -> 'int':
        """ Starts a component in background or foreground, depending on the 'fg' flag.
        """
        # Type hints
        env_file:'str'

        # Zato
        from zato.common.util.proc import start_python_process

        # We need for it to be an absolute path because the component
        # may want to listen for changes to its contents.
        if env_file := self.args.env_file: # type: ignore

            # Expand '~' first so that the isabs check below sees the real path
            env_file = os.path.expanduser(env_file)

            # Fix: previously this tested 'not os.path.abspath(...)', which is never
            # true for a non-empty path, so relative paths were never resolved
            # against the directory the command was invoked from.
            if not os.path.isabs(env_file):
                env_file = os.path.join(self.original_dir, env_file)
                env_file = os.path.abspath(env_file)
        else:
            # Fix: previously os.path.expanduser ran on this (falsy) branch,
            # raising TypeError when --env-file was not given at all (None).
            env_file = ''

        # Options forwarded to the new process
        options:'strdict' = {
            'sync_internal': self.args.sync_internal,
            'secret_key': self.args.secret_key or '',
            'stderr_path': self.args.stderr_path,
            'env_file': env_file,
            'stop_after': self.args.stop_after,
        }
        if extra_options:
            options.update(extra_options)

        exit_code = start_python_process(
            name, self.args.fg, py_path, program_dir, on_keyboard_interrupt, self.SYS_ERROR.FAILED_TO_START, options,
            stderr_path=self.args.stderr_path,
            stdin_data=self.stdin_data)

        if self.show_output:
            if not self.args.fg and self.verbose:
                self.logger.debug('Zato {} `{}` starting in background'.format(name, self.component_dir))
            else:
                # Print out the success message only if there is no specific exit code,
                # meaning that it is neither 0 nor None.
                if not exit_code:
                    self.logger.info('OK')

        return exit_code

# ################################################################################################################################

    def _handle_deploy_local_dir_impl(self, src_path:'str') -> 'anydict':
        """ Returns extra options pointing the component at a local directory to auto-deploy from.
        """
        return {'deploy_auto_from':src_path}

# ################################################################################################################################

    def _handle_deploy_local_dir(self, src_path:'str', *, delete_src_path:'bool'=False) -> 'dictnone':
        """ Finds the leaf directory with deployable assets under src_path, drilling into
        single-subdirectory levels, and returns deployment options for it.
        """
        # Local aliases
        has_one_name = False
        top_name_is_dir = False
        top_name_path = 'zato-does-not-exist_handle_deploy_local_dir'
        should_recurse = False
        top_name_is_not_internal = False

        # If there is only one directory inside the source path, we drill into it
        # because this is where we expect to find our assets to deploy.
        names = os.listdir(src_path)
        has_one_name = len(names) == 1

        if has_one_name:
            top_name = names[0]
            top_name_path = os.path.join(src_path, top_name)
            top_name_is_dir = os.path.isdir(top_name_path)

            # Internal directory names (ModuleCtx.Deploy_Dirs) are never recursed into
            top_name_is_not_internal = not (top_name in ModuleCtx.Deploy_Dirs)
            should_recurse = top_name_is_dir and top_name_is_not_internal

        # .. if we have a single top-level directory, we can recurse into that ..
        if should_recurse:
            return self._handle_deploy_local_dir(top_name_path)
        else:
            # If we are here, we have a leaf location to actually deploy from ..
            return self._handle_deploy_local_dir_impl(src_path)

# ################################################################################################################################

    def _handle_deploy_local_zip(self, src_path:'str', *, delete_src_path:'bool'=False) -> 'dictnone':
        """ Extracts a local .zip archive into a temporary directory and deploys from there.
        """
        # Extract the file name for later use
        zip_name = os.path.basename(src_path)

        # This will be a new directory ..
        tmp_path = get_tmp_path(body='deploy')

        # .. do create it now ..
        os.mkdir(tmp_path)

        # .. move the archive to the new location ..
        shutil_copy(src_path, tmp_path)

        # .. get the zip file new location's full path ..
        zip_file_path = os.path.join(tmp_path, zip_name)

        # .. do extract it now ..
        with ZipFile(zip_file_path) as zip_file:

            # .. first, run a CRC test ..
            result = zip_file.testzip()
            if result:
                raise ValueError(f'Zip contents CRC file error -> {result}')

            # .. we can proceed with the extraction ..
            zip_file.extractall(tmp_path)

        # .. always delete the temporary zip file ..
        os.remove(zip_file_path)

        # .. optionally, delete the original, source file ..
        if delete_src_path:
            os.remove(src_path)

        # .. we can now treat it as deployment from a local directory ..
        return self._handle_deploy_local_dir(tmp_path)

# ################################################################################################################################

    def _maybe_set_up_deploy(self) -> 'dictnone':
        """ If a deployment source was given via --deploy or environment variables,
        returns extra options describing what to deploy; otherwise returns None.
        """
        # Local aliases
        env_from1 = os.environ.get('Zato_Deploy_From') or ''
        env_from2 = os.environ.get('ZATO_DEPLOY_FROM') or ''

        # Zato_Deploy_Auto_Path_To_Delete
        # Zato_Deploy_Auto_Enmasse

        # First goes the command line, then both of the environment variables
        deploy:'str' = self.args.deploy or env_from1 or env_from2 or ''

        # We have a resource to deploy ..
        if deploy:

            is_ssh = deploy.startswith('ssh://')
            is_http = deploy.startswith('http://')

            # Fix: was 'https//' (missing colon), which misclassified HTTPS URLs as local paths
            is_https = deploy.startswith('https://')

            is_local = not (is_ssh or is_http or is_https)

            # .. handle a local path ..
            if is_local:

                # .. this can be done upfront if it is a local path ..
                deploy = os.path.expanduser(deploy)

                # .. deploy local .zip archives ..
                if deploy.endswith('.zip'):

                    # .. do handle the input now ..
                    return self._handle_deploy_local_zip(deploy)

# ################################################################################################################################

    def _on_server(self, show_output:'bool'=True, *ignored:'any_') -> 'int':
        """ Starts a server component, optionally deploying assets first.
        """
        # Potentially sets up the deployment of any assets given on input
        extra_options = self._maybe_set_up_deploy()

        # Check basic configuration
        self.run_check_config()

        # Start the server now
        return self.start_component(
            'zato.server.main',
            'server',
            self.component_dir,
            self.delete_pidfile,
            extra_options=extra_options
        )

# ################################################################################################################################

    def _on_lb(self, *ignored:'any_') -> 'None':
        """ Starts a load-balancer's agent, unless a pidfile indicates it is already running.
        """
        # stdlib
        import os
        import sys

        # Zato
        from zato.cli.stop import Stop
        from zato.common.util.api import get_haproxy_agent_pidfile

        self.run_check_config()

        def stop_haproxy():
            Stop(self.args).stop_haproxy(self.component_dir)

        found_pidfile = self.check_pidfile()
        if not found_pidfile:
            found_agent_pidfile = self.check_pidfile(get_haproxy_agent_pidfile(self.component_dir))
            if not found_agent_pidfile:
                _ = self.start_component(
                    'zato.agent.load_balancer.main', 'load-balancer', os.path.join(self.config_dir, 'repo'), stop_haproxy)
                return

        # Will be returned if either of pidfiles was found
        sys.exit(self.SYS_ERROR.FOUND_PIDFILE)

# ################################################################################################################################

    def _on_web_admin(self, *ignored:'any_') -> 'None':
        """ Starts a web-admin component.
        """
        self.run_check_config()
        _ = self.start_component('zato.admin.main', 'web-admin', '', self.delete_pidfile)

# ################################################################################################################################

    def _on_scheduler(self, *ignored:'any_') -> 'None':
        """ Starts a scheduler component.
        """
        self.run_check_config()
        _ = self.check_pidfile()
        _ = self.start_component('zato.scheduler.main', 'scheduler', '', self.delete_pidfile)
# ################################################################################################################################
# ################################################################################################################################
| 14,107
|
Python
|
.py
| 258
| 45.554264
| 238
| 0.479162
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,449
|
ca_create_ca.py
|
zatosource_zato/code/zato-cli/src/zato/cli/ca_create_ca.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import os, uuid, tempfile
# Zato
from zato.cli import ca_defaults, default_ca_name, ZatoCommand
from zato.common.util.open_ import open_w
# OpenSSL configuration template for a new certificate authority - the {placeholders}
# are filled in with str.format in Create.execute below before the file is passed
# to the 'openssl req' command via its -config option.
openssl_template = """
dir = {target_dir}
[ ca ]
default_ca = CA_default
[ CA_default ]
serial = {ca_serial}
database = {ca_certindex}
new_certs_dir = {target_dir_rel}
certificate = {ca_key}
private_key = {private_key}
default_days = 3650
default_md = sha1
preserve = no
email_in_dn = no
nameopt = default_ca
certopt = default_ca
policy = policy_match
[ policy_match ]
countryName = match
stateOrProvinceName = match
organizationName = match
organizationalUnitName = supplied
commonName = supplied
[ req ]
default_bits = 2048
default_md = sha1
string_mask = nombstr
distinguished_name = req_distinguished_name
[ req_distinguished_name ]
0.organizationName = Organization Name (company)
organizationalUnitName = Organization Unit Name (department, division)
localityName = Locality Name (city, district)
stateOrProvinceName = State or Province Name (full name)
countryName = Country Name (2 letter code)
countryName_min = 2
countryName_max = 2
commonName = Common Name (hostname, IP, or your name)
commonName_max = 64
0.organizationName_default = {organization}
organizationalUnitName_default = {organizational_unit}
localityName_default = {locality}
stateOrProvinceName_default = {state_or_province}
countryName_default = {country}
commonName_default = {common_name}
[ v3_ca ]
basicConstraints = CA:TRUE
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer:always
[ v3_server ]
basicConstraints = CA:FALSE
subjectKeyIdentifier = hash
extendedKeyUsage = serverAuth
[ v3_client_server ]
basicConstraints = CA:FALSE
subjectKeyIdentifier = hash
extendedKeyUsage = serverAuth,clientAuth
"""
class Create(ZatoCommand):
    """Creates a new certificate authority
    """
    opts = [
        {'name':'--organization', 'help':'CA organization name (defaults to {organization})'.format(**ca_defaults)},
        {'name':'--organizational-unit',
            'help':'CA organizational unit name (defaults to {default})'.format(default=default_ca_name)},
        {'name':'--locality', 'help':'CA locality name (defaults to {locality})'.format(**ca_defaults)},
        {'name':'--state-or-province',
            'help':'CA state or province name (defaults to {state_or_province})'.format(**ca_defaults)},
        {'name':'--country', 'help':'CA country (defaults to {country})'.format(**ca_defaults)},
        {'name':'--common-name', 'help':'CA common name (defaults to {default})'.format(default=default_ca_name)},
    ]
    needs_empty_dir = True

    def __init__(self, args):
        super(Create, self).__init__(args)
        # Resolve the target path up front so later relative-path operations are stable
        self.target_dir = os.path.abspath(args.path)

    def execute(self, args, show_output=True):
        """ Lays out the CA directory structure, renders an OpenSSL config and invokes
        the 'openssl' command-line tool to generate the CA's key pair and certificate.
        """
        self.logger.info('Create CA execute')

        # Prepare the directory layout
        os.mkdir(os.path.join(self.target_dir, 'ca-material'))
        open_w(os.path.join(self.target_dir, 'ca-material', 'ca-serial')).write('01')
        open_w(os.path.join(self.target_dir, 'ca-material', 'ca-password')).write(uuid.uuid4().hex)
        open_w(os.path.join(self.target_dir, 'ca-material', 'ca-certindex'))
        open_w(os.path.join(self.target_dir, 'ca-material', 'ca-certindex.attr')).write('unique_subject = no')
        open_w(os.path.join(self.target_dir, 'ca-material', 'openssl-template.conf')).write(openssl_template)

        # Create the CA's cert and the private key
        template_args = {}
        for name in('organization', 'organizational_unit', 'locality', 'state_or_province', 'country'):
            value = self._get_arg(args, name, ca_defaults[name])
            template_args[name] = value

        template_args['common_name'] = self._get_arg(args, 'common_name', default_ca_name)
        template_args['target_dir'] = self.target_dir

        # On non-Windows platforms, paths in the config use OpenSSL's $dir variable
        template_args['ca_serial'] = '$dir/ca-material/ca-serial'
        template_args['ca_certindex'] = '$dir/ca-material/ca-certindex'
        template_args['target_dir_rel'] = '$dir'
        template_args['ca_key'] = '$dir/ca-material/ca-cert.pem'
        template_args['private_key'] = '$dir/ca-material/ca-key.pem'

        # Imported here because it is needed only for this platform check
        import platform
        system = platform.system()
        is_windows = 'windows' in system.lower()

        # On Windows, forward slashes are substituted so OpenSSL can parse the paths
        if is_windows:
            template_args['target_dir'] = self.target_dir.replace('\\','/')
            template_args['ca_serial'] = os.path.relpath(os.path.join(self.target_dir, 'ca-material', 'ca-serial')).replace('\\','/')
            template_args['ca_certindex'] = os.path.relpath(os.path.join(self.target_dir, 'ca-material', 'ca-certindex')).replace('\\','/')
            template_args['target_dir_rel'] = os.path.relpath(self.target_dir).replace('\\','/')
            template_args['ca_key'] = os.path.relpath(os.path.join(self.target_dir, 'ca-material', 'ca-cert.pem')).replace('\\','/')
            template_args['private_key'] = os.path.relpath(os.path.join(self.target_dir, 'ca-material', 'ca-key.pem')).replace('\\','/')

        # Render the OpenSSL config into a temporary file for the command below
        f = tempfile.NamedTemporaryFile(mode='w+')
        f.write(openssl_template.format(**template_args))
        f.flush()

        ca_key = os.path.join(self.target_dir, 'ca-material', 'ca-key.pem')
        ca_cert = os.path.join(self.target_dir, 'ca-material', 'ca-cert.pem')
        ca_password = os.path.relpath(os.path.join(self.target_dir, 'ca-material', 'ca-password'))

        # On Windows, backslashes are doubled so they survive shell interpolation
        if is_windows:
            ca_key = os.path.join(self.target_dir, 'ca-material', 'ca-key.pem').replace('\\','\\\\')
            ca_cert = os.path.join(self.target_dir, 'ca-material', 'ca-cert.pem').replace('\\','\\\\')
            ca_password = os.path.relpath(os.path.join(self.target_dir, 'ca-material', 'ca-password')).replace('\\','\\\\')

        # Generate a 10-year, self-signed CA certificate along with its private key
        cmd = """openssl req -batch -new -x509 -newkey rsa:2048 -extensions v3_ca -keyout \
{ca_key} -out {ca_cert} -days 3650 \
-config {config} -passout file:{ca_password}""".format(
            config=f.name,
            ca_key=ca_key,
            ca_cert=ca_cert,
            ca_password=ca_password
        )
        os.system(cmd)
        f.close()

        # Output directories for CSRs, certificates and key pairs created later on
        for name in('csr', 'cert', 'priv', 'pub'):
            os.mkdir(os.path.join(self.target_dir, 'out-{}'.format(name)))

        # Mark the directory being a Zato CA one.
        open_w(os.path.join(self.target_dir, '.zato-ca-dir'))

        # Initial info
        self.store_initial_info(self.target_dir, self.COMPONENTS.CA.code)

        if show_output:
            if self.verbose:
                msg = 'Successfully created a certificate authority in {path}'.format(
                    path=os.path.abspath(os.path.join(os.getcwd(), self.target_dir)))
                self.logger.debug(msg)
            else:
                self.logger.info('OK')
| 7,836
|
Python
|
.py
| 149
| 45.946309
| 139
| 0.589157
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,450
|
_run_zato.py
|
zatosource_zato/code/zato-cli/src/zato/cli/_run_zato.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':

    # stdlib
    import re
    import sys

    # Zato
    from zato.cli.zato_command import main

    # Strip setuptools-style '-script.py' / '.exe' suffixes from argv[0]
    # so usage messages display the plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
# ################################################################################################################################
# ################################################################################################################################
| 898
|
Python
|
.py
| 17
| 49.705882
| 130
| 0.246277
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,451
|
ide.py
|
zatosource_zato/code/zato-cli/src/zato/cli/ide.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import os
from contextlib import closing
# Zato
from zato.cli import ManageCommand
from zato.common.const import SECRETS
from zato.common.util.api import get_odb_session_from_server_dir
# ################################################################################################################################
# ################################################################################################################################
class SetIDEPassword(ManageCommand):
    """ Sets password of the default API user used by IDEs to connect to Zato.
    """
    opts = [
        {'name':'--password', 'help':'Password to set for the IDE user', 'default':''},
        # Fixed typo in the help text: 'studout' -> 'stdout'
        {'name':'--skip-stdout', 'help':'Should the password be printed to stdout', 'action':'store_true'},
    ]

    def is_password_required(self):
        # A password is generated if none is given on input, hence it is never required.
        return False

# ################################################################################################################################

    def execute(self, args):
        """ Looks up the IDE user's security definition and sets its password,
        printing the new password to stdout unless --skip-stdout was given.
        """
        # stdlib
        import sys

        # Zato
        from zato.common.api import IDEDeploy
        from zato.common.crypto.api import CryptoManager
        from zato.common.odb.model import HTTPBasicAuth
        from zato.common.util.cli import CommandLineServiceInvoker

        # Use the password given on input or generate a new one ..
        password = self.args.password or CryptoManager.generate_password()
        password = password if isinstance(password, str) else password.decode('utf8')

        # .. strip characters that are awkward to type or copy-and-paste.
        password = password.replace('-', '').replace('_', '').replace('=', '')

        # Make sure the path given on input actually points to a server.
        path = args.path
        path = os.path.expanduser(path)
        path = os.path.abspath(path)

        if not os.path.exists(path):
            self.logger.warning('Path not found: %s', path)
            sys.exit(self.SYS_ERROR.NOT_A_ZATO_SERVER)

        # Note: a previous version also computed an encrypted form of the password here
        # but never used it - that dead code has been removed; the password-change
        # service invoked below handles any encryption itself.

        # Obtain an SQL session to the configuration database ..
        with closing(get_odb_session_from_server_dir(args.path)) as session:

            # .. get our IDE invoker's security definition ..
            security = session.query(HTTPBasicAuth).\
                filter(HTTPBasicAuth.username == IDEDeploy.Username).\
                one() # type: HTTPBasicAuth

            # .. extract the ID needed to change the password ..
            security_id = security.id

            # .. and invoke the service that changes the password ..
            invoker = CommandLineServiceInvoker(check_stdout=False, server_location=path)
            invoker.invoke('zato.security.basic-auth.change-password', {
                'id': security_id,
                'password1': password,
                'password2': password,
            })

        # Unless told otherwise, print the new password so callers can capture it.
        if not args.skip_stdout:
            sys.stdout.write(password)
            sys.stdout.write('\n')
            sys.stdout.flush()
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':
# Zato
from zato.cli.util import run_cli_command
run_cli_command(SetIDEPassword, {
'password': None,
'skip_stdout': False
})
# ################################################################################################################################
# ################################################################################################################################
| 3,836
|
Python
|
.py
| 75
| 43.72
| 130
| 0.485408
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,452
|
ca_create_scheduler.py
|
zatosource_zato/code/zato-cli/src/zato/cli/ca_create_scheduler.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# Zato
from zato.cli import CACreateCommand, common_ca_create_opts
class Create(CACreateCommand):
    """ Creates crypto material for a Zato scheduler.
    """
    opts = [
        {'name':'cluster_name', 'help':'Cluster name'},
        {'name':'scheduler_name', 'help':'Scheduler name'},
        {'name':'--organizational-unit', 'help':'Organizational unit name (defaults to cluster_name:scheduler_name)'},
    ]
    opts += common_ca_create_opts

    def get_file_prefix(self, file_args):
        # Output files are prefixed with both the cluster's and the scheduler's name.
        return '{}-{}'.format(file_args['cluster_name'], file_args['scheduler_name'])

    def get_organizational_unit(self, args):
        # The default OU combines the cluster and scheduler names.
        return ':'.join((args.cluster_name, args.scheduler_name))

    def execute(self, args, show_output=True):
        # Schedulers act as both clients and servers, hence the v3_client_server profile.
        self._execute(args, 'v3_client_server', show_output)
| 1,016
|
Python
|
.py
| 23
| 39.347826
| 118
| 0.680203
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,453
|
create_odb.py
|
zatosource_zato/code/zato-cli/src/zato/cli/create_odb.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from datetime import datetime
from getpass import getuser
from socket import gethostname
# SQLAlchemy
from sqlalchemy.dialects.postgresql.base import PGTypeCompiler
# Zato
from zato.cli import common_odb_opts, is_arg_given, ZatoCommand
from zato.common.odb.model import AlembicRevision, Base, ZatoInstallState
LATEST_ALEMBIC_REVISION = '0028_ae3419a9'
VERSION = 1
# ################################################################################################################################
# ################################################################################################################################
class Create(ZatoCommand):
    """ Creates a new Zato ODB (Operational Database)
    """
    # Build our own option list instead of calling common_odb_opts.append in place -
    # mutating the shared module-level list would leak the extra option into every
    # other command that also uses common_odb_opts (cf. create_cluster.py, which
    # copies the list before extending it).
    opts = common_odb_opts + [{
        'name':'--skip-if-exists',
        'help':'Return without raising an error if ODB already exists',
        'action':'store_true'
    }]

# ################################################################################################################################

    def allow_empty_secrets(self):
        # Creating the ODB does not require any secrets on input.
        return True

# ################################################################################################################################

    def execute(self, args, show_output=True):
        """ Creates the ODB schema unless it already exists, recording the install
        state and the current Alembic revision when Alembic is available.
        """
        # Alembic
        try:
            from alembic.migration import MigrationContext
            from alembic.operations import Operations
        except ImportError:
            has_alembic = False
        else:
            has_alembic = True

        engine = self._get_engine(args)
        session = self._get_session(engine)

        # The presence of the install_state table tells us the ODB was created before.
        if engine.dialect.has_table(engine.connect(), 'install_state'):
            if is_arg_given(args, 'skip-if-exists'):
                if show_output:
                    if self.verbose:
                        self.logger.debug('ODB already exists, skipped its creation')
                    else:
                        self.logger.info('OK')
            else:
                if show_output:
                    version = session.query(ZatoInstallState.version).one().version
                    msg = (
                        'The ODB (v. {}) already exists, not creating it. ' + \
                        "Use the 'zato delete odb' command first if you'd like to start afresh and " + \
                        'recreate all ODB objects.').format(version)
                    self.logger.error(msg)
                return self.SYS_ERROR.ODB_EXISTS

        else:
            # This is needed so that PubSubMessage.data can continue to use length
            # in the column's specification which in itself is needed for MySQL to use LONGTEXT.
            def _render_string_type(self, type_, name):

                text = name
                if type_.length and name != 'TEXT':
                    text += '(%d)' % type_.length
                if type_.collation:
                    text += ' COLLATE "%s"' % type_.collation
                return text

            PGTypeCompiler._render_string_type = _render_string_type

            # Create all the tables defined by the ODB model.
            Base.metadata.create_all(engine)

            if has_alembic:

                # Record the install state and the Alembic revision we are at.
                state = ZatoInstallState(None, VERSION, datetime.now(), gethostname(), getuser())
                alembic_rev = AlembicRevision(LATEST_ALEMBIC_REVISION)

                session.add(state)
                session.add(alembic_rev)
                session.commit()

                # We need to add a foreign key to this SSO table because we are conducting
                # an ODB installation that combines base tables with SSO ones.
                alembic_ctx = MigrationContext.configure(engine.connect())
                alembic_ops = Operations(alembic_ctx)

                # There is no support for FKs during ALTER TABLE statements in SQLite.
                if args.odb_type != 'sqlite':
                    alembic_ops.create_foreign_key(
                        'fk_sso_linked_base_id',
                        'zato_sso_linked_auth',
                        'sec_base',
                        ['auth_id'], ['id'],
                        ondelete='CASCADE',
                    )

            if show_output:
                if self.verbose:
                    self.logger.debug('ODB created successfully')
                else:
                    self.logger.info('OK')
# ################################################################################################################################
# ################################################################################################################################
| 4,845
|
Python
|
.py
| 97
| 37.701031
| 130
| 0.473606
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,454
|
wait.py
|
zatosource_zato/code/zato-cli/src/zato/cli/wait.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# Zato
from zato.cli import ZatoCommand
# ################################################################################################################################
if 0:
from argparse import Namespace
from zato.common.typing_ import intnone
Namespace = Namespace
# ################################################################################################################################
# ################################################################################################################################
class Wait(ZatoCommand):
    """ Waits until a Zato server, pointed to either by a local path or a remote address,
    starts responding to ping requests.
    """
    opts = [
        {'name':'--path', 'help':'Path to a local Zato server', 'default':''},
        {'name':'--address', 'help':'Address of a remote Zato server', 'default':''},
        {'name':'--url-path', 'help':'URL path of an endpoint to invoke', 'default':'/zato/ping'},
        {'name':'--timeout', 'help':'How many seconds to wait for the server', 'default':'60'},
        {'name':'--interval', 'help':'How often to check if the server is up, in seconds', 'default':'0.1'},
        {'name':'--silent', 'help':'Whether to log details of connection attempts', 'action':'store_true'},
    ]

    def execute(self, args:'Namespace', needs_sys_exit:'bool'=True) -> 'intnone':

        # stdlib
        import sys

        # Zato
        from zato.common.util.api import get_client_from_server_conf
        from zato.common.util.tcp import wait_for_zato

        has_path = bool(args.path)
        has_address = bool(args.address)

        # Exactly one way of pointing to a server must be given on input ..
        if not (has_path or has_address):
            self.logger.warning('Exactly one of --path or --address is required (#1)')
            sys.exit(self.SYS_ERROR.INVALID_INPUT)

        # .. but never both at once.
        if has_path and has_address:
            self.logger.warning('Exactly one of --path or --address is required (#2)')
            sys.exit(self.SYS_ERROR.INVALID_INPUT)

        # A local path means we look the address up through the server's client ..
        if has_path:
            client = get_client_from_server_conf(args.path, False)
            address = client.address # type: str

        # .. otherwise, the address was given explicitly.
        else:
            address = args.address # type: str

        # Progress is logged unless we were told to stay silent.
        needs_log = not getattr(args, 'silent', False)

        # We can now wait for the server to respond.
        is_success = wait_for_zato(address, args.url_path, int(args.timeout), float(args.interval), needs_log=needs_log)

        if is_success:
            return 0

        # No response - either exit the process or raise, depending on the caller's wishes.
        if needs_sys_exit:
            sys.exit(self.SYS_ERROR.SERVER_TIMEOUT)
        else:
            raise Exception('No response from `{}{}` after {}s'.format(address, args.url_path, args.timeout))
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':

    # stdlib
    from argparse import Namespace
    from os import environ

    # Build the same arguments the CLI parser would otherwise produce for us.
    args = Namespace(**{
        'verbose': True,
        'store_log': False,
        'store_config': False,
        'path': environ['ZATO_SERVER_BASE_DIR'],
        'address': '',
        'url_path': '',
        'timeout': 60,
        'interval': 0.1,
    })

    command = Wait(args)
    command.run(args)
# ################################################################################################################################
# ################################################################################################################################
| 3,967
|
Python
|
.py
| 78
| 43.5
| 130
| 0.46727
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,455
|
openapi_.py
|
zatosource_zato/code/zato-cli/src/zato/cli/openapi_.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2021, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# Zato
from zato.cli import ZatoCommand
from zato.common.util.open_ import open_w
# ################################################################################################################################
# ################################################################################################################################
if 0:
from zato.common.typing_ import any_
# ################################################################################################################################
# ################################################################################################################################
stderr_sleep_fg = 0.9
stderr_sleep_bg = 1.2
# ################################################################################################################################
# ################################################################################################################################
internal_patterns = [
'zato.*',
'pub.zato.*',
'helpers.*',
]
# ################################################################################################################################
# ################################################################################################################################
class OpenAPI(ZatoCommand):
    """OpenAPI specification generator."""
    opts = [
        {'name':'--include', 'help':'A comma-separated list of patterns to include services by', 'default':'*'},
        {'name':'--with-internal', 'help':'Whether internal services should be included on output', 'action':'store_true'},
        {'name':'--exclude', 'help':'A comma-separated list of patterns to exclude services by',
            'default':','.join(internal_patterns)},
        # Fixed the help text - the option names a file, not a directory.
        {'name':'--file', 'help':'File to save the output to', 'default':'openapi.yaml'},
        # Fixed the help text - it referred to a nonexistent --dir option.
        {'name':'--delete-file', 'help':'If given, --file will be deleted before the output is saved', 'action':'store_true'},
        # NOTE(review): 'default':True combined with 'store_true' means these two flags
        # are effectively always on - confirm whether that is intended.
        {'name':'--with-api-invoke', 'help':'If given, OpenAPI spec for --api-invoke-path endpoints will be generated',
            'action':'store_true', 'default':True},
        {'name':'--with-rest-channels', 'help':'If given, OpenAPI spec for individual REST endpoints will be generated',
            'action':'store_true', 'default':True},
        {'name':'--api-invoke-path', 'help':'A comma-separated list of URL paths to invoke API services through'},
        {'name':'--tags', 'help':'A comma-separated list of docstring tags to generate documentation for',
            'default':'public'},
    ]

# ################################################################################################################################
# ################################################################################################################################

    def execute(self, args:'any_') -> 'None':
        """ Invokes the server's apispec service and saves the OpenAPI definition
        it returns to the output file given on input.
        """
        # stdlib
        import os

        # Zato
        from zato.common.util.api import get_client_from_server_conf
        from zato.common.util.file_system import fs_safe_now

        client = get_client_from_server_conf(args.path)

        # str.split always returns a non-empty list, so no 'or []' fallback is needed.
        exclude = args.exclude.split(',')
        exclude = [elem.strip() for elem in exclude]

        tags = args.tags.split(',')
        tags = [elem.strip() for elem in tags]

        # If internal services were requested, stop excluding them.
        if args.with_internal:
            for item in internal_patterns:
                try:
                    exclude.remove(item)
                except ValueError:
                    pass

        request = {
            'return_internal': args.with_internal,
            'include': args.include,
            'exclude': ','.join(exclude),
            'needs_api_invoke': args.with_api_invoke,
            'needs_rest_channels': args.with_rest_channels,
            'needs_sphinx': True,
            'tags': tags,
        }

        if args.with_api_invoke:
            request['api_invoke_path'] = args.api_invoke_path if args.api_invoke_path else '/zato/api/invoke/{service_name}'

        # Choose the output file name - a timestamped default if none was given.
        if not args.file:
            now = fs_safe_now()
            out_file = '{}.{}'.format('apispec', now)
        else:
            out_file = args.file

        out_file = os.path.abspath(out_file)

        if os.path.exists(out_file):
            if args.delete_file:
                self.logger.info('Deleting %s', out_file)
                os.remove(out_file)
            else:
                self.logger.warning('Output file %s already exists and --delete-file was not provided', out_file)
                return

        # We are expecting for this file to be returned by the server
        def_name = 'openapi.yaml'

        # Invoke the server ..
        response = client.invoke('zato.apispec.get-api-spec', request)

        # .. get all specifications ..
        data = response.data['response']['data']

        # .. check all the files the server returned ..
        for file_path, contents in data.items():

            # .. save the OpenAPI definition if it is found ..
            if def_name in file_path:
                f = open_w(out_file)
                f.write(contents)
                f.close()
                self.logger.info('Output saved to %s', out_file)
                break

        # .. otherwise, report an error.
        else:
            self.logger.warning('No OpenAPI definition (%s) found among files received -> %s', def_name, sorted(data))
# ################################################################################################################################
# ################################################################################################################################
| 5,827
|
Python
|
.py
| 105
| 46.67619
| 130
| 0.425936
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,456
|
create_cluster.py
|
zatosource_zato/code/zato-cli/src/zato/cli/create_cluster.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
from copy import deepcopy
# Zato
from zato.cli import common_odb_opts, is_arg_given, ZatoCommand
from zato.common.api import SSO
from zato.common.const import ServiceConst
# ################################################################################################################################
zato_services = {
# Hot-deploy
'zato.hot-deploy.create': 'zato.server.service.internal.hot_deploy.Create',
'zato.ide-deploy.create': 'zato.server.service.internal.ide_deploy.Create',
# JWT
'zato.security.jwt.auto-clean-up':'zato.server.service.internal.security.jwt.AutoCleanUp',
'zato.security.jwt.change-password':'zato.server.service.internal.security.jwt.ChangePassword',
'zato.security.jwt.create':'zato.server.service.internal.security.jwt.Create',
'zato.security.jwt.delete':'zato.server.service.internal.security.jwt.Delete',
'zato.security.jwt.edit':'zato.server.service.internal.security.jwt.Edit',
'zato.security.jwt.get-list':'zato.server.service.internal.security.jwt.GetList',
'zato.security.jwt.log-in':'zato.server.service.internal.security.jwt.LogIn',
'zato.security.jwt.log-out':'zato.server.service.internal.security.jwt.LogOut',
# Service invocations
'zato.service.invoke':'zato.server.service.internal.service.Invoke',
'pub.zato.service.service-invoker':'zato.server.service.internal.service.ServiceInvoker',
}
# ################################################################################################################################
class Create(ZatoCommand):
""" Creates a new Zato cluster in the ODB
"""
opts = deepcopy(common_odb_opts)
opts.append({'name':'cluster_name', 'help':'Name of the cluster to create'})
opts.append({'name':'--lb-host', 'help':'Load-balancer host', 'default':'127.0.0.1'})
opts.append({'name':'--lb-port', 'help':'Load-balancer port', 'default':'11223'})
opts.append({'name':'--lb-agent_port', 'help':'Load-balancer agent\'s port', 'default':'20151'})
opts.append({'name':'--secret-key', 'help':'Secret key that servers will use for decryption and decryption'})
opts.append({'name':'--admin-invoke-password', 'help':'Password for config API clients to connect to servers with'})
opts.append({'name':'--skip-if-exists',
'help':'Return without raising an error if cluster already exists', 'action':'store_true'})
# ################################################################################################################################
def execute(self, args, show_output=True):
    """ Creates the cluster and all of its initial, default objects in the ODB,
    returning an error code if a cluster of that name already exists.
    """
    # stdlib
    from datetime import datetime
    from traceback import format_exc

    # SQLAlchemy
    from sqlalchemy.exc import IntegrityError

    # Zato
    from zato.common.api import IDEDeploy
    from zato.common.odb.model import Cluster, HTTPBasicAuth
    from zato.common.odb.post_process import ODBPostProcess
    from zato.common.pubsub import PUBSUB

    _pubsub_default = PUBSUB.DEFAULT

    engine = self._get_engine(args)
    session = self._get_session(engine)

    # If the ODB is already populated, optionally skip the creation quietly.
    if engine.dialect.has_table(engine.connect(), 'install_state'):
        if is_arg_given(args, 'skip-if-exists', 'skip_if_exists'):
            if show_output:
                if self.verbose:
                    self.logger.debug('Cluster already exists, skipped its creation')
                else:
                    self.logger.info('OK')
            return

    with session.no_autoflush:

        cluster = Cluster()
        cluster.name = args.cluster_name
        cluster.description = 'Created by {} on {} (UTC)'.format(self._get_user_host(), datetime.utcnow().isoformat())

        for name in(
            'odb_type', 'odb_host', 'odb_port', 'odb_user', 'odb_db_name',
            'lb_host', 'lb_port', 'lb_agent_port'):
            setattr(cluster, name, getattr(args, name))
        session.add(cluster)

        # With a cluster object in place, we can construct the ODB post-processor
        odb_post_process = ODBPostProcess(session, cluster, None)

        # admin.invoke user's password may be possibly in one of these attributes,
        # but if it is not, generate a new one.
        admin_invoke_password = getattr(args, 'admin-invoke-password', None)

        if not admin_invoke_password:
            admin_invoke_password = getattr(args, 'admin_invoke_password', None)

        if not admin_invoke_password:
            admin_invoke_password = self.generate_password()

        admin_invoke_sec = HTTPBasicAuth(None, ServiceConst.API_Admin_Invoke_Username, True,
            ServiceConst.API_Admin_Invoke_Username, 'Zato admin invoke', admin_invoke_password, cluster)
        session.add(admin_invoke_sec)

        pubapi_sec = HTTPBasicAuth(None, _pubsub_default.PUBAPI_SECDEF_NAME, True, _pubsub_default.PUBAPI_USERNAME,
            'Zato public API', self.generate_password(), cluster)
        session.add(pubapi_sec)

        internal_invoke_sec = HTTPBasicAuth(None, 'zato.internal.invoke', True, 'zato.internal.invoke.user',
            'Zato internal invoker', self.generate_password(), cluster)
        session.add(internal_invoke_sec)

        self.add_default_rbac_permissions(session, cluster)
        root_rbac_role = self.add_default_rbac_roles(session, cluster)
        ide_pub_rbac_role = self.add_rbac_role_and_acct(
            session, cluster, root_rbac_role, 'IDE Publishers', IDEDeploy.Username, IDEDeploy.Username)

        # We need to flush the session here, after adding default RBAC permissions
        # which are needed by REST channels with security delegated to RBAC.
        session.flush()

        self.add_internal_services(session, cluster, admin_invoke_sec, pubapi_sec, internal_invoke_sec, ide_pub_rbac_role)
        self.add_ping_services(session, cluster)
        self.add_default_caches(session, cluster)
        self.add_cache_endpoints(session, cluster)
        self.add_crypto_endpoints(session, cluster)
        self.add_pubsub_sec_endpoints(session, cluster)

        # IBM MQ connections / connectors
        self.add_internal_callback_wmq(session, cluster)

        # SFTP connections / connectors
        self.add_sftp_credentials(session, cluster)

        # Account to access cache services with
        self.add_cache_credentials(session, cluster)

        # SSO
        self.add_sso_endpoints(session, cluster)

        # Run ODB post-processing tasks
        odb_post_process.run()

    try:
        session.commit()
    except IntegrityError as e:
        # Python 3 fixes: IntegrityError has no .message attribute (use its args instead)
        # and format_exc() already returns str, so no .decode('utf-8') is needed.
        msg = 'SQL IntegrityError caught `{}`'.format(e.args[0] if e.args else e)
        if self.verbose:
            msg += '\nDetails:`{}`'.format(format_exc())
        # A duplicated self.logger.error(msg) call was removed here.
        self.logger.error(msg)
        session.rollback()

        return self.SYS_ERROR.CLUSTER_NAME_ALREADY_EXISTS

    if show_output:
        if self.verbose:
            msg = 'Successfully created a new cluster [{}]'.format(args.cluster_name)
            self.logger.debug(msg)
        else:
            self.logger.info('OK')
# ################################################################################################################################
def generate_password(self):
    """ Returns a new random password, Fernet-encrypted with the cluster's
    secret key if one was given on input, otherwise in the clear.
    """
    # stdlib
    from uuid import uuid4

    # cryptography
    from cryptography.fernet import Fernet

    # Zato
    from zato.common.const import SECRETS

    # A random hex string is the basis of every new password.
    new_password = uuid4().hex

    # The secret key is optional.
    secret_key = getattr(self.args, 'secret_key', None)

    # Without a secret key, the password is returned as-is ..
    if not secret_key:
        return new_password

    # .. otherwise, it is encrypted and prefixed so that servers recognise it.
    token = Fernet(secret_key).encrypt(new_password.encode('utf8'))
    return SECRETS.PREFIX + token.decode('utf8')
# ################################################################################################################################
def add_api_invoke(self, session, cluster, service, pubapi_sec):
    """ Adds the generic API-invocation REST channel(s) pointing to the given service,
    secured with the public API's Basic Auth definition.
    """
    from zato.common.api import APISPEC
    from zato.common.odb.model import HTTPSOAP

    invoke_paths = (APISPEC.GENERIC_INVOKE_PATH,)

    for invoke_path in invoke_paths:
        api_channel = HTTPSOAP(None, invoke_path, True, True, 'channel', 'plain_http',
            None, invoke_path, None, '', None, None,
            merge_url_params_req=True, service=service, security=pubapi_sec,
            cluster=cluster)
        session.add(api_channel)
# ################################################################################################################################
def add_internal_services(self, session, cluster, admin_invoke_sec, pubapi_sec, internal_invoke_sec, ide_pub_rbac_role):
    """ Adds these Zato internal services that can be accessed through SOAP requests.
    """
    #
    # HTTPSOAP + services
    #

    # Python 2/3 compatibility
    from zato.common.ext.future.utils import iteritems

    # Zato
    from zato.common.api import DATA_FORMAT
    from zato.common.odb.model import Service
    from zato.common.util.api import get_http_json_channel

    # For each internal service, create its Service row and then attach
    # whatever extra channels this particular service needs.
    for name, impl_name in iteritems(zato_services):
        service = Service(None, name, True, impl_name, True, cluster)
        session.add(service)
        # The public service invoker gets the generic API-invoke channel ..
        if name == 'pub.zato.service.service-invoker':
            self.add_api_invoke(session, cluster, service, pubapi_sec)
        # .. the admin invoker gets both the admin and the internal channel ..
        elif name == 'zato.service.invoke':
            self.add_admin_invoke(session, cluster, service, admin_invoke_sec)
            self.add_internal_invoke(session, cluster, service, internal_invoke_sec)
        # .. JWT log-in/log-out get their dedicated REST endpoints ..
        elif name == 'zato.security.jwt.log-in':
            self.add_jwt_log_in(session, cluster, service)
        elif name == 'zato.security.jwt.log-out':
            self.add_jwt_log_out(session, cluster, service)
        # .. IDE deployment goes through an RBAC-protected channel ..
        elif name == 'zato.ide-deploy.create':
            self.add_rbac_channel(session, cluster, service, ide_pub_rbac_role, '/ide-deploy', permit_write=True,
                data_format=DATA_FORMAT.JSON)
        # .. and anything with 'check' in its name gets a check channel.
        elif 'check' in name:
            self.add_check(session, cluster, service, pubapi_sec)
        # Every service, regardless of the branches above, also gets a plain JSON channel.
        session.add(get_http_json_channel(name, service, cluster, pubapi_sec))
# ################################################################################################################################
def add_ping_services(self, session, cluster):
    """ Adds a ping service and channels, with and without security checks.
    """
    # Zato
    from zato.common.api import SIMPLE_IO
    from zato.common.odb.model import HTTPBasicAuth, HTTPSOAP, Service

    # Each secured ping channel gets its own random password, keyed by channel base name.
    passwords = {
        'ping.plain_http.basic_auth': None,
    }

    for password in passwords:
        passwords[password] = self.generate_password()

    ping_impl_name = 'zato.server.service.internal.Ping'
    ping_service_name = 'zato.ping'
    ping_service = Service(None, ping_service_name, True, ping_impl_name, True, cluster)
    session.add(ping_service)

    #
    # .. no security ..
    #
    # TODO
    # Change it to /zato/json/ping
    # and add an actual /zato/ping with no data format specified.
    ping_no_sec_channel = HTTPSOAP(
        None, 'zato.ping', True, True, 'channel',
        'plain_http', None, '/zato/ping', None, '', None, SIMPLE_IO.FORMAT.JSON, service=ping_service, cluster=cluster)
    session.add(ping_no_sec_channel)

    #
    # All the possible options
    #
    # Plain HTTP / Basic auth
    #
    # NOTE: the list currently holds only plain_http, so the SOAP-specific
    # branch inside the loop below never fires.
    transports = ['plain_http',]

    for transport in transports:
        data_format = SIMPLE_IO.FORMAT.JSON
        base_name = 'ping.{0}.basic_auth'.format(transport)
        zato_name = 'zato.{0}'.format(base_name)
        url = '/zato/{0}'.format(base_name)
        soap_action, soap_version = (zato_name, '1.1') if transport == 'soap' else ('', None)
        password = passwords[base_name]

        # One Basic Auth definition and one channel per transport.
        sec = HTTPBasicAuth(None, zato_name, True, zato_name, 'Zato ping', password, cluster)
        session.add(sec)

        channel = HTTPSOAP(
            None, zato_name, True, True, 'channel', transport, None, url, None, soap_action,
            soap_version, data_format, service=ping_service, security=sec, cluster=cluster)
        session.add(channel)
# ################################################################################################################################
def add_admin_invoke(self, session, cluster, service, admin_invoke_sec):
    """ Adds an admin channel for invoking services from web admin and CLI.
    """
    # Zato
    from zato.common.api import MISC, SIMPLE_IO
    from zato.common.odb.model import HTTPSOAP

    # The channel is a plain-HTTP JSON one, secured with the admin-invoke definition.
    admin_channel = HTTPSOAP(
        None, MISC.DefaultAdminInvokeChannel, True, True, 'channel', 'plain_http',
        None, ServiceConst.API_Admin_Invoke_Url_Path, None, '', None,
        SIMPLE_IO.FORMAT.JSON, service=service, cluster=cluster,
        security=admin_invoke_sec)

    session.add(admin_channel)
# ################################################################################################################################
def add_internal_invoke(self, session, cluster, service, internal_invoke_sec):
    """ Adds an internal channel for invoking services from other servers.
    """
    # Zato
    from zato.common.api import SIMPLE_IO
    from zato.common.odb.model import HTTPSOAP

    # A plain-HTTP JSON channel secured with the internal-invoke definition.
    internal_channel = HTTPSOAP(
        None, 'zato.internal.invoke', True, True, 'channel', 'plain_http',
        None, '/zato/internal/invoke', None, '', None, SIMPLE_IO.FORMAT.JSON, service=service, cluster=cluster,
        security=internal_invoke_sec)

    session.add(internal_channel)
# ################################################################################################################################
def add_default_rbac_permissions(self, session, cluster):
    """ Adds default CRUD permissions used by RBAC.
    """
    # Zato
    from zato.common.odb.model import RBACPermission

    # One permission row per CRUD verb.
    for permission_name in ('Create', 'Read', 'Update', 'Delete'):
        permission = RBACPermission()
        permission.name = permission_name
        permission.cluster = cluster
        session.add(permission)
# ################################################################################################################################
def add_default_rbac_roles(self, session, cluster):
    """ Adds default roles used by RBAC and returns the root role,
    which all other roles descend from.
    """
    # Zato
    from zato.common.odb.model import RBACRole

    root_role = RBACRole(name='Root', parent=None, cluster=cluster)
    session.add(root_role)

    return root_role
# ################################################################################################################################
def add_rbac_role_and_acct(self, session, cluster, root_rbac_role, role_name, account_name, realm):
    """ Create an RBAC role alongside a default account and random password for accessing it via Basic Auth.

    :param session: SQLAlchemy session instance
    :param cluster: Cluster model instance
    :param root_rbac_role: RBACRole model instance for the root role
    :param role_name: "My Descriptive RBAC Role Name"
    :param account_name: "my_default_rbac_user"
    :param realm: "http_auth_service_realm"
    :return: New RBACRole model instance
    """
    # Zato
    from zato.common.api import MISC
    from zato.common.odb.model import HTTPBasicAuth, RBACClientRole, RBACRole

    # The new role hangs off the root role ..
    new_role = RBACRole(name=role_name, parent=root_rbac_role, cluster=cluster)
    session.add(new_role)

    # .. a Basic Auth account with a freshly generated password gives access to it ..
    basic_auth = HTTPBasicAuth(None, account_name, True, account_name, realm, self.generate_password(), cluster)
    session.add(basic_auth)

    # .. and a client-role mapping ties the account to the role.
    client_def = MISC.SEPARATOR.join(('sec_def', 'basic_auth', account_name))
    mapping_name = MISC.SEPARATOR.join((client_def, new_role.name))

    mapping = RBACClientRole(name=mapping_name, client_def=client_def, role=new_role, cluster=cluster)
    session.add(mapping)

    return new_role
# ################################################################################################################################
def add_default_caches(self, session, cluster):
    """ Adds default caches to the cluster.
    """
    # Zato
    from zato.common.api import CACHE
    from zato.common.odb.model import CacheBuiltin

    def _new_builtin_cache(name, extend_expiry_on_get):
        # Builds one built-in cache definition - all the settings except
        # the name and the get-extension flag are shared by the default caches.
        item = CacheBuiltin()
        item.cluster = cluster
        item.name = name
        item.is_active = True
        item.is_default = True
        item.max_size = CACHE.DEFAULT.MAX_SIZE
        item.max_item_size = CACHE.DEFAULT.MAX_ITEM_SIZE
        item.extend_expiry_on_get = extend_expiry_on_get
        item.extend_expiry_on_set = True
        item.cache_type = CACHE.TYPE.BUILTIN
        item.sync_method = CACHE.SYNC_METHOD.IN_BACKGROUND.id
        item.persistent_storage = CACHE.PERSISTENT_STORAGE.SQL.id
        return item

    # This is the default cache that is used if a specific one is not selected by users
    session.add(_new_builtin_cache(CACHE.Default_Name.Main, True))

    # This is used for Bearer tokens - note that it does not extend the key's expiration on .get.
    # Otherwise, it is the same as the default one.
    session.add(_new_builtin_cache(CACHE.Default_Name.Bearer_Token, False))
# ################################################################################################################################
def add_jwt_log_in(self, session, cluster, service):
    """ Mounts the JWT log-in service on its default plain HTTP channel (/zato/jwt/log-in).
    """
    # Zato
    from zato.common.api import DATA_FORMAT
    from zato.common.odb.model import HTTPSOAP

    log_in_channel = HTTPSOAP(None, 'zato.security.jwt.log-in', True, True, 'channel', 'plain_http',
        None, '/zato/jwt/log-in', None, '', None, DATA_FORMAT.JSON, merge_url_params_req=True, service=service,
        cluster=cluster)

    session.add(log_in_channel)
# ################################################################################################################################
def add_jwt_log_out(self, session, cluster, service):
    """ Mounts the JWT log-out service on its default plain HTTP channel (/zato/jwt/log-out).
    """
    # Zato
    from zato.common.api import DATA_FORMAT
    from zato.common.odb.model import HTTPSOAP

    log_out_channel = HTTPSOAP(None, 'zato.security.jwt.log-out', True, True, 'channel', 'plain_http',
        None, '/zato/jwt/log-out', None, '', None, DATA_FORMAT.JSON, merge_url_params_req=True, service=service,
        cluster=cluster)

    session.add(log_out_channel)
# ################################################################################################################################
def add_rbac_channel(self, session, cluster, service, rbac_role, url_path, permit_read=True, permit_write=False, **kwargs):
    """ Create an RBAC-authenticated plain HTTP channel associated with a service.

    :param session: SQLAlchemy session instance
    :param cluster: Cluster model instance
    :param service: Service model instance
    :param rbac_role: RBACRole model instance
    :param url_path: "/url/path" to expose service as
    """
    # Zato
    from zato.common.odb.model import HTTPSOAP, RBACPermission, RBACRolePermission

    # Internal admin channels are named after the service, minus the 'zato.' prefix.
    channel_name = 'admin.' + service.name.replace('zato.', '')

    rbac_channel = HTTPSOAP(name=channel_name, is_active=True, is_internal=True, connection='channel',
        transport='plain_http', url_path=url_path, soap_action='', has_rbac=True, sec_use_rbac=True,
        merge_url_params_req=True, service=service, cluster=cluster, **kwargs)
    session.add(rbac_channel)

    # Work out which permission groups the role is granted - read first,
    # then write, each group resolved through its own query.
    wanted_groups = []
    if permit_read:
        wanted_groups.append(('Read',))
    if permit_write:
        wanted_groups.append(('Create', 'Update'))

    granted = []
    for group in wanted_groups:
        granted.extend(session.query(RBACPermission).filter(RBACPermission.name.in_(group)))

    # Attach each granted permission to the role for this service.
    for permission in granted:
        session.add(RBACRolePermission(role=rbac_role, perm=permission, service=service, cluster=cluster))
# ################################################################################################################################
def add_check(self, session, cluster, service, pubapi_sec):
    """ Adds check-service channels for the given service - one JSON channel
    under /zato/checks, secured with the public API security definition.
    """
    # Zato
    from zato.common.api import DATA_FORMAT
    from zato.common.odb.model import HTTPSOAP

    # Currently JSON only, but the loop makes it easy to add more data formats.
    for fmt in (DATA_FORMAT.JSON,):

        channel_name = 'zato.checks.{}.{}'.format(fmt, service.name)
        channel_path = '/zato/checks/{}/{}'.format(fmt, service.name)

        check_channel = HTTPSOAP(None, channel_name, True, True, 'channel', 'plain_http', None, channel_path,
            None, '', None, fmt, merge_url_params_req=True, service=service, cluster=cluster, security=pubapi_sec)

        session.add(check_channel)
# ################################################################################################################################
def add_cache_endpoints(self, session, cluster):
    """ Adds public REST endpoints for the built-in cache API - one Service and one
    JSON plain HTTP channel per cache operation, all protected by a single shared
    Basic Auth definition ('zato.default.cache.client').
    """
    # Python 2/3 compatibility
    from zato.common.ext.future.utils import iteritems

    # Zato
    from zato.common.api import DATA_FORMAT
    from zato.common.odb.model import HTTPBasicAuth, HTTPSOAP, Service

    # Maps each service name to the URL path its channel is mounted under.
    service_to_endpoint = {

        # For single keys
        'zato.cache.builtin.pubapi.single-key-service': '/zato/cache/{key}',

        # Multi-key get
        'zato.cache.builtin.pubapi.get-by-prefix': '/zato/cache/get/by-prefix/{key}',
        'zato.cache.builtin.pubapi.get-by-regex': '/zato/cache/get/by-regex/{key}',
        'zato.cache.builtin.pubapi.get-by-suffix': '/zato/cache/get/by-suffix/{key}',
        'zato.cache.builtin.pubapi.get-contains': '/zato/cache/get/contains/{key}',
        'zato.cache.builtin.pubapi.get-not-contains': '/zato/cache/get/not-contains/{key}',
        'zato.cache.builtin.pubapi.get-contains-all': '/zato/cache/get/contains-all',
        'zato.cache.builtin.pubapi.get-contains-any': '/zato/cache/get/contains-any',

        # Multi-key set
        'zato.cache.builtin.pubapi.set-by-prefix': '/zato/cache/set/by-prefix/{key}',
        'zato.cache.builtin.pubapi.set-by-regex': '/zato/cache/set/by-regex/{key}',
        'zato.cache.builtin.pubapi.set-by-suffix': '/zato/cache/set/by-suffix/{key}',
        'zato.cache.builtin.pubapi.set-contains': '/zato/cache/set/contains/{key}',
        'zato.cache.builtin.pubapi.set-not-contains': '/zato/cache/set/not-contains/{key}',
        'zato.cache.builtin.pubapi.set-contains-all': '/zato/cache/set/contains-all',
        'zato.cache.builtin.pubapi.set-contains-any': '/zato/cache/set/contains-any',

        # Multi-key delete
        'zato.cache.builtin.pubapi.delete-by-prefix': '/zato/cache/delete/by-prefix/{key}',
        'zato.cache.builtin.pubapi.delete-by-regex': '/zato/cache/delete/by-regex/{key}',
        'zato.cache.builtin.pubapi.delete-by-suffix': '/zato/cache/delete/by-suffix/{key}',
        'zato.cache.builtin.pubapi.delete-contains': '/zato/cache/delete/contains/{key}',
        'zato.cache.builtin.pubapi.delete-not-contains': '/zato/cache/delete/not-contains/{key}',
        'zato.cache.builtin.pubapi.delete-contains-all': '/zato/cache/delete/contains-all',
        'zato.cache.builtin.pubapi.delete-contains-any': '/zato/cache/delete/contains-any',

        # Multi-key expire
        'zato.cache.builtin.pubapi.expire-by-prefix': '/zato/cache/expire/by-prefix/{key}',
        'zato.cache.builtin.pubapi.expire-by-regex': '/zato/cache/expire/by-regex/{key}',
        'zato.cache.builtin.pubapi.expire-by-suffix': '/zato/cache/expire/by-suffix/{key}',
        'zato.cache.builtin.pubapi.expire-contains': '/zato/cache/expire/contains/{key}',
        'zato.cache.builtin.pubapi.expire-not-contains': '/zato/cache/expire/not-contains/{key}',
        'zato.cache.builtin.pubapi.expire-contains-all': '/zato/cache/expire/contains-all',
        'zato.cache.builtin.pubapi.expire-contains-any': '/zato/cache/expire/contains-any',
    }

    # Maps each service name to its Python implementation class.
    service_to_impl = {

        # For single keys
        'zato.cache.builtin.pubapi.single-key-service': 'zato.server.service.internal.cache.builtin.pubapi.SingleKeyService',

        # Multi-key get
        'zato.cache.builtin.pubapi.get-by-prefix': 'zato.server.service.internal.cache.builtin.pubapi.GetByPrefix',
        'zato.cache.builtin.pubapi.get-by-regex': 'zato.server.service.internal.cache.builtin.pubapi.GetByRegex',
        'zato.cache.builtin.pubapi.get-by-suffix': 'zato.server.service.internal.cache.builtin.pubapi.GetBySuffix',
        'zato.cache.builtin.pubapi.get-contains': 'zato.server.service.internal.cache.builtin.pubapi.GetContains',
        'zato.cache.builtin.pubapi.get-not-contains': 'zato.server.service.internal.cache.builtin.pubapi.GetNotContains',
        'zato.cache.builtin.pubapi.get-contains-all': 'zato.server.service.internal.cache.builtin.pubapi.GetContainsAll',
        'zato.cache.builtin.pubapi.get-contains-any': 'zato.server.service.internal.cache.builtin.pubapi.GetContainsAny',

        # Multi-key set
        'zato.cache.builtin.pubapi.set-by-prefix': 'zato.server.service.internal.cache.builtin.pubapi.SetByPrefix',
        'zato.cache.builtin.pubapi.set-by-regex': 'zato.server.service.internal.cache.builtin.pubapi.SetByRegex',
        'zato.cache.builtin.pubapi.set-by-suffix': 'zato.server.service.internal.cache.builtin.pubapi.SetBySuffix',
        'zato.cache.builtin.pubapi.set-contains': 'zato.server.service.internal.cache.builtin.pubapi.SetContains',
        'zato.cache.builtin.pubapi.set-not-contains': 'zato.server.service.internal.cache.builtin.pubapi.SetNotContains',
        'zato.cache.builtin.pubapi.set-contains-all': 'zato.server.service.internal.cache.builtin.pubapi.SetContainsAll',
        'zato.cache.builtin.pubapi.set-contains-any': 'zato.server.service.internal.cache.builtin.pubapi.SetContainsAny',

        # Multi-key delete
        'zato.cache.builtin.pubapi.delete-by-prefix': 'zato.server.service.internal.cache.builtin.pubapi.DeleteByPrefix',
        'zato.cache.builtin.pubapi.delete-by-regex': 'zato.server.service.internal.cache.builtin.pubapi.DeleteByRegex',
        'zato.cache.builtin.pubapi.delete-by-suffix': 'zato.server.service.internal.cache.builtin.pubapi.DeleteBySuffix',
        'zato.cache.builtin.pubapi.delete-contains': 'zato.server.service.internal.cache.builtin.pubapi.DeleteContains',
        'zato.cache.builtin.pubapi.delete-not-contains': 'zato.server.service.internal.cache.builtin.pubapi.DeleteNotContains',
        'zato.cache.builtin.pubapi.delete-contains-all': 'zato.server.service.internal.cache.builtin.pubapi.DeleteContainsAll',
        'zato.cache.builtin.pubapi.delete-contains-any': 'zato.server.service.internal.cache.builtin.pubapi.DeleteContainsAny',

        # Multi-key expire
        'zato.cache.builtin.pubapi.expire-by-prefix': 'zato.server.service.internal.cache.builtin.pubapi.ExpireByPrefix',
        'zato.cache.builtin.pubapi.expire-by-regex': 'zato.server.service.internal.cache.builtin.pubapi.ExpireByRegex',
        'zato.cache.builtin.pubapi.expire-by-suffix': 'zato.server.service.internal.cache.builtin.pubapi.ExpireBySuffix',
        'zato.cache.builtin.pubapi.expire-contains': 'zato.server.service.internal.cache.builtin.pubapi.ExpireContains',
        'zato.cache.builtin.pubapi.expire-not-contains': 'zato.server.service.internal.cache.builtin.pubapi.ExpireNotContains',
        'zato.cache.builtin.pubapi.expire-contains-all': 'zato.server.service.internal.cache.builtin.pubapi.ExpireContainsAll',
        'zato.cache.builtin.pubapi.expire-contains-any': 'zato.server.service.internal.cache.builtin.pubapi.ExpireContainsAny',
    }

    # A single Basic Auth definition shared by all the cache channels below.
    sec = HTTPBasicAuth(None, 'zato.default.cache.client', True, 'zato.cache', 'Zato cache', self.generate_password(), cluster)
    session.add(sec)

    # One Service and one JSON channel per cache operation.
    for name, impl_name in iteritems(service_to_impl):

        service = Service(None, name, True, impl_name, True, cluster)
        session.add(service)

        url_path = service_to_endpoint[name]

        http_soap = HTTPSOAP(None, name, True, True, 'channel', 'plain_http', None, url_path, None, '',
            None, DATA_FORMAT.JSON, security=sec, service=service, cluster=cluster)

        session.add(http_soap)
# ################################################################################################################################
def add_crypto_endpoints(self, session, cluster):
    """ Adds REST endpoints for the crypto API - one Service and one JSON channel
    per operation, all guarded by a shared Basic Auth definition.
    """
    # Python 2/3 compatibility
    from zato.common.ext.future.utils import iteritems

    # Zato
    from zato.common.api import DATA_FORMAT
    from zato.common.odb.model import HTTPBasicAuth, HTTPSOAP, Service

    # Maps each service name to its implementation class and the URL path it is mounted under.
    details = {
        'zato.crypto.encrypt': (
            'zato.server.service.internal.crypto.Encrypt', '/zato/crypto/encrypt'),
        'zato.crypto.decrypt': (
            'zato.server.service.internal.crypto.Decrypt', '/zato/crypto/decrypt'),
        'zato.crypto.hash-secret': (
            'zato.server.service.internal.crypto.HashSecret', '/zato/crypto/hash-secret'),
        'zato.crypto.verify-hash': (
            'zato.server.service.internal.crypto.VerifyHash', '/zato/crypto/verify-hash'),
        'zato.crypto.generate-secret': (
            'zato.server.service.internal.crypto.GenerateSecret', '/zato/crypto/generate-secret'),
        'zato.crypto.generate-password': (
            'zato.server.service.internal.crypto.GeneratePassword', '/zato/crypto/generate-password'),
    }

    # One Basic Auth definition shared by every crypto channel.
    sec = HTTPBasicAuth(None, 'zato.default.crypto.client', True, 'zato.crypto', 'Zato crypto', self.generate_password(), cluster)
    session.add(sec)

    for name, (impl_name, url_path) in iteritems(details):

        service = Service(None, name, True, impl_name, True, cluster)
        session.add(service)

        http_soap = HTTPSOAP(None, name, True, True, 'channel', 'plain_http', None, url_path, None, '',
            None, DATA_FORMAT.JSON, security=sec, service=service, cluster=cluster)
        session.add(http_soap)
# ################################################################################################################################
def add_pubsub_sec_endpoints(self, session, cluster):
    """ Adds default pub/sub objects to a new cluster - security definitions,
    endpoints, topics, subscriptions, REST channels and outgoing connections.

    :param session: SQLAlchemy session instance
    :param cluster: Cluster model instance
    """
    # Zato
    from zato.common.api import CONNECTION, DATA_FORMAT, PUBSUB, URL_TYPE
    from zato.common.json_internal import dumps
    from zato.common.odb.model import HTTPBasicAuth, HTTPSOAP, PubSubEndpoint, PubSubSubscription, PubSubTopic, Service
    from zato.common.pubsub import new_sub_key
    from zato.common.util.time_ import utcnow_as_ms

    # Basic Auth definitions for the default, test and internal pub/sub accounts.
    sec_pubsub_default = HTTPBasicAuth(
        None, PUBSUB.DEFAULT.DEFAULT_SECDEF_NAME, True, PUBSUB.DEFAULT.DEFAULT_USERNAME,
        'Zato pub/sub default', self.generate_password(), cluster)
    session.add(sec_pubsub_default)

    sec_pubsub_test = HTTPBasicAuth(
        None, PUBSUB.DEFAULT.TEST_SECDEF_NAME, True, PUBSUB.DEFAULT.TEST_USERNAME,
        'Zato pub/sub test', self.generate_password(), cluster)
    session.add(sec_pubsub_test)

    sec_default_internal = HTTPBasicAuth(None, PUBSUB.DEFAULT.INTERNAL_SECDEF_NAME, True, PUBSUB.DEFAULT.INTERNAL_USERNAME,
        'Zato pub/sub internal', self.generate_password(), cluster)
    session.add(sec_default_internal)

    # Services that implement the pub/sub REST API, plus the demo/test logger service.
    impl_name1 = 'zato.server.service.internal.pubsub.pubapi.TopicService'
    impl_name2 = 'zato.server.service.internal.pubsub.pubapi.SubscribeService'
    impl_name3 = 'zato.server.service.internal.pubsub.pubapi.MessageService'
    impl_demo = 'zato.server.service.internal.helpers.JSONRawRequestLogger'

    service_topic = Service(None, 'zato.pubsub.pubapi.topic-service', True, impl_name1, True, cluster)
    service_sub = Service(None, 'zato.pubsub.pubapi.subscribe-service', True, impl_name2, True, cluster)
    service_msg = Service(None, 'zato.pubsub.pubapi.message-service', True, impl_name3, True, cluster)
    service_demo = Service(None, 'pub.helpers.raw-request-logger', True, impl_demo, True, cluster)
    service_test = service_demo

    # Opaque data that lets clients use topic contain slash characters
    opaque = dumps({'match_slash':True})

    # REST channels for the pub/sub API and the demo/test samples.
    chan_topic = HTTPSOAP(None, 'zato.pubsub.topic.topic_name', True, True, CONNECTION.CHANNEL,
        URL_TYPE.PLAIN_HTTP, None, '/zato/pubsub/topic/{topic_name}',
        None, '', None, DATA_FORMAT.JSON, security=None, service=service_topic, opaque=opaque,
        cluster=cluster)

    chan_sub = HTTPSOAP(None, 'zato.pubsub.subscribe.topic.topic_name', True, True, CONNECTION.CHANNEL,
        URL_TYPE.PLAIN_HTTP, None, '/zato/pubsub/subscribe/topic/{topic_name}',
        None, '', None, DATA_FORMAT.JSON, security=None, service=service_sub, opaque=opaque,
        cluster=cluster)

    chan_msg = HTTPSOAP(None, 'zato.pubsub.msg.msg_id', True, True, CONNECTION.CHANNEL,
        URL_TYPE.PLAIN_HTTP, None, '/zato/pubsub/msg/{msg_id}',
        None, '', None, DATA_FORMAT.JSON, security=None, service=service_msg, opaque=opaque,
        cluster=cluster)

    chan_demo = HTTPSOAP(None, 'pubsub.demo.sample.channel', True, True, CONNECTION.CHANNEL,
        URL_TYPE.PLAIN_HTTP, None, '/zato/pubsub/zato.demo.sample',
        None, '', None, DATA_FORMAT.JSON, security=sec_pubsub_default, service=service_demo, opaque=opaque,
        cluster=cluster)

    chan_test = HTTPSOAP(None, 'pubsub.test.sample.channel', True, True, CONNECTION.CHANNEL,
        URL_TYPE.PLAIN_HTTP, None, '/zato/pubsub/zato.test.sample',
        None, '', None, DATA_FORMAT.JSON, security=sec_pubsub_test, service=service_test, opaque=opaque,
        cluster=cluster)

    # Outgoing connections used to deliver messages to the demo/test sample channels.
    outconn_demo = HTTPSOAP(None, 'pubsub.demo.sample.outconn', True, True, CONNECTION.OUTGOING,
        URL_TYPE.PLAIN_HTTP, 'http://127.0.0.1:17010', '/zato/pubsub/zato.demo.sample',
        None, '', None, DATA_FORMAT.JSON, security=sec_pubsub_default, opaque=opaque,
        cluster=cluster)

    outconn_test = HTTPSOAP(None, 'pubsub.test.sample.outconn', True, True, CONNECTION.OUTGOING,
        URL_TYPE.PLAIN_HTTP, 'http://127.0.0.1:17010', '/zato/pubsub/zato.test.sample',
        None, '', None, DATA_FORMAT.JSON, security=sec_pubsub_test, opaque=opaque,
        cluster=cluster)

    # Endpoints - internal, default REST, default service-based and test.
    endpoint_default_internal = PubSubEndpoint()
    endpoint_default_internal.name = PUBSUB.DEFAULT.INTERNAL_ENDPOINT_NAME
    endpoint_default_internal.is_internal = True
    endpoint_default_internal.role = PUBSUB.ROLE.PUBLISHER_SUBSCRIBER.id
    endpoint_default_internal.topic_patterns = 'pub=/*\nsub=/*'
    endpoint_default_internal.security = sec_default_internal
    endpoint_default_internal.cluster = cluster
    endpoint_default_internal.endpoint_type = PUBSUB.ENDPOINT_TYPE.INTERNAL.id

    endpoint_default_rest = PubSubEndpoint()
    endpoint_default_rest.name = 'zato.pubsub.default.rest'
    endpoint_default_rest.is_internal = False
    endpoint_default_rest.role = PUBSUB.ROLE.PUBLISHER_SUBSCRIBER.id
    endpoint_default_rest.topic_patterns = 'pub=/*\nsub=/*'
    endpoint_default_rest.security = sec_pubsub_default
    endpoint_default_rest.cluster = cluster
    endpoint_default_rest.endpoint_type = PUBSUB.ENDPOINT_TYPE.REST.id

    endpoint_default_service = PubSubEndpoint()
    endpoint_default_service.name = 'zato.pubsub.default.service'
    endpoint_default_service.is_internal = False
    endpoint_default_service.role = PUBSUB.ROLE.PUBLISHER_SUBSCRIBER.id
    endpoint_default_service.topic_patterns = 'pub=/*\nsub=/*'
    endpoint_default_service.cluster = cluster
    endpoint_default_service.endpoint_type = PUBSUB.ENDPOINT_TYPE.SERVICE.id
    endpoint_default_service.service = service_demo

    endpoint_test = PubSubEndpoint()
    endpoint_test.name = 'zato.pubsub.test.endpoint'
    endpoint_test.is_internal = True
    endpoint_test.role = PUBSUB.ROLE.PUBLISHER_SUBSCRIBER.id
    endpoint_test.topic_patterns = 'pub=/zato/test/*\nsub=/zato/test/*'
    endpoint_test.security = sec_pubsub_test
    endpoint_test.cluster = cluster
    endpoint_test.endpoint_type = PUBSUB.ENDPOINT_TYPE.REST.id

    # Default topics - demo, unique and test.
    topic_demo = PubSubTopic()
    topic_demo.name = '/zato/demo/sample'
    topic_demo.is_active = True
    topic_demo.is_api_sub_allowed = True
    topic_demo.is_internal = True
    topic_demo.max_depth = 100
    topic_demo.has_gd = False
    topic_demo.cluster = cluster

    topic_unique = PubSubTopic()
    topic_unique.name = '/zato/demo/unique'
    topic_unique.is_active = True
    topic_unique.is_api_sub_allowed = True
    topic_unique.is_internal = True
    topic_unique.max_depth = 100
    topic_unique.has_gd = False
    topic_unique.cluster = cluster

    topic_test = PubSubTopic()
    topic_test.name = '/zato/test/sample'
    topic_test.is_active = True
    topic_test.is_api_sub_allowed = True
    topic_test.is_internal = False
    topic_test.max_depth = 100
    topic_test.has_gd = False
    topic_test.cluster = cluster

    # Default subscriptions - REST, service-based and test.
    sub_default_rest = PubSubSubscription()
    sub_default_rest.creation_time = utcnow_as_ms()
    sub_default_rest.topic = topic_demo
    sub_default_rest.endpoint = endpoint_default_rest
    sub_default_rest.sub_key = new_sub_key(endpoint_default_rest.endpoint_type)
    sub_default_rest.has_gd = False
    sub_default_rest.sub_pattern_matched = 'sub=/*'
    sub_default_rest.active_status = PUBSUB.QUEUE_ACTIVE_STATUS.FULLY_ENABLED.id
    sub_default_rest.cluster = cluster
    sub_default_rest.wrap_one_msg_in_list = False
    sub_default_rest.delivery_err_should_block = False
    sub_default_rest.out_http_soap = outconn_demo

    sub_default_service = PubSubSubscription()
    sub_default_service.creation_time = utcnow_as_ms()
    sub_default_service.topic = topic_demo
    sub_default_service.endpoint = endpoint_default_service
    sub_default_service.sub_key = new_sub_key(endpoint_default_service.endpoint_type)
    sub_default_service.has_gd = False
    sub_default_service.sub_pattern_matched = 'sub=/*'
    sub_default_service.active_status = PUBSUB.QUEUE_ACTIVE_STATUS.FULLY_ENABLED.id
    sub_default_service.cluster = cluster
    sub_default_service.wrap_one_msg_in_list = False
    sub_default_service.delivery_err_should_block = False

    sub_test = PubSubSubscription()
    sub_test.creation_time = utcnow_as_ms()
    sub_test.topic = topic_test
    sub_test.endpoint = endpoint_test
    sub_test.sub_key = new_sub_key(endpoint_test.endpoint_type)
    sub_test.has_gd = False
    sub_test.sub_pattern_matched = 'sub=/zato/test/*'
    sub_test.active_status = PUBSUB.QUEUE_ACTIVE_STATUS.FULLY_ENABLED.id
    sub_test.cluster = cluster
    sub_test.wrap_one_msg_in_list = False
    sub_test.delivery_err_should_block = False
    sub_test.out_http_soap = outconn_test

    session.add(endpoint_default_internal)
    session.add(endpoint_default_rest)

    # NOTE(review): endpoint_default_service and endpoint_test are not added
    # explicitly - presumably they are persisted via the session cascade from
    # the subscriptions that reference them; confirm this is intended.

    session.add(topic_demo)

    # Fix: topic_unique was previously constructed but never added to the session,
    # so the /zato/demo/unique topic was silently dropped.
    session.add(topic_unique)

    session.add(topic_test)

    session.add(sub_default_rest)
    session.add(sub_default_service)
    session.add(sub_test)

    session.add(service_topic)
    session.add(service_sub)
    session.add(service_msg)

    session.add(chan_topic)
    session.add(chan_sub)
    session.add(chan_msg)
    session.add(chan_demo)
    session.add(chan_test)

    session.add(outconn_demo)
    session.add(outconn_test)
# ################################################################################################################################
def add_internal_callback_wmq(self, session, cluster):
    """ Adds the internal IBM MQ callback channel together with the service
    it invokes and the Basic Auth definition that protects it.
    """
    # Zato
    from zato.common.api import IPC
    from zato.common.odb.model import HTTPBasicAuth, HTTPSOAP, Service

    # The service that handles messages received from IBM MQ.
    callback_service = Service(None, 'zato.channel.jms-wmq.on-message-received', True,
        'zato.server.service.internal.channel.jms_wmq.OnMessageReceived', True, cluster)

    # Credentials used by the IBM MQ connector to call back into the server.
    connector_username = IPC.CONNECTOR.USERNAME.IBM_MQ
    basic_auth = HTTPBasicAuth(None, connector_username, True, connector_username, 'Zato IBM MQ',
        self.generate_password(), cluster)

    callback_channel = HTTPSOAP(None, 'zato.internal.callback.wmq', True, True, 'channel', 'plain_http', None,
        '/zato/internal/callback/wmq',
        None, '', None, None, security=basic_auth, service=callback_service, cluster=cluster)

    session.add(basic_auth)
    session.add(callback_service)
    session.add(callback_channel)
# ################################################################################################################################
def add_sftp_credentials(self, session, cluster):
    """ Adds the Basic Auth definition through which the SFTP connector talks to the server.
    """
    # Zato
    from zato.common.api import IPC
    from zato.common.odb.model import HTTPBasicAuth

    connector_username = IPC.CONNECTOR.USERNAME.SFTP
    basic_auth = HTTPBasicAuth(None, connector_username, True, connector_username, 'Zato SFTP',
        self.generate_password(), cluster)

    session.add(basic_auth)
# ################################################################################################################################
def add_cache_credentials(self, session, cluster):
    """ Adds the Basic Auth definition for the internal cache API user.
    """
    # Zato
    from zato.common.api import CACHE
    from zato.common.odb.model import HTTPBasicAuth

    api_username = CACHE.API_USERNAME
    basic_auth = HTTPBasicAuth(None, api_username, True, api_username, 'Zato Cache',
        self.generate_password(), cluster)

    session.add(basic_auth)
# ################################################################################################################################
def add_sso_endpoints(self, session, cluster):
    """ Adds REST channels and backing services for the SSO API, mounted under
    the default SSO REST prefix.
    """
    # Zato
    from zato.common.api import DATA_FORMAT
    from zato.common.odb.model import HTTPSOAP, Service

    # NOTE(review): SSO is presumably imported at the module level (not visible in this chunk) - confirm.
    prefix = SSO.Default.RESTPrefix

    # Each row is: service name, implementation class, URL path.
    data = [

        # Users
        ['zato.sso.user.create', 'zato.server.service.internal.sso.user.Create', f'{prefix}/user/create'],
        ['zato.sso.user.signup', 'zato.server.service.internal.sso.user.Signup', f'{prefix}/user/signup'],
        ['zato.sso.user.approve', 'zato.server.service.internal.sso.user.Approve', f'{prefix}/user/approve'],
        ['zato.sso.user.reject', 'zato.server.service.internal.sso.user.Reject', f'{prefix}/user/reject'],
        ['zato.sso.user.login', 'zato.server.service.internal.sso.user.Login', f'{prefix}/user/login'],
        ['zato.sso.user.logout', 'zato.server.service.internal.sso.user.Logout', f'{prefix}/user/logout'],
        ['zato.sso.user.user', 'zato.server.service.internal.sso.user.User', f'{prefix}/user'],
        ['zato.sso.user.password', 'zato.server.service.internal.sso.user.Password', f'{prefix}/user/password'],
        ['zato.sso.user.search', 'zato.server.service.internal.sso.user.Search', f'{prefix}/user/search'],
        ['zato.sso.user.totp', 'zato.server.service.internal.sso.user.TOTP', f'{prefix}/user/totp'],
        ['zato.sso.user.lock', 'zato.server.service.internal.sso.user.Lock', f'{prefix}/user/lock'],

        # Linked accounts
        ['zato.sso.user.linked-auth', 'zato.server.service.internal.sso.user.LinkedAuth', f'{prefix}/user/linked'],

        # User sessions
        ['zato.sso.session.session', 'zato.server.service.internal.sso.session.Session', f'{prefix}/user/session'],
        ['zato.sso.session.session-list', 'zato.server.service.internal.sso.session.SessionList', f'{prefix}/user/session/list'],

        # User attributes
        ['zato.sso.user-attr.user-attr', 'zato.server.service.internal.sso.user_attr.UserAttr', f'{prefix}/user/attr'],
        ['zato.sso.user-attr.user-attr-exists', 'zato.server.service.internal.sso.user_attr.UserAttrExists', f'{prefix}/user/attr/exists'],
        ['zato.sso.user-attr.user-attr-names', 'zato.server.service.internal.sso.user_attr.UserAttrNames', f'{prefix}/user/attr/names'],

        # Session attributes
        ['zato.sso.session-attr.session-attr', 'zato.server.service.internal.sso.session_attr.SessionAttr', f'{prefix}/session/attr'],
        ['zato.sso.session-attr.session-attr-exists', 'zato.server.service.internal.sso.session_attr.SessionAttrExists', f'{prefix}/session/attr/exists'],
        ['zato.sso.session-attr.session-attr-names', 'zato.server.service.internal.sso.session_attr.SessionAttrNames', f'{prefix}/session/attr/names'],

        # Password reset
        ['zato.sso.password-reset.password-reset', 'zato.server.service.internal.sso.password_reset.PasswordReset', f'{prefix}/password/reset'],
    ]

    for name, impl_name, url_path in data:
        service = Service(None, name, True, impl_name, True, cluster)

        # Note that the channel's name is the URL path itself (second positional argument).
        channel = HTTPSOAP(None, url_path, True, True, 'channel', 'plain_http', None, url_path, None, '', None,
            DATA_FORMAT.JSON, security=None, service=service, cluster=cluster)

        session.add(service)
        session.add(channel)
# ################################################################################################################################
| 47,380
|
Python
|
.py
| 747
| 52.983936
| 158
| 0.608913
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,457
|
create_web_admin.py
|
zatosource_zato/code/zato-cli/src/zato/cli/create_web_admin.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
from copy import deepcopy
# Zato
from zato.cli import common_odb_opts, ZatoCommand
from zato.common.const import ServiceConst
from zato.common.util.open_ import open_r, open_w
# Template for web-admin.conf, the web admin's JSON configuration file.
# It is rendered with str.format, which is why literal braces are doubled ({{ }}).
config_template = """{{
"host": "{host}",
"port": {port},
"db_type": "{db_type}",
"log_config": "./config/repo/{log_config}",
"lb_agent_use_tls": {lb_agent_use_tls},
"lb_use_tls": false,
"lb_tls_verify": true,
"zato_secret_key": "{zato_secret_key}",
"well_known_data": "{well_known_data}",
"is_totp_enabled": false,
"DEBUG": 0,
"ALLOWED_HOSTS": ["*"],
"DATABASE_NAME": "{DATABASE_NAME}",
"DATABASE_USER": "{DATABASE_USER}",
"DATABASE_PASSWORD": "{DATABASE_PASSWORD}",
"DATABASE_HOST": "{DATABASE_HOST}",
"DATABASE_PORT": "{DATABASE_PORT}",
"DATABASE_OPTIONS": {{"timeout": 30}},
"TIME_ZONE": "America/New_York",
"LANGUAGE_CODE": "en-us",
"SITE_ID": {SITE_ID},
"SECRET_KEY": "{SECRET_KEY}",
"ADMIN_INVOKE_NAME": "{ADMIN_INVOKE_NAME}",
"ADMIN_INVOKE_PASSWORD": "{ADMIN_INVOKE_PASSWORD}",
"ADMIN_INVOKE_PATH": "/zato/admin/invoke"
}}
""" # noqa

# Django fixture (loaded via 'loaddata') creating the default sites.site row.
# Also a str.format template, hence the doubled braces.
initial_data_json = """[{{
"pk": {SITE_ID},
"model": "sites.site",
"fields": {{
"name": "web admin",
"domain":"webadmin-{SITE_ID}.example.com"
}}
}}]
""" # noqa
class Create(ZatoCommand):
    """ Creates a new web admin web console
    """
    needs_empty_dir = True

    opts = deepcopy(common_odb_opts)
    opts.append({'name':'--pub-key-path', 'help':'Path to the web admin\'s public key in PEM'})
    # NOTE(review): '--priv_key-path' mixes an underscore and dashes, unlike the
    # other options - looks like a typo but changing it would break existing callers.
    opts.append({'name':'--priv_key-path', 'help':'Path to the web admin\'s private key in PEM'})
    opts.append({'name':'--cert-path', 'help':'Path to the web admin\'s certificate in PEM'})
    opts.append({'name':'--ca-certs-path', 'help':'Path to a bundle of CA certificates to be trusted'})
    opts.append({'name':'--admin-invoke-password', 'help':'Password for web-admin to connect to servers with'})

    def __init__(self, args):
        """ Resolves the target directory from CLI arguments before the base class runs.
        """
        # stdlib
        import os
        self.target_dir = os.path.abspath(args.path)
        super(Create, self).__init__(args)

# ################################################################################################################################

    def allow_empty_secrets(self):
        # Secrets may be empty while the component is being created.
        return True

    def execute(self, args, show_output=True, admin_password=None, needs_admin_created_flag=False):
        """ Creates the web admin component on disk - directory layout, config files,
        crypto material - then bootstraps Django and creates the 'admin' superuser.

        :param args: Parsed CLI arguments
        :param show_output: Whether to log a success message at the end
        :param admin_password: Password for the admin user; generated randomly if not given
        :param needs_admin_created_flag: If True, returns whether the admin user was actually created
        """
        # We need it here to make Django accept PyMySQL as if it was MySQLdb.
        import pymysql
        pymysql.install_as_MySQLdb()

        # stdlib
        import os, json
        from random import getrandbits
        from uuid import uuid4

        # Django
        from django.core.management import call_command

        # Python 2/3 compatibility
        from zato.common.py23_.past.builtins import unicode

        # Zato
        # TODO: There really shouldn't be any direct dependency between zato-cli and zato-web-admin
        from zato.admin.zato_settings import update_globals

        from zato.cli import is_arg_given
        from zato.common.crypto.api import WebAdminCryptoManager
        from zato.common.crypto.const import well_known_data
        from zato.common.defaults import web_admin_host, web_admin_port
        from zato.common.util.logging_ import get_logging_conf_contents

        os.chdir(self.target_dir)

        # Directory layout and paths to the two config files created below.
        repo_dir = os.path.join(self.target_dir, 'config', 'repo')
        web_admin_conf_path = os.path.join(repo_dir, 'web-admin.conf')
        initial_data_json_path = os.path.join(repo_dir, 'initial-data.json')

        os.mkdir(os.path.join(self.target_dir, 'logs'))
        os.mkdir(os.path.join(self.target_dir, 'config'))
        os.mkdir(repo_dir)

        user_name = 'admin'
        admin_password = admin_password if admin_password else WebAdminCryptoManager.generate_password()

        # If we have a CA's certificate then it implicitly means that there is some CA
        # which tells us that we are to trust both the CA and the certificates that it issues,
        # and the only certificate we are interested in is the one to the load-balancer.
        # This is why, if we get ca_certs_path, it must be because we are to use TLS
        # in communication with the load-balancer's agent which in turn means that we have crypto material on input.
        has_crypto = is_arg_given(args, 'ca_certs_path')

        if has_crypto:
            self.copy_web_admin_crypto(repo_dir, args)

        # Secret key and the crypto manager that encrypts all sensitive config values below.
        zato_secret_key = WebAdminCryptoManager.generate_key()
        cm = WebAdminCryptoManager.from_secret_key(zato_secret_key)

        django_secret_key = uuid4().hex.encode('utf8')
        django_site_id = getrandbits(20)

        admin_invoke_password = getattr(args, 'admin_invoke_password', None)

        if not admin_invoke_password:
            admin_invoke_password = 'create_wa.admin.' + uuid4().hex

        if isinstance(admin_invoke_password, unicode):
            admin_invoke_password = admin_invoke_password.encode('utf8')

        odb_password = args.odb_password or ''
        odb_password = odb_password.encode('utf8')

        # Values substituted into config_template and initial_data_json.
        config = {
            'host': web_admin_host,
            'port': web_admin_port,
            'db_type': args.odb_type,
            'log_config': 'logging.conf',
            'lb_agent_use_tls': 'false',
            'zato_secret_key':zato_secret_key,
            'well_known_data': cm.encrypt(well_known_data.encode('utf8')),
            'DATABASE_NAME': args.odb_db_name or args.sqlite_path,
            'DATABASE_USER': args.odb_user or '',
            'DATABASE_PASSWORD': cm.encrypt(odb_password),
            'DATABASE_HOST': args.odb_host or '',
            'DATABASE_PORT': args.odb_port or '',
            'SITE_ID': django_site_id,
            'SECRET_KEY': cm.encrypt(django_secret_key),
            'ADMIN_INVOKE_NAME':ServiceConst.API_Admin_Invoke_Username,
            'ADMIN_INVOKE_PASSWORD':cm.encrypt(admin_invoke_password),
        }

        import platform
        system = platform.system()
        is_windows = 'windows' in system.lower()

        # On Windows, backslashes in the SQLite path need escaping for the JSON config file.
        if is_windows:
            config['DATABASE_NAME'] = config['DATABASE_NAME'].replace('\\', '\\\\')

        # These values are bytes at this point - decode them so they can be written out as JSON.
        for name in 'zato_secret_key', 'well_known_data', 'DATABASE_PASSWORD', 'SECRET_KEY', 'ADMIN_INVOKE_PASSWORD':
            config[name] = config[name].decode('utf8')

        logging_conf_contents = get_logging_conf_contents()

        open_w(os.path.join(repo_dir, 'logging.conf')).write(logging_conf_contents)
        open_w(web_admin_conf_path).write(config_template.format(**config))
        open_w(initial_data_json_path).write(initial_data_json.format(**config))

        # Initial info
        self.store_initial_info(self.target_dir, self.COMPONENTS.WEB_ADMIN.code)

        # Re-read the freshly written config and push it into Django's settings.
        config = json.loads(open_r(os.path.join(repo_dir, 'web-admin.conf')).read())
        config['config_dir'] = self.target_dir
        update_globals(config, self.target_dir)

        os.environ['DJANGO_SETTINGS_MODULE'] = 'zato.admin.settings'

        import django
        django.setup()
        self.reset_logger(args, True)

        # Can't import these without DJANGO_SETTINGS_MODULE being set
        from django.contrib.auth.models import User
        from django.core.management.base import CommandError
        from django.db import connection
        from django.db.utils import IntegrityError

        call_command('migrate', run_syncdb=True, interactive=False, verbosity=0)
        call_command('loaddata', initial_data_json_path, verbosity=0)

        try:
            call_command(
                'createsuperuser', interactive=False, username=user_name, email='admin@invalid.example.com')
            admin_created = True

            user = User.objects.get(username=user_name)
            user.set_password(admin_password)
            user.save()

        except (CommandError, IntegrityError):
            # This will happen if user 'admin' already exists, e.g. if this is not the first cluster in this database
            admin_created = False
            connection._rollback()

        # Needed because Django took over our logging config
        self.reset_logger(args, True)

        if show_output:
            if self.verbose:
                msg = """Successfully created a web admin instance.
You can start it with the 'zato start {path}' command.""".format(path=os.path.abspath(os.path.join(os.getcwd(), self.target_dir)))
                self.logger.debug(msg)
            else:
                self.logger.info('OK')

        # We return it only when told to explicitly so when the command runs from CLI
        # it doesn't return a non-zero exit code.
        if needs_admin_created_flag:
            return admin_created
| 8,831
|
Python
|
.py
| 183
| 40.213115
| 134
| 0.634121
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,458
|
cache.py
|
zatosource_zato/code/zato-cli/src/zato/cli/cache.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# Zato
from zato.cli import ManageCommand
# ################################################################################################################################
if 0:
# stdlib
from argparse import Namespace
# Zato
from zato.client import JSONResponse
JSONResponse = JSONResponse
Namespace = Namespace
# ################################################################################################################################
_not_given = '_zato_not_given'
_modifiers = 'by_prefix', 'by_regex', 'by_suffix', 'contains', 'contains_all', 'contains_any', 'not_contains'
# ################################################################################################################################
common_cache_opts = [
{'name':'--cache', 'help':'Cache to use, the default one will be used if not given on input', 'default':'default'},
{'name':'--path', 'help':'Path to a local Zato server', 'default':''},
{'name':'--is-https', 'help':'When connecting via --path, should HTTPS be used', 'action':'store_true'},
{'name':'--address', 'help':'HTTP(S) address of a Zato server'},
{'name':'--username', 'help':'Username to authenticate with to a remote Zato server', 'default':_not_given},
{'name':'--password', 'help':'Password to authenticate with to a remote Zato server', 'default':_not_given},
]
data_type_opts = [
{'name':'--string-value', 'help':'In get and set operations, whether values should be treated as strings', 'action':'store_true'},
{'name':'--int-value', 'help':'In get and set operations, whether values should be treated as integers', 'action':'store_true'},
{'name':'--bool-value', 'help':'In get and set operations, whether values should be treated as booleans', 'action':'store_true'},
]
# ################################################################################################################################
# ################################################################################################################################
class CacheCommand(ManageCommand):
""" Base class for cache-related commands.
"""
opts = common_cache_opts
def _on_server(self, args, _modifiers=_modifiers):
# type: (Namespace, tuple)
# stdlib
import sys
# Zato
from zato.common.api import NotGiven
from zato.common.util.cache import Client as CacheClient, CommandConfig
if args.address:
client = CacheClient.from_dict({
'address': args.address,
'username': args.username,
'password': args.password,
'cache_name': args.cache,
'is_https': args.is_https,
})
else:
client = CacheClient.from_server_conf(self.component_dir, args.cache, args.is_https)
command = args.command
command = command.replace('cache_', '') # type: str
modifier = None # type: str
for elem in _modifiers:
if getattr(args, elem, None):
modifier = elem
break
command_config = CommandConfig()
command_config.command = command
command_config.modifier = modifier
command_config.key = args.key
command_config.value = getattr(args, 'value', NotGiven)
if command_config.command in ('get', 'set'):
command_config.is_int_value = args.int_value
command_config.is_string_value = args.string_value
command_config.is_bool_value = args.bool_value
response = client.run_command(command_config)
# Report what was found ..
sys.stdout.write(response.text)
sys.stdout.flush()
# .. and exit with a non-zero code if there was an error
if not response.has_value:
sys.exit(self.SYS_ERROR.CACHE_KEY_NOT_FOUND)
# ################################################################################################################################
# ################################################################################################################################
class CacheGet(CacheCommand):
opts = common_cache_opts + data_type_opts + [
{'name':'key', 'help':'Key to get value of'},
]
# ################################################################################################################################
# ################################################################################################################################
class CacheSet(CacheCommand):
opts = common_cache_opts + data_type_opts + [
{'name':'key', 'help':'Key to set value of'},
{'name':'value', 'help':'Value to set'},
]
# ################################################################################################################################
# ################################################################################################################################
class CacheDelete(CacheCommand):
opts = common_cache_opts + [
{'name':'key', 'help':'Key to delete'},
]
# ################################################################################################################################
# ################################################################################################################################
| 5,637
|
Python
|
.py
| 100
| 49.68
| 134
| 0.436182
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,459
|
sso.py
|
zatosource_zato/code/zato-cli/src/zato/cli/sso.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# Zato
from zato.cli import ZatoCommand, common_odb_opts, common_totp_opts
from zato.common.typing_ import cast_
from zato.common.util.api import as_bool
# ################################################################################################################################
if 0:
from argparse import Namespace
from bunch import Bunch
from zato.common.odb.model import SSOUser
from zato.common.typing_ import any_, intnone
from zato.sso.api import UserAPI
Bunch = Bunch
Namespace = Namespace
SSOUser = SSOUser
UserAPI = UserAPI
# ################################################################################################################################
# ################################################################################################################################
class SSOCommand(ZatoCommand):
""" Base class for SSO-related commands.
"""
user_required = True
# ################################################################################################################################
def _get_cid(self) -> 'str':
return 'cli'
# ################################################################################################################################
def _get_current_app(self) -> 'str':
return 'zato-cli'
# ################################################################################################################################
def _get_current_host(self) -> 'str':
# Zato
from zato.common.util.api import current_host
return current_host()
# ################################################################################################################################
def _get_user(self) -> 'str':
# Zato
from zato.common.util.api import current_host, current_user
return '{}@{}'.format(current_user(), current_host())
# ################################################################################################################################
def _get_sso_config(self, args:'Namespace', repo_location:'str', secrets_conf:'Bunch') -> 'UserAPI':
# Zato
from zato.common.crypto.api import CryptoManager
from zato.common.util.api import get_config
from zato.sso.api import UserAPI
from zato.sso.util import new_user_id, normalize_password_reject_list
sso_conf = get_config(repo_location, 'sso.conf', needs_user_config=False)
normalize_password_reject_list(sso_conf)
crypto_manager = CryptoManager.from_secret_key(secrets_conf.secret_keys.key1)
crypto_manager.add_hash_scheme('sso.super-user', sso_conf.hash_secret.rounds, sso_conf.hash_secret.salt_size)
server_conf = get_config(
repo_location, 'server.conf', needs_user_config=False, crypto_manager=crypto_manager, secrets_conf=secrets_conf)
def _get_session() -> 'any_':
return self.get_odb_session_from_server_config(server_conf, None)
def _hash_secret(_secret:'any_') -> 'any_':
return crypto_manager.hash_secret(_secret, 'sso.super-user')
user_api = UserAPI(
server=cast_('any_', None),
sso_conf=sso_conf,
totp=cast_('any_', None),
odb_session_func=cast_('any_', None),
encrypt_func=crypto_manager.encrypt,
decrypt_func=crypto_manager.decrypt,
hash_func=_hash_secret,
verify_hash_func=cast_('any_', None),
new_user_id_func=new_user_id
)
user_api.post_configure(_get_session, True, False)
return user_api
# ################################################################################################################################
def execute(self, args:'Namespace') -> 'any_':
# stdlib
import os
# Zato
from zato.common.util.api import get_config
# This will exist the process if path does not point to a server
self.ensure_path_is_a_server(args.path)
repo_location = os.path.join(args.path, 'config', 'repo')
secrets_conf = get_config(repo_location, 'secrets.conf', needs_user_config=False)
user_api = self._get_sso_config(args, repo_location, secrets_conf)
if self.user_required:
user = user_api.get_user_by_username(self._get_cid(), args.username)
if not user:
self.logger.warning('No such user `%s`', args.username)
return self.SYS_ERROR.NO_SUCH_SSO_USER
else:
user = None
return self._on_sso_command(args, cast_('any_', user), cast_('any_', user_api))
# ################################################################################################################################
def _on_sso_command(self, args:'Namespace', user:'SSOUser', user_api:'Bunch') -> 'any_':
raise NotImplementedError('Must be implement by subclasses')
# ################################################################################################################################
class _CreateUser(SSOCommand):
user_type:'str' = ''
create_func:'str' = ''
user_required:'bool' = False
allow_empty_secrets:'bool' = False
opts = [
{'name': 'username', 'help': 'Username to use'},
{'name': '--email', 'help': "Person's email"},
{'name': '--display-name', 'help': "Person's display name"},
{'name': '--first-name', 'help': "Person's first name"},
{'name': '--middle-name', 'help': "Person's middle name"},
{'name': '--last-name', 'help': "Person's middle name"},
{'name': '--password', 'help': 'Password'},
]
# ################################################################################################################################
def _on_sso_command(self, args:'Namespace', user:'SSOUser', user_api:'UserAPI') -> 'intnone':
# Bunch
from bunch import Bunch
# Zato
from zato.common.crypto.api import CryptoManager
from zato.sso import ValidationError
if user_api.get_user_by_username('', args.username):
self.logger.warning('User already exists `%s`', args.username)
return self.SYS_ERROR.USER_EXISTS
try:
user_api.validate_password(args.password)
except ValidationError as e:
self.logger.warning('Password validation error, reason code:`%s`', ', '.join(e.sub_status))
return self.SYS_ERROR.VALIDATION_ERROR
data = Bunch()
data.username = args.username
data.email = args.email or b''
data.display_name = args.display_name or b''
data.first_name = args.first_name or b''
data.middle_name = args.middle_name or b''
data.last_name = args.last_name or b''
data.password = args.password
data.sign_up_confirm_token = 'cli.{}'.format(CryptoManager.generate_secret().decode('utf8'))
data.is_rate_limit_active = False
data.rate_limit_def = None
data.rate_limit_type = None
data.rate_limit_check_parent_def = False
func = getattr(user_api, self.create_func)
func(self._get_cid(), data, require_super_user=False, auto_approve=True)
self.logger.info('Created %s `%s`', self.user_type, data.username)
# ################################################################################################################################
class CreateUser(_CreateUser):
""" Creates a new regular SSO user
"""
user_type = 'user'
create_func = 'create_user'
# ################################################################################################################################
class CreateSuperUser(_CreateUser):
""" Creates a new SSO super-user
"""
user_type = 'super-user'
create_func = 'create_super_user'
# ################################################################################################################################
class DeleteUser(SSOCommand):
""" Deletes an existing user from SSO (super-user or a regular one).
"""
opts = [
{'name': 'username', 'help': 'Username to delete'},
{'name': '--yes', 'help': 'Do not prompt for confirmation, assume yes', 'action': 'store_true'},
]
def _on_sso_command(self, args:'Namespace', user:'SSOUser', user_api:'UserAPI') -> 'None':
if not args.yes:
template = 'Delete user? `{}`'.format(user.username)
if not self.get_confirmation(template):
self.logger.info('User `%s` kept intact', user.username)
return
user_api.delete_user_by_username(
self._get_cid(), args.username, None, self._get_current_app(), self._get_current_host(), skip_sec=True)
self.logger.info('Deleted user `%s`', args.username)
# ################################################################################################################################
class LockUser(SSOCommand):
""" Locks a user account. The person may not log in.
"""
opts = [
{'name': 'username', 'help': 'User account to lock'},
]
def _on_sso_command(self, args:'Namespace', user:'SSOUser', user_api:'UserAPI') -> 'None':
user_api.lock_user(
self._get_cid(), user.user_id, None, self._get_current_app(), self._get_current_host(), False, self._get_user())
self.logger.info('Locked user account `%s`', args.username)
# ################################################################################################################################
class UnlockUser(SSOCommand):
""" Unlocks a user account
"""
opts = [
{'name': 'username', 'help': 'User account to unlock'},
]
def _on_sso_command(self, args:'Namespace', user:'SSOUser', user_api:'UserAPI') -> 'None':
user_api.unlock_user(
self._get_cid(), user.user_id, None, self._get_current_app(), self._get_current_host(), False, self._get_user())
self.logger.info('Unlocked user account `%s`', args.username)
# ################################################################################################################################
class Login(SSOCommand):
""" Logs a user in.
"""
opts = [
{'name': 'username', 'help': 'User to log in as (no password is required)'},
]
def _on_sso_command(self, args:'Namespace', user:'SSOUser', user_api:'UserAPI') -> 'None':
# Zato
from zato.common.util.api import current_host
response = user_api.login(
self._get_cid(), args.username, None, None, '127.0.0.1', user_agent='Zato CLI {}'.format(current_host()),
skip_sec=True)
self.logger.info('User logged in %s', response.to_dict())
# ################################################################################################################################
class Logout(SSOCommand):
""" Logs a user out by their UST.
"""
user_required = False
opts = [
{'name': 'ust', 'help': 'User session token to log out by'},
]
def _on_sso_command(self, args:'Namespace', user:'SSOUser', user_api:'UserAPI') -> 'None':
user_api.logout(self._get_cid(), args.ust, None, '127.0.0.1', skip_sec=True)
self.logger.info('User logged out by UST')
# ################################################################################################################################
class ChangeUserPassword(SSOCommand):
""" Changes password of a user given on input. Use reset-user-password if new password should be auto-generated.
"""
opts = [
{'name': 'username', 'help': 'User to change the password of'},
{'name': '--password', 'help': 'New password'},
{'name': '--expiry', 'help': "Password's expiry in days"},
{'name': '--must-change', 'help': 'A flag indicating whether the password must be changed on next login', 'type':as_bool},
]
def _on_sso_command(self, args:'Namespace', user:'SSOUser', user_api:'UserAPI') -> 'intnone':
# Zato
from zato.sso import ValidationError
try:
user_api.set_password(
self._get_cid(), user.user_id, args.password, args.must_change, args.expiry, self._get_current_app(),
self._get_current_host())
except ValidationError as e:
self.logger.warning('Password validation error, reason code:`%s`', ', '.join(e.sub_status))
return self.SYS_ERROR.VALIDATION_ERROR
else:
self.logger.info('Changed password for user `%s`', args.username)
# ################################################################################################################################
class ResetUserPassword(SSOCommand):
""" Sets a new random for user and returns it on output. Use change-password if new password must be given on input.
"""
opts = [
{'name': 'username', 'help': 'User to reset the password of'},
{'name': '--expiry', 'help': "Password's expiry in hours or days"},
{'name': '--must-change', 'help': 'A flag indicating whether the password must be changed on next login', 'type':as_bool},
]
def _on_sso_command(self, args:'Namespace', user:'SSOUser', user_api:'UserAPI') -> 'None':
# Zato
from zato.common.crypto.api import CryptoManager
new_password = CryptoManager.generate_password()
if isinstance(new_password, bytes):
new_password = new_password.decode('utf8')
user_api.set_password(
self._get_cid(), user.user_id, new_password, args.must_change, args.expiry, self._get_current_app(),
self._get_current_host())
self.logger.info('Password for user `%s` reset to `%s`', args.username, new_password)
# ################################################################################################################################
class ResetTOTPKey(SSOCommand):
""" Resets a user's TOTP secret key. Returns the key on output if one was not given on input.
"""
opts = common_totp_opts
def _on_sso_command(self, args:'Namespace', user:'SSOUser', user_api:'UserAPI') -> 'None':
# Zato
from zato.cli.util import get_totp_info_from_args
key, key_label = get_totp_info_from_args(args)
user_api.reset_totp_key(
self._get_cid(), None, user.user_id, key, key_label, self._get_current_app(), self._get_current_host(), skip_sec=True)
# Output key only if it was not given on input
if not args.key:
self.logger.info('TOTP key for user `%s` reset to `%s`', args.username, key)
# ################################################################################################################################
class CreateODB(ZatoCommand):
""" Creates a new Zato SSO ODB (Operational Database)
"""
opts = common_odb_opts
def execute(self, args:'Namespace', show_output:'bool'=True):
# Zato
from zato.common.odb.model.sso import \
_SSOAttr, \
_SSOPasswordReset, \
_SSOGroup, \
_SSOLinkedAuth, \
_SSOSession, \
_SSOUser, \
_SSOUserGroup, \
Base as SSOModelBase
_sso_tables = [
_SSOAttr.__table__,
_SSOGroup.__table__,
_SSOPasswordReset.__table__,
_SSOLinkedAuth.__table__,
_SSOSession.__table__,
_SSOUser.__table__,
_SSOUserGroup.__table__,
]
engine = self._get_engine(args)
SSOModelBase.metadata.create_all(engine, tables=_sso_tables)
if show_output:
if self.verbose:
self.logger.debug('SSO ODB created successfully')
else:
self.logger.info('OK')
# ################################################################################################################################
| 16,261
|
Python
|
.py
| 304
| 45.447368
| 130
| 0.491986
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,460
|
__init__.py
|
zatosource_zato/code/zato-cli/src/zato/cli/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2024, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
from json import dumps, loads
# Zato
from zato.common.api import NotGiven
from zato.common.util.open_ import open_r, open_w
# ################################################################################################################################
# ################################################################################################################################
if 0:
from zato.client import ZatoClient
from zato.common.typing_ import any_, anydict, callnone, stranydict
# ################################################################################################################################
# ################################################################################################################################
# Some objects are re-defined here to avoid importing them from zato.common = improves CLI performance.
class MS_SQL:
ZATO_DIRECT = 'zato+mssql1'
ZATO_INFO_FILE = '.zato-info'
SUPPORTED_DB_TYPES = ('mysql', 'postgresql', 'sqlite')
# ################################################################################################################################
_opts_odb_type = 'Operational database type, must be one of {}'.format(SUPPORTED_DB_TYPES) # noqa
_opts_odb_host = 'Operational database host'
_opts_odb_port = 'Operational database port'
_opts_odb_user = 'Operational database user'
_opts_odb_schema = 'Operational database schema'
_opts_odb_db_name = 'Operational database name'
# ################################################################################################################################
ca_defaults = {
'organization': 'My Company',
'organizational_unit': 'My Unit', # When it's an optional argument
'organizational-unit': 'My Unit', # When it's a required one
'locality': 'My Town',
'state_or_province': 'My State',
'country': 'US'
}
# ################################################################################################################################
default_ca_name = 'Sample CA'
default_common_name = 'localhost'
# ################################################################################################################################
common_odb_opts = [
{'name':'--odb-type', 'help':_opts_odb_type, 'choices':SUPPORTED_DB_TYPES, 'default':'sqlite'}, # noqa
{'name':'--odb-host', 'help':_opts_odb_host},
{'name':'--odb-port', 'help':_opts_odb_port},
{'name':'--odb-user', 'help':_opts_odb_user},
{'name':'--odb-db-name', 'help':_opts_odb_db_name},
{'name':'--postgresql-schema', 'help':_opts_odb_schema + ' (PostgreSQL only)'},
{'name':'--odb-password', 'help':'ODB database password', 'default':''},
]
common_ca_create_opts = [
{'name':'--organization', 'help':'Organization name (defaults to {organization})'.format(**ca_defaults)},
{'name':'--locality', 'help':'Locality name (defaults to {locality})'.format(**ca_defaults)},
{'name':'--state-or-province', 'help':'State or province name (defaults to {state_or_province})'.format(**ca_defaults)},
{'name':'--country', 'help':'Country (defaults to {country})'.format(**ca_defaults)},
{'name':'--common-name', 'help':'Common name (defaults to {default})'.format(default=default_common_name)},
]
common_totp_opts = [
{'name': 'username', 'help': 'Username to reset the TOTP secret key of'},
{'name': '--key', 'help': 'Key to use'},
{'name': '--key-label', 'help': 'Label to apply to the key'},
]
common_scheduler_server_address_opts = [
{'name':'--scheduler-address-for-server', 'help':'Address of the scheduler for servers to invoke'},
{'name':'--server-address-for-scheduler', 'help':'Address of the server for a scheduler to invoke'},
]
common_scheduler_server_api_client_opts = [
{
'name':'--scheduler-api-client-for-server-username',
'help':'Name of the API user that the server connects to the scheduler with'
},
{
'name':'--scheduler-api-client-for-server-password',
'help':'Password of the API user that the server connects to the scheduler with'
},
{
'name':'--server-api-client-for-scheduler-username',
'help':'Name of the API user that the scheduler connects to the server with'
},
{
'name':'--server-api-client-for-scheduler-password',
'help':'Password of the API user that the scheduler connects to the server with'
},
]
# ################################################################################################################################
sql_conf_contents = """
# ######### ######################## ######### #
# ######### Engines defined by Zato ######### #
# ######### ######################## ######### #
[mysql+pymysql]
display_name=MySQL
ping_query=SELECT 1+1
[postgresql+pg8000]
display_name=PostgreSQL
ping_query=SELECT 1
[oracle]
display_name=Oracle
ping_query=SELECT 1 FROM dual
[{}]
display_name="MS SQL (Direct)"
ping_query=SELECT 1
# ######### ################################# ######### #
# ######### User-defined SQL engines go below ######### #
# ######### ################################# ######### #
#[label]
#friendly_name=My DB
#sqlalchemy_driver=sa-name
""".lstrip().format(MS_SQL.ZATO_DIRECT) # nopep8
# ################################################################################################################################
command_imports = (
('apispec', 'zato.cli.apispec.APISpec'),
('ca_create_ca', 'zato.cli.ca_create_ca.Create'),
('ca_create_lb_agent', 'zato.cli.ca_create_lb_agent.Create'),
('ca_create_scheduler', 'zato.cli.ca_create_scheduler.Create'),
('ca_create_server', 'zato.cli.ca_create_server.Create'),
('ca_create_web_admin', 'zato.cli.ca_create_web_admin.Create'),
('cache_delete', 'zato.cli.cache.CacheDelete'),
('cache_get', 'zato.cli.cache.CacheGet'),
('cache_set', 'zato.cli.cache.CacheSet'),
('change_password', 'zato.cli.security.basic_auth.ChangePassword'),
('check_config', 'zato.cli.check_config.CheckConfig'),
('component_version', 'zato.cli.component_version.ComponentVersion'),
('create_api_key', 'zato.cli.security.api_key.CreateDefinition'),
('create_basic_auth', 'zato.cli.security.basic_auth.CreateDefinition'),
('create_cluster', 'zato.cli.create_cluster.Create'),
('create_lb', 'zato.cli.create_lb.Create'),
('create_odb', 'zato.cli.create_odb.Create'),
('create_rest_channel', 'zato.cli.rest.channel.CreateChannel'),
('create_scheduler', 'zato.cli.create_scheduler.Create'),
('create_server', 'zato.cli.create_server.Create'),
('create_secret_key', 'zato.cli.crypto.CreateSecretKey'),
('create_user', 'zato.cli.web_admin_auth.CreateUser'),
('create_web_admin', 'zato.cli.create_web_admin.Create'),
('create_wsx_channel', 'zato.cli.wsx.CreateChannel'),
('create_wsx_outconn', 'zato.cli.wsx.CreateOutconn'),
('crypto_create_secret_key', 'zato.cli.crypto.CreateSecretKey'),
('delete_odb', 'zato.cli.delete_odb.Delete'),
('delete_api_key', 'zato.cli.security.api_key.DeleteDefinition'),
('delete_basic_auth', 'zato.cli.security.basic_auth.DeleteDefinition'),
('delete_rest_channel', 'zato.cli.rest.channel.DeleteChannel'),
('delete_wsx_channel', 'zato.cli.wsx.DeleteChannel'),
('delete_wsx_outconn', 'zato.cli.wsx.DeleteOutconn'),
('decrypt', 'zato.cli.crypto.Decrypt'),
('encrypt', 'zato.cli.crypto.Encrypt'),
('enmasse', 'zato.cli.enmasse.Enmasse'),
('from_config', 'zato.cli.FromConfig'),
('hash_get_rounds', 'zato.cli.crypto.GetHashRounds'),
('hl7_mllp_send', 'zato.cli.hl7_.MLLPSend'),
('info', 'zato.cli.info.Info'),
('openapi', 'zato.cli.openapi_.OpenAPI'),
('pubsub_cleanup', 'zato.cli.pubsub.cleanup.Cleanup'),
('pubsub_create_endpoint', 'zato.cli.pubsub.endpoint.CreateEndpoint'),
('pubsub_create_topic', 'zato.cli.pubsub.topic.CreateTopic'),
('pubsub_create_test_topics', 'zato.cli.pubsub.topic.CreateTestTopics'),
('pubsub_delete_endpoint', 'zato.cli.pubsub.endpoint.DeleteEndpoint'),
('pubsub_delete_topic', 'zato.cli.pubsub.topic.DeleteTopics'),
('pubsub_delete_topics', 'zato.cli.pubsub.topic.DeleteTopics'),
('pubsub_get_topic', 'zato.cli.pubsub.topic.GetTopics'),
('pubsub_get_topics', 'zato.cli.pubsub.topic.GetTopics'),
('reset_totp_key', 'zato.cli.web_admin_auth.ResetTOTPKey'),
('quickstart_create', 'zato.cli.quickstart.Create'),
('service_invoke', 'zato.cli.service.Invoke'),
('set_ide_password', 'zato.cli.ide.SetIDEPassword'),
('set_admin_invoke_password', 'zato.cli.web_admin_auth.SetAdminInvokePassword'),
('sso_change_user_password', 'zato.cli.sso.ChangeUserPassword'),
('sso_create_odb', 'zato.cli.sso.CreateODB'),
('sso_create_user', 'zato.cli.sso.CreateUser'),
('sso_create_super_user', 'zato.cli.sso.CreateSuperUser'),
('sso_delete_user', 'zato.cli.sso.DeleteUser'),
('sso_login', 'zato.cli.sso.Login'),
('sso_logout', 'zato.cli.sso.Logout'),
('sso_lock_user', 'zato.cli.sso.LockUser'),
('sso_reset_totp_key', 'zato.cli.sso.ResetTOTPKey'),
('sso_reset_user_password', 'zato.cli.sso.ResetUserPassword'),
('sso_unlock_user', 'zato.cli.sso.UnlockUser'),
('start', 'zato.cli.start.Start'),
('stop', 'zato.cli.stop.Stop'),
('update_password', 'zato.cli.web_admin_auth.UpdatePassword'),
('wait', 'zato.cli.wait.Wait'),
)
# ################################################################################################################################
def run_command(args):
# Zato
from zato.common.util.import_ import import_string
# This may be needed in two places.
sorted_command_imports = sorted(command_imports)
# Iterate over all the commands that we know ..
for command_name, class_dotted_name in sorted_command_imports:
# Try to match the command given with our configuration ..
if command_name == args.command:
class_ = import_string(class_dotted_name)
instance = class_(args)
# .. we found a match so we can run the command.
return instance.run(args)
# .. if we are here, it means that configuration from zato_command.py does not much our.
else:
raise Exception('Could not find `{}` among `{}`'.format(args.command, [elem[0] for elem in sorted_command_imports]))
# ################################################################################################################################
class ZatoCommand:
    """ A base class for all Zato CLI commands. Handles common things like parsing
    the arguments, checking whether a config file or command line switches should
    be used, asks for passwords etc.
    """
    # Class-level knobs that subclasses override to customize behaviour
    needs_empty_dir = False      # If True, the target directory must be empty before the command runs
    file_needed = None           # Name of a file that must exist in the target directory, if any
    needs_secrets_confirm = True # If True, secrets have to be typed in twice
    add_config_file = True
    target_dir = None
    show_output = True
    opts = []                    # Command-line options - a list of dicts with at least 'name' and 'help' keys
# ################################################################################################################################
    class SYS_ERROR:
        """ All non-zero sys.exit return codes the commands may use.
        """
        ODB_EXISTS = 1
        FILE_MISSING = 2
        NOT_A_ZATO_COMPONENT = 3
        NO_ODB_FOUND = 4
        DIR_NOT_EMPTY = 5
        CLUSTER_NAME_ALREADY_EXISTS = 6
        SERVER_NAME_ALREADY_EXISTS = 7
        NO_SUCH_CLUSTER = 8
        COMPONENT_ALREADY_RUNNING = 9
        NO_PID_FOUND = 10
        NO_SUCH_WEB_ADMIN_USER = 11
        NO_INPUT = 12
        CONFLICTING_OPTIONS = 13
        NO_OPTIONS = 14
        INVALID_INPUT = 15
        EXCEPTION_CAUGHT = 16
        CANNOT_MIGRATE = 17
        FAILED_TO_START = 18
        FOUND_PIDFILE = 19
        USER_EXISTS = 20
        VALIDATION_ERROR = 21
        NO_SUCH_SSO_USER = 22
        NOT_A_ZATO_SERVER = 23
        NOT_A_ZATO_WEB_ADMIN = 24
        NOT_A_ZATO_LB = 25
        NOT_A_ZATO_SCHEDULER = 26
        CACHE_KEY_NOT_FOUND = 27
        SERVER_TIMEOUT = 28
        PARAMETER_MISSING = 29
        PATH_NOT_A_FILE = 30
        NO_SUCH_PATH = 31
# ################################################################################################################################
    class COMPONENTS:
        """ Symbolic names and human-readable labels for all the components a command may manage.
        """
        class _ComponentName:
            def __init__(self, code, name):
                self.code = code
                self.name = name
        CA = _ComponentName('CA', 'Certificate authority')
        LOAD_BALANCER = _ComponentName('LOAD_BALANCER', 'Load balancer')
        SCHEDULER = _ComponentName('SCHEDULER', 'Scheduler')
        SERVER = _ComponentName('SERVER', 'Server')
        WEB_ADMIN = _ComponentName('WEB_ADMIN', 'Dashboard')
# ################################################################################################################################
    def __init__(self, args):
        """ Stores the parsed arguments, configures logging and reads stdin once
        so it can later be passed on to subprocesses explicitly.
        """
        # stdlib
        import os
        # Zato
        from zato.common.util.cli import read_stdin_data
        self.args = args
        self.original_dir = os.getcwd()
        # Output can be suppressed through an environment variable, e.g. when run under tests
        self.show_output = False if 'ZATO_CLI_DONT_SHOW_OUTPUT' in os.environ else True
        self.verbose = args.verbose
        self.reset_logger(args)
        self.engine = None
        if args.store_config:
            self.store_config(args)
        # Get input from sys.stdin, if any was provided at all. It needs to be read once here,
        # because subprocesses will not be able to do it once we read it all in in the parent
        # one, so we read it here and give other processes explicitly on input, if they need it.
        self.stdin_data = read_stdin_data()
# ################################################################################################################################
    def exit(self, exit_code:'int'=0) -> 'None':
        """ Terminates the process with the given exit code (0 = success by convention).
        """
        # stdlib
        import sys
        _ = sys.exit(exit_code)
# ################################################################################################################################
    def allow_empty_secrets(self):
        """ Whether empty passwords/secrets are acceptable - subclasses may override
        this method or shadow it with a boolean class attribute (see _check_passwords).
        """
        return False
# ################################################################################################################################
    def get_arg(self, name, default='') -> 'any_':
        """ Returns the value of argument 'name', falling back to 'default' whether
        self.args is dict-like (has .get) or an attribute-style namespace.
        """
        if hasattr(self.args, 'get'):
            return self.args.get(name) or default
        else:
            return getattr(self.args, name, default)
# ################################################################################################################################
    def _extract_address_data(
        self,
        args:'any_',
        main_arg_name:'str',
        host_arg_name:'str',
        port_arg_name:'str',
        default_host:'str',
        default_port:'int'
    ) -> 'any_':
        """ Resolves (use_tls, host, port) either from a single address-style option
        (main_arg_name, e.g. 'https://host:port') or, if that is not given,
        from separate host and port options with the defaults provided.
        """
        # stdlib
        from urllib.parse import urlparse
        # Local variables
        use_tls = NotGiven
        # Try to extract the scheduler's address from a single option
        if address := getattr(args, main_arg_name, None):
            # Make sure we have a scheme ..
            if not '://' in address:
                address = 'https://' + address
            # .. parse out the individual components ..
            address = urlparse(address)
            # .. now we know if TLS should be used ..
            use_tls = address.scheme == 'https'
            # .. extract the host and port ..
            address = address.netloc.split(':')
            host = address[0]
            if len(address) == 2:
                port = address[1]
                port = int(port)
            else:
                port = default_port
        else:
            # Extract the scheduler's address from individual pieces
            host = self.get_arg(host_arg_name, default_host)
            port = self.get_arg(port_arg_name, default_port)
        # No main address was given, so nothing implied TLS - default to False
        if use_tls is NotGiven:
            use_tls = False
        return use_tls, host, port
# ################################################################################################################################
    def _encrypt(self, CryptoManagerClass, args, to_encrypt=None, needs_log_info=True):
        """ Encrypts 'to_encrypt' (or args.data) using the crypto manager built
        from the component's config/repo directory, optionally logging the result.
        Returns the encrypted bytes.
        """
        # stdlib
        import os
        # Restore the directory we were started in - args.path may be relative to it
        os.chdir(self.original_dir)
        repo_dir = os.path.abspath(os.path.join(args.path, 'config', 'repo'))
        cm = CryptoManagerClass(repo_dir=repo_dir)
        encrypted = cm.encrypt(to_encrypt or args.data)
        if needs_log_info:
            self.logger.info(encrypted.decode('utf8'))
        return encrypted
# ################################################################################################################################
    def reset_logger(self, args, reload_=False):
        """ (Re-)configures self.logger with a plain stdout handler and, if
        args.store_log is set, an additional timestamped file handler.
        With reload_=True the logging module itself is reloaded first.
        """
        # stdlib
        import logging
        import sys
        # NOTE(review): the 'imp' module is deprecated - importlib.reload is the modern equivalent
        from imp import reload
        # Zato
        from zato.common.util.file_system import fs_safe_now
        if reload_:
            logging.shutdown() # noqa
            reload(logging) # noqa
        self.logger = logging.getLogger(self.__class__.__name__) # noqa
        self.logger.setLevel(logging.DEBUG if self.verbose else logging.INFO) # noqa
        # Drop any handlers configured previously so messages are not duplicated
        self.logger.handlers[:] = []
        console_handler = logging.StreamHandler(sys.stdout) # noqa
        console_formatter = logging.Formatter('%(message)s') # noqa
        console_handler.setFormatter(console_formatter)
        self.logger.addHandler(console_handler)
        if args.store_log:
            verbose_handler = logging.FileHandler('zato.{}.log'.format(fs_safe_now())) # noqa
            verbose_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') # noqa
            verbose_handler.setFormatter(verbose_formatter)
            self.logger.addHandler(verbose_handler)
# ################################################################################################################################
    def _get_secret(self, template, needs_confirm, allow_empty, secret_name='password'):
        """ Runs an infinite loop until a user enters the secret. User needs
        to confirm the secret if 'needs_confirm' is True. New line characters
        are always stripped before returning the secret, so that "\n" becomes
        "", "\nsecret\n" becomes "secret" and "\nsec\nret\n" becomes "sec\nret".
        """
        # stdlib
        from getpass import getpass
        self.logger.info('')
        while True:
            secret1 = getpass(template + ' (will not echo): ')
            if not needs_confirm:
                return secret1.strip('\n')
            secret2 = getpass('{} again (will not echo): '.format(template))
            if secret1 != secret2:
                self.logger.info('{}s do not match'.format(template))
            else:
                if not secret1 and not allow_empty:
                    self.logger.info('No {} entered'.format(secret_name))
                else:
                    return secret1.strip('\n')
# ################################################################################################################################
    def get_confirmation(self, template, yes_char='y', no_char='n'):
        """ Keeps prompting until the user enters exactly yes_char or no_char,
        returning True or False accordingly.
        """
        # stdlib
        from builtins import input as raw_input
        template = '{} [{}/{}] '.format(template, yes_char, no_char)
        while True:
            value = raw_input(template)
            if value == yes_char:
                return True
            elif value == no_char:
                return False
# ################################################################################################################################
    def _get_now(self, time_=None):
        """ Returns the given (or current GMT) time formatted for use in file names.
        """
        # stdlib
        import time
        if not time_:
            time_ = time.gmtime() # noqa
        return time.strftime('%Y-%m-%d_%H-%M-%S', time_) # noqa
# ################################################################################################################################
    def _get_user_host(self):
        """ Returns a 'user@host' string identifying who/where the command runs.
        """
        # stdlib
        from getpass import getuser
        from socket import gethostname
        return getuser() + '@' + gethostname()
# ################################################################################################################################
    def store_initial_info(self, target_dir, component):
        """ Writes the component's ZATO_INFO_FILE (JSON) with its version,
        creator, creation timestamp and component type.
        """
        # stdlib
        import os
        from datetime import datetime
        # Zato
        from zato.common.json_internal import dumps
        from zato.common.version import get_version
        zato_version = get_version()
        info = {'version': zato_version, # noqa
                'created_user_host': self._get_user_host(),
                'created_ts': datetime.utcnow().isoformat(), # noqa
                'component': component
                }
        # NOTE(review): the file handle returned by open_w is not explicitly closed here
        open_w(os.path.join(target_dir, ZATO_INFO_FILE)).write(dumps(info))
# ################################################################################################################################
    def store_config(self, args):
        """ Stores the config options in a config file for a later use.
        """
        # stdlib
        import os
        from io import StringIO
        # Zato
        from zato.common.util.file_system import fs_safe_now
        now = fs_safe_now() # noqa
        file_name = 'zato.{}.config'.format(now)
        file_args = StringIO()
        # Only truthy options are stored - falsy ones fall back to their defaults on re-run
        for arg, value in args._get_kwargs():
            if value:
                file_args.write('{}={}\n'.format(arg, value))
        body = '# {} - {}\n{}'.format(now, self._get_user_host(), file_args.getvalue())
        open_w(file_name).write(body)
        file_args.close()
        self.logger.debug('Options saved in file {file_name}'.format(
            file_name=os.path.abspath(file_name)))
# ################################################################################################################################
    def _get_engine(self, args):
        """ Builds an SQLAlchemy engine out of the ODB options in args,
        tagging PostgreSQL connections with an application name.
        """
        # SQLAlchemy
        import sqlalchemy
        # Zato
        from zato.common.util import api as util_api
        from zato.common.util.api import get_engine_url
        if not args.odb_type.startswith('postgresql'):
            connect_args = {}
        else:
            connect_args = {'application_name':util_api.get_component_name('enmasse')}
        return sqlalchemy.create_engine(get_engine_url(args), connect_args=connect_args)
# ################################################################################################################################
    def _get_session(self, engine):
        """ Returns an ODB session bound to the given engine.
        """
        # Zato
        from zato.common.util.api import get_session
        return get_session(engine)
# ################################################################################################################################
    def _check_passwords(self, args, check_password):
        """ Get the password from a user for each argument that needs a password.
        """
        for opt_name, opt_help in check_password:
            # '--odb-password' on the command line becomes the 'odb_password' attribute
            opt_name = opt_name.replace('--', '').replace('-', '_')
            password_arg = getattr(args, opt_name, None)
            # It is OK if password is an empty string and empty secrets are allowed
            if not password_arg:
                # allow_empty_secrets may be either a plain bool or a callable returning one
                if isinstance(self.allow_empty_secrets, bool):
                    allow_empty = self.allow_empty_secrets
                else:
                    allow_empty = self.allow_empty_secrets()
                if allow_empty:
                    continue
                password = self._get_secret(opt_help, self.needs_secrets_confirm, allow_empty, opt_name)
                setattr(args, opt_name, password)
        return args
# ################################################################################################################################
    def _get_arg(self, args, name, default):
        """ Like get_arg but for an explicit args object - returns 'default' when
        the attribute is missing or falsy.
        """
        value = getattr(args, name, None)
        return value if value else default
# ################################################################################################################################
    def run(self, args, offer_save_opts=True, work_args=None, needs_sys_exit=True):
        """ Parses the command line or the args passed in and figures out
        whether the user wishes to use a config file or command line switches.
        """
        # stdlib
        import os
        import sys
        try:
            # Do we need to have a clean directory to work in?
            if self.needs_empty_dir:
                work_dir = os.path.abspath(args.path)
                if not os.path.exists(work_dir):
                    self.logger.info('Creating directory `%s`', work_dir)
                    os.makedirs(work_dir)
                for elem in os.listdir(work_dir):
                    if elem.startswith('zato') and elem.endswith('config'):
                        # This is a zato.{}.config file. It had been written there
                        # before we got to this point and it's OK to skip it.
                        continue
                    else:
                        self.logger.info('Directory `%s` is not empty, please choose a different one or empty it out', work_dir)
                        sys.exit(self.SYS_ERROR.DIR_NOT_EMPTY) # noqa
            # Do we need the directory to contain any specific files?
            if self.file_needed:
                full_path = os.path.join(args.path, self.file_needed)
                if not os.path.exists(full_path):
                    msg = 'Could not find file {}'.format(full_path)
                    self.logger.info(msg)
                    sys.exit(self.SYS_ERROR.FILE_MISSING) # noqa
            # Collect all options that represent secrets and hence may need prompting for
            check_password = []
            for opt_dict in self.opts:
                name = opt_dict['name']
                if 'password' in name or 'secret' in name:
                    # Don't require a component's secret key
                    if name == '--secret-key':
                        continue
                    # Don't require passwords with SQLite
                    if 'odb' in name and args.odb_type == 'sqlite':
                        continue
                    check_password.append((name, opt_dict['help']))
            self.before_execute(args)
            if check_password and self.is_password_required():
                args = self._check_passwords(args, check_password)
            # GH #328 - zato create web_admin treats boolean admin_created as an exit code
            # https://github.com/zatosource/zato/issues/328
            return_code = self.execute(args)
            if needs_sys_exit:
                if isinstance(return_code, int):
                    sys.exit(return_code)
                else:
                    sys.exit(0)
        except Exception as e:
            self.reset_logger(self.args)
            if self.verbose:
                # Zato
                from zato.common.util.python_ import get_full_stack
                msg = get_full_stack()
            else:
                msg = '{}: {} (Hint: re-run with --verbose for full traceback)'.format(e.__class__.__name__, e.args)
            self.logger.error(msg)
            sys.exit(self.SYS_ERROR.EXCEPTION_CAUGHT)
# ################################################################################################################################
    def is_password_required(self):
        """ Whether this command should prompt for passwords at all - subclasses may override.
        """
        return True
# ################################################################################################################################
    def before_execute(self, args):
        """ A hooks that lets commands customize their input before they are actually executed.
        """
        # Update odb_type if it's MySQL so that users don't have to think about the particular client implementation.
        if getattr(args, 'odb_type', None) == 'mysql':
            args.odb_type = 'mysql+pymysql'
# ################################################################################################################################
    def _copy_crypto(self, repo_dir, args, middle_part):
        """ Copies crypto material (keys, cert, CA certs) given in args into
        repo_dir under canonical 'zato-{middle_part}-{name}.pem' file names.
        Missing or non-existent source paths are silently skipped.
        """
        # stdlib
        import shutil
        import os
        for name in('pub-key', 'priv-key', 'cert', 'ca-certs'):
            arg_name = '{}_path'.format(name.replace('-', '_'))
            target_path = os.path.join(repo_dir, 'zato-{}-{}.pem'.format(middle_part, name))
            source_path = getattr(args, arg_name, None)
            if source_path:
                source_path = os.path.abspath(source_path)
                if os.path.exists(source_path):
                    shutil.copyfile(source_path, target_path)
# ################################################################################################################################
    def copy_lb_crypto(self, repo_dir, args):
        """ Copies load-balancer agent crypto material into repo_dir.
        """
        self._copy_crypto(repo_dir, args, 'lba')
# ################################################################################################################################
    def copy_server_crypto(self, repo_dir, args):
        """ Copies server crypto material into repo_dir.
        """
        self._copy_crypto(repo_dir, args, 'server')
# ################################################################################################################################
    def copy_scheduler_crypto(self, repo_dir, args):
        """ Copies scheduler crypto material into repo_dir.
        """
        self._copy_crypto(repo_dir, args, 'scheduler')
# ################################################################################################################################
    def copy_web_admin_crypto(self, repo_dir, args):
        """ Copies Dashboard (web-admin) crypto material into repo_dir - unlike
        _copy_crypto, all four paths are required to exist here.
        """
        # stdlib
        import shutil
        import os
        for attr, name in (('pub_key_path', 'pub-key'), ('priv_key_path', 'priv-key'), ('cert_path', 'cert'),
            ('ca_certs_path', 'ca-certs')):
            file_name = os.path.join(repo_dir, 'web-admin-{}.pem'.format(name))
            shutil.copyfile(os.path.abspath(getattr(args, attr)), file_name)
# ################################################################################################################################
    def get_crypto_manager_from_server_config(self, config, repo_dir):
        """ Thin wrapper returning a crypto manager built from a server's config.
        """
        # Zato
        from zato.common.util.api import get_crypto_manager_from_server_config
        return get_crypto_manager_from_server_config(config, repo_dir)
# ################################################################################################################################
    def get_odb_session_from_server_config(self, config, cm):
        """ Thin wrapper returning an ODB session built from a server's config.
        """
        # Zato
        from zato.common.util.api import get_odb_session_from_server_config
        return get_odb_session_from_server_config(config, cm, False)
# ################################################################################################################################
    def ensure_path_is_a_server(self, path):
        """ Exits the process with NOT_A_ZATO_SERVER unless 'path' points to a
        Zato server directory (detected by the presence of config/repo/secrets.conf).
        """
        # stdlib
        import os
        import sys
        # Zato
        from zato.common.util.api import get_config
        repo_location = os.path.join(path, 'config', 'repo')
        secrets_conf = get_config(repo_location, 'secrets.conf', needs_user_config=False)
        # This file must exist, otherwise it's not a path to a server
        if not secrets_conf:
            self.logger.warning('No server found at `%s`', path)
            sys.exit(self.SYS_ERROR.NOT_A_ZATO_SERVER)
# ################################################################################################################################
class FromConfig(ZatoCommand):
    """ Executes commands from a command config file.
    """
    def execute(self, args):
        """ Runs the command with arguments read from a config file.

        Each non-comment line of the file is expected to be of the form
        'name=value' - every such pair is set as an attribute on args
        before the underlying command is run via run_command.
        """
        # Use a context manager so the config file is always closed,
        # even if a malformed line makes the parsing below raise
        # (the previous version leaked the file handle).
        with open_r(args.path) as f:
            for line in f:
                # Skip comment lines
                if line.lstrip().startswith('#'):
                    continue
                # Split on the first '=' only so values may themselves contain '='
                arg, value = line.split('=', 1)
                arg = arg.strip()
                value = value.strip()
                setattr(args, arg, value)
        run_command(args)
# ################################################################################################################################
class CACreateCommand(ZatoCommand):
    """ A base class for all commands that create new crypto material.
    """
    # The presence of this marker file identifies a directory as a CA directory
    file_needed = '.zato-ca-dir'
# ################################################################################################################################
    def __init__(self, args):
        # stdlib
        import os
        super(CACreateCommand, self).__init__(args)
        self.target_dir = os.path.abspath(args.path)
# ################################################################################################################################
    def _on_file_missing(self):
        """ Returns the message shown when the CA marker file is not found.
        """
        msg = "{} doesn't seem to be a CA directory, the '{}' file is missing."
        return msg.format(self.target_dir, self.file_needed)
# ################################################################################################################################
    def _execute(self, args, extension, show_output=True):
        """ Generates a CSR, a private/public key pair and a certificate signed by
        the local CA, using the openssl command-line tool and the CA material
        kept under self.target_dir. Returns the format_args dict with the paths
        to all the artefacts created, for callers that invoke this directly.
        """
        # stdlib
        import os
        import tempfile
        now = self._get_now()
        # The openssl config template is filled in with the subject attributes below
        openssl_template = open_r(os.path.join(self.target_dir, 'ca-material', 'openssl-template.conf')).read()
        ou_attrs = ('organizational_unit', 'organizational-unit')
        template_args = {}
        for name in('organization', 'locality', 'state_or_province', 'country'):
            value = self._get_arg(args, name, ca_defaults[name])
            template_args[name.replace('-', '_')] = value
        # The OU may be given under either of two spellings - the first one found wins
        for name in ou_attrs:
            has_name = self._get_arg(args, name, None)
            if has_name:
                value = self._get_arg(args, name, ca_defaults[name])
                template_args[name.replace('-', '_')] = value
                break
        else:
            # No OU given on input - let the subclass compute one or fall back to the default
            if hasattr(self, 'get_organizational_unit'):
                template_args['organizational_unit'] = self.get_organizational_unit(args)
            else:
                template_args['organizational_unit'] = ca_defaults['organizational_unit']
        template_args['common_name'] = self._get_arg(args, 'common_name', default_common_name)
        template_args['target_dir'] = self.target_dir
        # On non-Windows systems openssl's own '$dir' variable expansion is used ..
        template_args['ca_serial'] = '$dir/ca-material/ca-serial'
        template_args['ca_certindex'] = '$dir/ca-material/ca-certindex'
        template_args['target_dir_rel'] = '$dir'
        template_args['ca_key'] = '$dir/ca-material/ca-cert.pem'
        template_args['private_key'] = '$dir/ca-material/ca-key.pem'
        import platform
        system = platform.system()
        is_windows = 'windows' in system.lower()
        # .. whereas on Windows, relative paths with forward slashes are substituted explicitly.
        if is_windows:
            template_args['ca_serial'] = os.path.relpath(os.path.join(self.target_dir, 'ca-material', 'ca-serial')).replace('\\','/')
            template_args['ca_certindex'] = os.path.relpath(os.path.join(self.target_dir, 'ca-material', 'ca-certindex')).replace('\\','/')
            template_args['target_dir_rel'] = os.path.relpath(self.target_dir).replace('\\','/')
            template_args['ca_key'] = os.path.relpath(os.path.join(self.target_dir, 'ca-material', 'ca-cert.pem')).replace('\\','/')
            template_args['private_key'] = os.path.relpath(os.path.join(self.target_dir, 'ca-material', 'ca-key.pem')).replace('\\','/')
        # The rendered openssl config lives in a temporary file for the duration of the calls below
        f = tempfile.NamedTemporaryFile(mode='w+') # noqa
        f.write(openssl_template.format(**template_args))
        f.flush()
        file_args = {
            'now':now,
            'target_dir':self.target_dir
        }
        for arg in('cluster_name', 'server_name', 'scheduler_name'):
            if hasattr(args, arg):
                file_args[arg] = getattr(args, arg)
        file_args['file_prefix'] = self.get_file_prefix(file_args)
        csr_name = os.path.join(self.target_dir, 'out-csr', '{file_prefix}-csr-{now}.pem'.format(**file_args))
        priv_key_name = os.path.join(self.target_dir, 'out-priv', '{file_prefix}-priv-{now}.pem'.format(**file_args))
        pub_key_name = os.path.join(self.target_dir, 'out-pub', '{file_prefix}-pub-{now}.pem'.format(**file_args))
        cert_name = os.path.join(self.target_dir, 'out-cert', '{file_prefix}-cert-{now}.pem'.format(**file_args))
        format_args = {
            'config': f.name,
            'extension': extension,
            'csr_name': csr_name,
            'priv_key_name': priv_key_name,
            'pub_key_name': pub_key_name,
            'cert_name': cert_name,
            'target_dir': self.target_dir,
            'ca_password': os.path.relpath(os.path.join(self.target_dir, 'ca-material', 'ca-password'))
        }
        if is_windows:
            format_args['ca_password'] = os.path.relpath(os.path.join(self.target_dir, 'ca-material', 'ca-password')).replace('\\','\\\\')
        # Create the CSR and keys ..
        # NOTE(review): paths are interpolated into a shell command run via os.system -
        # they come from local configuration, not untrusted input
        cmd = """openssl req -batch -new -nodes -extensions {extension} \
          -out {csr_name} \
          -keyout {priv_key_name} \
          -pubkey \
          -newkey rsa:2048 -config {config} """.format(**format_args)
        os.system(cmd)
        # .. note that we were using "-pubkey" flag above so we now have to extract
        # the public key from the CSR.
        split_line = '-----END PUBLIC KEY-----'
        csr_pub = open_r(csr_name).read()
        csr_pub = csr_pub.split(split_line)
        pub = csr_pub[0] + split_line
        csr = csr_pub[1].lstrip()
        open_w(csr_name).write(csr)
        open_w(pub_key_name).write(pub)
        # Generate the certificate
        cmd = """openssl ca -batch -passin file:{ca_password} -config {config} \
          -out {cert_name} \
          -extensions {extension} \
          -in {csr_name}""".format(**format_args)
        os.system(cmd)
        f.close()
        # Now delete the default certificate stored in '.\', we don't really
        # need it because we have its copy in '.\out-cert' anyway.
        last_serial = open_r(os.path.join(self.target_dir, 'ca-material', 'ca-serial.old')).read().strip()
        os.remove(os.path.join(self.target_dir, last_serial + '.pem'))
        msg = """Crypto material generated and saved in:
  - private key: {priv_key_name}
  - public key: {pub_key_name}
  - certificate {cert_name}
  - CSR: {csr_name}""".format(**format_args)
        if show_output:
            if self.verbose:
                self.logger.debug(msg)
            else:
                self.logger.info('OK')
        # Make sure permissions are tight (GH #440)
        os.chmod(priv_key_name, 0o640)
        # In case someone needs to invoke us directly and wants to find out
        # what the format_args were.
        return format_args
# ################################################################################################################################
class ManageCommand(ZatoCommand):
    """ A base class for commands that manage an already-existing component -
    figures out which component type a path points to and dispatches to the
    matching _on_* handler implemented by subclasses.
    """
    add_config_file = False
# ################################################################################################################################
    def _get_dispatch(self):
        """ Maps component codes (as stored in the component's info file)
        to the handlers implemented by subclasses.
        """
        return {
            self.COMPONENTS.LOAD_BALANCER.code: self._on_lb,
            self.COMPONENTS.SERVER.code: self._on_server,
            self.COMPONENTS.WEB_ADMIN.code: self._on_web_admin,
            self.COMPONENTS.SCHEDULER.code: self._on_scheduler,
        }
    # Files whose presence identifies a directory as a Zato component
    command_files = {ZATO_INFO_FILE}
# ################################################################################################################################
    def _on_lb(self, *ignored_args, **ignored_kwargs):
        raise NotImplementedError('Should be implemented by subclasses')
# ################################################################################################################################
    # All handlers default to the not-implemented one above - subclasses override what they support
    _on_web_admin = _on_server = _on_scheduler = _on_lb
# ################################################################################################################################
    def execute(self, args):
        """ Resolves args.path to a component directory, reads its info file to
        learn the component type and invokes the corresponding handler,
        exiting with NOT_A_ZATO_COMPONENT when no info file is found.
        """
        # pylint: disable=attribute-defined-outside-init
        # stdlib
        import os
        import sys
        # Zato
        from zato.common.json_internal import load
        # Expand '~' and environment variables before resolving the path
        args_path = os.path.expanduser(args.path)
        args_path = os.path.expandvars(args_path)
        self.component_dir = os.path.abspath(args_path)
        self.config_dir = os.path.join(self.component_dir, 'config')
        listing = set(os.listdir(self.component_dir))
        # Do we have any files we're looking for?
        found = self.command_files & listing
        if not found:
            msg = """Directory {} doesn't seem to belong to a Zato component. Expected one of the following to exist {}""".format(
                self.component_dir, sorted(self.command_files))
            self.logger.info(msg)
            sys.exit(self.SYS_ERROR.NOT_A_ZATO_COMPONENT) # noqa
        found = list(found)[0]
        json_data = load(open_r(os.path.join(self.component_dir, found)))
        # Handlers expect to run from within the component's directory
        os.chdir(self.component_dir)
        handler = self._get_dispatch()[json_data['component']]
        return handler(args)
# ################################################################################################################################
def is_arg_given(args, *arg_names):
    """ Returns True as soon as any of arg_names carries a truthy value on args,
    which may be either dict-like (supporting .get) or a plain namespace with
    attributes. Falls through (returning None, a falsy value) when none do.
    """
    for arg_name in arg_names:
        # EAFP - prefer the dict-style lookup, fall back to attribute access
        try:
            value = args.get(arg_name)
        except AttributeError:
            value = getattr(args, arg_name, None)
        if value:
            return True
# ################################################################################################################################
# ################################################################################################################################
class ServerAwareCommand(ZatoCommand):
    """ A subclass that knows how to assign a Zato client object based on command line arguments.
    """
    # Set in before_execute, used by all the _invoke_* helpers below
    zato_client: 'ZatoClient'
    def before_execute(self, args):
        """ Builds self.zato_client from the server path given in args
        (defaulting to the current directory).
        """
        # stdlib
        import os
        # Zato
        from zato.common.util.api import get_client_from_server_conf
        server_path = args.path or '.'
        server_path = os.path.abspath(server_path)
        # This will exit the process if path does not point to a server
        self.ensure_path_is_a_server(server_path)
        self.zato_client = get_client_from_server_conf(server_path)
# ################################################################################################################################
    def _invoke_service(
        self,
        service:'str',
        request:'anydict',
        hook_func:'callnone'=None
    ) -> 'stranydict':
        """ Invokes 'service' with 'request' through self.zato_client, returning
        the business data on success (optionally pre-processed by hook_func)
        or the parsed error details on failure.
        """
        # Pass all the data to the underlying service and get its response ..
        response = self.zato_client.invoke(**{
            'name': service,
            'payload': request
        })
        # We enter here if there is genuine business data to process
        if response.data:
            # .. let's extract it ..
            data = response.data
            # .. if we have a hook callable to pre-process data, let's invoke it ..
            if hook_func:
                data = hook_func(data)
        # We enter here if there was an invocation error
        else:
            data = response.details or '{}'
            data = loads(data)
        return data
# ################################################################################################################################
    def _log_response(
        self,
        data:'any_',
        needs_stdout:'bool'=True
    ) -> 'None':
        """ Serializes 'data' to indented JSON and, if needs_stdout is True,
        writes it to standard output.
        """
        # stdlib
        import sys
        # We are ready to serialize the data to JSON ..
        data = dumps(data, indent=2)
        # .. no matter what data we have, we can log it now if we are told to do so.
        if needs_stdout:
            sys.stdout.write(data + '\n')
# ################################################################################################################################
    def _invoke_service_and_log_response(
        self,
        service:'str',
        request:'anydict',
        hook_func:'callnone'=None,
        needs_stdout:'bool'=True
    ) -> 'any_':
        """ Convenience wrapper - invokes the service, logs its output and returns it.
        """
        # Invoke the service first ..
        data = self._invoke_service(service, request, hook_func)
        # .. log its output ..
        self._log_response(data, needs_stdout)
        # .. and return the output to our caller.
        return data
# ################################################################################################################################
# ################################################################################################################################
| 44,815
|
Python
|
.py
| 868
| 42.793779
| 139
| 0.492853
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,461
|
delete_odb.py
|
zatosource_zato/code/zato-cli/src/zato/cli/delete_odb.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# Zato
from zato.cli import ZatoCommand, common_odb_opts
class Delete(ZatoCommand):
    """ Deletes Zato components
    """
    needs_password_confirm = False
    opts = common_odb_opts
    def execute(self, args):
        """ Drops all the ODB objects, provided an ODB exists at all,
        returning NO_ODB_FOUND otherwise.
        """
        # Zato
        from zato.common.odb import drop_all
        engine = self._get_engine(args)
        # Guard clause - the presence of the 'install_state' table is what tells us there is an ODB
        if not engine.dialect.has_table(engine.connect(), 'install_state'):
            self.logger.error('No ODB found')
            return self.SYS_ERROR.NO_ODB_FOUND
        drop_all(engine)
        if self.verbose:
            self.logger.debug('Successfully deleted the ODB')
        else:
            self.logger.info('OK')
| 920
|
Python
|
.py
| 26
| 28.153846
| 82
| 0.636878
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,462
|
create_server.py
|
zatosource_zato/code/zato-cli/src/zato/cli/create_server.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2024, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
from copy import deepcopy
from dataclasses import dataclass
# Zato
from zato.cli import common_odb_opts, common_scheduler_server_api_client_opts, common_scheduler_server_address_opts, \
sql_conf_contents, ZatoCommand
from zato.common.api import CONTENT_TYPE, default_internal_modules, Default_Service_File_Data, NotGiven, SCHEDULER, \
SSO as CommonSSO
from zato.common.crypto.api import ServerCryptoManager
from zato.common.simpleio_ import simple_io_conf_contents
from zato.common.util.api import as_bool, get_demo_py_fs_locations
from zato.common.util.config import get_scheduler_api_client_for_server_password, get_scheduler_api_client_for_server_username
from zato.common.util.open_ import open_r, open_w
from zato.common.events.common import Default as EventsDefault
# ################################################################################################################################
# ################################################################################################################################
if 0:
from zato.common.typing_ import any_
# ################################################################################################################################
# ################################################################################################################################
# For pyflakes
simple_io_conf_contents = simple_io_conf_contents
# ################################################################################################################################
# ################################################################################################################################
server_conf_dict = deepcopy(CONTENT_TYPE)
server_conf_dict.deploy_internal = {}

# Build the [deploy_internal] stanza - one key=value line per internal module.
deploy_internal = ['{}={}'.format(key, value) for key, value in default_internal_modules.items()]
server_conf_dict.deploy_internal = '\n'.join(deploy_internal)
# ################################################################################################################################
# ################################################################################################################################
# Template of a server's main configuration file (server.conf).
# The single-brace placeholders ({JSON}, {deploy_internal}) are filled in immediately below
# via .format(**server_conf_dict); the double-brace ones ({{port}}, {{odb_host}}, etc.) survive
# that first pass and are substituted later by Create.execute.
server_conf_template = """[main]
gunicorn_bind=0.0.0.0:{{port}}
gunicorn_worker_class=gevent
gunicorn_workers={{gunicorn_workers}}
gunicorn_timeout=1234567890
gunicorn_user=
gunicorn_group=
gunicorn_proc_name=
gunicorn_logger_class=
gunicorn_graceful_timeout=1
debugger_enabled=False
debugger_host=0.0.0.0
debugger_port=5678
ipc_host=127.0.0.1
ipc_port_start=17050
work_dir=../../work
deployment_lock_expires=1073741824 # 2 ** 30 seconds = +/- 34 years
deployment_lock_timeout=180
token=zato+secret://zato.server_conf.main.token
service_sources=./service-sources.txt
[http_response]
server_header=Zato
return_x_zato_cid=True
code_400_message=400 Bad Request
code_400_content_type=text/plain
code_401_message=401 Unauthorized
code_401_content_type=text/plain
code_403_message=403 Forbidden
code_403_content_type=text/plain
code_404_message=404 Not Found
code_404_content_type=text/plain
code_405_message=405 Not Allowed
code_405_content_type=text/plain
code_500_message=500 Internal Server Error
code_500_content_type=text/plain
[crypto]
use_tls=False
tls_version=TLSv1
tls_ciphers=ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS
tls_client_certs=optional
priv_key_location=zato-server-priv-key.pem
pub_key_location=zato-server-pub-key.pem
cert_location=zato-server-cert.pem
ca_certs_location=zato-server-ca-certs.pem
[odb]
db_name={{odb_db_name}}
engine={{odb_engine}}
extra=echo=False
host={{odb_host}}
port={{odb_port}}
password=zato+secret://zato.server_conf.odb.password
pool_size={{odb_pool_size}}
username={{odb_user}}
use_async_driver=True
[scheduler]
scheduler_host={{scheduler_host}}
scheduler_port={{scheduler_port}}
scheduler_use_tls={{scheduler_use_tls}}
scheduler_api_username={{scheduler_api_client_for_server_username}}
scheduler_api_password={{scheduler_api_client_for_server_password}}
[hot_deploy]
pickup_dir=../../pickup/incoming/services
backup_history=100
backup_format=bztar
delete_after_pick_up=False
max_batch_size=1000 # In kilobytes, default is 1 megabyte
redeploy_on_parent_change=True
# These three are relative to work_dir
current_work_dir=./hot-deploy/current
backup_work_dir=./hot-deploy/backup
last_backup_work_dir=./hot-deploy/backup/last
[deploy_patterns_allowed]
order=true_false
*=True
[invoke_patterns_allowed]
order=true_false
*=True
[invoke_target_patterns_allowed]
order=true_false
*=True
[spring]
context_class=zato.server.spring_context.ZatoContext
[misc]
return_internal_objects=False
internal_services_may_be_deleted=False
initial_cluster_name={{initial_cluster_name}}
initial_server_name={{initial_server_name}}
queue_build_cap=30000000 # All queue-based connections need to initialize in that many seconds
http_proxy=
locale=
ensure_sql_connections_exist=True
http_server_header=Apache
needs_x_zato_cid=False
zeromq_connect_sleep=0.1
aws_host=
fifo_response_buffer_size=0.2 # In MB
jwt_secret=zato+secret://zato.server_conf.misc.jwt_secret
enforce_service_invokes=False
return_tracebacks=True
default_error_message="An error has occurred"
startup_callable=
return_json_schema_errors=False
sftp_genkey_command=dropbearkey
posix_ipc_skip_platform=darwin
service_invoker_allow_internal="pub.zato.ping", "/zato/api/invoke/service_name"
[events]
fs_data_path = {{events_fs_data_path}}
sync_threshold = {{events_sync_threshold}}
sync_interval = {{events_sync_interval}}
[http]
methods_allowed=GET, POST, DELETE, PUT, PATCH, HEAD, OPTIONS
[stats]
expire_after=168 # In hours, 168 = 7 days = 1 week
[kvdb]
host={{kvdb_host}}
port={{kvdb_port}}
unix_socket_path=
password=zato+secret://zato.server_conf.kvdb.password
db=0
socket_timeout=
charset=
errors=
use_redis_sentinels=False
redis_sentinels=
redis_sentinels_master=
shadow_password_in_logs=True
log_connection_info_sleep_time=5 # In seconds
[startup_services_first_worker]
zato.helpers.input-logger=Sample payload for a startup service (first worker)
zato.notif.init-notifiers=
zato.kvdb.log-connection-info=
zato.sso.cleanup.cleanup=300
zato.updates.check-updates=
pub.zato.channel.web-socket.cleanup-wsx=
[startup_services_any_worker]
zato.helpers.input-logger=Sample payload for a startup service (any worker)
pub.zato.channel.web-socket.cleanup-wsx=
[profiler]
enabled=False
profiler_dir=profiler
log_filename=profiler.log
cachegrind_filename=cachegrind.out
discard_first_request=True
flush_at_shutdown=True
url_path=/zato-profiler
unwind=False
[user_config]
# All paths are either absolute or relative to the directory server.conf is in
user=./user.conf
[newrelic]
config=
environment=
ignore_errors=
log_file=
log_level=
[sentry]
dsn=
timeout=5
level=WARN
[rbac]
custom_auth_list_service=
[[auth_type_hook]]
[component_enabled]
stats=False
slow_response=True
cassandra=True
email=True
hl7=True
search=True
msg_path=True
ibm_mq=False
odoo=True
zeromq=True
patterns=True
target_matcher=False
invoke_matcher=False
sms=True
sso=True
[pubsub]
wsx_gateway_service_allowed=
log_if_deliv_server_not_found=True
log_if_wsx_deliv_server_not_found=False
data_prefix_len=2048
data_prefix_short_len=64
sk_server_table_columns=6, 15, 8, 6, 17, 75
[pubsub_meta_topic]
enabled=True
store_frequency=1
[pubsub_meta_endpoint_pub]
enabled=True
store_frequency=1
max_history=100
data_len=0
[pubsub_meta_endpoint_sub]
enabled=True
store_frequency=1
max_history=100
data_len=0
[wsx]
hook_service=
json_library=stdlib
pings_missed_threshold=2
ping_interval=30
[content_type]
json = {JSON}
[zeromq_mdp]
linger=0
poll_interval=100
heartbeat=3
workers_pool_initial = 10
workers_pool_mult = 2
workers_pool_max = 250
[updates]
notify_major_versions=True
notify_minor_versions=True
notify_if_from_source=True
[preferred_address]
address=
ip=10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16, eth0
boot_if_preferred_not_found=False
allow_loopback=False
[shmem]
size=0.1 # In MB
[logging]
http_access_log_ignore=
rest_log_ignore=/zato/admin/invoke,
[greenify]
#/path/to/oracle/instantclient_19_3/libclntsh.so.19.1=True
[os_environ]
sample_key=sample_value
[command_set_scheduler]
[deploy_internal]
{deploy_internal}
""".format(**server_conf_dict)
# ################################################################################################################################
# Default contents of a server's pickup.conf - defines local directories that the server
# watches for incoming JSON/XML/CSV files, user configuration and static files.
pickup_conf = """#[hot-deploy.user.local-dev]
#pickup_from=/uncomment/this/stanza/to/enable/a/custom/location
[json]
pickup_from=./pickup/incoming/json
move_processed_to=./pickup/processed/json
patterns=*.json
parse_with=py:json.loads
services=zato.pickup.log-json
topics=
[xml]
pickup_from=./pickup/incoming/xml
move_processed_to=./pickup/processed/xml
patterns=*.xml
parse_with=py:lxml.objectify.fromstring
services=zato.pickup.log-xml
topics=
[csv]
pickup_from=./pickup/incoming/csv
move_processed_to=./pickup/processed/csv
patterns=*.csv
read_on_pickup=False
parse_on_pickup=False
delete_after_pickup=False
services=zato.pickup.log-csv
topics=
[user_conf]
pickup_from=./pickup/incoming/user-conf
patterns=*.ini, *.conf
parse_on_pickup=False
delete_after_pickup=False
services=zato.pickup.update-user-conf
topics=
[static]
pickup_from=./pickup/incoming/static
patterns=*
parse_on_pickup=False
delete_after_pickup=False
services=zato.pickup.update-static
topics=
"""
# ################################################################################################################################
# Default contents of service-sources.txt - retained only for backward compatibility.
service_sources_contents = """
#
# This file is kept for backward compatibility with previous versions of Zato.
# Do not modify it and do not use it in new deployments.
#
./work/hot-deploy/current
""".strip()
# ################################################################################################################################
# Default contents of a server's user.conf - a sample section users can extend.
user_conf_contents = """[sample_section]
string_key=sample_string
list_key=sample,list
"""
# ################################################################################################################################
# Default contents of a server's sso.conf (single sign-on configuration).
# Uses a '''-quoted literal because the reject_list value below embeds triple double quotes.
sso_conf_contents = '''[main]
encrypt_email=True
encrypt_password=True
email_service=
smtp_conn=
site_name=
[backend]
default=sql
[sql]
name=
[hash_secret]
rounds=120000
salt_size=64 # In bytes = 512 bits
[apps]
all=CRM
default=CRM
http_header=X-Zato-SSO-Current-App
signup_allowed=
login_allowed=CRM
login_metadata_allowed=
inform_if_app_invalid=True
[login]
reject_if_not_listed=False
inform_if_locked=True
inform_if_not_confirmed=True
inform_if_not_approved=True
inform_if_totp_missing=True
[password_reset]
valid_for=1440 # In minutes = 1 day
password_change_session_duration=1800 # In seconds = 30 minutes
user_search_by=username
email_title_en_GB=Password reset
email_title_en_US=Password reset
email_from=hello@example.com
[user_address_list]
[session]
expiry=60 # In minutes
expiry_hook= # Name of a service that will return expiry value each time it is needed
[password]
expiry=730 # In days, 365 days * 2 years = 730 days
inform_if_expired=False
inform_if_about_to_expire=True
inform_if_must_be_changed=True
inform_if_invalid=True
about_to_expire_threshold=30 # In days
log_in_if_about_to_expire=True
min_length=8
max_length=256
min_complexity=0
min_complexity_algorithm=zxcvbn
reject_list = """
111111
123123
123321
123456
123qwe
1q2w3e
1q2w3e4r
1q2w3e4r5t
222222
333333
444444
555555
654321
666666
777777
888888
999999
987654321
google
letmein
mynoob
password
qwerty
zxcvbnm
"""
[signup]
inform_if_user_exists=False
inform_if_user_invalid=False
inform_if_email_exists=False
inform_if_email_invalid=False
email_required=True
max_length_username=128
max_length_email=128
password_allow_whitespace=True
always_return_confirm_token=True
is_email_required=True
is_approval_needed=True
callback_service_list=
email_confirm_enabled=True
email_confirm_from=confirm@example.com
email_confirm_cc=
email_confirm_bcc=
email_confirm_template=sso-confirm.txt
email_welcome_enabled=True
email_welcome_from=welcome@example.com
email_welcome_cc=
email_welcome_bcc=
email_welcome_template=sso-welcome.txt
[user_validation]
service=zato.sso.signup.validate
reject_username=zato, admin, root, system, sso
reject_email=zato, admin, root, system, sso
[search]
default_page_size=50
max_page_size=100
'''
# ################################################################################################################################
# SSO e-mail templates; the {username}, {token}, {site_name} and {expiration_time_hours}
# placeholders are filled in at send time.
sso_confirm_template = """
Hello {username},
your account is almost ready - all we need to do is make sure that this is your email.
Use this link to confirm your address:
https://example.com/signup-confirm/{token}
If you did not want to create the account, just delete this email and everything will go back to the way it was.
ZATO_FOOTER_MARKER
Your Zato SSO team.
""".strip()
# ################################################################################################################################
sso_welcome_template = """
Hello {username},
thanks for joining us. Here are a couple great ways to get started:
* https://example.com/link/1
* https://example.com/link/2
* https://example.com/link/3
ZATO_FOOTER_MARKER
Your Zato SSO team.
""".strip()
sso_password_reset_template = """
Hello {username},
a password reset was recently requested on your {site_name} account. If this was you, please click the link below to update your password.
https://example.com/reset-password/{token}
This link will expire in {expiration_time_hours} hours.
If you do not want to reset your password, please ignore this message and the password will not be changed.
ZATO_FOOTER_MARKER
Your Zato SSO team.
""".strip()
# ################################################################################################################################
# We need to do it because otherwise IDEs may replace '-- ' with '--' (stripping the whitespace)
sso_confirm_template = sso_confirm_template.replace('ZATO_FOOTER_MARKER', '-- ')
sso_welcome_template = sso_welcome_template.replace('ZATO_FOOTER_MARKER', '-- ')
sso_password_reset_template = sso_password_reset_template.replace('ZATO_FOOTER_MARKER', '-- ')
# ################################################################################################################################
# Template of a server's secrets.conf; all the {•} placeholders receive
# Fernet-encrypted values in Create.execute.
secrets_conf_template = """
[secret_keys]
key1={keys_key1}
[zato]
well_known_data={zato_well_known_data} # Pi number
server_conf.kvdb.password={zato_kvdb_password}
server_conf.main.token={zato_main_token}
server_conf.misc.jwt_secret={zato_misc_jwt_secret}
server_conf.odb.password={zato_odb_password}
"""
# ################################################################################################################################
# A Redis-side Lua script deployed under config/repo/lua/internal.
lua_zato_rename_if_exists = """
-- Checks whether a from_key exists and if it does renames it to to_key.
-- Returns an error code otherwise.
-- Return codes:
-- 10 = Ok, renamed from_key -> to_key
-- 11 = No such from_key
local from_key = KEYS[1]
local to_key = KEYS[2]
if redis.call('exists', from_key) == 1 then
redis.call('rename', from_key, to_key)
return 10
else
return 11
end
"""
# ################################################################################################################################
# Default SQL connection pool size written to a new server's [odb] stanza.
default_odb_pool_size = 60
# ################################################################################################################################
# Relative paths of all the directories a new server consists of; they are created,
# in sorted order, by Create.prepare_directories, so every parent precedes its children.
directories = (
    # Configuration repository
    'config',
    'config/repo',
    'config/repo/lua',
    'config/repo/lua/internal',
    'config/repo/lua/user',
    'config/repo/schema',
    'config/repo/schema/json',
    'config/repo/sftp',
    'config/repo/sftp/channel',
    'config/repo/static',
    'config/repo/static/sso',
    'config/repo/static/sso/email',
    'config/repo/static/sso/email/en_GB',
    'config/repo/static/sso/email/en_US',
    'config/repo/tls',
    'config/repo/tls/keys-certs',
    'config/repo/tls/ca-certs',
    'logs',
    # Pickup directories watched at runtime (see pickup_conf above)
    'pickup',
    'pickup/incoming',
    'pickup/processed',
    'pickup/incoming/services',
    'pickup/incoming/static',
    'pickup/incoming/user-conf',
    'pickup/incoming/json',
    'pickup/incoming/xml',
    'pickup/incoming/csv',
    'pickup/processed/static',
    'pickup/processed/user-conf',
    'pickup/processed/json',
    'pickup/processed/xml',
    'pickup/processed/csv',
    'profiler',
    # Runtime work area, including hot-deployment state
    'work',
    'work/events',
    'work/events/v1',
    'work/events/v2',
    'work/hot-deploy',
    'work/hot-deploy/current',
    'work/hot-deploy/backup',
    'work/hot-deploy/backup/last',
)
# ################################################################################################################################
# Paths to the config's crypto material, relative to a server's base directory.
# Bug fix: the second line used to reassign priv_key_location (a dead store that left
# the private-key constant pointing at the public-key file and pub_key_location undefined).
priv_key_location = './config/repo/config-priv.pem'
pub_key_location = './config/repo/config-pub.pem'
# ################################################################################################################################
# ################################################################################################################################
@dataclass(init=False)
class SchedulerConfigForServer:
    """ Scheduler-related details that a newly created server writes to its server.conf
    (built by Create._get_scheduler_config).
    """

    # Address of the scheduler the server will invoke ..
    scheduler_host: 'str'
    scheduler_port: 'int'

    # .. and whether TLS is required to connect to it.
    scheduler_use_tls: 'bool'

    # Credentials for API calls in each direction between the server and the scheduler
    class api_client:

        class from_server_to_scheduler:
            username: 'str'
            password: 'str'

        class from_scheduler_to_server:
            username: 'str'
            password: 'str'
# ################################################################################################################################
# ################################################################################################################################
class Create(ZatoCommand):
    """ Creates a new Zato server - its directory layout, configuration files,
    crypto material and secrets - and registers it in the ODB cluster given on input.
    """
    needs_empty_dir = True

    # Command-line options, extending the common ODB options
    opts:'any_' = deepcopy(common_odb_opts)
    opts.append({'name':'cluster_name', 'help':'Name of the cluster to join'})
    opts.append({'name':'server_name', 'help':'Server\'s name'})
    opts.append({'name':'--pub-key-path', 'help':'Path to the server\'s public key in PEM'})
    opts.append({'name':'--priv-key-path', 'help':'Path to the server\'s private key in PEM'})
    opts.append({'name':'--cert-path', 'help':'Path to the server\'s certificate in PEM'})
    opts.append({'name':'--ca-certs-path', 'help':'Path to list of PEM certificates the server will trust'})
    opts.append({'name':'--secret-key', 'help':'Server\'s secret key (must be the same for all servers)'})
    opts.append({'name':'--jwt-secret', 'help':'Server\'s JWT secret (must be the same for all servers)'})
    opts.append({'name':'--http-port', 'help':'Server\'s HTTP port'})
    opts.append({'name':'--scheduler-host', 'help':'Deprecated. Use --scheduler-address-for-server instead.'})
    opts.append({'name':'--scheduler-port', 'help':'Deprecated. Use --scheduler-address-for-server instead.'})
    opts.append({'name':'--threads', 'help':'How many main threads the server should use', 'default':1}) # type: ignore
    opts += deepcopy(common_scheduler_server_address_opts)
    opts += deepcopy(common_scheduler_server_api_client_opts)

# ################################################################################################################################

    def __init__(self, args:'any_') -> 'None':
        """ Stores the target directory and generates this server's unique token.
        """

        # stdlib
        import os
        import uuid

        super(Create, self).__init__(args)
        self.target_dir = os.path.abspath(args.path)
        self.dirs_prepared = False
        self.token = uuid.uuid4().hex.encode('utf8')

# ################################################################################################################################

    def allow_empty_secrets(self):
        """ Secrets given on input (e.g. ODB or KVDB passwords) may be empty.
        """
        return True

# ################################################################################################################################

    def prepare_directories(self, show_output:'bool') -> 'None':
        """ Creates the on-disk directory layout based on the module-level 'directories' tuple.
        Sorting guarantees that parent directories are created before their children.
        """

        # stdlib
        import os

        if show_output:
            self.logger.debug('Creating directories..')

        for d in sorted(directories):
            d = os.path.join(self.target_dir, d)
            if show_output:
                self.logger.debug('Creating %s', d)
            os.mkdir(d)

        self.dirs_prepared = True

# ################################################################################################################################

    def _get_scheduler_config(self, args:'any_', secret_key:'bytes') -> 'SchedulerConfigForServer':
        """ Builds the scheduler configuration (address, TLS flag and API credentials)
        that the new server will store in its server.conf.
        """

        # stdlib
        import os

        # Local variables
        use_tls = NotGiven

        # Our response to produce
        out = SchedulerConfigForServer()

        # Extract basic information about the scheduler the server will be invoking ..
        use_tls, host, port = self._extract_address_data(
            args,
            'scheduler_address_for_server',
            'scheduler_host',
            'scheduler_port',
            SCHEDULER.DefaultHost,
            SCHEDULER.DefaultPort,
        )

        # .. now, we can assign host and port to the response ..
        out.scheduler_host = host
        out.scheduler_port = port

        # Extract API credentials
        cm = ServerCryptoManager.from_secret_key(secret_key)
        scheduler_api_client_for_server_username = get_scheduler_api_client_for_server_username(args)
        scheduler_api_client_for_server_password = get_scheduler_api_client_for_server_password(args, cm)

        out.api_client.from_server_to_scheduler.username = scheduler_api_client_for_server_username
        out.api_client.from_server_to_scheduler.password = scheduler_api_client_for_server_password

        # This can be overridden through environment variables
        env_keys = ['Zato_Server_To_Scheduler_Use_TLS', 'ZATO_SERVER_SCHEDULER_USE_TLS']
        for key in env_keys:
            if value := os.environ.get(key):
                use_tls = as_bool(value)
                break
        else:
            # No environment variable was set - default to False unless it was already extracted above
            if use_tls is NotGiven:
                use_tls = False

        out.scheduler_use_tls = use_tls # type: ignore

        # .. finally, return the response to our caller.
        return out

# ################################################################################################################################

    def _add_demo_service(self, fs_location:'str', full_path:'str') -> 'None':
        """ Writes out the default demo service to fs_location, embedding full_path in its source.
        """
        with open_w(fs_location) as f:
            data = Default_Service_File_Data.format(**{
                'full_path': full_path,
            })
            _ = f.write(data)

# ################################################################################################################################

    def execute(
        self,
        args:'any_',
        default_http_port:'any_'=None,
        show_output:'bool'=True,
        return_server_id:'bool'=False
    ) -> 'int | None':
        """ Creates the server on disk and registers it in the ODB. Returns the new server's
        database ID only if return_server_id is True, an error code on failure, None otherwise.
        """

        # stdlib
        import os
        import platform
        from datetime import datetime
        from traceback import format_exc

        # Cryptography
        from cryptography.fernet import Fernet

        # SQLAlchemy
        from sqlalchemy.exc import IntegrityError

        # Python 2/3 compatibility
        from six import PY3

        # Zato
        from zato.cli._apispec_default import apispec_files
        from zato.common.api import SERVER_JOIN_STATUS
        from zato.common.crypto.const import well_known_data
        from zato.common.defaults import http_plain_server_port
        from zato.common.odb.model import Cluster, Server
        from zato.common.util.logging_ import get_logging_conf_contents

        logging_conf_contents = get_logging_conf_contents()

        # Static files to create in the server's config repo, mapping relative path -> contents
        files = {
            'config/repo/logging.conf': logging_conf_contents,
            'config/repo/service-sources.txt': service_sources_contents,
            'config/repo/lua/internal/zato.rename_if_exists.lua': lua_zato_rename_if_exists,
            'config/repo/sql.conf': sql_conf_contents,
            'config/repo/static/sso/email/en_GB/signup-confirm.txt': CommonSSO.EmailTemplate.SignupConfirm,
            'config/repo/static/sso/email/en_GB/signup-welcome.txt': CommonSSO.EmailTemplate.SignupWelcome,
            'config/repo/static/sso/email/en_GB/password-reset-link.txt': CommonSSO.EmailTemplate.PasswordResetLink,
            'config/repo/static/sso/email/en_US/signup-confirm.txt': CommonSSO.EmailTemplate.SignupConfirm,
            'config/repo/static/sso/email/en_US/signup-welcome.txt': CommonSSO.EmailTemplate.SignupWelcome,
            'config/repo/static/sso/email/en_US/password-reset-link.txt': CommonSSO.EmailTemplate.PasswordResetLink,
        }

        default_http_port = default_http_port or http_plain_server_port

        engine = self._get_engine(args)
        session = self._get_session(engine) # type: ignore

        # The cluster to join must already exist in the ODB
        cluster = session.query(Cluster).filter(Cluster.name == args.cluster_name).first() # type: ignore
        if not cluster:
            self.logger.error("Cluster `%s` doesn't exist in ODB", args.cluster_name)
            return self.SYS_ERROR.NO_SUCH_CLUSTER

        # Register the new server in the ODB
        server = Server(cluster=cluster)
        server.name = args.server_name
        if isinstance(self.token, (bytes, bytearray)): # type: ignore
            server.token = self.token.decode('utf8') # type: ignore
        else:
            server.token = self.token
        server.last_join_status = SERVER_JOIN_STATUS.ACCEPTED # type: ignore
        server.last_join_mod_by = self._get_user_host() # type: ignore
        server.last_join_mod_date = datetime.utcnow() # type: ignore
        session.add(server)

        try:
            if not self.dirs_prepared:
                self.prepare_directories(show_output)

            repo_dir = os.path.join(self.target_dir, 'config', 'repo')

            # Note that server crypto material is optional so if none was given on input
            # this command will be a no-op.
            self.copy_server_crypto(repo_dir, args)

            if show_output:
                self.logger.debug('Created a repo in {}'.format(repo_dir))
                self.logger.debug('Creating files..')

            # Write out all the static configuration files
            for file_name, contents in sorted(files.items()):
                file_name = os.path.join(self.target_dir, file_name)
                if show_output:
                    self.logger.debug('Creating {}'.format(file_name))
                f = open_w(file_name)
                _ = f.write(contents)
                f.close()

            logging_conf_loc = os.path.join(self.target_dir, 'config/repo/logging.conf')

            # Fill in the log file's path in the logging configuration
            logging_conf = open_r(logging_conf_loc).read()
            _ = open_w(logging_conf_loc).write(logging_conf.format(log_path=os.path.join(self.target_dir, 'logs', 'zato.log')))

            if show_output:
                self.logger.debug('Logging configuration stored in {}'.format(logging_conf_loc))

            odb_engine=args.odb_type
            if odb_engine.startswith('postgresql'):
                odb_engine = 'postgresql+pg8000'

            server_conf_loc = os.path.join(self.target_dir, 'config/repo/server.conf')
            server_conf = open_w(server_conf_loc)

            # There will be multiple keys in future releases to allow for key rotation
            secret_key = args.secret_key or Fernet.generate_key()

            # Fall back to a single thread if the option cannot be parsed as an integer
            try:
                threads = int(args.threads)
            except Exception:
                threads = 1

            # Build the scheduler's configuration
            scheduler_config = self._get_scheduler_config(args, secret_key)

            # Substitute the variables ..
            server_conf_data = server_conf_template.format(
                port=getattr(args, 'http_port', None) or default_http_port,
                gunicorn_workers=threads,
                odb_db_name=args.odb_db_name or args.sqlite_path,
                odb_engine=odb_engine,
                odb_host=args.odb_host or '',
                odb_port=args.odb_port or '',
                odb_pool_size=default_odb_pool_size,
                odb_user=args.odb_user or '',
                kvdb_host=self.get_arg('kvdb_host'),
                kvdb_port=self.get_arg('kvdb_port'),
                initial_cluster_name=args.cluster_name,
                initial_server_name=args.server_name,
                events_fs_data_path=EventsDefault.fs_data_path,
                events_sync_threshold=EventsDefault.sync_threshold,
                events_sync_interval=EventsDefault.sync_interval,
                scheduler_host=scheduler_config.scheduler_host,
                scheduler_port=scheduler_config.scheduler_port,
                scheduler_use_tls=scheduler_config.scheduler_use_tls,
                scheduler_api_client_for_server_username=scheduler_config.api_client.from_server_to_scheduler.username,
                scheduler_api_client_for_server_password=scheduler_config.api_client.from_server_to_scheduler.password,
            )

            # .. and special-case this one as it contains the {} characters
            # .. which makes it more complex to substitute them.
            server_conf_data = server_conf_data.replace('/zato/api/invoke/service_name', '/zato/api/invoke/{service_name}')

            _ = server_conf.write(server_conf_data)
            server_conf.close()

            pickup_conf_loc = os.path.join(self.target_dir, 'config/repo/pickup.conf')
            pickup_conf_file = open_w(pickup_conf_loc)
            _ = pickup_conf_file.write(pickup_conf)
            pickup_conf_file.close()

            user_conf_loc = os.path.join(self.target_dir, 'config/repo/user.conf')
            user_conf = open_w(user_conf_loc)
            _ = user_conf.write(user_conf_contents)
            user_conf.close()

            sso_conf_loc = os.path.join(self.target_dir, 'config/repo/sso.conf')
            sso_conf = open_w(sso_conf_loc)
            _ = sso_conf.write(sso_conf_contents)
            sso_conf.close()

            # On systems other than Windows, where symlinks are not fully supported,
            # for convenience and backward compatibility,
            # create a shortcut symlink from incoming/user-conf to config/repo/user-conf.
            system = platform.system()
            is_windows = 'windows' in system.lower()

            if not is_windows:
                user_conf_src = os.path.join(self.target_dir, 'pickup', 'incoming', 'user-conf')
                user_conf_dest = os.path.join(self.target_dir, 'config', 'repo', 'user-conf')
                os.symlink(user_conf_src, user_conf_dest)

            fernet1 = Fernet(secret_key)

            secrets_conf_loc = os.path.join(self.target_dir, 'config/repo/secrets.conf')
            secrets_conf = open_w(secrets_conf_loc)

            # Encrypt all the secrets with the server's secret key before they are written out
            kvdb_password = self.get_arg('kvdb_password') or ''
            kvdb_password = kvdb_password.encode('utf8')
            kvdb_password = fernet1.encrypt(kvdb_password)
            kvdb_password = kvdb_password.decode('utf8')

            odb_password = self.get_arg('odb_password') or ''
            odb_password = odb_password.encode('utf8')
            odb_password = fernet1.encrypt(odb_password)
            odb_password = odb_password.decode('utf8')

            zato_well_known_data = fernet1.encrypt(well_known_data.encode('utf8'))
            zato_well_known_data = zato_well_known_data.decode('utf8')

            if isinstance(secret_key, (bytes, bytearray)):
                secret_key = secret_key.decode('utf8')

            zato_main_token = fernet1.encrypt(self.token)
            zato_main_token = zato_main_token.decode('utf8')

            zato_misc_jwt_secret = getattr(args, 'jwt_secret', None)
            if not zato_misc_jwt_secret:
                zato_misc_jwt_secret = Fernet.generate_key()

            if not isinstance(zato_misc_jwt_secret, bytes):
                zato_misc_jwt_secret = zato_misc_jwt_secret.encode('utf8')

            zato_misc_jwt_secret = fernet1.encrypt(zato_misc_jwt_secret)

            if isinstance(zato_misc_jwt_secret, bytes): # type: ignore
                zato_misc_jwt_secret = zato_misc_jwt_secret.decode('utf8')

            _ = secrets_conf.write(secrets_conf_template.format(
                keys_key1=secret_key,
                zato_well_known_data=zato_well_known_data,
                zato_kvdb_password=kvdb_password,
                zato_main_token=zato_main_token,
                zato_misc_jwt_secret=zato_misc_jwt_secret,
                zato_odb_password=odb_password,
            ))
            secrets_conf.close()

            bytes_to_str_encoding = 'utf8' if PY3 else ''

            simple_io_conf_loc = os.path.join(self.target_dir, 'config/repo/simple-io.conf')
            simple_io_conf = open_w(simple_io_conf_loc)
            _ = simple_io_conf.write(simple_io_conf_contents.format(
                bytes_to_str_encoding=bytes_to_str_encoding
            ))
            simple_io_conf.close()

            if show_output:
                self.logger.debug('Core configuration stored in {}'.format(server_conf_loc))

            # Prepare paths for the demo service ..
            demo_py_fs = get_demo_py_fs_locations(self.target_dir)

            # .. and create it now.
            self._add_demo_service(demo_py_fs.pickup_incoming_full_path, demo_py_fs.pickup_incoming_full_path)
            self._add_demo_service(demo_py_fs.work_dir_full_path, demo_py_fs.pickup_incoming_full_path)

            # Sphinx APISpec files
            for file_path, contents in apispec_files.items(): # type: ignore
                full_path = os.path.join(self.target_dir, 'config/repo/static/sphinxdoc/apispec', file_path)
                dir_name = os.path.dirname(full_path)
                try:
                    os.makedirs(dir_name, 0o770)
                except OSError:
                    # That is fine, the directory must have already been created in one of the previous iterations
                    pass
                finally:
                    api_file = open_w(full_path)
                    _ = api_file.write(contents)
                    api_file.close()

            # Initial info
            self.store_initial_info(self.target_dir, self.COMPONENTS.SERVER.code)

            session.commit()

        except IntegrityError:
            # The server's name must be unique within the cluster
            msg = 'Server name `{}` already exists'.format(args.server_name)
            if self.verbose:
                msg += '. Caught an exception:`{}`'.format(format_exc())
            self.logger.error(msg)
            session.rollback()

            return self.SYS_ERROR.SERVER_NAME_ALREADY_EXISTS

        except Exception:
            self.logger.error('Could not create the server, e:`%s`', format_exc())
            session.rollback()
        else:
            if show_output:
                self.logger.debug('Server added to the ODB')

        if show_output:
            if self.verbose:
                msg = """Successfully created a new server.
You can now start it with the 'zato start {}' command.""".format(self.target_dir)
                self.logger.debug(msg)
            else:
                self.logger.info('OK')

        # This is optional - needed only by quickstart.py and needs to be requested explicitly,
        # otherwise it would be construed as a non-0 return code from this process.
        if return_server_id:
            return server.id # type: ignore
# ################################################################################################################################
# ################################################################################################################################
| 36,460
|
Python
|
.py
| 877
| 35.919042
| 680
| 0.609326
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,463
|
wsx.py
|
zatosource_zato/code/zato-cli/src/zato/cli/wsx.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import sys
# Zato
from zato.cli import ServerAwareCommand
from zato.common.api import DATA_FORMAT, GENERIC, WEB_SOCKET
from zato.common.test import get_free_tcp_port
from zato.common.util.api import fs_safe_now
# ################################################################################################################################
# ################################################################################################################################
if 0:
from argparse import Namespace
Namespace = Namespace
# ################################################################################################################################
# ################################################################################################################################
class Config:
    """ Default values shared by the WSX channel/outconn commands in this module. """

    # Service that new channels invoke unless another one is given on input
    ServiceName = 'helpers.web-sockets-gateway'

    # How many seconds to wait for new tokens from clients
    NewTokenWaitTime = WEB_SOCKET.DEFAULT.NEW_TOKEN_TIMEOUT

    # For how many seconds a token is considered valid
    TokenTTL = WEB_SOCKET.DEFAULT.TOKEN_TTL

    # Once in how many seconds to send and expect ping messages
    PingInterval = WEB_SOCKET.DEFAULT.PING_INTERVAL

    # After how many missed ping messages to consider a WebSocket disconnected
    PingMissedThreshold = WEB_SOCKET.DEFAULT.PINGS_MISSED_THRESHOLD

    # Generic-connection type used when creating outgoing WSX connections
    WSXOutconnType = GENERIC.CONNECTION.TYPE.OUTCONN_WSX
# ################################################################################################################################
# ################################################################################################################################
class CreateChannel(ServerAwareCommand):
    """ Creates a new WebSocket channel.
    """
    opts = [
        {'name':'--name', 'help':'Name of the channel to create', 'required':False,},
        {'name':'--address', 'help':'TCP address for the channel to use', 'required':False},
        {'name':'--is-active', 'help':'Should the channel be active upon creation', 'required':False},
        {'name':'--service', 'help':'Service reacting to requests sent to the channel', 'required':False,
            'default':Config.ServiceName},
        {'name':'--security', 'help':'Name of the security definition assigned to the channel', 'required':False},
        {'name':'--new-token-wait-time', 'help':'How many seconds to wait for new tokens from clients', 'required':False,
            'default':Config.NewTokenWaitTime},
        {'name':'--token-ttl', 'help':'For how many seconds a token is considered valid', 'required':False,
            'default':Config.TokenTTL},
        {'name':'--ping-interval', 'help':'Once in how many seconds to send and expect ping messages', 'required':False,
            'default':Config.PingInterval},
        {'name':'--ping-missed-threshold', 'help':'After how many missed ping messages to consider a WebSocket disconnected',
            'required':False, 'default':Config.PingMissedThreshold},
        {'name':'--extra-properties', 'help':'Extra properties as JSON', 'required':False},
        {'name':'--path', 'help':'Path to a Zato server', 'required':True},
    ]

    def execute(self, args:'Namespace'):
        """ Reads command-line arguments, applies defaults and invokes
        the zato.channel.web-socket.create API service.
        """
        name = getattr(args, 'name', None)
        address = getattr(args, 'address', None)
        service_name = getattr(args, 'service', None)
        security = getattr(args, 'security', None)
        extra_properties = getattr(args, 'extra_properties', None)

        # Numeric options fall back to their Config.* defaults both when absent
        # and when given a falsy value such as 0 or an empty string.
        # (The previous implementation applied these same fallbacks twice in a row.)
        ping_interval = getattr(args, 'ping_interval', None) or Config.PingInterval
        ping_missed_threshold = getattr(args, 'ping_missed_threshold', None) or Config.PingMissedThreshold
        token_ttl = getattr(args, 'token_ttl', None) or Config.TokenTTL
        new_token_wait_time = getattr(args, 'new_token_wait_time', None) or Config.NewTokenWaitTime

        # A channel is active unless explicitly requested otherwise
        is_active = getattr(args, 'is_active', True)
        if is_active is None:
            is_active = True

        # Generate a name if one is not given
        name = name or 'auto.wsx.channel.' + fs_safe_now()

        # If we have no address to listen on, generate one here using a free TCP port
        if not address:
            tcp_port = get_free_tcp_port()
            address = f'ws://127.0.0.1:{tcp_port}/{name}'

        # API service to invoke
        service = 'zato.channel.web-socket.create'

        # API request to send
        request = {
            'name': name,
            'address': address,
            'service_name': service_name,
            'security': security,
            'is_active': is_active,
            'is_internal': False,
            'data_format': DATA_FORMAT.JSON,
            'token_ttl': token_ttl,
            'new_token_wait_time': new_token_wait_time,
            'ping_interval': ping_interval,
            'ping_missed_threshold': ping_missed_threshold,
            'extra_properties': extra_properties
        }

        self._invoke_service_and_log_response(service, request)
# ################################################################################################################################
# ################################################################################################################################
class DeleteChannel(ServerAwareCommand):
    """ Deletes a WebSocket channel.
    """
    opts = [
        {'name':'--id', 'help':'ID of the channel to delete', 'required':False,},
        {'name':'--name', 'help':'Name of the channel to delete', 'required':False,},
        {'name':'--path', 'help':'Path to a Zato server', 'required':True},
    ]

    def execute(self, args:'Namespace'):
        """ Validates input and invokes the zato.channel.web-socket.delete API service.

        Exits the process with SYS_ERROR.INVALID_INPUT if neither --id nor --name was given.
        """
        # 'id' renamed to channel_id so as not to shadow the builtin
        channel_id = getattr(args, 'id', None)
        name = getattr(args, 'name', None)

        # Make sure we have input data to delete the channel by
        if not (channel_id or name):
            # Logger.warn is a deprecated alias of Logger.warning
            self.logger.warning('Cannot continue. To delete a WebSocket channel, either --id or --name is required on input.')
            sys.exit(self.SYS_ERROR.INVALID_INPUT)

        # API service to invoke
        service = 'zato.channel.web-socket.delete'

        # API request to send - should_raise_if_missing=False presumably tells
        # the service not to raise if the channel no longer exists (confirm
        # against the service's implementation).
        request = {
            'id': channel_id,
            'name': name,
            'should_raise_if_missing': False
        }

        self._invoke_service_and_log_response(service, request)
# ################################################################################################################################
# ################################################################################################################################
class CreateOutconn(ServerAwareCommand):
    """ Creates a new outgoing WebSocket connection.
    """
    opts = [
        {'name':'--name', 'help':'Name of the connection to create', 'required':False,},
        {'name':'--address', 'help':'TCP address of a WebSocket server to connect to', 'required':False},
        {'name':'--sub-list', 'help':'A comma-separate list of topics the connection should subscribe to', 'required':False},
        {'name':'--on-connect-service',
            'help':'Service to invoke when the WebSocket connects to a remote server', 'required':False},
        {'name':'--on-message-service',
            'help':'Service to invoke when the WebSocket receives a message from the remote server', 'required':False},
        {'name':'--on-close-service',
            'help':'Service to invoke when the remote server closes its WebSocket connection', 'required':False},
        {'name':'--path', 'help':'Path to a Zato server', 'required':True},
    ]

    def execute(self, args:'Namespace'):
        """ Reads command-line arguments, fills in defaults and invokes
        the zato.generic.connection.create API service.
        """
        # User-configurable input
        conn_name = getattr(args, 'name', None)
        remote_address = getattr(args, 'address', None)
        on_connect_service_name = getattr(args, 'on_connect_service', None)
        on_message_service_name = getattr(args, 'on_message_service', None)
        on_close_service_name = getattr(args, 'on_close_service', None)
        topic_list = getattr(args, 'sub_list', '')

        # These values are fixed for connections created through this command
        is_zato = getattr(args, 'is_zato', True)
        is_active = getattr(args, 'is_active', True)
        has_auto_reconnect = getattr(args, 'has_auto_reconnect', True)

        # Generate a name if one is not given
        if not conn_name:
            conn_name = 'auto.wsx.outconn.' + fs_safe_now()

        # Without an address on input, fall back to the one employed for testing
        if not remote_address:
            remote_address = 'ws://127.0.0.1:47043/zato.wsx.apitests'

        # Turn a comma-separated list into the newline-separated format the service expects
        if topic_list:
            topic_list = '\n'.join(elem.strip() for elem in topic_list.split(','))

        # API service to invoke
        service = 'zato.generic.connection.create'

        # API request to send
        request = {
            'name': conn_name,
            'address': remote_address,
            'is_zato': is_zato,
            'is_active': is_active,
            'has_auto_reconnect': has_auto_reconnect,
            'on_connect_service_name': on_connect_service_name,
            'on_message_service_name': on_message_service_name,
            'on_close_service_name': on_close_service_name,
            'subscription_list': topic_list,
            'pool_size': 1,
            'is_channel': False,
            'is_outconn': True,
            'is_internal': False,
            'sec_use_rbac': False,
            'type_': Config.WSXOutconnType,
        }

        self._invoke_service_and_log_response(service, request)
# ################################################################################################################################
# ################################################################################################################################
class DeleteOutconn(ServerAwareCommand):
    """ Deletes a WebSocket outgoing connection.
    """
    opts = [
        {'name':'--id', 'help':'ID of the outgoing connection to delete', 'required':False,},
        {'name':'--name', 'help':'Name of the outgoing connection to delete', 'required':False,},
        {'name':'--path', 'help':'Path to a Zato server', 'required':True},
    ]

    def execute(self, args:'Namespace'):
        """ Validates input and invokes the zato.generic.connection.delete API service.

        Exits the process with SYS_ERROR.INVALID_INPUT if neither --id nor --name was given.
        """
        # 'id' renamed to outconn_id so as not to shadow the builtin
        outconn_id = getattr(args, 'id', None)
        name = getattr(args, 'name', None)

        # Make sure we have input data to delete the outgoing connection by
        if not (outconn_id or name):
            msg = 'Cannot continue. To delete a WebSocket outgoing connection, either --id or --name is required on input.'
            # Logger.warn is a deprecated alias of Logger.warning
            self.logger.warning(msg)
            sys.exit(self.SYS_ERROR.INVALID_INPUT)

        # API service to invoke
        service = 'zato.generic.connection.delete'

        # API request to send - should_raise_if_missing=False presumably tells
        # the service not to raise if the connection no longer exists (confirm
        # against the service's implementation).
        request = {
            'id': outconn_id,
            'name': name,
            'should_raise_if_missing': False
        }

        self._invoke_service_and_log_response(service, request)
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':

    # stdlib
    from argparse import Namespace
    from os import environ

    # Build the same arguments the command line would otherwise provide -
    # Namespace accepts keyword arguments directly.
    args = Namespace(
        verbose = True,
        store_log = False,
        store_config = False,
        service = Config.ServiceName,
        sub_list = 'zato.ping, zato.ping2',
        path = environ['ZATO_SERVER_BASE_DIR'],
    )

    # Create a new channel using the arguments above
    command = CreateChannel(args)
    command.run(args)
# ################################################################################################################################
# ################################################################################################################################
| 12,129
|
Python
|
.py
| 221
| 46.674208
| 130
| 0.516491
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,464
|
_apispec_default.py
|
zatosource_zato/code/zato-cli/src/zato/cli/_apispec_default.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# All APISpec-related files that will be created for each server.
# Maps a relative output path to that file's contents.
apispec_files = {}
# Custom CSS styles
apispec_files['_static/custom.css'] = """
* {
font-size:0.99em;
}
body, html {
background-color:#111 !important;
}
code {
font-size:1.2em
}
div.body {
max-width:900px !important
}
html {
position: relative !important;
min-height: 100% !important;
}
h1 {
color:#67beff !important;
background-color:#282f33 !important;
border-bottom:2px solid #57aeff !important;
font-size: 30px !important;
padding:12px !important;
text-shadow: 3px 3px 1px #111;
}
h2 {
color:#eee !important;
background-color:#332f2f !important;
border-bottom:none !important;
text-shadow: 3px 3px 1px #222;
}
span.doc{
color:#eee !important;
}
a.reference {
text-decoration:none;
padding:3px !important;
margin:9px !important;
margin-left:0px !important;
color: #red !important;
}
a.reference:hover {
background-color:#48751d;
}
a.headerlink {
color: red !important;
}
h4, p.topless a, .nav-item, .nav-item a, li.right a {
color:#e6e6e6 !important;
text-shadow: 1px 1px 1px #222;
}
table.align-default {
width:100% !important;
margin:none !important;
}
table.docutils td {
padding-top:2px;
padding-bottom:2px;
padding-left:0px;
padding-right:0px;
border-bottom:1px solid #f3f3e3;
}
table.docutils th.head {
background-color:#eec;
text-align:left;
padding-left:0px;
}
div.documentwrapper {
min-height: 300px;
background-color:#222;
}
div.related {
background-color:#000;
border-top:1px solid #111;
border-bottom:1px solid #111;
}
.footer {
position: absolute !important;
height: 18px !important;
width: 100% !important;
overflow: hidden !important;
padding-top:8px !important;
padding-bottom:18px !important;
background-color:#111;
}
table.docutils thead tr th {
background-color:#332f2f !important;
border-bottom:1px solid #3a3a3f !important;
padding-top:7px;
padding-bottom:7px;
font-weight: 500 !important;
}
table.docutils thead tr th:first-child {
padding-left: 3px !important;
}
table.docutils tbody tr td {
vertical-align:middle;
border-bottom:2px dotted #333 !important;
}
table.docutils tbody tr td:first-child {
padding:2px;
padding-right:7px;
padding-left: 7px !important;
color: #97eeff !important;
text-shadow: 2px 2px 1px #000;
}
li table.docutils tbody tr td:first-child {
color: #fff !important;
}
div.body {
background-color: #232221;
color:#fff;
}
#services table.docutils thead tr th:first-child {
color:transparent;
}
#services table.docutils tbody tr td:first-child {
font-size:12px;
text-align: center;
color:#331;
}
table.docutils tbody tr td {
vertical-align:top;
}
div.last_updated {
font-size:smaller;
color:#eee;
padding-top:10px;
text-shadow: 2px 2px 1px #222;
}
span.zato-tag-name-highlight {
color: white;
background-color: #885606;
padding: 8px;
font-weight: 600;
border-radius: 8px;
text-shadow: 2px 1px 1px #333;
}
""".lstrip()
# Custom HTML layout
apispec_files['_templates/layout.html'] = """
{#
basic/layout.html
~~~~~~~~~~~~~~~~~
Master layout template for Sphinx themes.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
#}
{%- block doctype -%}{%- if html5_doctype %}
<!DOCTYPE html>
{%- else %}
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
{%- endif %}{%- endblock %}
{%- set reldelim1 = reldelim1 is not defined and ' »' or reldelim1 %}
{%- set reldelim2 = reldelim2 is not defined and ' |' or reldelim2 %}
{%- set render_sidebar = (not embedded) and (not theme_nosidebar|tobool) and
(sidebars != []) %}
{%- set url_root = pathto('', 1) %}
{# XXX necessary? #}
{%- if url_root == '#' %}{% set url_root = '' %}{% endif %}
{%- if not embedded and docstitle %}
{%- set titlesuffix = " — "|safe + docstitle|e %}
{%- else %}
{%- set titlesuffix = "" %}
{%- endif %}
{%- macro relbar() %}
<div class="related" role="navigation" aria-label="related navigation">
<h3>{{ _('Navigation') }}</h3>
<ul>
{%- for rellink in rellinks %}
<li class="right" {% if loop.first %}style="margin-right: 10px"{% endif %}>
<a href="{{ pathto(rellink[0]) }}" title="{{ rellink[1]|striptags|e }}"
{{ accesskey(rellink[2]) }}>{{ rellink[3] }}</a>
{%- if not loop.first %}{{ reldelim2 }}{% endif %}</li>
{%- endfor %}
{%- block rootrellink %}
<li class="nav-item nav-item-0"><a href="{{ pathto(master_doc) }}">{{ shorttitle|e }}</a>{{ reldelim1 }}</li>
{%- endblock %}
{%- for parent in parents %}
<li class="nav-item nav-item-{{ loop.index }}"><a href="{{ parent.link|e }}" {% if loop.last %}{{ accesskey("U") }}{% endif %}>{{ parent.title }}</a>{{ reldelim1 }}</li>
{%- endfor %}
{%- block relbaritems %} {% endblock %}
</ul>
</div>
{%- endmacro %}
{%- macro sidebar() %}
{%- if render_sidebar %}
<div class="sphinxsidebar" role="navigation" aria-label="main navigation">
<div class="sphinxsidebarwrapper">
{%- block sidebarlogo %}
{%- if logo %}
<p class="logo"><a href="{{ pathto(master_doc) }}">
<img class="logo" src="{{ pathto('_static/' + logo, 1) }}" alt="Logo"/>
</a></p>
{%- endif %}
{%- endblock %}
{%- if sidebars != None %}
{#- new style sidebar: explicitly include/exclude templates #}
{%- for sidebartemplate in sidebars %}
{%- include sidebartemplate %}
{%- endfor %}
{%- else %}
{#- old style sidebars: using blocks -- should be deprecated #}
{%- block sidebartoc %}
{%- include "localtoc.html" %}
{%- endblock %}
{%- block sidebarrel %}
{%- include "relations.html" %}
{%- endblock %}
{%- block sidebarsourcelink %}
{%- include "sourcelink.html" %}
{%- endblock %}
{%- if customsidebar %}
{%- include customsidebar %}
{%- endif %}
{%- block sidebarsearch %}
{%- include "searchbox.html" %}
{%- endblock %}
{%- endif %}
</div>
</div>
{%- endif %}
{%- endmacro %}
{%- macro script() %}
<script type="text/javascript" id="documentation_options" data-url_root="{{ pathto('', 1) }}" src="{{ pathto('_static/documentation_options.js', 1) }}"></script>
{%- for scriptfile in script_files %}
<script type="text/javascript" src="{{ pathto(scriptfile, 1) }}"></script>
{%- endfor %}
{%- endmacro %}
{%- macro css() %}
<link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css" />
<link rel="stylesheet" href="{{ pathto('_static/pygments.css', 1) }}" type="text/css" />
{%- for css in css_files %}
{%- if css|attr("rel") %}
<link rel="{{ css.rel }}" href="{{ pathto(css.filename, 1) }}" type="text/css"{% if css.title is not none %} title="{{ css.title }}"{% endif %} />
{%- else %}
<link rel="stylesheet" href="{{ pathto(css, 1) }}" type="text/css" />
{%- endif %}
{%- endfor %}
{%- endmacro %}
{%- if html_tag %}
{{ html_tag }}
{%- else %}
<html xmlns="http://www.w3.org/1999/xhtml"{% if language is not none %} lang="{{ language }}"{% endif %}>
{%- endif %}
<head>
{%- if not html5_doctype and not skip_ua_compatible %}
<meta http-equiv="X-UA-Compatible" content="IE=Edge" />
{%- endif %}
{%- if use_meta_charset or html5_doctype %}
<meta charset="{{ encoding }}" />
{%- else %}
<meta http-equiv="Content-Type" content="text/html; charset={{ encoding }}" />
{%- endif %}
{{- metatags }}
{%- block htmltitle %}
<title>{{ title|striptags|e }}{{ titlesuffix }}</title>
{%- endblock %}
{%- block css %}
{{- css() }}
{%- endblock %}
{%- if not embedded %}
{%- block scripts %}
{{- script() }}
{%- endblock %}
{%- if use_opensearch %}
<link rel="search" type="application/opensearchdescription+xml"
title="{% trans docstitle=docstitle|e %}Search within {{ docstitle }}{% endtrans %}"
href="{{ pathto('_static/opensearch.xml', 1) }}"/>
{%- endif %}
{%- if favicon %}
<link rel="shortcut icon" href="{{ pathto('_static/' + favicon, 1) }}"/>
{%- endif %}
{%- endif %}
{%- block linktags %}
{%- if hasdoc('about') %}
<link rel="author" title="{{ _('About these documents') }}" href="{{ pathto('about') }}" />
{%- endif %}
{%- if hasdoc('genindex') %}
<link rel="index" title="{{ _('Index') }}" href="{{ pathto('genindex') }}" />
{%- endif %}
{%- if hasdoc('search') %}
<link rel="search" title="{{ _('Search') }}" href="{{ pathto('search') }}" />
{%- endif %}
{%- if hasdoc('copyright') %}
<link rel="copyright" title="{{ _('Copyright') }}" href="{{ pathto('copyright') }}" />
{%- endif %}
{%- if next %}
<link rel="next" title="{{ next.title|striptags|e }}" href="{{ next.link|e }}" />
{%- endif %}
{%- if prev %}
<link rel="prev" title="{{ prev.title|striptags|e }}" href="{{ prev.link|e }}" />
{%- endif %}
{%- endblock %}
{%- block extrahead %} {% endblock %}
</head>
{%- block body_tag %}<body>{% endblock %}
{%- block header %}{% endblock %}
{%- block relbar1 %}{{ relbar() }}{% endblock %}
{%- block content %}
{%- block sidebar1 %} {# possible location for sidebar #} {% endblock %}
<div class="document">
{%- block document %}
<div class="documentwrapper">
{%- if render_sidebar %}
<div class="bodywrapper">
{%- endif %}
<div class="body" role="main">
{% block body %} {% endblock %}
</div>
{%- if render_sidebar %}
</div>
{%- endif %}
</div>
{%- endblock %}
{%- block sidebar2 %}{{ sidebar() }}{% endblock %}
<div class="clearer"></div>
</div>
{%- endblock %}
{%- block relbar2 %}{{ relbar() }}{% endblock %}
{%- block footer %}
<div class="footer" role="contentinfo">
<div style="padding-bottom:1px">
Generated by <a href="https://zato.io?apidocs">Zato</a>
</div>
<div>
ESB, APIs, AI and Cloud Integrations in Python
</div>
</div>
{%- endblock %}
</body>
</html>
""".lstrip() # noqa: E501
# Custom HTML sidebar - adds a download link for the OpenAPI specification
# and a last-updated stamp ({{ last_updated }} is rendered by Sphinx).
apispec_files['_templates/zato_sidebar.html'] = """
<h4>Downloads</h4>
<p class="topless">
<a href="_downloads/openapi.yaml" title="Download OpenAPI specification">OpenAPI</a>
</p>
<div class="last_updated">
Last update: {{ last_updated }}
</div>
"""
# Default download files - empty placeholders, presumably filled in later
# by the spec generator (verify against the apispec command's implementation)
apispec_files['download/api.raml'] = '' # RAML
apispec_files['download/api.wsdl'] = '' # WSDL
apispec_files['download/api.yaml'] = '' # OpenAPI
# Make for Linux
apispec_files['Makefile'] = """
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SPHINXPROJ = APISpec
SOURCEDIR = .
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
mkdir -p ./$(BUILDDIR)/html/_downloads
cp -p ./download/* ./$(BUILDDIR)/html/_downloads
""".lstrip().replace(' ', '\t')
# Make for Windows
apispec_files['make.bat'] = """
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build
set SPHINXPROJ=APISpec
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
:end
popd
""".lstrip()
# Main Sphinx documentation
apispec_files['conf.py'] = """
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'API documentation'
copyright = u''
author = u'Zato Source s.r.o. (https://zato.io)'
rst_epilog = '.. |index_title| replace:: {}'.format(project)
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.ifconfig',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
html_title = project
html_last_updated_fmt = '%b %d, %Y'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
html_sidebars = {'**':['relations.html', 'zato_sidebar.html']}
html_use_index = False
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'APIdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'api.tex', u'API documentation',
u'Zato', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'apispec', u'API documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'API', u'API documentation',
author, 'API', 'API documentation.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
def setup(app):
app.add_css_file('custom.css')
""".lstrip()
# Main file
# NOTE(review): reST directive options (:hidden:, :titlesonly:, :glob:) must be
# indented under '.. toctree::' - the indentation appears to have been lost in
# this copy; verify the template against the original before shipping.
apispec_files['index.rst'] = """
|index_title|
=================
.. toctree::
:hidden:
:titlesonly:
:glob:
./*
.. include:: ./services.rst
""".lstrip()
| 18,381
|
Python
|
.py
| 549
| 29.579235
| 179
| 0.616857
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,465
|
zato_command.py
|
zatosource_zato/code/zato-cli/src/zato/cli/zato_command.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# ConcurrentLogHandler - updates stdlib's logging config on import so this needs to stay
try:
import cloghandler # type: ignore
except ImportError:
pass
else:
cloghandler = cloghandler # For pyflakes
# Zato
from zato.common.typing_ import cast_
# ################################################################################################################################
# ################################################################################################################################
if 0:
from argparse import ArgumentParser
from zato.common.typing_ import any_, callnone, dictlist, strlist, tuple_
# ################################################################################################################################
# ################################################################################################################################
class CommandStore:
def add_opts(self, parser:'ArgumentParser', opts:'dictlist') -> 'None':
    """ Adds parser-specific options.
    """
    # Only this fixed set of argparse keywords is ever forwarded -
    # anything else found in an option's definition is ignored.
    forwarded = ('help', 'action', 'default', 'choices', 'type')

    for opt in opts:
        keyword_args = {key: opt[key] for key in forwarded if key in opt}
        parser.add_argument(opt['name'], **keyword_args)
# ################################################################################################################################
def build_core_parser(self) -> 'tuple_':
    """ Builds the base argparse objects shared by all zato commands -
    returns (main parser, common-options parser, subparsers, help formatter).
    """
    # stdlib
    import argparse

    # Options common to every command - inherited by subcommand parsers via parents=[]
    common = argparse.ArgumentParser(add_help=False)
    common.add_argument('--store-log', help='Whether to store an execution log', action='store_true')
    common.add_argument('--verbose', help='Show verbose output', action='store_true')
    common.add_argument(
        '--store-config',
        help='Whether to store config options in a file for a later use', action='store_true')

    # The top-level 'zato' parser with one subcommand per CLI command
    main_parser = argparse.ArgumentParser(prog='zato')
    sub_commands = main_parser.add_subparsers()

    return main_parser, common, sub_commands, argparse.RawDescriptionHelpFormatter
# ################################################################################################################################
def load_start_parser(
    self,
    parser=None, # type: ArgumentParser | None
    base_parser=None, # type: ArgumentParser | None
    subs=None, # type: any_
    formatter_class=None # type: callnone
) -> 'ArgumentParser':
    """ Attaches the 'start' subcommand, building the core parsers first
    unless the caller supplied them.
    """
    # Zato
    from zato.cli import start as start_mod

    # Build the core parsers if they were not given on input
    if not parser:
        parser, base_parser, subs, formatter_class = self.build_core_parser()

    #
    # start
    #
    start_cmd = subs.add_parser(
        'start', description=start_mod.Start.__doc__, parents=[base_parser], formatter_class=formatter_class)
    start_cmd.add_argument('path', help='Path to the Zato component to be started')
    start_cmd.set_defaults(command='start')
    self.add_opts(start_cmd, start_mod.Start.opts)

    return cast_('ArgumentParser', parser)

# Alias kept for callers that use the older name
add_start_server_parser = load_start_parser
# ################################################################################################################################
def load_version_parser(self) -> 'ArgumentParser':
    """ Builds a parser that understands only the --version flag. """
    core_parser = self.build_core_parser()[0]
    self._add_version(core_parser)
    return core_parser
# ################################################################################################################################
def _add_version(self, parser:'ArgumentParser') -> 'None':
    """ Attaches the standard --version flag to the given parser. """

    # Zato
    from zato.common.version import get_version

    version_info = get_version()
    parser.add_argument('--version', action='version', version=version_info)
# ################################################################################################################################
def load_full_parser(self) -> 'ArgumentParser':
    """ Builds and returns the complete argument parser, covering every zato CLI command.
    Imports of the individual command modules are deferred to this method so that
    the common case of 'zato start' does not pay their import cost.
    """

    # Zato
    from zato.cli import \
        apispec as apispec_mod, \
        ca_create_ca as ca_create_ca_mod, \
        ca_create_lb_agent as ca_create_lb_agent_mod, \
        ca_create_scheduler as ca_create_scheduler_mod, \
        ca_create_server as ca_create_server_mod, \
        ca_create_web_admin as ca_create_web_admin_mod, \
        cache as cache_mod, \
        check_config as check_config_mod, \
        component_version as component_version_mod, \
        create_cluster as create_cluster_mod, \
        create_lb as create_lb_mod, \
        create_odb as create_odb_mod, \
        create_scheduler as create_scheduler_mod, \
        create_server as create_server_mod, \
        create_web_admin as create_web_admin_mod, \
        crypto as crypto_mod, \
        delete_odb as delete_odb_mod, \
        enmasse as enmasse_mod, \
        FromConfig, \
        hl7_ as hl7_mod, \
        ide as ide_mod, \
        info as info_mod, \
        openapi_ as openapi_mod, \
        quickstart as quickstart_mod, \
        service as service_mod, \
        sso as sso_mod, \
        stop as stop_mod, \
        wait as wait_mod, \
        web_admin_auth as web_admin_auth_mod, \
        wsx as wsx_mod # noqa: E272

    # Zato - Pub/sub
    from zato.cli.pubsub import \
        cleanup as pubsub_cleanup_mod, \
        endpoint as pubsub_endpoint_mod, \
        topic as pubsub_topic_mod # noqa: E272

    # Zato - REST
    from zato.cli.rest import \
        channel as rest_channel_mod # noqa: E272

    # Zato - Security
    from zato.cli.security import \
        api_key as sec_api_key_mod, \
        basic_auth as sec_basic_auth_mod # noqa: E272

    parser, base_parser, subs, formatter_class = self.build_core_parser()
    self._add_version(parser)

    #
    # apispec
    #
    apispec = subs.add_parser(
        'apispec',
        description='API specifications generator',
        parents=[base_parser])
    apispec.set_defaults(command='apispec')
    apispec.add_argument('path', help='Path to a Zato server')
    self.add_opts(apispec, apispec_mod.APISpec.opts)

    #
    # ca
    #
    ca = subs.add_parser('ca', description='Basic certificate authority (CA) management')
    ca_subs = ca.add_subparsers()
    ca_create = ca_subs.add_parser('create', description='Creates crypto material for Zato components')
    ca_create_subs = ca_create.add_subparsers()

    ca_create_ca = ca_create_subs.add_parser('ca', description=ca_create_ca_mod.Create.__doc__, parents=[base_parser])
    ca_create_ca.set_defaults(command='ca_create_ca')
    ca_create_ca.add_argument('path', help='Path to an empty directory to hold the CA')
    self.add_opts(ca_create_ca, ca_create_ca_mod.Create.opts)

    ca_create_lb_agent = ca_create_subs.add_parser(
        'lb_agent', description=ca_create_lb_agent_mod.Create.__doc__, parents=[base_parser])
    ca_create_lb_agent.set_defaults(command='ca_create_lb_agent')
    ca_create_lb_agent.add_argument('path', help='Path to a CA directory')
    self.add_opts(ca_create_lb_agent, ca_create_lb_agent_mod.Create.opts)

    ca_create_scheduler = ca_create_subs.add_parser(
        'scheduler', description=ca_create_scheduler_mod.Create.__doc__, parents=[base_parser])
    ca_create_scheduler.set_defaults(command='ca_create_scheduler')
    ca_create_scheduler.add_argument('path', help='Path to a CA directory')
    self.add_opts(ca_create_scheduler, ca_create_scheduler_mod.Create.opts)

    ca_create_server = ca_create_subs.add_parser(
        'server', description=ca_create_server_mod.Create.__doc__, parents=[base_parser])
    ca_create_server.set_defaults(command='ca_create_server')
    ca_create_server.add_argument('path', help='Path to a CA directory')
    self.add_opts(ca_create_server, ca_create_server_mod.Create.opts)

    ca_create_web_admin = ca_create_subs.add_parser(
        'web_admin', description=ca_create_web_admin_mod.Create.__doc__, parents=[base_parser])
    ca_create_web_admin.set_defaults(command='ca_create_web_admin')
    ca_create_web_admin.add_argument('path', help='Path to a CA directory')
    self.add_opts(ca_create_web_admin, ca_create_web_admin_mod.Create.opts)

    #
    # cache
    #
    cache = subs.add_parser(
        'cache', description='Cache keys - get, set, delete or expire keys and more', parents=[base_parser])
    cache_subs = cache.add_subparsers()

    cache_get = cache_subs.add_parser('get', description=cache_mod.CacheGet.__doc__, parents=[base_parser])
    cache_get.set_defaults(command='cache_get')
    self.add_opts(cache_get, cache_mod.CacheGet.opts)

    cache_set = cache_subs.add_parser('set', description=cache_mod.CacheSet.__doc__, parents=[base_parser])
    cache_set.set_defaults(command='cache_set')
    self.add_opts(cache_set, cache_mod.CacheSet.opts)

    cache_delete = cache_subs.add_parser('delete', description=cache_mod.CacheDelete.__doc__, parents=[base_parser])
    cache_delete.set_defaults(command='cache_delete')
    self.add_opts(cache_delete, cache_mod.CacheDelete.opts)

    #
    # change-password
    #
    change_password = subs.add_parser(
        'change-password',
        description="Changes a security definition's password",
        parents=[base_parser])
    change_password.set_defaults(command='change_password')
    self.add_opts(change_password, sec_basic_auth_mod.ChangePassword.opts)

    #
    # check-config
    #
    check_config = subs.add_parser(
        'check-config',
        description='Checks config of a Zato component (currently limited to servers only)',
        parents=[base_parser])
    check_config.set_defaults(command='check_config')
    check_config.add_argument('path', help='Path to a Zato component')
    self.add_opts(check_config, check_config_mod.CheckConfig.opts)

    #
    # component-version
    #
    component_version = subs.add_parser(
        'component-version',
        description='Shows the version of a Zato component installed in a given directory',
        parents=[base_parser])
    component_version.set_defaults(command='component_version')
    component_version.add_argument('path', help='Path to a Zato component')
    self.add_opts(component_version, component_version_mod.ComponentVersion.opts)

    #
    # create
    #
    create = subs.add_parser('create', description='Creates new Zato components')
    create_subs = create.add_subparsers()

    create_api_key = create_subs.add_parser(
        'api-key', description=sec_api_key_mod.CreateDefinition.__doc__, parents=[base_parser])
    create_api_key.set_defaults(command='create_api_key')
    self.add_opts(create_api_key, sec_api_key_mod.CreateDefinition.opts)

    create_basic_auth = create_subs.add_parser(
        'basic-auth', description=sec_basic_auth_mod.CreateDefinition.__doc__, parents=[base_parser])
    create_basic_auth.set_defaults(command='create_basic_auth')
    self.add_opts(create_basic_auth, sec_basic_auth_mod.CreateDefinition.opts)

    create_cluster = create_subs.add_parser(
        'cluster', description=create_cluster_mod.Create.__doc__, parents=[base_parser])
    create_cluster.set_defaults(command='create_cluster')
    self.add_opts(create_cluster, create_cluster_mod.Create.opts)

    create_lb = create_subs.add_parser('load-balancer', description=create_lb_mod.Create.__doc__, parents=[base_parser])
    create_lb.add_argument('path', help='Path to an empty directory to install the load-balancer in')
    create_lb.set_defaults(command='create_lb')
    self.add_opts(create_lb, create_lb_mod.Create.opts)

    create_odb = create_subs.add_parser('odb', description=create_odb_mod.Create.__doc__, parents=[base_parser])
    create_odb.set_defaults(command='create_odb')
    self.add_opts(create_odb, create_odb_mod.Create.opts)

    create_scheduler = create_subs.add_parser(
        'scheduler', description=create_scheduler_mod.Create.__doc__, parents=[base_parser])
    create_scheduler.add_argument('path', help='Path to an empty directory to install the scheduler in')
    create_scheduler.set_defaults(command='create_scheduler')
    self.add_opts(create_scheduler, create_scheduler_mod.Create.opts)

    create_key = create_subs.add_parser('secret-key', description=crypto_mod.CreateSecretKey.__doc__, parents=[base_parser])
    create_key.set_defaults(command='create_secret_key')
    self.add_opts(create_key, crypto_mod.CreateSecretKey.opts)

    create_server = create_subs.add_parser('server', description=create_server_mod.Create.__doc__, parents=[base_parser])
    create_server.add_argument('path', help='Path to an empty directory to install the server in')
    create_server.set_defaults(command='create_server')
    self.add_opts(create_server, create_server_mod.Create.opts)

    create_user = create_subs.add_parser('user', description=web_admin_auth_mod.CreateUser.__doc__, parents=[base_parser])
    create_user.add_argument('path', help='Path to a web-admin instance')
    create_user.set_defaults(command='create_user')
    self.add_opts(create_user, web_admin_auth_mod.CreateUser.opts)

    create_web_admin = create_subs.add_parser(
        'web-admin', description=create_web_admin_mod.Create.__doc__, parents=[base_parser])
    create_web_admin.add_argument('path', help='Path to an empty directory to install a new web admin in')
    create_web_admin.set_defaults(command='create_web_admin')
    self.add_opts(create_web_admin, create_web_admin_mod.Create.opts)

    #
    # create-rest-channel
    #
    create_rest_channel = subs.add_parser('create-rest-channel',
        description=rest_channel_mod.CreateChannel.__doc__, parents=[base_parser])
    create_rest_channel.set_defaults(command='create_rest_channel')
    self.add_opts(create_rest_channel, rest_channel_mod.CreateChannel.opts)

    #
    # create-wsx-channel
    #
    create_wsx_channel = subs.add_parser('create-wsx-channel',
        description=wsx_mod.CreateChannel.__doc__, parents=[base_parser])
    create_wsx_channel.set_defaults(command='create_wsx_channel')
    self.add_opts(create_wsx_channel, wsx_mod.CreateChannel.opts)

    #
    # create-wsx-outconn
    #
    create_wsx_outconn = subs.add_parser('create-wsx-outconn',
        description=wsx_mod.CreateOutconn.__doc__, parents=[base_parser])
    create_wsx_outconn.set_defaults(command='create_wsx_outconn')
    self.add_opts(create_wsx_outconn, wsx_mod.CreateOutconn.opts)

    #
    # crypto
    #
    crypto = subs.add_parser('crypto', description='Cryptographic operations')
    crypto_subs = crypto.add_subparsers()

    crypto_create_secret_key = crypto_subs.add_parser('create-secret-key',
        description=crypto_mod.CreateSecretKey.__doc__, parents=[base_parser])
    crypto_create_secret_key.set_defaults(command='crypto_create_secret_key')
    self.add_opts(crypto_create_secret_key, crypto_mod.CreateSecretKey.opts)

    #
    # decrypt
    #
    decrypt = subs.add_parser('decrypt', description=crypto_mod.Decrypt.__doc__, parents=[base_parser])
    decrypt.set_defaults(command='decrypt')
    self.add_opts(decrypt, crypto_mod.Decrypt.opts)

    #
    # delete
    #
    delete = subs.add_parser('delete', description=delete_odb_mod.Delete.__doc__)
    delete_subs = delete.add_subparsers()

    delete_api_key = delete_subs.add_parser('api-key', description='Deletes an API key definition',
        parents=[base_parser])
    delete_api_key.set_defaults(command='delete_api_key')
    # NOTE(review): these opts come from the basic_auth module although the command
    # deletes an API key - looks like a copy/paste; confirm whether
    # sec_api_key_mod.DeleteDefinition.opts should be used instead.
    self.add_opts(delete_api_key, sec_basic_auth_mod.DeleteDefinition.opts)

    delete_basic_auth = delete_subs.add_parser('basic-auth', description='Deletes a Basic Auth definition',
        parents=[base_parser])
    delete_basic_auth.set_defaults(command='delete_basic_auth')
    self.add_opts(delete_basic_auth, sec_basic_auth_mod.DeleteDefinition.opts)

    delete_odb = delete_subs.add_parser('odb', description='Deletes a Zato ODB', parents=[base_parser])
    delete_odb.set_defaults(command='delete_odb')
    self.add_opts(delete_odb, delete_odb_mod.Delete.opts)

    #
    # delete-rest-channel
    #
    delete_rest_channel = subs.add_parser('delete-rest-channel',
        description=rest_channel_mod.DeleteChannel.__doc__, parents=[base_parser])
    delete_rest_channel.set_defaults(command='delete_rest_channel')
    self.add_opts(delete_rest_channel, rest_channel_mod.DeleteChannel.opts)

    #
    # delete-wsx-channel
    #
    delete_wsx_channel = subs.add_parser('delete-wsx-channel',
        description=wsx_mod.DeleteChannel.__doc__, parents=[base_parser])
    delete_wsx_channel.set_defaults(command='delete_wsx_channel')
    self.add_opts(delete_wsx_channel, wsx_mod.DeleteChannel.opts)

    #
    # delete-wsx-outconn
    #
    delete_wsx_outconn = subs.add_parser('delete-wsx-outconn',
        description=wsx_mod.DeleteOutconn.__doc__, parents=[base_parser])
    delete_wsx_outconn.set_defaults(command='delete_wsx_outconn')
    # Fixed: this previously registered wsx_mod.CreateOutconn.opts, which belongs
    # to the create-wsx-outconn command above.
    self.add_opts(delete_wsx_outconn, wsx_mod.DeleteOutconn.opts)

    #
    # encrypt
    #
    encrypt = subs.add_parser('encrypt', description=crypto_mod.Encrypt.__doc__, parents=[base_parser])
    encrypt.set_defaults(command='encrypt')
    self.add_opts(encrypt, crypto_mod.Encrypt.opts)

    #
    # enmasse
    #
    enmasse = subs.add_parser('enmasse', description=enmasse_mod.Enmasse.__doc__, parents=[base_parser])
    enmasse.add_argument('path', help='Path to a running Zato server')
    enmasse.set_defaults(command='enmasse')
    self.add_opts(enmasse, enmasse_mod.Enmasse.opts)

    #
    # hash
    #
    # NOTE(review): the description below is the same as the 'update' command's one -
    # it looks copy-pasted; confirm the intended wording for the hash command.
    hash_command = subs.add_parser('hash', description='Updates Zato components and users')
    hash_subs = hash_command.add_subparsers()

    #
    # from-config-file
    #
    from_config = subs.add_parser('from-config', description=FromConfig.__doc__, parents=[base_parser])
    from_config.add_argument('path', help='Path to a Zato command config file')
    from_config.set_defaults(command='from_config')

    # .. hash info
    hash_get_rounds = hash_subs.add_parser('get-rounds', description=crypto_mod.GetHashRounds.__doc__, parents=[base_parser])
    hash_get_rounds.set_defaults(command='hash_get_rounds')
    self.add_opts(hash_get_rounds, crypto_mod.GetHashRounds.opts)

    #
    # hl7
    #
    hl7 = subs.add_parser('hl7', description='HL7-related commands')
    hl7_subs = hl7.add_subparsers()

    hl7_mllp_send = hl7_subs.add_parser('mllp-send', description=hl7_mod.MLLPSend.__doc__, parents=[base_parser])
    hl7_mllp_send.set_defaults(command='hl7_mllp_send')
    self.add_opts(hl7_mllp_send, hl7_mod.MLLPSend.opts)

    #
    # IDE
    #
    ide = subs.add_parser('set-ide-password', description=ide_mod.SetIDEPassword.__doc__, parents=[base_parser])
    ide.add_argument('path', help='Path to a Zato server')
    ide.set_defaults(command='set_ide_password')
    self.add_opts(ide, ide_mod.SetIDEPassword.opts)

    #
    # info
    #
    info = subs.add_parser('info', description=info_mod.Info.__doc__, parents=[base_parser])
    info.add_argument('path', help='Path to a Zato component')
    info.set_defaults(command='info')
    self.add_opts(info, info_mod.Info.opts)

    #
    # openapi
    #
    openapi = subs.add_parser(
        'openapi',
        description='OpenAPI specification generator',
        parents=[base_parser])
    openapi.set_defaults(command='openapi')
    openapi.add_argument('path', help='Path to a Zato server')
    self.add_opts(openapi, openapi_mod.OpenAPI.opts)

    #
    # pubsub
    #
    pubsub = subs.add_parser('pubsub', description='Publish/subscribe topics and message queues')
    pubsub_subs = pubsub.add_subparsers()

    #
    # pubsub cleanup
    #
    pubsub_cleanup = pubsub_subs.add_parser('cleanup',
        description=pubsub_cleanup_mod.Cleanup.__doc__, parents=[base_parser])
    pubsub_cleanup.set_defaults(command='pubsub_cleanup')
    self.add_opts(pubsub_cleanup, pubsub_cleanup_mod.Cleanup.opts)

    #
    # pubsub create-endpoint
    #
    pubsub_create_endpoint = pubsub_subs.add_parser('create-endpoint',
        description=pubsub_endpoint_mod.CreateEndpoint.__doc__, parents=[base_parser])
    pubsub_create_endpoint.set_defaults(command='pubsub_create_endpoint')
    self.add_opts(pubsub_create_endpoint, pubsub_endpoint_mod.CreateEndpoint.opts)

    #
    # pubsub create-topic
    #
    pubsub_create_topic = pubsub_subs.add_parser('create-topic',
        description=pubsub_topic_mod.CreateTopic.__doc__, parents=[base_parser])
    pubsub_create_topic.set_defaults(command='pubsub_create_topic')
    self.add_opts(pubsub_create_topic, pubsub_topic_mod.CreateTopic.opts)

    #
    # pubsub create-test-topics
    #
    pubsub_create_test_topics = pubsub_subs.add_parser('create-test-topics',
        description=pubsub_topic_mod.CreateTestTopics.__doc__, parents=[base_parser])
    pubsub_create_test_topics.set_defaults(command='pubsub_create_test_topics')
    self.add_opts(pubsub_create_test_topics, pubsub_topic_mod.CreateTestTopics.opts)

    #
    # pubsub delete-endpoint
    #
    pubsub_delete_endpoint = pubsub_subs.add_parser('delete-endpoint',
        description=pubsub_endpoint_mod.DeleteEndpoint.__doc__, parents=[base_parser])
    pubsub_delete_endpoint.set_defaults(command='pubsub_delete_endpoint')
    self.add_opts(pubsub_delete_endpoint, pubsub_endpoint_mod.DeleteEndpoint.opts)

    #
    # pubsub delete-topic (an alias for delete-topics)
    #
    pubsub_delete_topic = pubsub_subs.add_parser('delete-topic',
        description=pubsub_topic_mod.DeleteTopics.__doc__, parents=[base_parser])
    pubsub_delete_topic.set_defaults(command='pubsub_delete_topic')
    self.add_opts(pubsub_delete_topic, pubsub_topic_mod.DeleteTopics.opts)

    #
    # pubsub delete-topics
    #
    pubsub_delete_topics = pubsub_subs.add_parser('delete-topics',
        description=pubsub_topic_mod.DeleteTopics.__doc__, parents=[base_parser])
    pubsub_delete_topics.set_defaults(command='pubsub_delete_topics')
    self.add_opts(pubsub_delete_topics, pubsub_topic_mod.DeleteTopics.opts)

    #
    # pubsub get-topic (alias to get-topics)
    #
    pubsub_get_topic = pubsub_subs.add_parser('get-topic',
        description=pubsub_topic_mod.GetTopics.__doc__, parents=[base_parser])
    pubsub_get_topic.set_defaults(command='pubsub_get_topic')
    self.add_opts(pubsub_get_topic, pubsub_topic_mod.GetTopics.opts)

    #
    # pubsub get-topics
    #
    pubsub_get_topics = pubsub_subs.add_parser('get-topics',
        description=pubsub_topic_mod.GetTopics.__doc__, parents=[base_parser])
    pubsub_get_topics.set_defaults(command='pubsub_get_topics')
    self.add_opts(pubsub_get_topics, pubsub_topic_mod.GetTopics.opts)

    #
    # reset-totp-key
    #
    reset_totp_key = subs.add_parser('reset-totp-key',
        description=web_admin_auth_mod.ResetTOTPKey.__doc__, parents=[base_parser])
    reset_totp_key.add_argument('path', help='Path to web-admin')
    reset_totp_key.set_defaults(command='reset_totp_key')
    self.add_opts(reset_totp_key, web_admin_auth_mod.ResetTOTPKey.opts)

    #
    # set-admin-invoke-password
    #
    set_admin_invoke_password = subs.add_parser('set-admin-invoke-password',
        description=web_admin_auth_mod.SetAdminInvokePassword.__doc__, parents=[base_parser])
    set_admin_invoke_password.add_argument('path', help='Path to web-admin')
    set_admin_invoke_password.set_defaults(command='set_admin_invoke_password')
    self.add_opts(set_admin_invoke_password, web_admin_auth_mod.SetAdminInvokePassword.opts)

    #
    # quickstart
    #
    quickstart = subs.add_parser('quickstart', description='Quickly set up and manage Zato clusters', parents=[base_parser])
    quickstart_subs = quickstart.add_subparsers()

    quickstart_create = quickstart_subs.add_parser('create', description=quickstart_mod.Create.__doc__, parents=[base_parser])
    quickstart_create.add_argument('path', help='Path to an empty directory for the quickstart cluster')
    quickstart_create.set_defaults(command='quickstart_create')
    self.add_opts(quickstart_create, quickstart_mod.Create.opts)

    #
    # service
    #
    service = subs.add_parser('service', description='Commands related to the management of Zato services')
    service_subs = service.add_subparsers()

    service_invoke = service_subs.add_parser('invoke', description=service_mod.Invoke.__doc__, parents=[base_parser])
    service_invoke.set_defaults(command='service_invoke')
    self.add_opts(service_invoke, service_mod.Invoke.opts)

    #
    # sso
    #
    sso = subs.add_parser('sso', description='SSO management')
    sso_subs = sso.add_subparsers()

    #
    # create-user
    #
    sso_create_user = sso_subs.add_parser('create-user', description=sso_mod.CreateUser.__doc__, parents=[base_parser])
    sso_create_user.add_argument('path', help='Path to a Zato server')
    sso_create_user.set_defaults(command='sso_create_user')
    self.add_opts(sso_create_user, sso_mod.CreateUser.opts)

    #
    # create-super-user
    #
    sso_create_super_user = sso_subs.add_parser(
        'create-super-user', description=sso_mod.CreateSuperUser.__doc__, parents=[base_parser])
    sso_create_super_user.add_argument('path', help='Path to a Zato server')
    sso_create_super_user.set_defaults(command='sso_create_super_user')
    self.add_opts(sso_create_super_user, sso_mod.CreateSuperUser.opts)

    #
    # lock-user
    #
    sso_lock_user = sso_subs.add_parser('lock-user', description=sso_mod.LockUser.__doc__, parents=[base_parser])
    sso_lock_user.add_argument('path', help='Path to a Zato server')
    sso_lock_user.set_defaults(command='sso_lock_user')
    self.add_opts(sso_lock_user, sso_mod.LockUser.opts)

    #
    # login
    #
    sso_login = sso_subs.add_parser('login', description=sso_mod.Login.__doc__, parents=[base_parser])
    sso_login.add_argument('path', help='Path to a Zato server')
    sso_login.set_defaults(command='sso_login')
    self.add_opts(sso_login, sso_mod.Login.opts)

    #
    # logout
    #
    sso_logout = sso_subs.add_parser('logout', description=sso_mod.Logout.__doc__, parents=[base_parser])
    sso_logout.add_argument('path', help='Path to a Zato server')
    sso_logout.set_defaults(command='sso_logout')
    self.add_opts(sso_logout, sso_mod.Logout.opts)

    #
    # unlock-user
    #
    sso_unlock_user = sso_subs.add_parser('unlock-user', description=sso_mod.UnlockUser.__doc__, parents=[base_parser])
    sso_unlock_user.add_argument('path', help='Path to a Zato server')
    sso_unlock_user.set_defaults(command='sso_unlock_user')
    self.add_opts(sso_unlock_user, sso_mod.UnlockUser.opts)

    #
    # delete-user
    #
    sso_delete_user = sso_subs.add_parser('delete-user', description=sso_mod.DeleteUser.__doc__, parents=[base_parser])
    sso_delete_user.add_argument('path', help='Path to a Zato server')
    sso_delete_user.set_defaults(command='sso_delete_user')
    self.add_opts(sso_delete_user, sso_mod.DeleteUser.opts)

    #
    # change-user-password
    #
    sso_change_user_password = sso_subs.add_parser(
        'change-user-password', description=sso_mod.ChangeUserPassword.__doc__, parents=[base_parser])
    sso_change_user_password.add_argument('path', help='Path to a Zato server')
    sso_change_user_password.set_defaults(command='sso_change_user_password')
    self.add_opts(sso_change_user_password, sso_mod.ChangeUserPassword.opts)

    #
    # reset-totp-key (sso)
    #
    sso_reset_totp_key_password = sso_subs.add_parser(
        'reset-totp-key', description=sso_mod.ResetTOTPKey.__doc__, parents=[base_parser])
    sso_reset_totp_key_password.add_argument('path', help='Path to a Zato server')
    sso_reset_totp_key_password.set_defaults(command='sso_reset_totp_key')
    self.add_opts(sso_reset_totp_key_password, sso_mod.ResetTOTPKey.opts)

    #
    # reset-user-password
    #
    sso_reset_user_password = sso_subs.add_parser(
        'reset-user-password', description=sso_mod.ResetUserPassword.__doc__, parents=[base_parser])
    sso_reset_user_password.add_argument('path', help='Path to a Zato server')
    sso_reset_user_password.set_defaults(command='sso_reset_user_password')
    self.add_opts(sso_reset_user_password, sso_mod.ResetUserPassword.opts)

    #
    # create-odb
    #
    sso_create_odb = sso_subs.add_parser(
        'create-odb', description=sso_mod.CreateODB.__doc__, parents=[base_parser])
    sso_create_odb.set_defaults(command='sso_create_odb')
    self.add_opts(sso_create_odb, sso_mod.CreateODB.opts)

    #
    # start
    #
    self.add_start_server_parser(parser, base_parser, subs, formatter_class)

    #
    # stop
    #
    stop = subs.add_parser('stop', description=stop_mod.Stop.__doc__, parents=[base_parser])
    stop.add_argument('path', help='Path to the Zato component to be stopped')
    stop.set_defaults(command='stop')

    #
    # update
    #
    update = subs.add_parser('update', description='Updates Zato components and users')
    update_subs = update.add_subparsers()

    # .. update password
    update_password = update_subs.add_parser(
        'password', description=web_admin_auth_mod.UpdatePassword.__doc__, parents=[base_parser])
    update_password.add_argument('path', help='Path to a web admin directory')
    update_password.set_defaults(command='update_password')
    self.add_opts(update_password, web_admin_auth_mod.UpdatePassword.opts)

    #
    # wait
    #
    wait = subs.add_parser('wait', description=wait_mod.Wait.__doc__, parents=[base_parser])
    wait.set_defaults(command='wait')
    self.add_opts(wait, wait_mod.Wait.opts)

    return parser
# Module-level singleton through which all CLI parsers are built; used by main() below.
command_store = CommandStore()
# ################################################################################################################################
def pre_process_quickstart(sys_argv:'strlist', opts_idx:'int') -> 'None':
    """ Rewrites, in place, a pre-3.2 'zato quickstart create' invocation,
    turning its positional ODB and Redis arguments into 3.2-style '--' options.

    sys_argv is modified in place. opts_idx is the index of the first '--' flag
    on input, or a very large sentinel if there are no flags at all.
    """
    # The caller has already confirmed that 'create' exists so we can skip the try/except ValueError: block
    create_idx = sys_argv.index('create')

    # It is still possible that we have an incomplete command, i.e. 'zato quickstart create'
    # alone on input. In such a case, 'create' is the last element and we can return immediately.
    # (Fixed: the previous guard compared len(sys_argv) with create_idx itself,
    # which could never be true when 'create' was the last element.)
    if create_idx == len(sys_argv) - 1:
        return

    # If we are here, we have 'zato quickstart create' followed by a path and, possibly, ODB and Redis options.
    path_idx = create_idx + 1

    # ODB + Redis options start here
    original_odb_type_idx = path_idx + 1
    opts = sys_argv[path_idx+1:opts_idx]

    # No options = already the 3.2+ syntax, we can return
    if not opts:
        return

    # A well-formed pre-3.2 command carries exactly odb_type, redis_host and redis_port.
    # With fewer elements on input we return as well, letting argparse report
    # the malformed command instead of raising an IndexError here.
    if len(opts) < 3:
        return

    # Extract the options ..
    odb_type = opts[0]
    redis_host = opts[1]
    redis_port = opts[2]

    # .. remove them from their pre-3.2 non-optional positions,
    # .. note that we need to do it once for each of odb_type, redis_host and redis_port
    # .. using the same index because .pop will modify the list in place ..
    sys_argv.pop(original_odb_type_idx)
    sys_argv.pop(original_odb_type_idx)
    sys_argv.pop(original_odb_type_idx)

    # .. now, add the options back as '--' ones.
    sys_argv.append('--odb_type')
    sys_argv.append(odb_type)
    sys_argv.append('--kvdb_host')
    sys_argv.append(redis_host)
    sys_argv.append('--kvdb_port')
    sys_argv.append(redis_port)
# ################################################################################################################################
def pre_process_server(sys_argv, opts_idx, opts):
    # type: (list, int, list) -> None
    """ Rewrites, in place, a pre-3.2 'zato create server' invocation into its 3.2 equivalent,
    turning the positional ODB and Redis arguments into '--' options.
    """
    # A pre-3.2 invocation has exactly nine positional elements:
    # zato0 create1 server2 /path/to/server3 sqlite4 kvdb_host5 kvdb_port6 cluster_name7 server_name8
    #
    # whereas 3.2 expects only:
    # zato0 create1 server2 /path/to/server3 cluster_name7 server_name8
    len_pre_32 = 9

    # Anything other than the nine-element pre-3.2 shape is left untouched
    if len(sys_argv[:opts_idx]) != len_pre_32:
        return

    # Rebuild the command in the 3.2 order, then move the former positional
    # elements to the end as '--' options.
    rebuilt = [
        sys_argv[0], # zato
        sys_argv[1], # create
        sys_argv[2], # server
        sys_argv[3], # /path/to/server
        sys_argv[7], # cluster_name
        sys_argv[8], # server_name
        '--odb_type',  sys_argv[4],
        '--kvdb_host', sys_argv[5],
        '--kvdb_port', sys_argv[6],
    ]

    # Re-attach any '--' flags that were already on input
    rebuilt.extend(opts)

    # Replace the caller's list contents in place
    sys_argv[:] = rebuilt
# ################################################################################################################################
def pre_process_sys_argv(sys_argv):
    # type: (list) -> None
    """ Adjusts the argument list in place so that commands written in a pre-3.2
    syntax are understood by the current parser.
    """
    # stdlib
    import sys

    # '--' flags kept their meaning across versions - only what precedes them may
    # need rewriting. If there are no flags at all, use a sentinel index that is
    # past any realistic argument list.
    opts_idx = sys.maxsize
    for idx, elem in enumerate(sys_argv): # type: str
        if elem.startswith('--'):
            opts_idx = idx
            break

    # Is 'quickstart' among the non-flag arguments? (Index 0 is the program name,
    # so a zero result doubles as 'not found'.)
    try:
        qs_idx = sys_argv.index('quickstart', 0, opts_idx)
    except ValueError:
        qs_idx = 0

    #
    # Quickstart commands
    #
    if qs_idx:

        # 3.2 allows the 'create' subcommand to be implicit, i.e. turn
        # a) zato quickstart /path [--flags] into
        # b) zato quickstart create /path [--flags]
        if 'create' not in sys_argv:
            sys_argv.insert(qs_idx+1, 'create')

        # An earlier version so we need to turn Redis options into what 3.2 expects
        else:
            pre_process_quickstart(sys_argv, opts_idx)

        return

    #
    # Otherwise, it could be a 'zato create <component>' command
    #
    if 'create' in sys_argv:

        # We want to find the last non-optional element and if opts_idx is the max size,
        # it means that we do not have any flags at all.
        if opts_idx == sys.maxsize:
            opts_idx = len(sys_argv)

        # For later use, when a new sys_argv is constructed
        opts = sys_argv[opts_idx:]

        if 'server' in sys_argv:
            pre_process_server(sys_argv, opts_idx, opts)
# ################################################################################################################################
def main() -> 'any_':
    """ The zato CLI entry point - builds the appropriate parser, parses sys.argv,
    validates ODB-related options and dispatches to the selected command.
    """
    # stdlib
    import os
    import sys

    # Used by start/stop commands
    os.environ['ZATO_CURDIR'] = os.getcwd()

    # Special-case the most commonly used commands to make the parser build quickly in these cases.
    has_args = len(sys.argv) > 1

    # First, zato --version - print the version and exit without building any parser
    if has_args and sys.argv[1] == '--version':

        # Zato
        from zato.common.version import get_version

        sys.stdout.write(get_version() + '\n')
        sys.exit(0)

    # Now, zato start ... - a minimal parser covering only the start command
    elif has_args and sys.argv[1] == 'start':
        parser = command_store.load_start_parser()

    # All the other commands
    else:
        # Take into account changes introduced between versions
        pre_process_sys_argv(sys.argv)

        # This may change what os.getcwd returns
        parser = command_store.load_full_parser()

    # Set it back for further use after it was potentially reset by command_store.load_full_parser
    os.chdir(os.environ['ZATO_CURDIR'])

    # Parse the arguments
    args = parser.parse_args()

    # Expand user directories (~) to full paths
    if getattr(args, 'path', None):
        args.path = os.path.expanduser(args.path)

    # Exit if no known command was found among arguments ..
    if not hasattr(args, 'command'):
        parser.print_help()

    # .. otherwise, try to run the command now ..
    else:

        # Now that we are here, we also need to check if non-SQLite databases
        # have all their required options on input. We do it here rather than in create_odb.py
        # because we want to report it as soon as possible, before actual commands execute.
        odb_type = getattr(args, 'odb_type', None)
        if odb_type and odb_type != 'sqlite':
            missing = []

            # Collect every required ODB option that was not given on input
            for name in 'odb_db_name', 'odb_host', 'odb_port', 'odb_user':
                if not getattr(args, name, None):
                    missing.append(name)

            if missing:
                # Singular or plural wording, depending on how many options are missing
                missing_noun = 'Option ' if len(missing) == 1 else 'Options '
                missing_verb = ' is ' if len(missing) == 1 else ' are '
                missing.sort()
                sys.stdout.write(
                    missing_noun + \
                    '`' + \
                    ', '.join(missing) + \
                    '`' + \
                    missing_verb + \
                    'required if odb_type is ' + \
                    '`{}`.'.format(args.odb_type) + \
                    '\n'
                )
                sys.exit(1)

        # Zato
        from zato.cli import run_command

        return run_command(args)
# ################################################################################################################################
| 40,401
|
Python
|
.py
| 785
| 42.155414
| 130
| 0.594104
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,466
|
create_lb.py
|
zatosource_zato/code/zato-cli/src/zato/cli/create_lb.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2021, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import os, uuid
# Zato
from zato.cli import is_arg_given, ZatoCommand
from zato.common.defaults import http_plain_server_port
from zato.common.util.open_ import open_w
# JSON config for the load-balancer's agent. Literal braces are doubled because
# the string goes through str.format in Create.execute.
# NOTE(review): there is no '{is_tls_enabled}' placeholder here even though
# Create.execute passes is_tls_enabled to .format, so the value is never
# substituted and "is_tls_enabled" always stays false - confirm intended.
config_template = """{{
"haproxy_command": "haproxy",
"host": "localhost",
"port": 20151,
"is_tls_enabled": false,
"keyfile": "./zato-lba-priv-key.pem",
"certfile": "./zato-lba-cert.pem",
"ca_certs": "./zato-lba-ca-certs.pem",
"work_dir": "../",
"verify_fields": {{}},
"log_config": "./logging.conf",
"pid_file": "zato-lb-agent.pid"
}}
"""

# HAProxy configuration template - the {stats_socket}, {stats_password},
# {default_backend} and {http_503_path} placeholders are filled in by Create.execute.
zato_config_template = """
# ##############################################################################
global
log 127.0.0.1:514 local0 debug # ZATO global:log
stats socket {stats_socket} # ZATO global:stats_socket
# ##############################################################################
defaults
log global
option httpclose
stats uri /zato-lb-stats # ZATO defaults:stats uri
timeout connect 15000 # ZATO defaults:timeout connect
timeout client 15000 # ZATO defaults:timeout client
timeout server 15000 # ZATO defaults:timeout server
errorfile 503 {http_503_path}
stats enable
stats realm Haproxy\ Statistics
# Note: The password below is a UUID4 written in plain-text.
stats auth admin1:{stats_password}
stats refresh 5s
# ##############################################################################
backend bck_http_plain
mode http
balance roundrobin
# ZATO begin backend bck_http_plain
{default_backend}
# ZATO end backend bck_http_plain
# ##############################################################################
frontend front_http_plain
mode http
default_backend bck_http_plain
option forwardfor
option httplog # ZATO frontend front_http_plain:option log-http-requests
bind 0.0.0.0:11223 # ZATO frontend front_http_plain:bind
maxconn 200 # ZATO frontend front_http_plain:maxconn
monitor-uri /zato-lb-alive # ZATO frontend front_http_plain:monitor-uri
""" # noqa

# Default backend entry pointing HAProxy to a single local Zato server.
# NOTE(review): only '{server01_port}' is a placeholder here although the caller
# also passes server02_port, which str.format silently ignores - confirm intended.
default_backend = """
server http_plain--server1 127.0.0.1:{server01_port} check inter 2s rise 2 fall 2 # ZATO backend bck_http_plain:server--server1
"""

# Raw HTTP response served by HAProxy when no backend server is available.
http_503 = """HTTP/1.0 503 Service Unavailable
Cache-Control: no-cache
Connection: close
Content-Type: application/json
{"zato_env":
{"details": "No server is available to handle the request",
"result": "ZATO_ERROR",
"cid": "K012345678901234567890123456"}
}
"""
class Create(ZatoCommand):
    """ Creates a new Zato load-balancer
    """
    # Command-line options - all of them are paths to optional TLS material
    # for the load-balancer's agent.
    opts = []
    opts.append({'name':'--pub-key-path', 'help':"Path to the load-balancer agent's public key in PEM"})
    opts.append({'name':'--priv-key-path', 'help':"Path to the load-balancer agent's private key in PEM"})
    opts.append({'name':'--cert-path', 'help':"Path to the load-balancer agent's certificate in PEM"})
    opts.append({'name':'--ca-certs-path', 'help':"Path to the a PEM list of certificates the load-balancer's agent will trust"})

    # The target directory must be empty before this command runs
    needs_empty_dir = True

    def __init__(self, args):
        super(Create, self).__init__(args)

        # Directory in which the load-balancer will be installed
        self.target_dir = os.path.abspath(args.path) # noqa

    def execute(self, args, use_default_backend=False, server02_port=None, show_output=True):
        """ Creates the load-balancer's on-disk layout - directories, agent config,
        logging config, the HAProxy config and the 503 error page.
        """
        # Zato
        from zato.common.util.logging_ import get_logging_conf_contents

        # Directory layout: config/, logs/ and config/repo/
        os.mkdir(os.path.join(self.target_dir, 'config')) # noqa
        os.mkdir(os.path.join(self.target_dir, 'logs')) # noqa

        repo_dir = os.path.join(self.target_dir, 'config', 'repo') # noqa
        os.mkdir(repo_dir) # noqa

        # NOTE(review): log_path is computed but not referenced again in this method -
        # confirm whether it can be removed.
        log_path = os.path.abspath(os.path.join(repo_dir, '..', '..', 'logs', 'lb-agent.log')) # noqa

        # UNIX socket through which HAProxy statistics are exposed
        stats_socket = os.path.join(self.target_dir, 'haproxy-stat.sock') # noqa

        # TLS is considered enabled if a private key was given on input
        is_tls_enabled = is_arg_given(args, 'priv_key_path')

        # NOTE(review): config_template has no '{is_tls_enabled}' placeholder (its braces
        # are doubled), so this .format call substitutes nothing and the written config
        # always carries "is_tls_enabled": false - confirm intended.
        config = config_template.format(**{
            'is_tls_enabled': is_tls_enabled,
        })

        logging_conf_contents = get_logging_conf_contents()

        # Write out the agent's own config and its logging configuration
        open_w(os.path.join(repo_dir, 'lb-agent.conf')).write(config) # noqa
        open_w(os.path.join(repo_dir, 'logging.conf')).write(logging_conf_contents) # noqa

        if use_default_backend:
            # NOTE(review): default_backend has only a '{server01_port}' placeholder;
            # the server02_port keyword is silently ignored by str.format - confirm intended.
            backend = default_backend.format(server01_port=http_plain_server_port, server02_port=server02_port)
        else:
            # An empty-backend marker understood by the HAProxy config tooling
            backend = '\n# ZATO default_backend_empty'

        # Render and write out the HAProxy config and the 503 error page
        zato_config = zato_config_template.format(
            stats_socket=stats_socket,
            stats_password=uuid.uuid4().hex,
            default_backend=backend,
            http_503_path=os.path.join(repo_dir, '503.http')) # noqa

        open_w(os.path.join(repo_dir, 'zato.config')).write(zato_config) # noqa
        open_w(os.path.join(repo_dir, '503.http')).write(http_503) # noqa

        # Copy any TLS material given on input into the repo directory
        self.copy_lb_crypto(repo_dir, args)

        # Initial info
        self.store_initial_info(self.target_dir, self.COMPONENTS.LOAD_BALANCER.code)

        if show_output:
            if self.verbose:
                msg = "Successfully created a load-balancer's agent in {}".format(self.target_dir)
                self.logger.debug(msg)
            else:
                self.logger.info('OK')
| 5,339
|
Python
|
.py
| 122
| 38.131148
| 131
| 0.62819
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,467
|
ca_create_web_admin.py
|
zatosource_zato/code/zato-cli/src/zato/cli/ca_create_web_admin.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# Zato
from zato.cli import CACreateCommand, common_ca_create_opts
class Create(CACreateCommand):
    """ Generates crypto material (certificate and keys) for a Zato web console.
    """
    # The same identifier serves both as the output file prefix
    # and the certificate's organizational unit.
    _component_name = 'web-admin'

    opts = [{'name':'--organizational-unit', 'help':'Organizational unit name'}] + common_ca_create_opts

    def get_file_prefix(self, file_args):
        return self._component_name

    def get_organizational_unit(self, args):
        return self._component_name

    def execute(self, args, show_output=True):
        # Delegate to the base class using the client/server extension profile.
        self._execute(args, 'v3_client_server', show_output)
| 783
|
Python
|
.py
| 21
| 32.857143
| 82
| 0.688329
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,468
|
service.py
|
zatosource_zato/code/zato-cli/src/zato/cli/service.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# Zato
from zato.cli import ZatoCommand
from zato.client import CID_NO_CLIP, DEFAULT_MAX_CID_REPR, DEFAULT_MAX_RESPONSE_REPR
from zato.common.api import BROKER, ZATO_INFO_FILE
from zato.common.const import ServiceConst
# ################################################################################################################################
class Invoke(ZatoCommand):
    """ Invokes a service by its name.
    """
    file_needed = ZATO_INFO_FILE

    opts = [
        {'name':'path', 'help':'Path in the file-system to a server the service is deployed on'},
        {'name':'name', 'help':'Name of the service to invoke'},
        {'name':'--payload', 'help':'Payload to invoke the service with'},
        {'name':'--headers',
            'help':'Additional HTTP headers the service invoker will receive in format of header-name=header-value; header2-name=header2-value'},
        {'name':'--channel', 'help':'Channel the service will be invoked through', 'default':'invoke'},
        {'name':'--data-format', 'help':"Payload's data format", 'default': 'json'},
        {'name':'--transport', 'help':'Transport to invoke the service over'},
        {'name':'--url-path', 'help':'URL path zato.service.invoke is exposed on',
            'default':ServiceConst.API_Admin_Invoke_Url_Path},
        {'name':'--max-cid-repr',
            'help':'How many characters of each end of a CID to print out in verbose mode, defaults to {}, use {} to print the whole of it'.format(
                DEFAULT_MAX_CID_REPR, CID_NO_CLIP), 'default':DEFAULT_MAX_CID_REPR},
        {'name':'--max-response-repr', 'help':'How many characters of a response to print out in verbose mode, defaults to {}'.format(
            DEFAULT_MAX_RESPONSE_REPR), 'default':DEFAULT_MAX_RESPONSE_REPR},
        {'name':'--is-async', 'help':'If given, the service will be invoked asynchronously', 'action':'store_true'},
        {'name':'--expiration', 'help':'In async mode, after how many seconds the message should expire, defaults to {} seconds'.format(
            BROKER.DEFAULT_EXPIRATION), 'default':BROKER.DEFAULT_EXPIRATION},
    ]

# ################################################################################################################################

    def execute(self, args):
        """ Builds a client out of the server's config and invokes the requested service,
        logging the response (or error details) on output.
        """
        # Zato
        from zato.common.api import DATA_FORMAT
        from zato.common.util.api import get_client_from_server_conf

        client = get_client_from_server_conf(args.path, stdin_data=self.stdin_data)

        headers = {}
        if args.headers:
            for pair in args.headers.strip().split(';'):
                pair = pair.strip()
                if not pair:
                    # Tolerate empty segments, e.g. a trailing ';' in 'a=1;b=2;'
                    continue
                k, v = pair.split('=', 1)
                headers[k] = v

        # Prevents attempts to convert/escape XML into JSON
        to_json = args.data_format == DATA_FORMAT.JSON

        func = client.invoke_async if args.is_async else client.invoke
        response = func(args.name, args.payload, headers, args.channel, args.data_format, args.transport, to_json=to_json)

        if response.ok:
            self.logger.info(response.data or '(None)')
        else:
            self.logger.error(response.details)

        if args.verbose:
            self.logger.debug('inner.text:[{}]'.format(response.inner.text))
            self.logger.debug('response:[{}]'.format(response))
# ################################################################################################################################
| 3,599
|
Python
|
.py
| 58
| 54.189655
| 144
| 0.575035
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,469
|
apispec.py
|
zatosource_zato/code/zato-cli/src/zato/cli/apispec.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2021, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# Zato
from zato.cli import ZatoCommand
from zato.common.util.open_ import open_w
# ################################################################################################################################
# Sleep intervals in seconds; fg/bg presumably distinguish foreground
# and background mode -- TODO(review): confirm against callers.
stderr_sleep_fg = 0.9
stderr_sleep_bg = 1.2

# ################################################################################################################################

# Service-name patterns considered internal; excluded from generated
# specifications by default (used as the default of --exclude below).
internal_patterns = [
    'zato.*',
    'pub.zato.*',
    'helpers.*',
]
# ################################################################################################################################
class APISpec(ZatoCommand):
    """API specifications generator."""
    opts = [
        {'name':'--include', 'help':'A comma-separated list of patterns to include services by', 'default':'*'},
        {'name':'--with-internal', 'help':'Whether internal services should be included on output', 'action':'store_true'},
        {'name':'--exclude', 'help':'A comma-separated list of patterns to exclude services by',
            'default':','.join(internal_patterns)},
        {'name':'--dir', 'help':'Directory to save the output to', 'default':''},
        {'name':'--delete-dir', 'help':'If given, --dir will be deleted before the output is saved', 'action':'store_true'},
        {'name':'--with-api-invoke', 'help':'If given, OpenAPI spec for --api-invoke-path endpoints will be generated',
            'action':'store_true', 'default':True},
        {'name':'--with-rest-channels', 'help':'If given, OpenAPI spec for individual REST endpoints will be generated',
            'action':'store_true', 'default':True},
        {'name':'--api-invoke-path', 'help':'A comma-separated list of URL paths to invoke API services through'},
        {'name':'--tags', 'help':'A comma-separated list of docstring tags to generate documentation for',
            'default':'public'},
    ]

# ################################################################################################################################

    def execute(self, args):
        """ Invokes zato.apispec.get-api-spec on a server and saves the returned
        specification files under the output directory.
        """
        # stdlib
        import os
        from shutil import rmtree

        # Zato
        from zato.common.util.api import get_client_from_server_conf
        from zato.common.util.file_system import fs_safe_now

        client = get_client_from_server_conf(args.path)

        # Normalize comma-separated CLI lists, dropping empty elements
        # such as those produced by trailing commas.
        exclude = [elem.strip() for elem in args.exclude.split(',') if elem.strip()]
        tags = [elem.strip() for elem in args.tags.split(',') if elem.strip()]

        # If internal services are requested, remove them from the exclusion list.
        if args.with_internal:
            for item in internal_patterns:
                try:
                    exclude.remove(item)
                except ValueError:
                    pass

        request = {
            'return_internal': args.with_internal,
            'include': args.include,
            'exclude': ','.join(exclude),
            'needs_api_invoke': args.with_api_invoke,
            'needs_rest_channels': args.with_rest_channels,
            'needs_sphinx': True,
            'tags': tags,
        }

        if args.with_api_invoke:
            request['api_invoke_path'] = args.api_invoke_path if args.api_invoke_path else '/zato/api/invoke/{service_name}'

        # Fall back to a timestamped directory name if none was given.
        if not args.dir:
            now = fs_safe_now()
            out_dir = '{}.{}'.format('apispec', now)
        else:
            out_dir = args.dir

        out_dir = os.path.abspath(out_dir)

        if os.path.exists(out_dir):
            if args.delete_dir:
                self.logger.info('Deleting %s', out_dir)
                rmtree(out_dir)
            else:
                self.logger.warning('Output directory %s already exists and --delete-dir was not provided', out_dir)
                return

        os.mkdir(out_dir)

        response = client.invoke('zato.apispec.get-api-spec', request)
        data = response.data['response']['data']

        for file_path, contents in data.items():
            full_file_path = os.path.join(out_dir, file_path)
            file_dir = os.path.abspath(os.path.dirname(full_file_path))

            # exist_ok tolerates already-created directories without masking
            # genuine errors such as permission problems.
            os.makedirs(file_dir, exist_ok=True)

            if contents:
                # Close the handle deterministically via a context manager.
                with open_w(full_file_path) as f:
                    _ = f.write(contents)

        self.logger.info('Output saved to %s', out_dir)
        self.logger.info('To build the documentation, run:\ncd %s\nmake html', out_dir)
| 4,638
|
Python
|
.py
| 95
| 38.978947
| 130
| 0.514058
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,470
|
web_admin_auth.py
|
zatosource_zato/code/zato-cli/src/zato/cli/web_admin_auth.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import os
# Zato
from zato.cli import common_totp_opts, ManageCommand
from zato.common.const import ServiceConst
from zato.common.util.open_ import open_r, open_w
# ################################################################################################################################
# ################################################################################################################################
class _WebAdminAuthCommand(ManageCommand):
    """ Base class for web-admin (Dashboard) auth-related commands.
    Boots Django using the component's web-admin.conf so Django models
    and management commands can be used by subclasses.
    """
    def _prepare(self, args):
        """ Loads web-admin configuration and initializes Django.
        Must be called before any Django functionality is used.
        """
        import pymysql
        pymysql.install_as_MySQLdb()

        # stdlib
        import os

        # Zato
        from zato.admin.zato_settings import update_globals
        from zato.common.json_internal import loads

        # Switch to the component's directory; later lookups are relative to it.
        os.chdir(os.path.abspath(args.path))
        base_dir = os.path.join(self.original_dir, args.path)
        config = loads(open_r(os.path.join(base_dir, '.', 'config/repo/web-admin.conf')).read())
        config['config_dir'] = os.path.abspath(args.path)
        update_globals(config, base_dir)

        # Django may be set up only after settings have been populated above.
        os.environ['DJANGO_SETTINGS_MODULE'] = 'zato.admin.settings'
        import django
        django.setup()

    def _ok(self, args):
        # Needed because Django took over our logging config
        self.reset_logger(args, True)
        self.logger.info('OK')
# ################################################################################################################################
# ################################################################################################################################
class CreateUser(_WebAdminAuthCommand):
    """ Creates a new Dashboard user
    """
    opts = [
        {'name': '--username', 'help': 'Username to use'},
        {'name': '--email', 'help': 'e-mail the user uses'},
        {'name': '--password', 'help': "Newly created user's password"},
    ]

    def __init__(self, *args, **kwargs):
        super(CreateUser, self).__init__(*args, **kwargs)
        # Interactive by default; switched off in before_execute
        # when both --username and --email were given on input.
        self.is_interactive = True

    # Class django.contrib.auth.management.commands.createsuperuser.Command needs self.stdin and self.stdout
    # so we fake them here.

    class _FakeStdout:
        """ Minimal stdout stand-in redirecting Django's output to our logger. """
        def __init__(self, logger):
            self.logger = logger

        def write(self, msg):
            self.logger.info(msg.strip())

    class _FakeStdin:
        """ Minimal stdin stand-in; reports being a TTY so Django may prompt interactively. """
        def isatty(self):
            return True

    def is_password_required(self):
        # A password needs to be given up front only in non-interactive mode.
        return not self.is_interactive

    def before_execute(self, args):
        """ Validates input arguments; if both --username and --email were given,
        validates the e-mail address and switches to non-interactive mode.
        """
        # stdlib
        import sys

        super(CreateUser, self).before_execute(args)
        self._prepare(args)

        username = getattr(args, 'username', None)
        email = getattr(args, 'email', None)

        if username or email:
            # Either both or neither must be provided.
            if not(username and email):
                self.logger.error('Both --username and --email are required if either is provided')
                sys.exit(self.SYS_ERROR.INVALID_INPUT)
            else:
                from django.core.validators import EmailValidator
                from django.core import exceptions
                self.reset_logger(self.args, True)
                try:
                    validator = EmailValidator()
                    validator(email)
                except exceptions.ValidationError:
                    self.logger.error('Invalid e-mail `%s`', email)
                    sys.exit(self.SYS_ERROR.INVALID_INPUT)
                else:
                    self.is_interactive = False

    def execute(self, args, needs_sys_exit=True):
        """ Runs Django's createsuperuser command with our fake stdio attached,
        reporting errors through our logger.
        """
        # stdlib
        import sys
        from traceback import format_exc

        # Django
        from django.contrib.auth.management.commands.createsuperuser import Command

        self.reset_logger(args, True)

        Command.stdout = CreateUser._FakeStdout(self.logger)
        Command.stdin = CreateUser._FakeStdin()

        options = {
            'verbosity':0,
            'database': None,
            'username':self.args.username,
            'email':self.args.email,
        }

        # Django's createsuperuser reads the password from this environment
        # variable when running non-interactively.
        os.environ['DJANGO_SUPERUSER_PASSWORD'] = self.args.password

        try:
            Command().handle(interactive=self.is_interactive, **options)
        except Exception as e:
            # Include the full traceback only in verbose mode.
            if self.args.verbose:
                suffix = ''
                exc_info = format_exc()
            else:
                suffix = '(use --verbose for more information)'
                exc_info = e
            self.logger.error(f'User could not be created, details: `{exc_info}` {suffix}')
            if needs_sys_exit:
                sys.exit(self.SYS_ERROR.INVALID_INPUT)
            else:
                raise
        else:
            self._ok(args)
# ################################################################################################################################
# ################################################################################################################################
class UpdatePassword(_WebAdminAuthCommand):
    """ Updates a Dashboard user's password
    """
    opts = [
        {'name': 'username', 'help': 'Username to change the password of'},
        {'name': '--password', 'help': 'New password'},
    ]

    def before_execute(self, args):
        super(UpdatePassword, self).before_execute(args)
        self._prepare(args)

    def execute(self, args, called_from_wrapper=False):
        # When invoked via a wrapper, the wrapper is responsible for preparation.
        if not called_from_wrapper:
            self._prepare(args)

        from django.contrib.auth.management.commands.changepassword import Command
        self.reset_logger(self.args, True)

        # An optional password tells us if we are to use the Django's command
        # or our own wrapper returning the user-provided password without asking for one.
        if getattr(args, 'password', None):
            class _Command(Command):
                def _get_pass(self, *ignored_args, **ignored_kwargs):
                    # Skip the interactive prompt; return the password from CLI input.
                    return args.password
        else:
            _Command = Command

        _Command().handle(username=args.username, database=None)

        if not called_from_wrapper:
            self._ok(args)
# ################################################################################################################################
# ################################################################################################################################
class ResetTOTPKey(_WebAdminAuthCommand):
    """ Resets a user's TOTP secret key. Returns the key on output unless it was given on input.
    """
    opts = common_totp_opts

    def before_execute(self, args):
        super(ResetTOTPKey, self).before_execute(args)
        self._prepare(args)
        self.reset_logger(args, True)

    def execute(self, args):
        # Zato
        from zato.admin.web.models import User
        from zato.admin.web.util import get_user_profile, set_user_profile_totp_key
        from zato.admin.zato_settings import zato_secret_key
        from zato.cli.util import get_totp_info_from_args
        from zato.common.json_internal import dumps

        # Either take the TOTP key and label from input arguments or generate new ones.
        totp_key, totp_key_label = get_totp_info_from_args(args)

        self.reset_logger(args, True)

        try:
            user = User.objects.get(username=args.username)
        except User.DoesNotExist:
            self.logger.warning('No such user `%s` found in `%s`', args.username, args.path)
            return

        # The user exists, so look up the associated profile ..
        user_profile = get_user_profile(user, False)

        # .. store the new key among the profile's opaque attributes ..
        opaque_attrs = set_user_profile_totp_key(user_profile, zato_secret_key, totp_key, totp_key_label)

        # .. and persist the updated profile.
        user_profile.opaque1 = dumps(opaque_attrs)
        user_profile.save()

        # If the key came in on input, the caller knows it already and may prefer
        # not to have it disclosed - log it only if it was generated here.
        if self.args.key:
            self.logger.info('OK')
        else:
            self.logger.info(totp_key)
# ################################################################################################################################
# ################################################################################################################################
class SetAdminInvokePassword(_WebAdminAuthCommand):
    """ Resets a web-admin user's password that it uses to connect to servers.
    """
    opts = [
        {'name': '--username', 'help': 'Username to reset the password of', 'default':ServiceConst.API_Admin_Invoke_Username},
        {'name': '--password', 'help': 'Password to set'},
    ]

    def execute(self, args):
        # stdlib
        import os

        # Zato
        from zato.common.crypto.api import WebAdminCryptoManager

        # Python 2/3 compatibility
        from zato.common.py23_.past.builtins import unicode

        # Locate the component's config repository
        os.chdir(os.path.abspath(args.path))
        base_dir = os.path.join(self.original_dir, args.path)
        repo_dir = os.path.join(base_dir, 'config', 'repo')

        # Read the current configuration file
        config_path = os.path.join(repo_dir, 'web-admin.conf')
        config_data = open_r(config_path).read()

        # Encrypt the password provided on input, making sure we hand bytes to the crypto manager
        cm = WebAdminCryptoManager(repo_dir=repo_dir)
        password = args.password.encode('utf8') if isinstance(args.password, unicode) else args.password
        encrypted = cm.encrypt(password)

        # The config file is text, so make sure the encrypted value is text too
        if not isinstance(encrypted, unicode):
            encrypted = encrypted.decode('utf8')

        # Rewrite only the password line, keeping all the other lines intact
        # so the file's original formatting is preserved
        out_lines = []
        for orig_line in config_data.splitlines():
            if 'ADMIN_INVOKE_PASSWORD' in orig_line:
                orig_line = ' "ADMIN_INVOKE_PASSWORD": "{}",'.format(encrypted)
            out_lines.append(orig_line)

        # Save config with the updated password
        open_w(config_path).write('\n'.join(out_lines))
# ################################################################################################################################
# ################################################################################################################################
| 10,545
|
Python
|
.py
| 219
| 38.977169
| 130
| 0.525731
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,471
|
check_config.py
|
zatosource_zato/code/zato-cli/src/zato/cli/check_config.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2021, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# Zato
from zato.cli import ManageCommand
from zato.common.util.open_ import open_r
# ################################################################################################################################
class CheckConfig(ManageCommand):
    """ Checks config of a Zato component.
    """

# ################################################################################################################################

    def get_json_conf(self, conf_name, repo_dir=None):
        """ Reads and parses a JSON config file from the component's repo directory. """
        # stdlib
        from os.path import join

        # Zato
        from zato.common.json_internal import loads

        repo_dir = repo_dir or join(self.config_dir, 'repo')
        return loads(open_r(join(repo_dir, conf_name)).read())

# ################################################################################################################################

    def ensure_port_free(self, prefix, port, address):
        """ Raises an exception if the given TCP port is already in use. """
        # Zato
        from zato.common.util.tcp import is_port_taken

        if is_port_taken(port):
            raise Exception('{} check failed. Address `{}` already taken.'.format(prefix, address))

# ################################################################################################################################

    def ensure_json_config_port_free(self, prefix, conf_name=None, conf=None):
        """ Like ensure_port_free but reads host/port out of a JSON config file
        (conf_name) or an already-parsed config dict (conf).
        """
        conf = self.get_json_conf(conf_name) if conf_name else conf
        address = '{}:{}'.format(conf['host'], conf['port'])
        self.ensure_port_free(prefix, conf['port'], address)

# ################################################################################################################################

    def ping_sql(self, engine_params, ping_query):
        """ Pings the SQL ODB using the given engine parameters and query. """
        # Zato
        from zato.common.odb import ping_database

        ping_database(engine_params, ping_query)

        if self.show_output:
            self.logger.info('SQL ODB connection OK')

# ################################################################################################################################

    def check_sql_odb_server_scheduler(self, cm, conf, fs_sql_config, needs_decrypt_password=True):
        """ Verifies SQL ODB connectivity for servers and schedulers.

        needs_decrypt_password -- whether the password in config is encrypted
        and has to be decrypted first (needed by scheduler but not server).
        """
        # Zato
        from zato.common.odb.ping import get_ping_query

        engine_params = dict((conf['odb']))
        engine_params['extra'] = {}
        engine_params['pool_size'] = 1

        # This will be needed by scheduler but not server
        if needs_decrypt_password:
            password = engine_params['password']
            if password:
                engine_params['password'] = cm.decrypt(password)

        self.ping_sql(engine_params, get_ping_query(fs_sql_config, engine_params))

# ################################################################################################################################

    def check_sql_odb_web_admin(self, cm, conf):
        """ Verifies SQL ODB connectivity for web-admin, translating Django-style
        config keys into SQLAlchemy-style engine parameters.
        """
        # Zato
        from zato.common.api import ping_queries

        # (sqlalchemy key, django key) pairs
        pairs = (
            ('engine', 'db_type'),
            ('username', 'DATABASE_USER'),
            ('password', 'DATABASE_PASSWORD'),
            ('host', 'DATABASE_HOST'),
            ('port', 'DATABASE_PORT'),
            ('db_name', 'DATABASE_NAME'),
        )
        engine_params = {'extra':{}, 'pool_size':1}
        for sqlalch_name, django_name in pairs:
            engine_params[sqlalch_name] = conf[django_name]

        password = engine_params['password']
        if password:
            engine_params['password'] = cm.decrypt(password)

        self.ping_sql(engine_params, ping_queries[engine_params['engine']])

# ################################################################################################################################

    def on_server_check_kvdb(self, cm, conf, conf_key='kvdb'):
        """ Pings Redis (KVDB) if it is configured and enabled; no-op otherwise. """
        # Bunch
        from bunch import Bunch

        # Zato
        from zato.common.kvdb.api import KVDB

        # Redis is not configured = we can return
        kvdb_config = conf.get(conf_key) or {}
        if not kvdb_config:
            return

        # Redis is not enabled = we can return
        if not KVDB.is_config_enabled(kvdb_config):
            return

        kvdb_config = Bunch(kvdb_config)
        kvdb = KVDB(kvdb_config, cm.decrypt)
        kvdb.init()

        kvdb.conn.info()
        kvdb.close()

        if self.show_output:
            self.logger.info('Redis connection OK')

# ################################################################################################################################

    def ensure_no_pidfile(self, log_file_marker):
        """ Makes sure the component is not already running, deleting stale pidfiles
        that do not belong to a live Zato component of this kind.
        """
        # stdlib
        from os.path import abspath, exists, join

        pidfile = abspath(join(self.component_dir, 'pidfile'))

        # Pidfile exists ..
        if exists(pidfile):

            # stdlib
            import os

            # psutil
            from psutil import AccessDenied, Process, NoSuchProcess

            # Zato
            from zato.common.api import INFO_FORMAT
            from zato.common.component_info import get_info

            # .. but raise an error only if the PID it points to belongs
            # to an already running component. Otherwise, it must be a stale pidfile
            # that we can safely delete.
            pid = open_r(pidfile).read().strip()
            try:
                if pid:
                    pid = int(pid)
            except ValueError:
                raise Exception('Could not parse pid value `{}` as an integer ({})'.format(pid, pidfile))
            else:
                try:
                    get_info(self.component_dir, INFO_FORMAT.DICT)
                except AccessDenied:
                    # This could be another process /or/ it can be our own component started by another user,
                    # so to be on the safe side, indicate an error instead of deleting the pidfile
                    raise Exception('Access denied to PID `{}` found in `{}`'.format(pid, pidfile))
                except NoSuchProcess:
                    # This is fine, there is no process of that PID,
                    # which means that this PID does not belong to our component
                    # (because it doesn't belong to any process), so we may just delete this pidfile safely ..
                    os.remove(pidfile)

                    # .. but, if the component is load-balancer, we also need to delete its agent's pidfile.
                    # The assumption is that if the load-balancer is not running then so isn't its agent.
                    if log_file_marker == 'lb-agent':
                        lb_agent_pidfile = abspath(join(self.component_dir, 'zato-lb-agent.pid'))
                        os.remove(lb_agent_pidfile)
                else:
                    #
                    # This PID exists, but it is still possible that it belongs to another process
                    # that took over a PID previously assigned to a Zato component,
                    # in which case we can still delete the pidfile.
                    #
                    # We decide that a process is actually an already running Zato component if it has
                    # opened log files that should belong that kind of component, as indicated by log_file_marker,
                    # otherwise we assume this PID belongs to a completely different process and we can delete pidfile.
                    #
                    has_log = False
                    has_lock = False

                    log_path = abspath(join(self.component_dir, 'logs', '{}.log'.format(log_file_marker)))
                    lock_path = abspath(join(self.component_dir, 'logs', '{}.lock'.format(log_file_marker)))

                    if pid:
                        for name in Process(pid).open_files():
                            if name.path == log_path:
                                has_log = True
                            elif name.path == lock_path:
                                has_lock = True

                    # Both files exist - this is our component and it's running so we cannot continue
                    if has_log and has_lock:
                        raise Exception('Cannot proceed, found pidfile `{}`'.format(pidfile))

                    # This must be an unrelated process, so we can delete pidfile ..
                    os.remove(pidfile)

                    # .. again, if the component is load-balancer, we also need to delete its agent's pidfile.
                    # The assumption is that if the load-balancer is not running then so isn't its agent.
                    if log_file_marker == 'lb-agent':
                        lb_agent_pidfile = abspath(join(self.component_dir, 'zato-lb-agent.pid'))
                        os.remove(lb_agent_pidfile)

        if self.show_output:
            self.logger.info('No such pidfile `%s`, OK', pidfile)

# ################################################################################################################################

    def on_server_check_port_available(self, server_conf):
        """ Verifies that the server's Gunicorn bind port is free. """
        address = server_conf['main']['gunicorn_bind']
        _, port = address.split(':')
        self.ensure_port_free('Server', int(port), address)

# ################################################################################################################################

    def get_crypto_manager(self, secret_key=None, stdin_data=None, class_=None):
        """ Instantiates a crypto manager of the given class from the component's repo directory. """
        # stdlib
        from os.path import join

        return class_.from_repo_dir(secret_key, join(self.config_dir, 'repo'), stdin_data)

# ################################################################################################################################

    def get_sql_ini(self, conf_file, repo_dir=None):
        """ Reads an INI-style SQL config file from the component's repo directory. """
        # stdlib
        from os.path import join

        # Zato
        from zato.common.ext.configobj_ import ConfigObj

        repo_dir = repo_dir or join(self.config_dir, 'repo')
        return ConfigObj(join(repo_dir, conf_file))

# ################################################################################################################################

    def _on_server(self, args):
        """ Per-component check for servers: SQL ODB, pidfile and port availability. """
        # stdlib
        from os.path import join

        # Zato
        from zato.common.ext.configobj_ import ConfigObj

        # Zato
        from zato.common.crypto.api import ServerCryptoManager

        cm = self.get_crypto_manager(getattr(args, 'secret_key', None), getattr(args, 'stdin_data', None),
            class_=ServerCryptoManager)

        fs_sql_config = self.get_sql_ini('sql.conf')
        repo_dir = join(self.component_dir, 'config', 'repo')
        server_conf_path = join(repo_dir, 'server.conf')
        secrets_conf_path = ConfigObj(join(repo_dir, 'secrets.conf'), use_zato=False)
        server_conf = ConfigObj(server_conf_path, zato_secrets_conf=secrets_conf_path, zato_crypto_manager=cm, use_zato=True)

        self.check_sql_odb_server_scheduler(cm, server_conf, fs_sql_config, False)

        if getattr(args, 'ensure_no_pidfile', False):
            self.ensure_no_pidfile('server')

        if getattr(args, 'check_server_port_available', False):
            self.on_server_check_port_available(server_conf)

# ################################################################################################################################

    def _on_lb(self, args, *ignored_args, **ignored_kwargs):
        """ Per-component check for the load-balancer: pidfile, agent port,
        frontend bind port and HAProxy config validity.
        """
        # stdlib
        from os.path import join

        # Zato
        from zato.common.haproxy import validate_haproxy_config

        self.ensure_no_pidfile('lb-agent')
        repo_dir = join(self.config_dir, 'repo')
        lba_conf = self.get_json_conf('lb-agent.conf')
        lb_conf_string = open_r(join(repo_dir, 'zato.config')).read()

        # Load-balancer's agent
        self.ensure_json_config_port_free('Load balancer\'s agent', None, lba_conf)

        # Load balancer itself - find its bind address by the marker comment in the HAProxy config.
        lb_address = None
        marker = 'ZATO frontend front_http_plain:bind'
        lb_conf = lb_conf_string.splitlines()
        for line in lb_conf:
            if marker in line:
                lb_address = line.split(marker)[0].strip().split()[1]
                break

        if not lb_address:
            raise Exception('Load balancer check failed. Marker line not found `{}`.'.format(marker))

        _, port = lb_address.split(':')
        self.ensure_port_free('Load balancer', int(port), lb_address)

        validate_haproxy_config(lb_conf_string, lba_conf['haproxy_command'])

# ################################################################################################################################

    def _on_web_admin(self, args, *ignored_args, **ignored_kwargs):
        """ Per-component check for web-admin: SQL ODB, pidfile and port availability. """
        # stdlib
        from os.path import join

        # Zato
        from zato.common.crypto.api import WebAdminCryptoManager
        from zato.common.crypto.secret_key import resolve_secret_key

        repo_dir = join(self.component_dir, 'config', 'repo')

        secret_key = getattr(args, 'secret_key', None)
        secret_key = resolve_secret_key(secret_key)

        self.check_sql_odb_web_admin(
            self.get_crypto_manager(secret_key, getattr(args, 'stdin_data', None), WebAdminCryptoManager),
            self.get_json_conf('web-admin.conf', repo_dir))

        self.ensure_no_pidfile('web-admin')
        self.ensure_json_config_port_free('Web admin', 'web-admin.conf')

# ################################################################################################################################

    def _on_scheduler(self, args, *ignored_args, **ignored_kwargs):
        """ Per-component check for schedulers: optional SQL ODB and pidfile. """
        # stdlib
        from os.path import join

        # Zato
        from zato.common.crypto.api import SchedulerCryptoManager
        from zato.common.ext.configobj_ import ConfigObj

        repo_dir = join(self.component_dir, 'config', 'repo')
        server_conf_path = join(repo_dir, 'scheduler.conf')

        cm = self.get_crypto_manager(getattr(args, 'secret_key', None), getattr(args, 'stdin_data', None), SchedulerCryptoManager)
        secrets_conf_path = ConfigObj(join(repo_dir, 'secrets.conf'), use_zato=False)
        server_conf = ConfigObj(server_conf_path, zato_secrets_conf=secrets_conf_path, zato_crypto_manager=cm, use_zato=True)

        # ODB is optional for schedulers
        if 'odb' in server_conf:
            fs_sql_config = self.get_sql_ini('sql.conf')
            self.check_sql_odb_server_scheduler(cm, server_conf, fs_sql_config)

        self.ensure_no_pidfile('scheduler')
# ################################################################################################################################
| 14,742
|
Python
|
.py
| 258
| 45.643411
| 130
| 0.506052
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,472
|
common.py
|
zatosource_zato/code/zato-cli/src/zato/cli/common.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# Zato
from zato.cli import ServerAwareCommand
from zato.common.api import CommonObject, NotGiven
# ################################################################################################################################
# ################################################################################################################################
if 0:
from argparse import Namespace
from zato.common.typing_ import strdict, strdictnone, strlist, strnone
Namespace = Namespace
# ################################################################################################################################
# ################################################################################################################################
class DeleteCommon(ServerAwareCommand):
    """ A base class for CLI commands that delete objects.
    """
    # This will be populated by subclasses
    object_type = CommonObject.Invalid

    opts = [
        {'name':'--id', 'help':'An exact ID of an object to delete', 'required':False},
        {'name':'--id-list', 'help':'A list of object IDs to delete', 'required':False},
        {'name':'--name', 'help':'An exact name of an object to delete', 'required':False},
        {'name':'--name-list','help':'List of objects to delete', 'required':False},
        {'name':'--pattern', 'help':'All objects with names that contain this pattern', 'required':False},
        {'name':'--path', 'help':'Path to a Zato server', 'required':False},
    ]

    def execute(self, args:'Namespace'):
        """ Builds a deletion request from the first matching CLI option
        and invokes the common delete-objects service with it.
        """
        # stdlib
        import sys

        # This will be built based on the option provided by user
        request = {
            'object_type': self.object_type,
        }

        options = ['--id', '--id-list', '--name', '--name-list', '--pattern']

        # Use the first selector option actually given on input
        for name in options:
            arg_attr = name.replace('--', '')
            arg_attr = arg_attr.replace('-', '_')
            value = getattr(args, arg_attr, None)
            if value:
                request[arg_attr] = value
                break
        else:
            # No selector was provided so we do not know what to delete.
            # Note: the previous check (`if not request:`) could never be True
            # because 'object_type' is always present in the request.
            options = ', '.join(options)
            self.logger.warn(f'Missing input. One of the following is expected: {options}')
            sys.exit(self.SYS_ERROR.PARAMETER_MISSING)

        # Our service to invoke
        service = 'zato.common.delete-objects'

        # Invoke the service and log the response it produced
        self._invoke_service_and_log_response(service, request)
# ################################################################################################################################
# ################################################################################################################################
class CreateCommon(ServerAwareCommand):
    """ A base class for CLI commands that create objects.
    """
    # These will be populated by subclasses
    object_type = CommonObject.Invalid
    prefix = CommonObject.Prefix_Invalid

    opts = [
        {'name':'--count', 'help':'How many objects to create', 'required':False},
        {'name':'--prefix', 'help':'Prefix that each of the topics to be created will have', 'required':False},
        {'name':'--path', 'help':'Path to a Zato server', 'required':False},
        {'name':'--endpoints-per-topic',
            'help':'How many endpoints to create per each topic', 'required':False, 'default':3, 'type':int},
        {'name':'--messages-per-pub',
            'help':'How many messages to publish from each publisher', 'required':False, 'default':10, 'type':int},
        {'name':'--msg-size',
            'help':'How big, in kilobytes, each published message should be', 'required':False, 'default':3, 'type':int},
        {'name':'--needs-stdout',
            'help':'Whether to log default messages to stdout', 'required':False, 'default':NotGiven},
    ]

    def invoke_common(
        self,
        service:'str',
        object_type: 'strnone',
        name_list:'strlist',
        *,
        args:'Namespace | None'=None,
        needs_stdout:'bool'=True,
        initial_data:'strdictnone'=None,
    ) -> 'strdict':
        """ Builds a common request for the given object type and invokes the service with it. """

        # Zato
        from zato.common.util import as_bool

        request:'strdict' = {
            'object_type': object_type,
            'name_list': name_list,
            'initial_data': initial_data,
        }

        # An explicit command-line flag, if one was given, overrides the needs_stdout parameter
        if args:
            stdout_flag = args.needs_stdout
            if stdout_flag:
                needs_stdout = as_bool(stdout_flag)

        # Invoke the service and log the response it produced
        return self._invoke_service_and_log_response(service, request, needs_stdout=needs_stdout)

    def invoke_common_create(self,
        object_type: 'strnone',
        name_list:'strlist',
        *,
        args:'Namespace | None'=None,
        needs_stdout:'bool'=True,
        initial_data:'strdictnone'=None,
    ) -> 'strdict':
        """ Invokes the common create-objects service for the given names. """
        return self.invoke_common(
            'zato.common.create-objects', object_type, name_list,
            args=args, needs_stdout=needs_stdout, initial_data=initial_data)

    def execute(self, args:'Namespace') -> 'strdict':
        """ Creates `--count` objects named `<prefix><zero-padded index>`. """

        # Local variables
        default_count = 1000

        # Read the parameters from the command line or fall back on the defaults
        count = int(args.count or default_count)
        prefix = args.prefix or self.prefix

        # Zero-fill each index to the width of the largest one
        digits = len(str(count))

        # The list of names to create
        name_list = ['{}{}'.format(prefix, str(idx).zfill(digits)) for idx in range(1, count + 1)]

        # Invoke the service
        return self.invoke_common_create(self.object_type, name_list, args=args)
# ################################################################################################################################
# ################################################################################################################################
| 6,899
|
Python
|
.py
| 131
| 44.664122
| 130
| 0.468114
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,473
|
stop.py
|
zatosource_zato/code/zato-cli/src/zato/cli/stop.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# Zato
from zato.cli import ManageCommand
from zato.common.util.open_ import open_r
# ################################################################################################################################
# ################################################################################################################################
class Stop(ManageCommand):
    """ Stops a Zato component
    """
    def signal(self, component_name, signal_name, signal_code, pidfile=None, component_dir=None, ignore_missing=False,
        needs_logging=True):
        """ Sends a signal to a process known by its pidfile.
        """
        # stdlib
        import os
        import sys

        component_dir = component_dir or self.component_dir
        pidfile = pidfile or os.path.join(component_dir, 'pidfile')

        if not os.path.exists(pidfile):

            # No such pidfile - it may be a connector process and these are optional,
            # in this case, we just simply return because there is not anything else for us to do.
            if ignore_missing:
                return

            self.logger.error('No pidfile found in `%s`', pidfile)
            sys.exit(self.SYS_ERROR.FILE_MISSING)

        pid_text = open_r(pidfile).read().strip()

        if not pid_text:
            self.logger.error('Empty pidfile `%s`, did not attempt to stop `%s`', pidfile, component_dir)
            sys.exit(self.SYS_ERROR.NO_PID_FOUND)

        pid_number = int(pid_text)

        if needs_logging:
            self.logger.debug('Sending `%s` to pid `%s` (found in `%s`)', signal_name, pid_number, pidfile)

        # Deliver the signal and clean up the pidfile afterwards
        os.kill(pid_number, signal_code)
        os.remove(pidfile)

        if needs_logging:
            self.logger.info('%s `%s` shutting down', component_name, component_dir)

    def _on_server(self, *ignored):
        """ Stops a server along with any of its optional connector subprocesses. """
        # stdlib
        import os
        import signal

        # Connector pidfiles are optional, hence ignore_missing=True below
        connectors = (
            ('IBM MQ connector', 'pidfile-ibm-mq'),
            ('SFTP connector', 'pidfile-sftp'),
            ('Events connector', 'pidfile-zato-events'),
        )

        for connector_name, pidfile_name in connectors:
            connector_pidfile = os.path.join(self.component_dir, pidfile_name)
            self.signal(connector_name, 'SIGTERM', signal.SIGTERM, connector_pidfile, ignore_missing=True, needs_logging=False)

        self.signal('Server', 'SIGTERM', signal.SIGTERM)

    def stop_haproxy(self, component_dir):
        """ Stops a load-balancer and, if it left a pidfile behind, its agent too. """
        # stdlib
        import os
        import signal

        # Zato
        from zato.common.util.api import get_haproxy_agent_pidfile

        # We much check whether the pidfile for agent exists, it won't if --fg was given on input in which case
        # Ctrl-C must have closed the agent thus we cannot send any signal.
        lb_agent_pidfile = get_haproxy_agent_pidfile(component_dir)
        if os.path.exists(lb_agent_pidfile):
            self.signal('Load-balancer\'s agent', 'SIGTERM', signal.SIGTERM, lb_agent_pidfile, component_dir)

        self.signal('Load-balancer', 'SIGTERM', signal.SIGTERM, None, component_dir)

    def _on_lb(self, *ignored):
        self.stop_haproxy(self.component_dir)

    def _on_web_admin(self, *ignored):
        # stdlib
        import signal
        self.signal('Web admin', 'SIGTERM', signal.SIGTERM)

    def _on_scheduler(self, *ignored):
        # stdlib
        import signal
        self.signal('Scheduler', 'SIGTERM', signal.SIGTERM)
# ################################################################################################################################
# ################################################################################################################################
| 4,744
|
Python
|
.py
| 81
| 50.691358
| 130
| 0.486381
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,474
|
info.py
|
zatosource_zato/code/zato-cli/src/zato/cli/info.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# Zato
from zato.cli import ManageCommand
from zato.common.api import INFO_FORMAT
DEFAULT_COLS_WIDTH = '30,90'
class Info(ManageCommand):
    """ Shows detailed information regarding a chosen Zato component
    """
    # Changed in 2.0 - --json replaced with --format
    opts = [
        {'name':'--format', 'help':'Output format, must be one of text, json or yaml, default: {}'.format(INFO_FORMAT.TEXT),
            'default':INFO_FORMAT.TEXT},
        {'name':'--cols_width', 'help':'A list of columns width to use for the table output, default: {}'.format(DEFAULT_COLS_WIDTH)}
    ]

    def _on_server(self, args):
        """ Collects and prints out information about the component under args.path. """
        # stdlib
        import os

        # yaml
        import yaml

        # Zato
        from zato.common.component_info import format_info, get_info

        class _Dumper(yaml.SafeDumper):
            def represent_none(self, data):
                return self.represent_scalar('tag:yaml.org,2002:null', '')

        os.chdir(self.original_dir)

        component_path = os.path.abspath(args.path)
        info = get_info(component_path, args.format)

        # Fall back on the default column widths if none were given on input
        cols_width = args.cols_width or DEFAULT_COLS_WIDTH

        self.logger.info(format_info(info, args.format, cols_width, _Dumper))

    # All component types share the same implementation
    _on_scheduler = _on_lb = _on_web_admin = _on_server
| 1,462
|
Python
|
.py
| 33
| 37.969697
| 133
| 0.659605
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,475
|
crypto.py
|
zatosource_zato/code/zato-cli/src/zato/cli/crypto.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2021, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# Zato
from zato.cli import ManageCommand, ZatoCommand
from zato.common.crypto.api import CryptoManager
# ################################################################################################################################
# ################################################################################################################################
class CreateSecretKey(ZatoCommand):
    """ Creates a new secret key.
    """
    def execute(self, args):
        # Zato
        from zato.common.crypto.api import CryptoManager

        # Generate a fresh key and print it out in text form
        key = CryptoManager.generate_key()
        self.logger.info(key.decode('utf8'))
# ################################################################################################################################
# ################################################################################################################################
class Encrypt(ManageCommand):
    """ Encrypts secrets using a secret key.
    """
    allow_empty_secrets = False

    opts = [
        {'name':'--data', 'help':'Data to encrypt'},
        {'name':'--secret-key', 'help':'Secret key to encrypt data with'},
        {'name':'--path', 'help':'Path to a Zato component where the secret key can be found'},
    ]

    def execute(self, args):
        """ Encrypts args.data, either with a component's key (when --path is given)
        or with the key provided via --secret-key.
        """
        # We need to know what to encrypt
        if not args.data:
            raise ValueError('Parameter --data is required')

        if args.path:
            # We are encrypting using a given component's secret key ..
            super().execute(args)
        else:
            # .. otherwise, we use the key we were given on input
            manager = CryptoManager(secret_key=args.secret_key)
            encrypted = manager.encrypt(args.data)
            self.logger.info(encrypted.decode('utf8'))

    def _on_web_admin(self, args):
        # Zato
        from zato.common.crypto.api import WebAdminCryptoManager
        self._encrypt(WebAdminCryptoManager, args)

    def _on_server(self, args):
        # Zato
        from zato.common.crypto.api import ServerCryptoManager
        self._encrypt(ServerCryptoManager, args)

    def _on_scheduler(self, args):
        # Zato
        from zato.common.crypto.api import SchedulerCryptoManager
        self._encrypt(SchedulerCryptoManager, args)
# ################################################################################################################################
# ################################################################################################################################
class Decrypt(ManageCommand):
    """ Decrypts secrets using a secret key.
    """
    allow_empty_secrets = False

    opts = [
        {'name':'--data', 'help':'Data to encrypt'},
        {'name':'--secret-key', 'help':'Secret key to encrypt data with'},
        {'name':'--path', 'help':'Path to a Zato component where the secret key can be found'},
    ]

    def execute(self, args):
        """ Decrypts args.data, either with a component's key (when --path is given)
        or with the key provided via --secret-key.
        """
        # We need to know what to decrypt
        if not args.data:
            raise ValueError('Parameter --data is required')

        if args.path:
            # We are decrypting using a given component's secret key ..
            super().execute(args)
        else:
            # .. otherwise, we use the key we were given on input
            manager = CryptoManager(secret_key=args.secret_key or '')
            self.logger.info(manager.decrypt(args.data))

    def _decrypt(self, class_, args):
        """ Builds a component-specific crypto manager and decrypts args.secret with it. """
        # stdlib
        import os

        os.chdir(self.original_dir)

        repo_dir = os.path.abspath(os.path.join(args.path, 'config', 'repo'))
        manager = class_(repo_dir=repo_dir)

        self.logger.info(manager.decrypt(args.secret))

    def _on_web_admin(self, args):
        # Zato
        from zato.common.crypto.api import WebAdminCryptoManager
        self._decrypt(WebAdminCryptoManager, args)

    def _on_server(self, args):
        # Zato
        from zato.common.crypto.api import ServerCryptoManager
        self._decrypt(ServerCryptoManager, args)

    def _on_scheduler(self, args):
        # Zato
        from zato.common.crypto.api import SchedulerCryptoManager
        self._decrypt(SchedulerCryptoManager, args)
# ################################################################################################################################
# ################################################################################################################################
class GetHashRounds(ZatoCommand):
    """ Computes PBKDF2-SHA512 hash rounds.
    """
    opts = [
        {'name':'--json', 'help':'Output full info in JSON', 'action':'store_true'},
        {'name':'--rounds-only', 'help':'Output only rounds in plain text', 'action':'store_true'},
        {'name':'goal', 'help':'How long a single hash should take in seconds (e.g. 0.2)'},
    ]

    def allow_empty_secrets(self):
        return True

    def header_func(self, cpu_info, goal):
        """ Logs a header with algorithm, CPU and goal details before the computation starts. """
        for line in (
            '-' * 70,
            'Algorithm ........... PBKDF2-SHA512, salt size 64 bytes (512 bits)',
            'CPU brand ........... {}'.format(cpu_info['brand']),
            'CPU frequency........ {}'.format(cpu_info['hz_actual']),
            'Goal ................ {} sec'.format(goal),
            '-' * 70,
        ):
            self.logger.info(line)

    def footer_func(self, rounds_per_second_str, rounds_str):
        """ Logs a footer with the measured performance and the resulting rounds count. """
        for line in (
            '-' * 70,
            'Performance ......... {} rounds/s'.format(rounds_per_second_str),
            'Required for goal ... {} rounds'.format(rounds_str),
            '-' * 70,
        ):
            self.logger.info(line)

    def progress_func(self, current_per_cent):
        """ Logs progress, capping the displayed value at 100. """
        shown = min(current_per_cent, 100)
        self.logger.info('Done % .............. {:<3}'.format(shown))

    def execute(self, args):
        """ Measures hashing speed and reports how many rounds are needed to meet the goal. """
        # Zato
        from zato.common.crypto.api import CryptoManager
        from zato.common.json_internal import dumps

        goal = round(float(args.goal), 2)

        # Machine-readable output modes suppress the human-oriented callbacks
        if args.json or args.rounds_only:
            header_func = progress_func = footer_func = None
        else:
            header_func = self.header_func
            progress_func = self.progress_func
            footer_func = self.footer_func

        info = CryptoManager.get_hash_rounds(goal, header_func, progress_func, footer_func)

        if args.json:
            self.logger.info(dumps(info))
        elif args.rounds_only:
            self.logger.info(info['rounds'])
# ################################################################################################################################
# ################################################################################################################################
| 8,758
|
Python
|
.py
| 152
| 50.493421
| 130
| 0.395011
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,476
|
basic_auth.py
|
zatosource_zato/code/zato-cli/src/zato/cli/security/basic_auth.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import sys
from uuid import uuid4
# Zato
from zato.cli import ServerAwareCommand
from zato.common.util.api import fs_safe_now
# ################################################################################################################################
# ################################################################################################################################
if 0:
from argparse import Namespace
Namespace = Namespace
# ################################################################################################################################
# ################################################################################################################################
class CreateDefinition(ServerAwareCommand):
    """ Creates a new Basic Auth definition.
    """
    allow_empty_secrets = True # type: ignore

    opts = [
        {'name':'--name', 'help':'Name of the definition to create', 'required':False,},
        {'name':'--realm', 'help':'HTTP realm of the definition', 'required':False,},
        {'name':'--username', 'help':'Username for the definition to use', 'required':False},
        {'name':'--password', 'help':'Password for the definition to use', 'required':False},
        {'name':'--is-active', 'help':'Should the definition be active upon creation', 'required':False},
        {'name':'--path', 'help':'Path to a Zato server', 'required':True},
    ]

    def execute(self, args:'Namespace'):
        """ Builds the definition's attributes, auto-generating any missing ones, and creates it. """
        # Zato
        from zato.common.util.cli import BasicAuthManager

        # Fall back on auto-generated values for anything not given on input
        name = getattr(args, 'name', None) or 'auto.basic-auth.name.' + fs_safe_now()
        realm = getattr(args, 'realm', None) or 'auto.basic-auth.realm.' + fs_safe_now()
        username = getattr(args, 'username', None) or 'auto.basic-auth.username.' + fs_safe_now()
        password = getattr(args, 'password', None) or 'auto.basic-auth.password.' + uuid4().hex

        # Definitions are active by default
        is_active = getattr(args, 'is_active', True)
        is_active = True if is_active is None else is_active

        # Use a reusable object to create a new definition and set its password
        manager = BasicAuthManager(self, name, is_active, username, realm, password)
        _ = manager.create(needs_stdout=True)
# ################################################################################################################################
# ################################################################################################################################
class ChangePassword(ServerAwareCommand):
    """ Changes a Basic Auth definition's password.
    """
    allow_empty_secrets = True # type: ignore

    opts = [
        {'name':'--name', 'help':'Name of the definition to create', 'required':False,},
        {'name':'--password', 'help':'Password for the definition to use', 'required':False},
        {'name':'--path', 'help':'Path to a Zato server', 'required':True},
    ]

    def execute(self, args:'Namespace'):
        """ Changes the password of the definition given by name, auto-generating missing input. """
        # Zato
        from zato.common.util.cli import BasicAuthManager

        # Fall back on auto-generated values for anything not given on input
        name = getattr(args, 'name', None) or 'auto.basic-auth.name.' + fs_safe_now()
        password = getattr(args, 'password', None) or 'auto.basic-auth.password.' + uuid4().hex

        # Use a reusable object to create a new definition and set its password
        manager = BasicAuthManager(self, name, is_active=True, username='', realm='', password=password)
        _ = manager.change_password(needs_stdout=False)
# ################################################################################################################################
# ################################################################################################################################
class DeleteDefinition(ServerAwareCommand):
    """ Deletes a Basic Auth definition.
    """
    opts = [
        {'name':'--id', 'help':'ID of the definition to delete', 'required':False},
        {'name':'--name', 'help':'Name of the definition to delete', 'required':False},
        {'name':'--path', 'help':'Path to a Zato server', 'required':True},
    ]

    def execute(self, args:'Namespace'):
        """ Deletes a definition by its ID or name - at least one of them is required. """
        def_id = getattr(args, 'id', None)
        def_name = getattr(args, 'name', None)

        # Make sure we have input data to delete the definition by
        if not (def_id or def_name):
            self.logger.warn('Cannot continue. To delete a Basic Auth definition, either --id or --name is required on input.')
            sys.exit(self.SYS_ERROR.INVALID_INPUT)

        # API service to invoke, along with its request
        self._invoke_service_and_log_response('zato.security.basic-auth.delete', {
            'id': def_id,
            'name': def_name,
        })
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':

    # stdlib
    from argparse import Namespace
    from os import environ

    # A minimal set of arguments to run the command with
    cli_args = Namespace()
    cli_args.verbose = True
    cli_args.store_log = False
    cli_args.store_config = False
    cli_args.path = environ['ZATO_SERVER_BASE_DIR']

    CreateDefinition(cli_args).run(cli_args)
# ################################################################################################################################
# ################################################################################################################################
| 5,917
|
Python
|
.py
| 108
| 48.407407
| 130
| 0.462405
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,477
|
__init__.py
|
zatosource_zato/code/zato-cli/src/zato/cli/security/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
| 154
|
Python
|
.py
| 5
| 29.4
| 64
| 0.687075
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,478
|
api_key.py
|
zatosource_zato/code/zato-cli/src/zato/cli/security/api_key.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import sys
from uuid import uuid4
# Zato
from zato.cli import ServerAwareCommand
from zato.common.util.api import fs_safe_now
# ################################################################################################################################
# ################################################################################################################################
if 0:
from argparse import Namespace
Namespace = Namespace
# ################################################################################################################################
# ################################################################################################################################
class CreateDefinition(ServerAwareCommand):
    """ Creates a new API key.
    """
    # NOTE(review): despite living in api_key.py, this command invokes the
    # zato.security.basic-auth.* services and uses auto.basic-auth.* names -
    # confirm whether this is intentional.
    allow_empty_secrets = True

    opts = [
        {'name':'--name', 'help':'Name of the definition to create', 'required':False,},
        {'name':'--realm', 'help':'HTTP realm of the definition', 'required':False,},
        {'name':'--username', 'help':'Username for the definition to use', 'required':False},
        {'name':'--password', 'help':'Password for the definition to use', 'required':False},
        {'name':'--is-active', 'help':'Should the definition be active upon creation', 'required':False},
        {'name':'--path', 'help':'Path to a Zato server', 'required':True},
    ]

    def execute(self, args:'Namespace'):
        """ Creates a new definition and then sets its password in a follow-up call. """

        # Fall back on auto-generated values for anything not given on input
        name = getattr(args, 'name', None) or 'auto.basic-auth.name.' + fs_safe_now()
        realm = getattr(args, 'realm', None) or 'auto.basic-auth.realm.' + fs_safe_now()
        username = getattr(args, 'username', None) or 'auto.basic-auth.username.' + fs_safe_now()
        password = getattr(args, 'password', None) or 'auto.basic-auth.password.' + uuid4().hex

        # Definitions are active by default
        is_active = getattr(args, 'is_active', True)
        is_active = True if is_active is None else is_active

        # This will create a new definition and, in the next step, we will change its password.
        self._invoke_service_and_log_response('zato.security.basic-auth.create', {
            'name': name,
            'realm': realm,
            'username': username,
            'password': password,
            'is_active': is_active,
        })

        # Change the newly created definition's password
        self._invoke_service_and_log_response('zato.security.basic-auth.change-password', {
            'name': name,
            'password1': password,
            'password2': password,
        }, needs_stdout=False)
# ################################################################################################################################
# ################################################################################################################################
class DeleteDefinition(ServerAwareCommand):
    """ Deletes a Basic Auth definition.
    """
    opts = [
        {'name':'--id', 'help':'ID of the channel to create', 'required':False},
        {'name':'--name', 'help':'Name of the channel to create', 'required':False},
        {'name':'--path', 'help':'Path to a Zato server', 'required':True},
    ]

    def execute(self, args:'Namespace'):
        """ Deletes a definition by its ID or name - at least one of them is required. """
        def_id = getattr(args, 'id', None)
        def_name = getattr(args, 'name', None)

        # Make sure we have input data to delete the channel by
        if not (def_id or def_name):
            self.logger.warn('Cannot continue. To delete a Basic Auth definition, either --id or --name is required on input.')
            sys.exit(self.SYS_ERROR.INVALID_INPUT)

        # API service to invoke, along with its request
        self._invoke_service_and_log_response('zato.security.basic-auth.delete', {
            'id': def_id,
            'name': def_name,
        })
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':

    # stdlib
    from argparse import Namespace
    from os import environ

    # A minimal set of arguments to run the command with
    cli_args = Namespace()
    cli_args.verbose = True
    cli_args.store_log = False
    cli_args.store_config = False
    cli_args.path = environ['ZATO_SERVER_BASE_DIR']

    CreateDefinition(cli_args).run(cli_args)
# ################################################################################################################################
# ################################################################################################################################
| 5,373
|
Python
|
.py
| 104
| 44.653846
| 130
| 0.469633
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,479
|
topic.py
|
zatosource_zato/code/zato-cli/src/zato/cli/pubsub/topic.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# Zato
from zato.cli import ServerAwareCommand
from zato.cli.common import CreateCommon, DeleteCommon
from zato.common.api import GENERIC, CommonObject, PUBSUB as Common_PubSub
from zato.common.test.config import TestConfig
from zato.common.typing_ import cast_
# ################################################################################################################################
# ################################################################################################################################
if 0:
from argparse import Namespace
from zato.common.typing_ import anydict, anylist, strdict, strlist
Namespace = Namespace
# ################################################################################################################################
# ################################################################################################################################
_opaque_attr = GENERIC.ATTR_NAME
class Config:
    # Default set of topic attribute keys - presumably the fields displayed when
    # topic details are shown; confirm against the commands that consume this class.
    DefaultTopicKeys = ('id', 'name', 'current_depth_gd', 'last_pub_time', 'last_pub_msg_id', 'last_endpoint_name',
        'last_pub_server_name', 'last_pub_server_pid', 'last_pub_has_gd')
# ################################################################################################################################
# ################################################################################################################################
class CreateTopic(ServerAwareCommand):
    """ Creates a new publish/subscribe topic.
    """
    opts = [
        {'name':'--name', 'help':'Name of the topic to create', 'required':False},
        {'name':'--gd', 'help':'Should the topic use Guaranteed Delivery', 'required':False},
        {'name':'--is-internal', 'help':'Is it a topic internal to the platform', 'required':False},
        {'name':'--is-api-sub-allowed', 'help':'Can applications subscribe to the topic via a public API', 'required':False},
        {'name':'--limit-retention', 'help':'Limit retention time in topic to that many seconds', 'required':False},
        {'name':'--limit-sub-inactivity',
            'help':'After how many seconds an inactive subscription will be deleted', 'required':False},
        {'name':'--limit-message-expiry', 'help':'Limit max. message expiration time to that many seconds', 'required':False},
        {'name':'--path', 'help':'Path to a Zato server', 'required':False},
    ]

# ################################################################################################################################

    def execute(self, args:'Namespace'):
        """ Builds a request out of the command-line arguments and invokes
        the zato.pubsub.topic.create service on the server pointed to by --path.
        """

        # Zato
        from zato.common.api import PUBSUB
        from zato.common.util.file_system import fs_safe_now

        _default = PUBSUB.DEFAULT

        # Topic name will be generated if it is not given on input
        topic_name = getattr(args, 'name', None)
        has_gd = getattr(args, 'gd', False)
        is_internal = getattr(args, 'is_internal', None)
        is_api_sub_allowed = getattr(args, 'is_api_sub_allowed', True)

        # Fall back to platform-wide defaults for any limit not given explicitly
        limit_retention = getattr(args, 'limit_retention', 0) or _default.LimitTopicRetention
        limit_sub_inactivity = getattr(args, 'limit_sub_inactivity', 0) or _default.LimitSubInactivity
        limit_message_expiry = getattr(args, 'limit_message_expiry', 0) or _default.LimitMessageExpiry

        # Command-line values arrive as strings - make sure the limits are integers
        limit_retention = int(limit_retention)
        limit_sub_inactivity = int(limit_sub_inactivity)
        limit_message_expiry = int(limit_message_expiry)

        # Auto-generate a unique topic name if none was given on input
        if not topic_name:
            topic_name = '/auto/topic.{}'.format(fs_safe_now())

        # API service to invoke
        service = 'zato.pubsub.topic.create'

        # API request to send - non-limit parameters use platform defaults
        request = {
            'name': topic_name,
            'is_active': True,
            'is_internal': is_internal,
            'has_gd': has_gd,
            'is_api_sub_allowed': is_api_sub_allowed,
            'max_depth_gd': _default.TOPIC_MAX_DEPTH_GD,
            'max_depth_non_gd': _default.TOPIC_MAX_DEPTH_NON_GD,
            'depth_check_freq': _default.DEPTH_CHECK_FREQ,
            'pub_buffer_size_gd': _default.PUB_BUFFER_SIZE_GD,
            'task_sync_interval': _default.TASK_SYNC_INTERVAL,
            'task_delivery_interval': _default.TASK_DELIVERY_INTERVAL,
            'limit_retention': limit_retention,
            'limit_sub_inactivity': limit_sub_inactivity,
            'limit_message_expiry': limit_message_expiry,
        }

        self._invoke_service_and_log_response(service, request)
# ################################################################################################################################
# ################################################################################################################################
class GetTopics(ServerAwareCommand):
    """ Returns one or more topics by their name. Accepts partial names, e.g. "demo" will match "/my/demo/topic".
    """
    opts = [
        {'name':'--name', 'help':'Query to look up topics by', 'required':False},
        {'name':'--keys', 'help':'What JSON keys to return on put. Use "all" to return them all', 'required':False},
        {'name':'--path', 'help':'Path to a Zato server', 'required':False},
    ]

# ################################################################################################################################

    def execute(self, args:'Namespace'):
        """ Looks up topics matching --name and logs them, limited to the JSON keys requested via --keys.
        """

        # Make sure that keys are always a set object to look up information in
        args_keys = getattr(args, 'keys', '')
        if args_keys:
            if isinstance(args_keys, str):
                args_keys = args_keys.split(',')
                args_keys = [elem.strip() for elem in args_keys]
            has_all = 'all' in args_keys
            needs_default_keys = has_all or (not args_keys)
        else:
            has_all = False
            needs_default_keys = True
            args_keys = Config.DefaultTopicKeys

        args_keys = set(args_keys)

        def hook_func(data:'anydict') -> 'anylist':
            """ Reshapes the raw service response into a list of dicts containing only the requested keys.
            """
            # Response to produce ..
            out = [] # type: anylist

            # .. extract the top-level element ..
            data = data['zato_pubsub_topic_get_list_response']

            # .. go through each response element found ..
            for elem in data: # type: dict

                elem = cast_('anydict', elem)

                # Delete the opaque attributes container
                _ = elem.pop(_opaque_attr, '')

                # Make sure we return only the requested keys. Note that we build a new dictionary
                # because we want to preserve the order of DefaultConfigKeys. Also note that if all keys
                # are requested, for consistency, we still initially populate the dictionary
                # with keys from DefaultTopicKeys and only then do we proceed to the remaining keys.
                out_elem = {}

                # We may be returning the default keys
                if needs_default_keys:

                    # First, populate the default keys ..
                    for name in Config.DefaultTopicKeys:
                        value = elem.get(name)
                        out_elem[name] = value

                # .. otherwise, we return only the specifically requested keys
                for name, value in sorted(elem.items()):
                    if has_all or (name in args_keys):
                        if name not in out_elem:
                            out_elem[name] = value

                # .. we are finished with pre-processing of this element ..
                out.append(out_elem)

            # .. and return the output to our caller.
            return out

        # Our service to invoke
        service = 'zato.pubsub.topic.get-list'

        # Get a list of topics matching the input query, if any
        request = {
            'paginate': True,
            'needs_details': True,
            'query': getattr(args, 'name', ''),
        }

        # Invoke and log, pre-processing the data first with a hook function
        self._invoke_service_and_log_response(service, request, hook_func=hook_func)
# ################################################################################################################################
# ################################################################################################################################
class DeleteTopics(DeleteCommon):
    """ Deletes topics by input criteria.
    """
    # The shared delete logic in DeleteCommon only needs to know the object type to act on
    object_type = CommonObject.PubSub_Topic
# ################################################################################################################################
# ################################################################################################################################
class CreateTestTopics(CreateCommon):
    """ Creates multiple test topics, along with their security definitions,
    endpoints, subscriptions and test messages published to each topic.
    """
    object_type = CommonObject.PubSub_Topic

    # All auto-created test topics share this name prefix
    prefix = TestConfig.pubsub_topic_name_perf_auto_create

# ################################################################################################################################

    def _get_topics(self, data:'strdict') -> 'strlist':
        """ Extracts a sorted list of topic names out of a create-topics response.
        """
        # Extract the objects returned ..
        objects = data.get('objects') or []

        # .. build a sorted list of names to be returned ..
        topic_name_list = sorted(elem['name'] for elem in objects)

        # .. and return them to our caller.
        return topic_name_list

# ################################################################################################################################

    def _create_security(
        self,
        count:'int',
        prefix:'str',
        endpoint_type:'str',
    ) -> 'strlist':
        """ Creates *count* Basic Auth definitions for test endpoints and returns their names.
        """
        # A list of security definitions to create ..
        sec_name_list:'strlist' = []

        # .. generate their names ..
        for idx in range(count):
            sec_name = f'security-test-cli-{prefix}/sec/{endpoint_type}/{idx:04}'
            sec_name_list.append(sec_name)

        # .. do create the definitions now ..
        _ = self.invoke_common_create(CommonObject.Security_Basic_Auth, sec_name_list)

        return sec_name_list

# ################################################################################################################################

    def _create_endpoints(
        self,
        security_list: 'strlist',
        *,
        pub_allowed:'str'='',
        sub_allowed:'str'=''
    ) -> 'strlist':
        """ Creates one endpoint per security definition, with the given pub/sub topic patterns.
        """
        # Local variables
        endpoint_name_list:'strlist' = []

        # Build the newline-separated pattern string the endpoint API expects
        topic_patterns = ''

        if pub_allowed:
            topic_patterns += f'pub={pub_allowed}\n'

        if sub_allowed:
            topic_patterns += f'sub={sub_allowed}'

        for sec_name in security_list:

            name = 'endpoint-test-cli-' + sec_name
            endpoint_name_list.append(name)

            initial_data = {
                'security_name': sec_name,
                'topic_patterns': topic_patterns,
            }

            _ = self.invoke_common_create(CommonObject.PubSub_Endpoint, [name], initial_data=initial_data)

        return endpoint_name_list

# ################################################################################################################################

    def _create_subscriptions(self, sub_name_list:'strlist', topic:'str') -> 'None':
        """ Subscribes each of the named endpoints to the given topic, using pull-style delivery.
        """
        # Should all subscriptions for this endpoint be deleted before creating this one
        should_delete_all = False

        for endpoint_name in sub_name_list:

            initial_data = {
                'topic_name': topic,
                'endpoint_name': endpoint_name,
                'delivery_method': Common_PubSub.DELIVERY_METHOD.PULL.id,
                'should_delete_all': should_delete_all,
            }

            _ = self.invoke_common_create(CommonObject.PubSub_Subscription, [], initial_data=initial_data)

# ################################################################################################################################

    def _publish_messages(self, pub_name_list:'strlist', topic:'str', messages_per_pub:'int') -> 'None':
        """ Publishes *messages_per_pub* GD messages to the topic from each publisher endpoint.
        """
        for msg_idx in range(1, messages_per_pub+1):
            for publisher_endpoint_name in pub_name_list:

                # Repeat the line to obtain a reasonably-sized test payload
                data = f'{publisher_endpoint_name}-msg-{msg_idx}\n' * 200

                initial_data = {
                    'topic_name': topic,
                    'endpoint_name': publisher_endpoint_name,
                    'data': data,
                    'has_gd': True,
                }

                _ = self.invoke_common_create(CommonObject.PubSub_Publish, [], initial_data=initial_data)

# ################################################################################################################################

    def execute(self, args:'Namespace') -> 'None':
        """ Creates the topics and then, per topic, the security definitions,
        publisher/subscriber endpoints, subscriptions and test messages.
        """

        # This call to our parent will create the topics ..
        create_topics_result = super().execute(args)

        # .. now, we can extract their names ..
        topic_list = self._get_topics(create_topics_result)

        for topic in topic_list:

            pub_sec_name_list = self._create_security(args.endpoints_per_topic, topic, 'pub')
            sub_sec_name_list = self._create_security(args.endpoints_per_topic, topic, 'sub')

            pub_name_list = self._create_endpoints(pub_sec_name_list, pub_allowed='/*')
            sub_name_list = self._create_endpoints(sub_sec_name_list, sub_allowed='/*')

            self._create_subscriptions(sub_name_list, topic)
            self._publish_messages(pub_name_list, topic, args.messages_per_pub)
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':

    # stdlib
    from argparse import Namespace
    from os import environ

    # Ad-hoc smoke test - runs one of the commands against the server
    # pointed to by the ZATO_SERVER_BASE_DIR environment variable.
    args = Namespace()
    args.verbose = True
    args.store_log = False
    args.store_config = False
    args.path = environ['ZATO_SERVER_BASE_DIR']

    command = DeleteTopics(args)
    command.run(args)

    """
    args = Namespace()
    args.keys = 'all'
    args.verbose = True
    args.store_log = False
    args.store_config = False
    args.path = environ['ZATO_SERVER_BASE_DIR']

    command = GetTopics(args)
    command.run(args)
    """

    """
    args = Namespace()
    args.verbose = True
    args.store_log = False
    args.store_config = False
    args.path = environ['ZATO_SERVER_BASE_DIR']

    command = CreateTopic(args)
    command.run(args)
    """
# ################################################################################################################################
# ################################################################################################################################
| 15,070
|
Python
|
.py
| 273
| 45.886447
| 130
| 0.473441
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,480
|
endpoint.py
|
zatosource_zato/code/zato-cli/src/zato/cli/pubsub/endpoint.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# Zato
from zato.cli import ServerAwareCommand
from zato.common.api import CommonObject, PUBSUB
from zato.common.util.api import fs_safe_now
from zato.cli.common import CreateCommon
# ################################################################################################################################
# ################################################################################################################################
if 0:
from argparse import Namespace
Namespace = Namespace
# ################################################################################################################################
# ################################################################################################################################
class CreateEndpoint(ServerAwareCommand):
    """ Creates a new pub/sub endpoint.
    """
    opts = [
        {'name':'--name', 'help':'Name of the endpoint to create', 'required':False,},
        {'name':'--role', 'help':'Role the endpoint should have', 'required':False},
        {'name':'--is-active', 'help':'Should the endpoint be active upon creation', 'required':False},
        {'name':'--topic-patterns', 'help':'A comma-separated list of topic patterns allowed', 'required':False},
        {'name':'--wsx-id', 'help':'An ID of a WebSocket channel if the endpoint is associated with one', 'required':False},
        {'name':'--path', 'help':'Path to a Zato server', 'required':True},
    ]

    def execute(self, args:'Namespace'):
        """ Builds a request out of the command-line arguments and invokes
        the zato.pubsub.endpoint.create service.
        """
        name = getattr(args, 'name', None)
        role = getattr(args, 'role', None)
        wsx_id = getattr(args, 'wsx_id', None)

        # An explicit None (as opposed to a missing attribute) still means active
        is_active = getattr(args, 'is_active', True)
        if is_active is None:
            is_active = True

        topic_patterns = getattr(args, 'topic_patterns', '')

        # If we do not have any patterns, it means that we want to assign the endpoint to all the topics possible.
        if not topic_patterns:
            topic_patterns = 'pub=/*,sub=/*'

        # The API expects one pattern per line
        topic_patterns = topic_patterns.split(',')
        topic_patterns = [elem.strip() for elem in topic_patterns]
        topic_patterns = '\n'.join(topic_patterns)

        # Generate a name if one is not given
        name = name or 'auto.pubsub.endpoint.' + fs_safe_now()

        # By default, endpoints can both publish and subscribe
        role = role or PUBSUB.ROLE.PUBLISHER_SUBSCRIBER.id

        # API service to invoke
        service = 'zato.pubsub.endpoint.create'

        # API request to send
        request = {
            'name': name,
            'role': role,
            'is_active': is_active,
            'topic_patterns': topic_patterns,
            'is_internal': False,
        }

        # A WebSocket channel ID on input implies a WebSockets-type endpoint
        if wsx_id:
            request['ws_channel_id'] = wsx_id
            request['endpoint_type'] = PUBSUB.ENDPOINT_TYPE.WEB_SOCKETS.id

        self._invoke_service_and_log_response(service, request)
# ################################################################################################################################
# ################################################################################################################################
class DeleteEndpoint(ServerAwareCommand):
    """ Deletes a pub/sub endpoint.
    """
    opts = [
        {'name':'--id', 'help':'ID of the endpoint to delete', 'required':True},
        {'name':'--path', 'help':'Path to a Zato server', 'required':True},
    ]

    def execute(self, args:'Namespace'):
        """ Invokes zato.pubsub.endpoint.delete with the endpoint ID given on input.
        """
        # Renamed from 'id' so as not to shadow the builtin id()
        endpoint_id = getattr(args, 'id', None)

        # API service to invoke
        service = 'zato.pubsub.endpoint.delete'

        # API request to send
        request = {
            'id': endpoint_id,
        }

        self._invoke_service_and_log_response(service, request)
# ################################################################################################################################
# ################################################################################################################################
class CreateEndpoints(CreateCommon):
    """ Creates multiple pub/sub endpoints.
    """
    # NOTE(review): this looks copy-pasted from the topics command - an endpoints
    # command would be expected to use CommonObject.PubSub_Endpoint. Confirm before changing.
    object_type = CommonObject.PubSub_Topic

    def execute(self, args:'Namespace') -> 'None':
        """ Delegates the actual creation to the common implementation.
        """
        # The leftover debug print statements (111/222 markers) that used to follow
        # this call were removed - the parent already handles any required output.
        _ = super().execute(args)
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':

    # stdlib
    from argparse import Namespace
    from os import environ

    # Ad-hoc smoke test - creates an endpoint attached to WebSocket channel 194,
    # then deletes endpoint 555, both on the server pointed to by ZATO_SERVER_BASE_DIR.
    create_args = Namespace()
    create_args.verbose = True
    create_args.store_log = False
    create_args.store_config = False
    create_args.wsx_id = 194
    create_args.path = environ['ZATO_SERVER_BASE_DIR']

    create_command = CreateEndpoint(create_args)
    create_command.run(create_args)

    delete_args = Namespace()
    delete_args.verbose = True
    delete_args.store_log = False
    delete_args.store_config = False
    delete_args.id = 555
    delete_args.path = environ['ZATO_SERVER_BASE_DIR']

    delete_command = DeleteEndpoint(delete_args)
    delete_command.run(delete_args)
# ################################################################################################################################
# ################################################################################################################################
| 5,747
|
Python
|
.py
| 114
| 43.842105
| 130
| 0.4537
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,481
|
__init__.py
|
zatosource_zato/code/zato-cli/src/zato/cli/pubsub/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
| 154
|
Python
|
.py
| 5
| 29.4
| 64
| 0.687075
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,482
|
cleanup.py
|
zatosource_zato/code/zato-cli/src/zato/cli/pubsub/cleanup.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# Zato
from zato.cli import ZatoCommand
# ################################################################################################################################
# ################################################################################################################################
if 0:
from argparse import Namespace
Namespace = Namespace
# ################################################################################################################################
# ################################################################################################################################
class Cleanup(ZatoCommand):
    """ Cleans up the pub/sub database.
    """
    opts = [
        {'name':'--subscriptions',
            'help':'Should unused subscriptions be deleted',
            'required':False, 'default':True},
        {'name':'--topics-without-subscribers',
            'help':'Should messages from topics without subscribers be deleted',
            'required':False, 'default':True},
        {'name':'--topics-with-max-retention-reached',
            'help':'Whether to delete messages whose max. retention time has been reached',
            'required':False, 'default':True},
        {'name':'--queues-with-expired-messages',
            'help':'Whether to delete messages whose expiration time has been reached',
            'required':False, 'default':True},
        {'name':'--path', 'help':'Local path to a Zato scheduler', 'required':True},
    ]

# ################################################################################################################################

    def execute(self, args:'Namespace'):
        """ Runs the scheduler's cleanup procedure with the four on/off flags taken from the command line.
        """

        # Zato
        from zato.common.util.api import as_bool
        from zato.scheduler.cleanup.core import run_cleanup

        clean_up_subscriptions = getattr(args, 'subscriptions', True)
        clean_up_topics_without_subscribers = getattr(args, 'topics_without_subscribers', True)
        clean_up_topics_with_max_retention_reached = getattr(args, 'topics_with_max_retention_reached', True)
        clean_up_queues_with_expired_messages = getattr(args, 'queues_with_expired_messages', True)

        # Command-line flags arrive as strings - normalize them to actual booleans
        clean_up_subscriptions = as_bool(clean_up_subscriptions)
        clean_up_topics_without_subscribers = as_bool(clean_up_topics_without_subscribers)
        clean_up_topics_with_max_retention_reached = as_bool(clean_up_topics_with_max_retention_reached)
        clean_up_queues_with_expired_messages = as_bool(clean_up_queues_with_expired_messages)

        _ = run_cleanup(
            clean_up_subscriptions,
            clean_up_topics_without_subscribers,
            clean_up_topics_with_max_retention_reached,
            clean_up_queues_with_expired_messages,
            scheduler_path = args.path
        )
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':

    # stdlib
    from argparse import Namespace
    from os import environ

    # Ad-hoc smoke test - runs the cleanup against the scheduler
    # pointed to by the ZATO_SCHEDULER_BASE_DIR environment variable.
    args = Namespace()
    args.verbose = True
    args.store_log = False
    args.store_config = False
    args.path = environ['ZATO_SCHEDULER_BASE_DIR']

    # while True:
    command = Cleanup(args)
    command.run(args, needs_sys_exit=False)
# ################################################################################################################################
# ################################################################################################################################
| 3,857
|
Python
|
.py
| 68
| 49.911765
| 130
| 0.454063
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,483
|
channel.py
|
zatosource_zato/code/zato-cli/src/zato/cli/rest/channel.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import sys
from uuid import uuid4
# Zato
from zato.cli import ServerAwareCommand
from zato.common.api import CONNECTION, ZATO_NONE
from zato.common.util.api import fs_safe_now
# ################################################################################################################################
# ################################################################################################################################
if 0:
from argparse import Namespace
from zato.common.typing_ import anytuple, stranydict
Namespace = Namespace
# ################################################################################################################################
# ################################################################################################################################
class Config:
    # Default service a new channel is mounted on if --service is not given
    ServiceName = 'pub.zato.ping'

    # Default audit-log size limits per message
    MaxBytesRequests = 500   # 0.5k because requests are usually shorter
    MaxBytesResponses = 5000 # 5k because responses are usually longer
# ################################################################################################################################
# ################################################################################################################################
class SecurityAwareCommand(ServerAwareCommand):
    """ A base class for CLI commands that may need to create security definitions
    (HTTP Basic Auth or API keys) alongside the objects they manage.
    """

    def _extract_credentials(self, name:'str', credentials:'str', needs_header:'bool') -> 'anytuple':
        """ Turns a --basic-auth / --api-key command-line value into a (username, password) pair.

        * 'true'  -> auto-generate credentials based on *name*
        * 'false' -> (None, None), i.e. no credentials at all
        * anything else is parsed as a 'username, password' pair
        """
        credentials_lower = credentials.lower()

        if credentials_lower == 'true':
            username = name
            value_type = 'key' if needs_header else 'password'
            password = 'api.{}.'.format(value_type) + uuid4().hex

            # If the username is represented through an HTTP header,
            # turn the value into one.
            if needs_header:

                # 'T' is included below because it was part of the timestamp,
                # e.g. auto.rest.channel.2022_03_26T19_47_12_191630.
                username = username.replace('.', '-').replace('_', '-').replace('T', '-')
                username = username.split('-')
                username = [elem.capitalize() for elem in username]
                username = 'X-' + '-'.join(username)

        elif credentials_lower == 'false':
            username, password = None, None

        else:
            _credentials = credentials.split(',')
            _credentials = [elem.strip() for elem in _credentials]
            username, password = _credentials

        return username, password

# ################################################################################################################################

    def _get_security_id(self, *, name:'str', basic_auth:'str', api_key:'str') -> 'stranydict':
        """ Creates a Basic Auth or API-key definition as needed and returns its details,
        including 'security_id' (ZATO_NONE when no credentials are required).
        """
        # Zato
        from zato.common.util.cli import APIKeyManager, BasicAuthManager

        out = {}

        if basic_auth:
            username, password = self._extract_credentials(name, basic_auth, False)
            manager = BasicAuthManager(self, name, True, username, 'API', password)
            response = manager.create()

            out['username'] = username
            out['password'] = password
            out['security_id'] = response['id']

        elif api_key:
            header, key = self._extract_credentials(name, api_key, True)
            manager = APIKeyManager(self, name, True, header, key)
            response = manager.create()

            out['header'] = header
            out['key'] = key
            out['security_id'] = response['id']

        else:
            # Use empty credentials to explicitly indicate that none are required
            out['username'] = None
            out['password'] = None
            out['security_id'] = ZATO_NONE

        # No matter what we had on input, we can return our output now.
        return out
# ################################################################################################################################
# ################################################################################################################################
class CreateChannel(SecurityAwareCommand):
    """ Creates a new REST channel.
    """
    opts = [
        {'name':'--name', 'help':'Name of the channel to create', 'required':False,},
        {'name':'--is-active', 'help':'Should the channel be active upon creation', 'required':False},
        {'name':'--url-path', 'help':'URL path to assign to the channel', 'required':False},
        {'name':'--service', 'help':'Service reacting to requests sent to the channel',
            'required':False, 'default':Config.ServiceName},
        {'name':'--basic-auth', 'help':'HTTP Basic Auth credentials for the channel', 'required':False},
        {'name':'--api-key', 'help':'API key-based credentials for the channel', 'required':False},
        {'name':'--store-requests', 'help':'How many requests to store in audit log',
            'required':False, 'default':0, 'type': int},
        {'name':'--store-responses', 'help':'How many responses to store in audit log',
            'required':False, 'default':0, 'type': int},
        {'name':'--max-bytes-requests', 'help':'How many bytes of each request to store',
            'required':False, 'default':500, 'type': int},
        {'name':'--max-bytes-responses', 'help':'How many bytes of each response to store',
            'required':False, 'default':500, 'type': int},
        {'name':'--path', 'help':'Path to a Zato server', 'required':True},
    ]

# ################################################################################################################################

    def execute(self, args:'Namespace'):
        """ Creates the channel, first creating its security definition if credentials were given on input.
        """
        name = getattr(args, 'name', None)
        url_path = getattr(args, 'url_path', None)
        channel_service = getattr(args, 'service', None) or Config.ServiceName
        basic_auth = getattr(args, 'basic_auth', '')
        api_key = getattr(args, 'api_key', '')
        store_requests = getattr(args, 'store_requests', 0)
        store_responses = getattr(args, 'store_responses', 0)
        max_bytes_requests = getattr(args, 'max_bytes_requests', None) or Config.MaxBytesRequests

        # Fixed: this used to read 'max_bytes_requests', which silently ignored --max-bytes-responses
        max_bytes_responses = getattr(args, 'max_bytes_responses', None) or Config.MaxBytesResponses

        # For later use
        now = fs_safe_now()

        # Assume that the channel should be active unless explicitly told otherwise;
        # an explicit None still means active.
        is_active = getattr(args, 'is_active', True)
        if is_active is None:
            is_active = True

        # Generate a name if one is not given
        name = name or 'auto.rest.channel.' + now

        # If we have no URL path, base it on the auto-generate name
        if not url_path:
            url_path = '/'+ name

        # Enable the audit log if told to
        is_audit_log_received_active = bool(store_requests)
        is_audit_log_sent_active = bool(store_responses)

        # Obtain the security ID based on input data, creating the definition if necessary.
        sec_name = 'auto.sec.' + now
        security_info = self._get_security_id(name=sec_name, basic_auth=basic_auth, api_key=api_key)
        security_id = security_info.pop('security_id')

        # API service to invoke
        service = 'zato.http-soap.create'

        # API request to send
        request = {
            'name': name,
            'url_path': url_path,
            'service': channel_service,
            'is_active': is_active,
            'connection': CONNECTION.CHANNEL,
            'security_id': security_id,
            'is_audit_log_received_active': is_audit_log_received_active,
            'is_audit_log_sent_active': is_audit_log_sent_active,
            'max_len_messages_received': store_requests,
            'max_len_messages_sent': store_responses,
            'max_bytes_per_message_received': max_bytes_requests,
            'max_bytes_per_message_sent': max_bytes_responses,
        }

        # Invoke the base service that creates a channel ..
        response = self._invoke_service(service, request)

        # .. update the response with the channel security definition's details ..
        response.update(security_info)

        # .. finally, log the response for the caller.
        self._log_response(response, needs_stdout=True)
# ################################################################################################################################
# ################################################################################################################################
class DeleteChannel(SecurityAwareCommand):
    """ Deletes a REST channel.
    """
    opts = [
        {'name':'--id', 'help':'ID of the channel to delete', 'required':False},
        {'name':'--name', 'help':'Name of the channel to delete', 'required':False},
        {'name':'--path', 'help':'Path to a Zato server', 'required':True},
    ]

    def execute(self, args:'Namespace'):
        """ Deletes the channel pointed to by --id or --name, logging the server's response.
        """
        # Renamed from 'id' so as not to shadow the builtin id()
        channel_id = getattr(args, 'id', None)
        name = getattr(args, 'name', None)

        # Make sure we have input data to delete the channel by
        if not (channel_id or name):
            # Logger.warn is a deprecated alias of Logger.warning - use the latter
            self.logger.warning('Cannot continue. To delete a REST channel, either --id or --name is required on input.')
            sys.exit(self.SYS_ERROR.INVALID_INPUT)

        # API service to invoke
        service = 'zato.http-soap.delete'

        # API request to send
        request = {
            'id': channel_id,
            'name': name,
            'connection': CONNECTION.CHANNEL,
            'should_raise_if_missing': False
        }

        self._invoke_service_and_log_response(service, request)
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':

    # stdlib
    from argparse import Namespace
    from os import environ

    # Ad-hoc smoke test - creates an API-key-secured channel on the server
    # pointed to by the ZATO_SERVER_BASE_DIR environment variable.
    now = fs_safe_now()

    username = 'cli.username.' + now
    password = 'cli.password.' + now

    args = Namespace()
    args.verbose = True
    args.store_log = False
    args.store_config = False
    args.service = Config.ServiceName
    # args.basic_auth = f'{username}, {password}'
    args.api_key = 'true'
    args.path = environ['ZATO_SERVER_BASE_DIR']

    command = CreateChannel(args)
    command.run(args)
# ################################################################################################################################
# ################################################################################################################################
| 10,879
|
Python
|
.py
| 199
| 46.125628
| 130
| 0.49138
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,484
|
__init__.py
|
zatosource_zato/code/zato-cli/src/zato/cli/rest/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
| 154
|
Python
|
.py
| 5
| 29.4
| 64
| 0.687075
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,485
|
setup.py
|
zatosource_zato/code/zato-broker/setup.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# flake8: noqa
from setuptools import setup, find_packages
# Version of the zato-broker package, kept in sync with the platform release
version = '3.2'

setup(
    name = 'zato-broker',
    version = version,
    author = 'Zato Source s.r.o.',
    author_email = 'info@zato.io',
    url = 'https://zato.io',
    package_dir = {'':'src'},
    packages = find_packages('src'),
    namespace_packages = ['zato'],
    zip_safe = False,
)
| 535
|
Python
|
.py
| 19
| 23.842105
| 64
| 0.617357
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,486
|
__init__.py
|
zatosource_zato/code/zato-broker/src/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# Declare this directory a namespace package so that multiple zato-* distributions
# can contribute modules to the same top-level package.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
__import__('pkg_resources').declare_namespace(__name__)
| 287
|
Python
|
.py
| 8
| 34.375
| 64
| 0.683636
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,487
|
__init__.py
|
zatosource_zato/code/zato-broker/src/zato/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
__import__('pkg_resources').declare_namespace(__name__)
| 287
|
Python
|
.py
| 8
| 34.375
| 64
| 0.683636
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,488
|
client.py
|
zatosource_zato/code/zato-broker/src/zato/broker/client.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2023, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import logging
from json import loads
from traceback import format_exc
# gevent
from gevent import sleep, spawn
# orjson
from orjson import dumps
# Requests
from requests import post as requests_post
from requests.models import Response
# Zato
from zato.common.broker_message import code_to_name, SCHEDULER
from zato.common.api import URLInfo
from zato.common.util.config import get_url_protocol_from_config_item
from zato.common.util.platform_ import is_non_windows
# ################################################################################################################################
# ################################################################################################################################
if 0:
from zato.client import AnyServiceInvoker
from zato.common.typing_ import any_, anydict, strdict, strdictnone
from zato.server.connection.server.rpc.api import ServerRPC
AnyServiceInvoker = AnyServiceInvoker
ServerRPC = ServerRPC
# ################################################################################################################################
# ################################################################################################################################
logger = logging.getLogger(__name__)
has_debug = False
use_tls = is_non_windows
# ################################################################################################################################
# ################################################################################################################################
to_scheduler_actions = {
SCHEDULER.CREATE.value,
SCHEDULER.EDIT.value,
SCHEDULER.DELETE.value,
SCHEDULER.EXECUTE.value,
SCHEDULER.SET_SERVER_ADDRESS.value,
}
from_scheduler_actions = {
SCHEDULER.JOB_EXECUTED.value,
SCHEDULER.DELETE.value,
SCHEDULER.DELETE_PUBSUB_SUBSCRIBER.value,
}
# ################################################################################################################################
# ################################################################################################################################
class BrokerClient:
""" Simulates previous Redis-based RPC.
"""
def __init__(
self,
*,
scheduler_config: 'strdictnone' = None,
server_rpc: 'ServerRPC | None' = None,
zato_client: 'AnyServiceInvoker | None' = None,
) -> 'None':
# This is used to invoke services
self.server_rpc = server_rpc
self.zato_client = zato_client
self.scheduler_address = ''
self.scheduler_auth = None
# We are a server so we will have configuration needed to set up the scheduler's details ..
if scheduler_config:
self.set_scheduler_config(scheduler_config)
# .. otherwise, we are a scheduler so we have a client to invoke servers with.
else:
self.zato_client = zato_client
# ################################################################################################################################
def set_scheduler_config(self, scheduler_config:'strdict') -> 'None':
# Branch-local variables
scheduler_host = scheduler_config['scheduler_host']
scheduler_port = scheduler_config['scheduler_port']
if not (scheduler_api_username := scheduler_config.get('scheduler_api_username')):
scheduler_api_username = 'scheduler_api_username_missing'
if not (scheduler_api_password := scheduler_config.get('scheduler_api_password')):
scheduler_api_password = 'scheduler_api_password_missing'
# Make sure both parts are string objects
scheduler_api_username = str(scheduler_api_username)
scheduler_api_password = str(scheduler_api_password)
self.scheduler_auth = (scheduler_api_username, scheduler_api_password)
# Introduced after 3.2 was released, hence optional
scheduler_use_tls = scheduler_config.get('scheduler_use_tls', False)
# Decide whether to use HTTPS or HTTP
api_protocol = get_url_protocol_from_config_item(scheduler_use_tls)
# Set a full URL for later use
scheduler_address = f'{api_protocol}://{scheduler_host}:{scheduler_port}'
self.set_scheduler_address(scheduler_address)
# ################################################################################################################################
def set_zato_client_address(self, url:'URLInfo') -> 'None':
self.zato_client.set_address(url)
# ################################################################################################################################
def set_scheduler_address(self, scheduler_address:'str') -> 'None':
self.scheduler_address = scheduler_address
# ################################################################################################################################
def run(self) -> 'None':
raise NotImplementedError()
# ################################################################################################################################
def _invoke_scheduler_from_server(self, msg:'anydict') -> 'any_':
idx = 0
response = None
msg_bytes = dumps(msg)
while not response:
# Increase the loop counter
idx += 1
try:
response = requests_post(
self.scheduler_address,
msg_bytes,
auth=self.scheduler_auth,
verify=False,
timeout=5,
)
except Exception as e:
# .. log what happened ..
logger.warn('Scheduler invocation error -> %s (%s)', e, self.scheduler_address)
# .. keep retrying or return the response ..
finally:
# .. we can return the response if we have it ..
if response:
return response
# .. otherwise, wait until the scheduler responds ..
else:
# .. The first time around, wait a little longer ..
# .. because the scheduler may be only starting now ..
if idx == 1:
logger.info('Waiting for the scheduler to respond (1)')
sleep(5)
# .. log what is happening ..
logger.info('Waiting for the scheduler to respond (2)')
# .. wait for a moment ..
sleep(3)
# ################################################################################################################################
def _invoke_server_from_scheduler(self, msg:'anydict') -> 'any_':
if self.zato_client:
response = self.zato_client.invoke_async(msg.get('service'), msg['payload'])
return response
else:
logger.warning('Scheduler -> server invocation failure; self.zato_client is not configured (%r)', self.zato_client)
# ################################################################################################################################
def _rpc_invoke(self, msg:'anydict', from_scheduler:'bool'=False) -> 'any_':
# Local aliases ..
from_server = not from_scheduler
action = msg['action']
try:
# Special-case messages that are actually destined to the scheduler, not to servers ..
if from_server and action in to_scheduler_actions:
try:
response = self._invoke_scheduler_from_server(msg)
return response
except Exception as e:
logger.warning('Invocation error; server -> scheduler -> %s (%d:%r)', e, from_server, action)
return
# .. special-case messages from the scheduler to servers ..
elif from_scheduler and action in from_scheduler_actions:
try:
response = self._invoke_server_from_scheduler(msg)
return response
except Exception as e:
logger.warning('Invocation error; scheduler -> server -> %s (%d:%r)', e, from_server, action)
return
# .. otherwise, we invoke servers.
code_name = code_to_name[action]
if has_debug:
logger.info('Invoking %s %s', code_name, msg)
if self.server_rpc:
return self.server_rpc.invoke_all('zato.service.rpc-service-invoker', msg, ping_timeout=10)
else:
logger.warning('Server-to-server RPC invocation failure -> self.server_rpc is not configured (%r) (%d:%r)',
self.server_rpc, from_server, action)
except Exception:
logger.warning(format_exc())
# ################################################################################################################################
def publish(self, msg:'anydict', *ignored_args:'any_', **kwargs:'any_') -> 'any_':
spawn(self._rpc_invoke, msg, **kwargs)
# ################################################################################################################################
def invoke_async(self, msg:'anydict', *ignored_args:'any_', **kwargs:'any_') -> 'any_':
spawn(self._rpc_invoke, msg, **kwargs)
# ################################################################################################################################
def invoke_sync(self, msg:'anydict', *ignored_args:'any_', **kwargs:'any_') -> 'any_':
response = self._rpc_invoke(msg, **kwargs) # type: Response
if response.text:
out = loads(response.text)
return out
else:
return response.text
# ################################################################################################################################
def on_message(self, msg):
# type: (object) -> None
raise NotImplementedError()
# ################################################################################################################################
def close(self):
# type: () -> None
raise NotImplementedError()
# ################################################################################################################################
# ################################################################################################################################
| 10,859
|
Python
|
.py
| 199
| 45.281407
| 130
| 0.442713
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,489
|
thread_client.py
|
zatosource_zato/code/zato-broker/src/zato/broker/thread_client.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2021, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
| 154
|
Python
|
.py
| 5
| 29.4
| 64
| 0.687075
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,490
|
__init__.py
|
zatosource_zato/code/zato-broker/src/zato/broker/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2021, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import logging
from traceback import format_exc
# Zato
from zato.common.api import ZATO_NONE
from zato.common.broker_message import code_to_name
from zato.common.util.api import new_cid
from zato.common.util.config import resolve_env_variables
logger = logging.getLogger('zato')
has_debug = logger.isEnabledFor(logging.DEBUG)
# ################################################################################################################################
class BrokerMessageReceiver:
""" A class that knows how to handle messages received from other worker processes.
"""
def __init__(self):
self.broker_client_id = '{}-{}'.format(ZATO_NONE, new_cid())
self.broker_callbacks = {}
self.broker_messages = []
# ################################################################################################################################
def on_broker_msg(self, msg):
""" Receives a configuration message, parses its JSON contents and invokes an appropriate handler, the one indicated
by the msg's 'action' key so if the action is '1000' then self.on_config_SCHEDULER_CREATE will be invoked
(because in this case '1000' is the code for creating a new scheduler's job, see zato.common.broker_message for the list
of all actions).
"""
try:
# Apply pre-processing
msg = self.preprocess_msg(msg)
if self.filter(msg):
action = code_to_name[msg['action']]
handler = 'on_broker_msg_{0}'.format(action)
func = getattr(self.worker_store, handler)
func(msg)
else:
logger.info('Rejecting broker message `%r`', msg)
except Exception:
msg_action = msg.get('action') or 'undefined_msg_action' # type: str
action = code_to_name.get(msg_action) or 'undefined_action'
logger.error('Could not handle broker message: (%s:%s) `%r`, e:`%s`', action, msg_action, msg, format_exc())
# ################################################################################################################################
def preprocess_msg(self, msg):
""" Pre-processes a given message before it is handed over to its recipient by resolving all environment variables.
"""
return resolve_env_variables(msg)
# ################################################################################################################################
def filter(self, msg):
""" Subclasses may override the method in order to filter the messages prior to invoking the actual message handler.
Default implementation always returns False which rejects all the incoming messages.
"""
return True
# ################################################################################################################################
| 3,080
|
Python
|
.py
| 56
| 47.946429
| 130
| 0.519442
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,491
|
setup.py
|
zatosource_zato/code/zato-common/setup.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import os
# flake8: noqa
from setuptools import setup, find_packages
version = '3.2'
def parse_requirements(requirements):
ignored = ['#', 'setuptools', '-e']
with open(requirements) as f:
return [line.split('==')[0] for line in f if line.strip() and not any(line.startswith(prefix) for prefix in ignored)]
setup(
name = 'zato-common',
version = version,
author = 'Zato Source s.r.o.',
author_email = 'info@zato.io',
url = 'https://zato.io',
license = 'AGPLv3',
platforms = 'OS Independent',
description = 'Constants and utils common across the whole of Zato ESB and app server (https://zato.io)',
package_dir = {'':'src'},
packages = find_packages('src'),
namespace_packages = ['zato'],
install_requires = parse_requirements(
os.path.join(os.path.dirname(os.path.realpath(__file__)), 'requirements.txt')),
keywords=('soa eai esb middleware messaging queueing asynchronous integration performance http zeromq framework events agile broker messaging server jms enterprise python middleware clustering amqp nosql websphere mq wmq mqseries ibm amqp zmq'),
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Framework :: Buildout',
'Intended Audience :: Customer Service',
'Intended Audience :: Developers',
'Intended Audience :: Financial and Insurance Industry',
'Intended Audience :: Healthcare Industry',
'Intended Audience :: Information Technology',
'Intended Audience :: Telecommunications Industry',
'License :: OSI Approved :: AGPLv3',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: C',
'Programming Language :: Python :: 2 :: Only',
'Programming Language :: Python :: 2.7',
'Topic :: Database',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Topic :: Internet',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
'Topic :: Internet :: File Transfer Protocol (FTP)',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Object Brokering',
],
zip_safe = False,
)
| 2,560
|
Python
|
.py
| 56
| 38.125
| 251
| 0.630269
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,492
|
test_kv_data_api.py
|
zatosource_zato/code/zato-common/test/zato/common/test_kv_data_api.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2021, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# ################################################################################################################################
# ################################################################################################################################
# stdlib
from unittest import main
# Zato
from zato.common.kv_data import default_expiry_time, KeyCtx, KVDataAPI
from zato.common.test import ODBTestCase, rand_datetime, rand_string
# ################################################################################################################################
# ################################################################################################################################
if 0:
from datetime import datetime
datetime = datetime
# ################################################################################################################################
# ################################################################################################################################
cluster_id = 1
# ################################################################################################################################
# ################################################################################################################################
class KVDataAPITestCase(ODBTestCase):
# ################################################################################################################################
def test_key_ctx(self):
key = rand_string()
data_type = 'string'
creation_time = rand_datetime()
ctx = KeyCtx()
ctx.key = key
ctx.data_type = data_type
ctx.creation_time = creation_time
self.assertEqual(ctx.key, key)
self.assertEqual(ctx.data_type, data_type)
self.assertEqual(ctx.creation_time, creation_time)
self.assertIsNone(ctx.value)
self.assertIsNone(ctx.expiry_time)
# ################################################################################################################################
def test_session(self):
kv_data_api = KVDataAPI(cluster_id, self.session_wrapper)
session = kv_data_api._get_session()
result = session.execute('SELECT 1+1')
rows = result.fetchall()
self.assertListEqual(rows, [(2,)])
# ################################################################################################################################
def test_set_with_ctx(self):
key = rand_string()
value = rand_string()
data_type = 'text'
creation_time = rand_datetime(to_string=False) # type: datetime
ctx = KeyCtx()
ctx.key = key
ctx.value = value
ctx.data_type = data_type
ctx.creation_time = creation_time
kv_data_api = KVDataAPI(cluster_id, self.session_wrapper)
# Set the key ..
kv_data_api.set_with_ctx(ctx)
# .. let's get it back ..
result = kv_data_api.get(key)
# .. and run all the assertions now.
self.assertEqual(result.key, ctx.key)
self.assertEqual(result.value, ctx.value)
self.assertEqual(result.data_type, ctx.data_type)
self.assertEqual(result.creation_time, ctx.creation_time)
self.assertEqual(result.expiry_time, default_expiry_time)
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':
_ = main()
# ################################################################################################################################
| 3,967
|
Python
|
.py
| 71
| 49.887324
| 130
| 0.340756
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,493
|
test_imap.py
|
zatosource_zato/code/zato-common/test/zato/common/test_imap.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# This comes first
from gevent.monkey import patch_all
patch_all()
# stdlib
import os
from unittest import main, TestCase
# Bunch
from bunch import Bunch
# Zato
from zato.common.api import EMAIL
from zato.server.connection.email import Imbox
# ################################################################################################################################
# ################################################################################################################################
class _Base_Test_Case(TestCase):
def _run_test(self, key_config):
host_key = key_config.get('host')
port_key = key_config.get('port')
username_key = key_config.get('username')
password_key = key_config.get('password')
host = os.environ.get(host_key)
if not host:
return
port = os.environ.get(port_key)
username = os.environ.get(username_key)
password = os.environ.get(password_key)
config = Bunch()
config.host = host
config.port = int(port) # type: ignore
config.username = username
config.password = password
config.mode = EMAIL.IMAP.MODE.SSL
config.debug_level = 0
imbox = Imbox(config, config)
result = imbox.folders()
self.assertTrue(len(result) > 0)
imbox.server.server.sock.close()
# ################################################################################################################################
# ################################################################################################################################
class IMAP_Without_OAuth_TestCase(_Base_Test_Case):
def test_connection(self) -> 'None':
config = {
'host': 'Zato_Test_IMAP_Host',
'port': 'Zato_Test_IMAP_Port',
'username': 'Zato_Test_IMAP_Username',
'password': 'Zato_Test_IMAP_Password',
}
self._run_test(config)
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':
_ = main()
# ################################################################################################################################
# ################################################################################################################################
| 2,717
|
Python
|
.py
| 58
| 40.965517
| 130
| 0.380319
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,494
|
test_oauth.py
|
zatosource_zato/code/zato-common/test/zato/common/test_oauth.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# This comes first
from gevent.monkey import patch_all
patch_all()
# stdlib
import os
from unittest import main, TestCase
# Zato
from zato.common.oauth import OAuthTokenClient, OAuthStore
from zato.common.typing_ import cast_
# ################################################################################################################################
# ################################################################################################################################
if 0:
from zato.common.typing_ import any_, dictnone, stranydict
# ################################################################################################################################
# ################################################################################################################################
class _BaseTestCase(TestCase):
def setUp(self) -> 'None':
self.zato_test_config = {}
username = os.environ.get('Zato_Test_OAuth_Username')
if not username:
return
secret = os.environ.get('Zato_Test_OAuth_Secret')
auth_server_url = os.environ.get('Zato_Test_OAuth_Auth_Server_URL')
scopes = os.environ.get('Zato_Test_OAuth_Scopes')
self.zato_test_config['conn_name'] = 'OAuthTokenClientTestCase'
self.zato_test_config['username'] = username
self.zato_test_config['secret'] = secret
self.zato_test_config['auth_server_url'] = auth_server_url
self.zato_test_config['scopes'] = scopes
# ################################################################################################################################
def run_common_token_assertions(self, token:'dictnone') -> 'None':
token = cast_('stranydict', token)
self.assertEqual(token['token_type'], 'Bearer')
self.assertEqual(token['expires_in'], 3600)
self.assertEqual(token['scope'], self.zato_test_config['scopes'])
self.assertIsInstance(token['access_token'], str)
self.assertGreaterEqual(len(token['access_token']), 50)
# ################################################################################################################################
# ################################################################################################################################
class OAuthTokenClientTestCase(_BaseTestCase):
def test_client_obtain_token_from_remote_auth_server(self) -> 'None':
if not self.zato_test_config:
return
client = OAuthTokenClient(**self.zato_test_config)
token = client.obtain_token()
self.run_common_token_assertions(token)
# ################################################################################################################################
# ################################################################################################################################
class OAuthStoreTestCase(_BaseTestCase):
def test_get_with_set(self) -> 'None':
# This value can be any integer
item_id = 123
if not self.zato_test_config:
return
def get_config(ignored_item_id:'any_') -> 'stranydict':
return self.zato_test_config
max_obtain_iters = 1
obtain_sleep_time = 0
store = OAuthStore(get_config, OAuthTokenClient.obtain_from_config, max_obtain_iters, obtain_sleep_time)
store.create(item_id)
item = store.get(item_id)
self.run_common_token_assertions(item.data)
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':
_ = main()
# ################################################################################################################################
# ################################################################################################################################
| 4,251
|
Python
|
.py
| 73
| 52.342466
| 130
| 0.395315
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,495
|
test_match.py
|
zatosource_zato/code/zato-common/test/zato/common/test_match.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from copy import deepcopy
from unittest import TestCase
# Bunch
from bunch import Bunch
# Zato
from zato.common.api import FALSE_TRUE, TRUE_FALSE
from zato.common.match import Matcher
default_config = Bunch({
'order': FALSE_TRUE,
'*.zxc': True,
'abc.*': True,
'*': False,
'qwe.*.zxc': False,
})
# ################################################################################################################################
class MatcherTestCase(TestCase):
# ################################################################################################################################
def test_read_config(self):
m = Matcher()
m.read_config(default_config)
self.assertEqual(m.config, default_config)
self.assertEqual(m.order1, False)
self.assertEqual(m.order2, True)
self.assertIsNone(m.special_case)
# Note that it's reversed because we match from narrowest
# to broadest patterns, sorted lexicographically.
self.assertListEqual(m.items[True], ['abc.*', '*.zxc',])
self.assertListEqual(m.items[False], ['qwe.*.zxc', '*'])
# ################################################################################################################################
def test_is_allowed_order_false_true(self):
m = Matcher()
m.read_config(default_config)
self.assertEqual(m.config, default_config)
self.assertEqual(m.order1, False)
self.assertEqual(m.order2, True)
self.assertIsNone(m.special_case)
# ###########################################################################################
#
# The value of 'aaa.zxc' is allowed because
# 1) We match from False to True
# 2) qwe.*.zxc does not match it
# 3) * says nothing is allowed
# 4) *.zxc says it is allowed overriding bullet #3
#
is_allowed = m.is_allowed('aaa.zxc')
self.assertIs(is_allowed, True)
self.assertDictEqual(m.is_allowed_cache, {'aaa.zxc':True})
# ###########################################################################################
#
# The value of 'qwe.333.zxc' is also allowed because
# 1) We match from False to True
# 2) qwe.*.zxc disallowes it
# 3) * says nothing is allowed
# 4) *.zxc this one allows it even though #1 and #2 said no
#
is_allowed = m.is_allowed('qwe.333.zxc')
self.assertIs(is_allowed, True)
self.assertDictEqual(m.is_allowed_cache, {'aaa.zxc':True, 'qwe.333.zxc':True})
# ###########################################################################################
#
# The value of 'qwe.444.aaa' is not allowed
# 1) We match from False to True
# 2) qwe.*.zxc does not match it at all
# 3) * says nothing is allowed
# 4) *.zxc does not match it at all so the last match of #2 is taken into account
is_allowed = m.is_allowed('qwe.444.aaa')
self.assertIs(is_allowed, False)
self.assertDictEqual(m.is_allowed_cache, {'aaa.zxc':True, 'qwe.333.zxc':True, 'qwe.444.aaa': False})
# ################################################################################################################################
def test_is_allowed_order_true_false(self):
config = deepcopy(default_config)
config.order = TRUE_FALSE
m = Matcher()
m.read_config(config)
self.assertEqual(m.config, config)
self.assertEqual(m.order1, True)
self.assertEqual(m.order2, False)
self.assertIsNone(m.special_case)
# ###########################################################################################
#
# The value of 'aaa.zxc' is not allowed because
# 1) We match from True to False
# 2) *.zxc says it is allowed
# 3) qwe.*.zxc matches it and says it's not allowed
# 4) * matches again and confirms it's not allowed
#
is_allowed = m.is_allowed('aaa.zxc')
self.assertIs(is_allowed, False)
self.assertDictEqual(m.is_allowed_cache, {'aaa.zxc':False})
# ###########################################################################################
#
# The value of 'qwe.333.zxc' is also not allowed because
# 1) We match from True to False
# 2) *.zxc says it is allowed
# 3) qwe.*.zxc matches it and says it's not allowed
# 4) * matches again and confirms it's not allowed
#
is_allowed = m.is_allowed('qwe.333.zxc')
self.assertIs(is_allowed, False)
self.assertDictEqual(m.is_allowed_cache, {'aaa.zxc':False, 'qwe.333.zxc':False})
# ###########################################################################################
config2 = deepcopy(default_config)
del config2['*']
config2['*.aaa'] = True
config2.order = TRUE_FALSE
m2 = Matcher()
m2.read_config(config2)
self.assertEqual(m2.config, config2)
self.assertEqual(m2.order1, True)
self.assertEqual(m2.order2, False)
# ###########################################################################################
#
# The value of 'qwe.444.aaa' is allowed
# 1) We match from True to False
# 2) *.aaa matches and allows it
# 3) *.zxc does not match
# 4) abc.* does not match
# 5) qwe.*zxc does not match
#
is_allowed = m2.is_allowed('qwe.444.aaa')
self.assertIs(is_allowed, True)
self.assertDictEqual(m2.is_allowed_cache, {'qwe.444.aaa':True})
# ################################################################################################################################
def test_is_allowed_true_only_has_order(self):
config = Bunch({'order':FALSE_TRUE, 'abc':True, 'zxc':True})
m = Matcher()
m.read_config(config)
self.assertIsNone(m.special_case)
self.assertTrue(m.is_allowed('abc'))
self.assertTrue(m.is_allowed('zxc'))
self.assertFalse(m.is_allowed('111'))
self.assertFalse(m.is_allowed('222'))
self.assertDictEqual(m.is_allowed_cache, {'abc':True, 'zxc':True, '111':False, '222':False})
# ################################################################################################################################
def test_is_allowed_false_only_has_order(self):
    """ With an explicit order and only False patterns, nothing at all
    is allowed - neither listed names nor unknown ones.
    """
    matcher = Matcher()
    matcher.read_config(Bunch({'order':FALSE_TRUE, 'abc':False, 'zxc':False}))
    self.assertIsNone(matcher.special_case)

    for name in ('abc', 'zxc', '111', '222'):
        self.assertFalse(matcher.is_allowed(name))

    # All four lookups were cached as disallowed
    self.assertDictEqual(matcher.is_allowed_cache, {'abc':False, 'zxc':False, '111':False, '222':False})
# ################################################################################################################################
def test_is_allowed_true_only_no_order(self):
    """ Same as the has-order variant but without an 'order' key - only
    the True-mapped names are allowed.
    """
    matcher = Matcher()
    matcher.read_config(Bunch({'abc':True, 'zxc':True}))
    self.assertIsNone(matcher.special_case)

    for name in ('abc', 'zxc'):
        self.assertTrue(matcher.is_allowed(name))

    for name in ('111', '222'):
        self.assertFalse(matcher.is_allowed(name))

    self.assertDictEqual(matcher.is_allowed_cache, {'abc':True, 'zxc':True, '111':False, '222':False})
# ################################################################################################################################
def test_is_allowed_false_only_no_order(self):
    """ Without an 'order' key and with only False patterns, every value
    is rejected.
    """
    matcher = Matcher()
    matcher.read_config(Bunch({'abc':False, 'zxc':False}))
    self.assertIsNone(matcher.special_case)

    for name in ('abc', 'zxc', '111', '222'):
        self.assertFalse(matcher.is_allowed(name))

    self.assertDictEqual(matcher.is_allowed_cache, {'abc':False, 'zxc':False, '111':False, '222':False})
# ################################################################################################################################
def test_is_allowed_no_match(self):
    """ An empty pattern set means nothing matches, hence everything
    is disallowed.
    """
    # No entries at all - we disallow everything in that case
    config = Bunch({'order': FALSE_TRUE})
    matcher = Matcher()
    matcher.read_config(config)

    self.assertIsNone(matcher.special_case)
    self.assertEqual(matcher.config, config)
    self.assertEqual(matcher.order1, False)
    self.assertEqual(matcher.order2, True)

    self.assertIs(matcher.is_allowed('abc'), False)
    self.assertDictEqual(matcher.is_allowed_cache, {'abc':False})
# ################################################################################################################################
def test_is_allowed_no_order(self):
    """ If the config carries no 'order' key, FALSE_TRUE is the default,
    i.e. order1 is False and order2 is True.
    """
    config = Bunch({'abc':True})
    matcher = Matcher()
    matcher.read_config(config)

    self.assertIsNone(matcher.special_case)
    self.assertEqual(matcher.config, config)
    self.assertEqual(matcher.order1, False)
    self.assertEqual(matcher.order2, True)
# ################################################################################################################################
def test_is_allowed_cache_is_used(self):
    """ Repeated is_allowed calls for the same value must hit the cache -
    the decision is computed and stored once, then read back.
    """

    class CountingCache:
        """ Dict-like stand-in that counts reads and writes. """
        def __init__(self):
            self.impl = {}
            self.getitem_used = 0
            self.setitem_used = 0

        def __setitem__(self, key, value):
            self.setitem_used += 1
            self.impl[key] = value

        def __getitem__(self, key):
            self.getitem_used += 1
            return self.impl[key]

    matcher = Matcher()
    matcher.is_allowed_cache = CountingCache()
    matcher.read_config(default_config)

    self.assertIsNone(matcher.special_case)
    self.assertEqual(matcher.config, default_config)
    self.assertEqual(matcher.order1, False)
    self.assertEqual(matcher.order2, True)

    for _ in range(3):
        matcher.is_allowed('aaa.zxc')

    self.assertEqual(matcher.is_allowed_cache.setitem_used, 1)
    # Three reads because even the first call attempts the cache before computing
    self.assertEqual(matcher.is_allowed_cache.getitem_used, 3)
def test_is_allowed_special_case(self):
    """ A configuration consisting of a lone '*' pattern is a special case -
    the fixed decision is stored on the matcher and the cache is bypassed
    entirely, staying empty.
    """
    for special_case in (False, True):
        matcher = Matcher()
        matcher.read_config(Bunch({'order':TRUE_FALSE, '*':special_case}))
        self.assertIs(matcher.special_case, special_case)

        matcher.is_allowed('aaa.zxc')
        self.assertEqual(matcher.is_allowed_cache, {})
# ################################################################################################################################
| 11,670
|
Python
|
.py
| 237
| 40.810127
| 130
| 0.475132
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,496
|
test_exception.py
|
zatosource_zato/code/zato-common/test/zato/common/test_exception.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
from unittest import main, TestCase
# Zato
from zato.common.util.exception import pretty_format_exception
from zato.common.version import get_version
# ################################################################################################################################
# ################################################################################################################################
class ExceptionTestCase(TestCase):
    """ Tests for zato.common.util.exception.pretty_format_exception. """

    maxDiff = 1234567890 # Effectively unlimited - show full diffs on failure

    def test_pretty_format_exception(self):
        """ Formats a real ZeroDivisionError and compares the result with the expected text verbatim. """

        # Filter our warnings coming from zato --version
        import warnings
        warnings.filterwarnings(action='ignore', message='unclosed file', category=ResourceWarning)

        # Test data
        cid = '123456'
        zato_version = get_version()

        def utcnow_func():
            # Fixed timestamp so the formatted output is deterministic
            return '2222-11-22T00:11:22'

        e = None

        try:
            print(12345 * 1/0) # NOTE: the expected text below hard-codes this statement's file line number (39) - do not move this line
        except ZeroDivisionError as exc:
            e = exc

        if not e:
            self.fail('Expected for an exception to have been raised')

        result = pretty_format_exception(e, cid, utcnow_func)

        # NOTE(review): the expected text embeds a repo-relative path and a line number,
        # which couples this test to the file's physical layout - confirm before editing.
        expected = f"""
路路路 Error 路路路
>>> ZeroDivisionError: 'division by zero'
>>> File "code/zato-common/test/zato/common/test_exception.py", line 39, in test_pretty_format_exception
>>> print(12345 * 1/0)
路路路 Details 路路路
Traceback (most recent call last):
File "code/zato-common/test/zato/common/test_exception.py", line 39, in test_pretty_format_exception
print(12345 * 1/0)
ZeroDivisionError: division by zero
路路路 Context 路路路
123456
2222-11-22T00:11:22
{zato_version}
""".strip()

        self.assertEqual(result, expected)
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':
    _ = main() # Run this module's tests when executed directly
# ################################################################################################################################
# ################################################################################################################################
| 2,432
|
Python
|
.py
| 53
| 40.566038
| 130
| 0.443921
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,497
|
test_common.py
|
zatosource_zato/code/zato-common/test/zato/common/test_common.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from unittest import TestCase
# Nose
from nose.tools import eq_
# Zato
from zato.common.api import soapenv11_namespace, soapenv12_namespace, StatsElem
class StatsElemTestCase(TestCase):
    """ Tests for StatsElem deserialization. """

    def test_from_json(self):
        """ Every key in the input JSON dict must become an attribute of equal value
        on the StatsElem instance built from it.
        """
        data = {
            'usage_perc_all_services': 1.22, 'all_services_time': 4360,
            'time_perc_all_services': 17.64,
            'mean_trend': '0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,769,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            'min_resp_time': 769.0, 'service_name': 'zato.stats.summary.create-summary-by-year',
            'max_resp_time': 769.0, 'rate': 0.0, 'mean_all_services': '63',
            'all_services_usage': 82, 'time': 769.0, 'usage': 1,
            'usage_trend': '0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0',
            'mean': 12.61
        }

        elem = StatsElem.from_json(data)

        for key, expected in data.items():
            eq_(expected, getattr(elem, key))
class TestSOAPNamespace(TestCase):
    """ Confirms the SOAP 1.1 and 1.2 envelope namespace constants. """

    def test_soap_ns(self):
        self.assertEqual('http://schemas.xmlsoap.org/soap/envelope/', soapenv11_namespace)
        self.assertEqual('http://www.w3.org/2003/05/soap-envelope', soapenv12_namespace)
| 1,576
|
Python
|
.py
| 32
| 42.4375
| 152
| 0.617992
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,498
|
test_cloud_atlassian_confluence.py
|
zatosource_zato/code/zato-common/test/zato/common/test_cloud_atlassian_confluence.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2022, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
from unittest import main, TestCase
# Zato
from zato.common.util.cloud.atlassian.confluence import PageProperties
# ################################################################################################################################
# ################################################################################################################################
class PagePropertiesTestCase(TestCase):
    """ Tests for the Confluence PageProperties builder. """

    maxDiff = 1234567890 # Effectively unlimited - show full diffs on failure

    def test_create_page_properties(self):
        """ Builds a page-properties macro with two plain key/value rows and one
        user-link row, then compares the generated storage-format XML verbatim.
        """

        param_name = 'My Parameter'
        local_id = 'abc-my-local-id' # Reused by the macro's structured-macro, macro-id and table local-id attributes

        prop = PageProperties(param_name, local_id=local_id)

        key1 = 'my.key.1'
        key2 = 'my.key.2'
        key3 = 'my.user'

        value1 = 'my.value.1'
        value2 = 'my.value.2'
        value3 = prop.get_user_link(key3) # A user link - rendered as <ac:link><ri:user .../></ac:link>

        prop.append(key1, value1)
        prop.append(key2, value2)
        prop.append(key3, value3)

        result = prop.get_result()

        # NOTE(review): this comparison is whitespace-sensitive - confirm the literal's
        # internal indentation against what PageProperties.get_result actually emits.
        expected = """
<ac:structured-macro
ac:name="details"
ac:schema-version="1"
data-layout="default"
ac:local-id="structured-macro-local-id-abc-my-local-id"
ac:macro-id="macro-id-abc-my-local-id">
<ac:parameter ac:name="id">My Parameter</ac:parameter>
<ac:rich-text-body>
<table data-layout="default" ac:local-id="table-local-id-abc-my-local-id">
<colgroup>
<col style="width: 340.0px;" />
<col style="width: 340.0px;" />
</colgroup>
<tbody>
<tr>
<th>
<p>my.key.1</p>
</th>
<td>
<p>my.value.1</p>
</td>
</tr>
<tr>
<th>
<p>my.key.2</p>
</th>
<td>
<p>my.value.2</p>
</td>
</tr>
<tr>
<th>
<p>my.user</p>
</th>
<td>
<p><ac:link><ri:user ri:account-id="my.user" /></ac:link></p>
</td>
</tr>
</tbody>
</table>
</ac:rich-text-body>
</ac:structured-macro>
""".strip()

        self.assertEqual(expected, result)
# ################################################################################################################################
# ################################################################################################################################
if __name__ == '__main__':
    _ = main() # Run this module's tests when executed directly
# ################################################################################################################################
# ################################################################################################################################
| 2,723
|
Python
|
.py
| 78
| 28.961538
| 130
| 0.398399
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|
10,499
|
test_kvdb.py
|
zatosource_zato/code/zato-common/test/zato/common/test_kvdb.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from unittest import TestCase
# Bunch
from bunch import Bunch
# Nose
from nose.tools import eq_
# Zato
from zato.common.kvdb.api import KVDB
from zato.common.test import rand_string, rand_int
# ##############################################################################
class KVDBTestCase(TestCase):
    """ Tests for KVDB configuration parsing and connection selection. """

    def test_parse_config(self):
        """ KVDB.init must reject incomplete Sentinel configuration with a ValueError
        and, given a complete one, build either a Sentinel-backed or a plain
        Redis connection depending on use_redis_sentinels.
        """

        class FakeSentinel:
            """ Stands in for redis.sentinel.Sentinel - records constructor input
            and which master was requested.
            """
            def __init__(self, sentinels, password, socket_timeout):
                self.sentinels = sentinels
                self.password = password
                self.socket_timeout = socket_timeout
                self.master_for_called_with = None

            def master_for(self, master_name):
                # Remember the requested master and return self as the 'connection'
                self.master_for_called_with = master_name
                return self

        class FakeStrictRedis:
            """ Stands in for redis.StrictRedis - just keeps the config given. """
            def __init__(self, **config):
                self.config = config

        class FakeKVDB(KVDB):
            def _get_connection_class(self):
                # Route to the matching fake depending on whether sentinels are on
                return FakeSentinel if self.has_sentinel else FakeStrictRedis

        def decrypt_func(password):
            # No-op decryption, sufficient for tests
            return password

        sentinel1_host, sentinel1_port = 'a-' + rand_string(), rand_int()
        sentinel2_host, sentinel2_port = 'b-' + rand_string(), rand_int()

        password = rand_string()
        socket_timeout = rand_int()
        redis_sentinels_master = rand_string()
        redis_sentinels = ['{}:{}'.format(sentinel1_host, sentinel1_port), '{}:{}'.format(sentinel2_host, sentinel2_port)]

        # 1) Sentinels enabled but no addresses given - must be rejected
        try:
            config = {'use_redis_sentinels': True}
            kvdb = KVDB(config=config)
            kvdb.init()
        except ValueError as e:
            # BaseException.message does not exist under Python 3 - args[0] works under both 2 and 3
            eq_(e.args[0], 'kvdb.redis_sentinels must be provided')
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels)')

        # 2) Addresses given but no master name - must be rejected too
        try:
            config = {'use_redis_sentinels': True, 'redis_sentinels': redis_sentinels}
            kvdb = KVDB(config=config)
            kvdb.init()
        except ValueError as e:
            eq_(e.args[0], 'kvdb.redis_sentinels_master must be provided')
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels_master)')

        # 3) Complete Sentinel config - the connection is built through FakeSentinel
        config = Bunch({
            'use_redis_sentinels': True,
            'redis_sentinels':redis_sentinels,
            'redis_sentinels_master':redis_sentinels_master,
            'password': password,
            'socket_timeout':socket_timeout
        })

        kvdb = FakeKVDB(config=config, decrypt_func=decrypt_func)
        kvdb.init()

        eq_(sorted(kvdb.conn.sentinels), [(sentinel1_host, sentinel1_port), (sentinel2_host, sentinel2_port)])
        eq_(kvdb.conn.password, password)
        eq_(kvdb.conn.socket_timeout, socket_timeout)
        eq_(kvdb.conn.master_for_called_with, redis_sentinels_master)

        # 4) Sentinels disabled - a plain (fake) StrictRedis connection is expected
        config = {'use_redis_sentinels': False}
        kvdb = FakeKVDB(config=config, decrypt_func=decrypt_func)
        kvdb.init()

        self.assertTrue(isinstance(kvdb.conn, FakeStrictRedis))
| 3,231
|
Python
|
.py
| 74
| 34.297297
| 122
| 0.608168
|
zatosource/zato
| 1,096
| 239
| 0
|
AGPL-3.0
|
9/5/2024, 5:10:54 PM (Europe/Amsterdam)
|