code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
'''Returns a tuple keys, values, count for kv_arg (which can be a dict or a tuple containing keys, values and optinally count.''' if isinstance(kv_arg, Mapping): return six.iterkeys(kv_arg), six.itervalues(kv_arg), len(kv_arg) assert 2 <= len(kv_arg) <= 3, \ 'Argument must be a mapping or a sequence (keys, values, [len])' return ( kv_arg[0], kv_arg[1], kv_arg[2] if len(kv_arg) == 3 else len(kv_arg[0]))
def _get_kvc(kv_arg)
Returns a tuple keys, values, count for kv_arg (which can be a dict or a tuple containing keys, values and optinally count.
3.956728
2.402522
1.646906
'''Creates a database info with the given information for use with :meth:`trace_sql_database_request`. :param str name: The name (e.g., connection string) of the database. :param str vendor: The type of the database (e.g., sqlite, PostgreSQL, MySQL). :param Channel channel: The channel used to communicate with the database. :returns: A new handle, holding the given database information. :rtype: DbInfoHandle ''' return DbInfoHandle(self._nsdk, self._nsdk.databaseinfo_create( name, vendor, channel.type_, channel.endpoint))
def create_database_info( self, name, vendor, channel)
Creates a database info with the given information for use with :meth:`trace_sql_database_request`. :param str name: The name (e.g., connection string) of the database. :param str vendor: The type of the database (e.g., sqlite, PostgreSQL, MySQL). :param Channel channel: The channel used to communicate with the database. :returns: A new handle, holding the given database information. :rtype: DbInfoHandle
6.384422
2.183966
2.923315
'''Creates a web application info for use with :meth:`trace_incoming_web_request`. See <https://www.dynatrace.com/support/help/server-side-services/introduction/how-does-dynatrace-detect-and-name-services/#web-request-services> for more information about the meaning of the parameters. :param str virtual_host: The logical name of the web server that hosts the application. :param str application_id: A unique ID for the web application. This will also be used as the display name. :param str context_root: The context root of the web application. This is the common path prefix for requests which will be routed to the web application. If all requests to this server are routed to this application, use a slash :code:`'/'`. :rtype: WebapplicationInfoHandle ''' return WebapplicationInfoHandle( self._nsdk, self._nsdk.webapplicationinfo_create( virtual_host, application_id, context_root))
def create_web_application_info( self, virtual_host, application_id, context_root)
Creates a web application info for use with :meth:`trace_incoming_web_request`. See <https://www.dynatrace.com/support/help/server-side-services/introduction/how-does-dynatrace-detect-and-name-services/#web-request-services> for more information about the meaning of the parameters. :param str virtual_host: The logical name of the web server that hosts the application. :param str application_id: A unique ID for the web application. This will also be used as the display name. :param str context_root: The context root of the web application. This is the common path prefix for requests which will be routed to the web application. If all requests to this server are routed to this application, use a slash :code:`'/'`. :rtype: WebapplicationInfoHandle
5.401742
1.428613
3.781109
'''Create a tracer for the given database info and SQL statement. :param DbInfoHandle database: Database information (see :meth:`create_database_info`). :param str sql: The SQL statement to trace. :rtype: tracers.DatabaseRequestTracer ''' assert isinstance(database, DbInfoHandle) return tracers.DatabaseRequestTracer( self._nsdk, self._nsdk.databaserequesttracer_create_sql(database.handle, sql))
def trace_sql_database_request(self, database, sql)
Create a tracer for the given database info and SQL statement. :param DbInfoHandle database: Database information (see :meth:`create_database_info`). :param str sql: The SQL statement to trace. :rtype: tracers.DatabaseRequestTracer
5.793949
2.441959
2.372664
'''Create a tracer for an outgoing webrequest. :param str url: The request URL (including scheme, hostname/port, path and query). :param str method: The HTTP method of the request (e.g., GET or POST). :param headers: The HTTP headers of the request. Can be either a dictionary mapping header name to value (:class:`str` to :class:`str`) or a tuple containing a sequence of string header names as first element, an equally long sequence of corresponding values as second element and optionally a count as third element (this will default to the :func:`len` of the header names). Some headers can appear multiple times in an HTTP request. To capture all the values, either use the tuple-form and provide the name and corresponding values for each, or if possible for that particular header, set the value to an appropriately concatenated string. .. warning:: If you use Python 2, be sure to use the UTF-8 encoding or the :class:`unicode` type! See :ref:`here <http-encoding-warning>` for more information. :type headers: \ dict[str, str] or \ tuple[~typing.Collection[str], ~typing.Collection[str]] or \ tuple[~typing.Iterable[str], ~typing.Iterable[str], int]] :rtype: tracers.OutgoingWebRequestTracer .. versionadded:: 1.1.0 ''' result = tracers.OutgoingWebRequestTracer( self._nsdk, self._nsdk.outgoingwebrequesttracer_create(url, method)) if not result: return result try: if headers: self._nsdk.outgoingwebrequesttracer_add_request_headers(result.handle, *_get_kvc(headers)) except: result.end() raise return result
def trace_outgoing_web_request(self, url, method, headers=None)
Create a tracer for an outgoing webrequest. :param str url: The request URL (including scheme, hostname/port, path and query). :param str method: The HTTP method of the request (e.g., GET or POST). :param headers: The HTTP headers of the request. Can be either a dictionary mapping header name to value (:class:`str` to :class:`str`) or a tuple containing a sequence of string header names as first element, an equally long sequence of corresponding values as second element and optionally a count as third element (this will default to the :func:`len` of the header names). Some headers can appear multiple times in an HTTP request. To capture all the values, either use the tuple-form and provide the name and corresponding values for each, or if possible for that particular header, set the value to an appropriately concatenated string. .. warning:: If you use Python 2, be sure to use the UTF-8 encoding or the :class:`unicode` type! See :ref:`here <http-encoding-warning>` for more information. :type headers: \ dict[str, str] or \ tuple[~typing.Collection[str], ~typing.Collection[str]] or \ tuple[~typing.Iterable[str], ~typing.Iterable[str], int]] :rtype: tracers.OutgoingWebRequestTracer .. versionadded:: 1.1.0
5.743077
1.522
3.773376
'''Creates a tracer for outgoing remote calls. :param str method: The name of the service method/operation. :param str service: The name of the service class/type. :param str endpoint: A string identifying the "instance" of the the service. See also `the general documentation on service endpoints`__. :param Channel channel: The channel used to communicate with the service. :param str protocol_name: The name of the remoting protocol (on top of the communication protocol specified in :code:`channel.type_`.) that is used to to communicate with the service (e.g., RMI, Protobuf, ...). __ \ https://github.com/Dynatrace/OneAgent-SDK#common-concepts-service-endpoints-and-communication-endpoints :rtype: tracers.OutgoingRemoteCallTracer ''' result = tracers.OutgoingRemoteCallTracer( self._nsdk, self._nsdk.outgoingremotecalltracer_create( method, service, endpoint, channel.type_, channel.endpoint)) if protocol_name is not None: self._nsdk.outgoingremotecalltracer_set_protocol_name( result.handle, protocol_name) return result
def trace_outgoing_remote_call( self, method, service, endpoint, channel, protocol_name=None)
Creates a tracer for outgoing remote calls. :param str method: The name of the service method/operation. :param str service: The name of the service class/type. :param str endpoint: A string identifying the "instance" of the the service. See also `the general documentation on service endpoints`__. :param Channel channel: The channel used to communicate with the service. :param str protocol_name: The name of the remoting protocol (on top of the communication protocol specified in :code:`channel.type_`.) that is used to to communicate with the service (e.g., RMI, Protobuf, ...). __ \ https://github.com/Dynatrace/OneAgent-SDK#common-concepts-service-endpoints-and-communication-endpoints :rtype: tracers.OutgoingRemoteCallTracer
5.099754
1.493452
3.414742
'''Creates a tracer for incoming remote calls. For the parameters, see :ref:`tagging` (:code:`str_tag` and :code:`byte_tag`) and :meth:`trace_outgoing_remote_call` (all others). :rtype: tracers.IncomingRemoteCallTracer ''' result = tracers.IncomingRemoteCallTracer( self._nsdk, self._nsdk.incomingremotecalltracer_create(method, name, endpoint)) if protocol_name is not None: self._nsdk.incomingremotecalltracer_set_protocol_name( result.handle, protocol_name) self._applytag(result, str_tag, byte_tag) return result
def trace_incoming_remote_call( self, method, name, endpoint, protocol_name=None, str_tag=None, byte_tag=None)
Creates a tracer for incoming remote calls. For the parameters, see :ref:`tagging` (:code:`str_tag` and :code:`byte_tag`) and :meth:`trace_outgoing_remote_call` (all others). :rtype: tracers.IncomingRemoteCallTracer
3.956813
2.168844
1.824388
'''Creates a tracer for tracing asynchronous related processing in the same process. For more information see :meth:`create_in_process_link`. :param bytes link_bytes: An in-process link created using :meth:`create_in_process_link`. :rtype: tracers.InProcessLinkTracer .. versionadded:: 1.1.0 ''' return tracers.InProcessLinkTracer(self._nsdk, self._nsdk.trace_in_process_link(link_bytes))
def trace_in_process_link(self, link_bytes)
Creates a tracer for tracing asynchronous related processing in the same process. For more information see :meth:`create_in_process_link`. :param bytes link_bytes: An in-process link created using :meth:`create_in_process_link`. :rtype: tracers.InProcessLinkTracer .. versionadded:: 1.1.0
4.652267
1.850849
2.513585
'''Adds a custom request attribute to the current active tracer. :param str key: The name of the custom request attribute, the name is mandatory and may not be None. :param value: The value of the custom request attribute. Currently supported types are integer, float and string values. The value is mandatory and may not be None. :type value: str or int or float .. versionadded:: 1.1.0 ''' if isinstance(value, int): self._nsdk.customrequestattribute_add_integer(key, value) elif isinstance(value, float): self._nsdk.customrequestattribute_add_float(key, value) elif isinstance(value, six.string_types): self._nsdk.customrequestattribute_add_string(key, value) else: warn = self._nsdk.agent_get_logging_callback() if warn: warn('Can\'t add custom request attribute \'{0}\' ' 'because the value type \'{1}\' is not supported!'.format(key, type(value)))
def add_custom_request_attribute(self, key, value)
Adds a custom request attribute to the current active tracer. :param str key: The name of the custom request attribute, the name is mandatory and may not be None. :param value: The value of the custom request attribute. Currently supported types are integer, float and string values. The value is mandatory and may not be None. :type value: str or int or float .. versionadded:: 1.1.0
3.418142
2.034614
1.679996
'''Closes the handle, if it is still open. Usually, you should prefer using the handle as a context manager to calling :meth:`close` manually.''' if self.handle is not None: self.close_handle(self.nsdk, self.handle) self.handle = None
def close(self)
Closes the handle, if it is still open. Usually, you should prefer using the handle as a context manager to calling :meth:`close` manually.
7.153658
2.885468
2.479202
'''Yields all (direct and indirect) children with LINK_CHILD.''' return chain.from_iterable( c.all_nodes_in_subtree() for lnk, c in self.children if lnk == self.LINK_CHILD)
def all_original_children(self)
Yields all (direct and indirect) children with LINK_CHILD.
9.342512
5.195504
1.798192
'''Creates a SDK option list for use with the :code:`sdkopts` parameter of :func:`.initialize` from a list :code:`argv` of command line parameters. An element in :code:`argv` is treated as an SDK option if starts with :code:`prefix`. The return value of this function will then contain the remainder of that parameter (without the prefix). If :code:`remove` is :data:`True`, these arguments will be removed from :code:`argv`. :param argv: An iterable of command line parameter strings. Defaults to :data:`sys.argv`. Must be a :obj:`~typing.MutableSequence` if :code:`remove` is :data:`True`. :type argv: ~typing.Iterable[str] or ~typing.MutableSequence[str] :param bool remove: Whether to remove a command line parameter that was recognized as an SDK option from :code:`argv` (if :data:`True`) or leave :code:`argv` unmodified (if :data:`False`). If :data:`True`, :code:`argv` must be a :obj:`~typing.MutableSequence`. :param str prefix: The prefix string by which SDK options are recognized and which is removed from the copy of the command line parameter that is added to the return value. :rtype: list[str] ''' if argv is None: argv = sys.argv if not remove: return [param[len(prefix):] for param in argv if param.startswith(prefix)] result = [] for i in range(len(argv) - 1, -1, -1): if argv[i].startswith(prefix): result.append(argv[i][len(prefix):]) del argv[i] result.reverse() return result
def sdkopts_from_commandline(argv=None, remove=False, prefix='--dt_')
Creates a SDK option list for use with the :code:`sdkopts` parameter of :func:`.initialize` from a list :code:`argv` of command line parameters. An element in :code:`argv` is treated as an SDK option if starts with :code:`prefix`. The return value of this function will then contain the remainder of that parameter (without the prefix). If :code:`remove` is :data:`True`, these arguments will be removed from :code:`argv`. :param argv: An iterable of command line parameter strings. Defaults to :data:`sys.argv`. Must be a :obj:`~typing.MutableSequence` if :code:`remove` is :data:`True`. :type argv: ~typing.Iterable[str] or ~typing.MutableSequence[str] :param bool remove: Whether to remove a command line parameter that was recognized as an SDK option from :code:`argv` (if :data:`True`) or leave :code:`argv` unmodified (if :data:`False`). If :data:`True`, :code:`argv` must be a :obj:`~typing.MutableSequence`. :param str prefix: The prefix string by which SDK options are recognized and which is removed from the copy of the command line parameter that is added to the return value. :rtype: list[str]
3.243554
1.239798
2.616196
'''Attempts to initialize the SDK with the specified options. Even if initialization fails, a dummy SDK will be available so that SDK functions can be called but will do nothing. If you call this function multiple times, you must call :func:`shutdown` just as many times. The options from all but the first :code:`initialize` call will be ignored (the return value will have the :data:`InitResult.STATUS_ALREADY_INITIALIZED` status code in that case). :param sdkopts: A sequence of strings of the form :samp:`{NAME}={VALUE}` that set the given SDK options. Igored in all but the first :code:`initialize` call. :type sdkopts: ~typing.Iterable[str] :param str sdklibname: The file or directory name of the native C SDK DLL. If None, the shared library packaged directly with the agent is used. Using a value other than None is only acceptable for debugging. You are responsible for providing a native SDK version that matches the Python SDK version. :rtype: .InitResult ''' global _sdk_ref_count #pylint:disable=global-statement global _sdk_instance #pylint:disable=global-statement with _sdk_ref_lk: logger.debug("initialize: ref count = %d", _sdk_ref_count) result = _try_init_noref(sdkopts, sdklibname) if _sdk_instance is None: _sdk_instance = SDK(try_get_sdk()) _sdk_ref_count += 1 return result
def initialize(sdkopts=(), sdklibname=None)
Attempts to initialize the SDK with the specified options. Even if initialization fails, a dummy SDK will be available so that SDK functions can be called but will do nothing. If you call this function multiple times, you must call :func:`shutdown` just as many times. The options from all but the first :code:`initialize` call will be ignored (the return value will have the :data:`InitResult.STATUS_ALREADY_INITIALIZED` status code in that case). :param sdkopts: A sequence of strings of the form :samp:`{NAME}={VALUE}` that set the given SDK options. Igored in all but the first :code:`initialize` call. :type sdkopts: ~typing.Iterable[str] :param str sdklibname: The file or directory name of the native C SDK DLL. If None, the shared library packaged directly with the agent is used. Using a value other than None is only acceptable for debugging. You are responsible for providing a native SDK version that matches the Python SDK version. :rtype: .InitResult
6.63466
1.719622
3.858208
'''Shut down the SDK. :returns: An exception object if an error occurred, a falsy value otherwise. :rtype: Exception ''' global _sdk_ref_count #pylint:disable=global-statement global _sdk_instance #pylint:disable=global-statement global _should_shutdown #pylint:disable=global-statement with _sdk_ref_lk: logger.debug("shutdown: ref count = %d, should_shutdown = %s", \ _sdk_ref_count, _should_shutdown) nsdk = nativeagent.try_get_sdk() if not nsdk: logger.warning('shutdown: SDK not initialized or already shut down') _sdk_ref_count = 0 return None if _sdk_ref_count > 1: logger.debug('shutdown: reference count is now %d', _sdk_ref_count) _sdk_ref_count -= 1 return None logger.info('shutdown: Shutting down SDK.') try: if _should_shutdown: _rc = nsdk.shutdown() if _rc == ErrorCode.NOT_INITIALIZED: logger.warning('shutdown: native SDK was not initialized') else: nativeagent.checkresult(nsdk, _rc, 'shutdown') _should_shutdown = False except SDKError as e: logger.warning('shutdown failed', exc_info=sys.exc_info()) return e _sdk_ref_count = 0 _sdk_instance = None nativeagent._force_initialize(None) #pylint:disable=protected-access logger.debug('shutdown: completed') return None
def shutdown()
Shut down the SDK. :returns: An exception object if an error occurred, a falsy value otherwise. :rtype: Exception
3.579187
3.210112
1.114972
if not tracer_h: return if e_ty is None and e_val is None: e_ty, e_val = sys.exc_info()[:2] if e_ty is None and e_val is not None: e_ty = type(e_val) nsdk.tracer_error(tracer_h, getfullname(e_ty), str(e_val))
def error_from_exc(nsdk, tracer_h, e_val=None, e_ty=None)
Attach appropriate error information to tracer_h. If e_val and e_ty are None, the current exception is used.
2.548128
2.610462
0.976122
initial = {} if "request" in context: initial.update({ "referrer": context["request"].META.get("HTTP_REFERER", ""), "campaign": context["request"].GET.get("wlc", "") }) return WaitingListEntryForm(initial=initial)
def waitinglist_entry_form(context)
Get a (new) form object to post a new comment. Syntax:: {% waitinglist_entry_form as [varname] %}
3.744678
4.860417
0.770444
host_and_port = request.urlparts[1] try: host, _ = host_and_port.split(':') except ValueError: # No port yet. Host defaults to '127.0.0.1' in bottle.request. return DEFAULT_BIND return host or DEFAULT_BIND
def _host()
Get the Host from the most recent HTTP request.
6.470524
6.022722
1.074352
logger.debug("wait for {port_num}".format(**locals())) t_start = time.time() sleeps = 0.1 while time.time() - t_start < timeout: try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect((_host(), port_num)) return True except (IOError, socket.error): time.sleep(sleeps) finally: s.close() return False
def wait_for(port_num, timeout)
waits while process starts. Args: port_num - port number timeout - specify how long, in seconds, a command can take before times out. return True if process started, return False if not
2.545629
2.57487
0.988643
log_file = os.path.join(dbpath, 'mongod.log') cmd = [name, "--dbpath", dbpath, "--logpath", log_file, "--logappend", "--repair"] proc = subprocess.Popen( cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) timeout = 45 t_start = time.time() while time.time() - t_start < timeout: line = str(proc.stdout.readline()) logger.info("repair output: %s" % (line,)) return_code = proc.poll() if return_code is not None: if return_code: raise Exception("mongod --repair failed with exit code %s, " "check log file: %s" % (return_code, log_file)) # Success when poll() returns 0 return time.sleep(1) proc.terminate() raise Exception("mongod --repair failed to exit after %s seconds, " "check log file: %s" % (timeout, log_file))
def repair_mongo(name, dbpath)
repair mongodb after usafe shutdown
2.625686
2.586561
1.015126
logger.debug( "mprocess(name={name!r}, config_path={config_path!r}, port={port!r}, " "timeout={timeout!r})".format(**locals())) if not (config_path and isinstance(config_path, str) and os.path.exists(config_path)): raise OSError("can't find config file {config_path}".format(**locals())) cfg = read_config(config_path) cmd = [name, "--config", config_path] if cfg.get('port', None) is None or port: port = port or PortPool().port(check=True) cmd.extend(['--port', str(port)]) host = "{host}:{port}".format(host=_host(), port=port) try: logger.debug("execute process: %s", ' '.join(cmd)) proc = subprocess.Popen( cmd, stdout=DEVNULL if silence_stdout else None, stderr=subprocess.STDOUT) if proc.poll() is not None: logger.debug("process is not alive") raise OSError("Process started, but died immediately.") except (OSError, TypeError) as err: message = "exception while executing process: {err}".format(err=err) logger.debug(message) raise OSError(message) if timeout > 0 and wait_for(port, timeout): logger.debug("process '{name}' has started: pid={proc.pid}, host={host}".format(**locals())) return (proc, host) elif timeout > 0: logger.debug("hasn't connected to pid={proc.pid} with host={host} during timeout {timeout} ".format(**locals())) logger.debug("terminate process with pid={proc.pid}".format(**locals())) kill_mprocess(proc) proc_alive(proc) and time.sleep(3) # wait while process stoped message = ("Could not connect to process during " "{timeout} seconds".format(timeout=timeout)) raise TimeoutError(message, errno.ETIMEDOUT) return (proc, host)
def mprocess(name, config_path, port=None, timeout=180, silence_stdout=True)
start 'name' process with params from config_path. Args: name - process name or path config_path - path to file where should be stored configuration port - process's port timeout - specify how long, in seconds, a command can take before times out. if timeout <=0 - doesn't wait for complete start process silence_stdout - if True (default), redirect stdout to /dev/null return tuple (Popen object, host) if process started, return (None, None) if not
3.442606
3.312762
1.039195
if PY3: try: return process.wait(timeout=timeout) except subprocess.TimeoutExpired as exc: raise TimeoutError(str(exc)) # On Python 2, simulate the timeout parameter and raise TimeoutError. start = time.time() while True: exit_code = process.poll() if exit_code is not None: return exit_code if time.time() - start > timeout: raise TimeoutError("Process %s timed out after %s seconds" % (process.pid, timeout)) time.sleep(0.05)
def wait_mprocess(process, timeout)
Compatibility function for waiting on a process with a timeout. Raises TimeoutError when the timeout is reached.
2.428396
2.513182
0.966263
if process and proc_alive(process): process.terminate() process.communicate() return not proc_alive(process)
def kill_mprocess(process)
kill process Args: process - Popen object for process
4.514599
6.036248
0.747915
for key in ('keyFile', 'logPath', 'dbpath'): remove_path(cfg.get(key, None)) isinstance(config_path, str) and os.path.exists(config_path) and remove_path(config_path)
def cleanup_mprocess(config_path, cfg)
remove all process's stuff Args: config_path - process's options file cfg - process's config
5.008581
5.909231
0.847586
if path is None or not os.path.exists(path): return if platform.system() == 'Windows': # Need to have write permission before deleting the file. os.chmod(path, stat.S_IWRITE) try: if os.path.isdir(path): shutil.rmtree(path) elif os.path.isfile(path): shutil.os.remove(path) except OSError: logger.exception("Could not remove path: %s" % path)
def remove_path(path)
remove path from file system If path is None - do nothing
2.462608
2.383775
1.03307
if config_path is None: config_path = tempfile.mktemp(prefix="mongo-") cfg = params.copy() if 'setParameter' in cfg: set_parameters = cfg.pop('setParameter') try: for key, value in set_parameters.items(): cfg['setParameter = ' + key] = value except AttributeError: reraise(RequestError, 'Not a valid value for setParameter: %r ' 'Expected "setParameter": {<param name> : value, ...}' % set_parameters) # fix boolean value for key, value in cfg.items(): if isinstance(value, bool): cfg[key] = json.dumps(value) with open(config_path, 'w') as fd: data = '\n'.join('%s=%s' % (key, item) for key, item in cfg.items()) fd.write(data) return config_path
def write_config(params, config_path=None)
write mongo*'s config file Args: params - options wich file contains config_path - path to the config_file, will create if None Return config_path where config_path - path to mongo*'s options file
3.629696
3.618781
1.003016
result = {} with open(config_path, 'r') as fd: for line in fd.readlines(): if '=' in line: key, value = line.split('=', 1) try: result[key] = json.loads(value) except ValueError: result[key] = value.rstrip('\n') return result
def read_config(config_path)
read config_path and return options as dictionary
2.017526
1.93965
1.040149
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.bind((_host(), port)) return True except socket.error: return False finally: s.close()
def __check_port(self, port)
check port status return True if port is free, False else
2.669137
2.624048
1.017183
if port in self.__closed: self.__closed.remove(port) self.__ports.add(port)
def release_port(self, port)
release port
5.08552
4.91672
1.034332
if not self.__ports: # refresh ports if sequence is empty self.refresh() try: port = self.__ports.pop() if check: while not self.__check_port(port): self.release_port(port) port = self.__ports.pop() except (IndexError, KeyError): raise IndexError("Could not find a free port,\nclosed ports: {closed}".format(closed=self.__closed)) self.__closed.add(port) return port
def port(self, check=False)
return next opened port Args: check - check is port realy free
4.491643
4.337358
1.035571
if only_closed: opened = filter(self.__check_port, self.__closed) self.__closed = self.__closed.difference(opened) self.__ports = self.__ports.union(opened) else: ports = self.__closed.union(self.__ports) self.__ports = set(filter(self.__check_port, ports)) self.__closed = ports.difference(self.__ports)
def refresh(self, only_closed=False)
refresh ports status Args: only_closed - check status only for closed ports
3.000093
2.723241
1.101663
self.__init_range(min_port, max_port, port_sequence)
def change_range(self, min_port=1025, max_port=2000, port_sequence=None)
change Pool port range
5.126773
4.748275
1.079713
prefix = '/' + version if version else "" for r in routes: path, method = r route(prefix + path, method, routes[r])
def setup_versioned_routes(routes, version=None)
Set up routes with a version prefix.
6.117077
5.260535
1.162824
logger.info('daemonize_posix') try: pid = os.fork() if pid > 0: logger.debug('forked first child, pid = %d' % (pid,)) return pid logger.debug('in child after first fork, pid = %d' % (pid, )) except OSError as error: logger.exception('fork #1') sys.stderr.write("fork #1 failed: %d (%s)\n" % (error.errno, error.strerror)) sys.exit(1) # decouple from parent environment os.chdir("/") os.setsid() os.umask(0) # do second fork try: pid = os.fork() if pid > 0: # exit from second parent logger.debug('forked second child, pid = %d, exiting' % (pid,)) sys.exit(0) except OSError as error: logger.exception('fork #2') sys.stderr.write("fork #2 failed: %d (%s)\n" % (error.errno, error.strerror)) sys.exit(1) # redirect standard file descriptors logger.info('daemonized, pid = %d' % (pid, )) sys.stdin.flush() sys.stdout.flush() sys.stderr.flush() os.dup2(self.stdin.fileno(), sys.stdin.fileno()) os.dup2(self.stdout.fileno(), sys.stdout.fileno()) os.dup2(self.stderr.fileno(), sys.stderr.fileno()) # write pidfile atexit.register(self.delpid) pid = str(os.getpid()) with open(self.pidfile, 'w+') as fd: fd.write("%s\n" % pid)
def daemonize_posix(self)
do the UNIX double-fork magic, see Stevens' "Advanced Programming in the UNIX Environment" for details (ISBN 0201563177) http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
1.579396
1.565337
1.008982
# Check for a pidfile to see if the daemon already runs logger.info('Starting daemon') try: with open(self.pidfile, 'r') as fd: pid = int(fd.read().strip()) except IOError: pid = None if pid: message = "pidfile %s already exist. Daemon already running?\n" sys.stderr.write(message % self.pidfile) sys.exit(1) # Start the daemon pid = self.daemonize() if pid: return pid self.run()
def start(self)
Start the daemon
2.225248
2.108263
1.055489
# Get the pid from the pidfile logger.debug("reading %s" % (self.pidfile,)) try: with open(self.pidfile, 'r') as fd: pid = int(fd.read().strip()) except IOError: logger.exception("reading %s" % (self.pidfile, )) pid = None if not pid: message = "pidfile %s does not exist. Daemon not running?\n" sys.stderr.write(message % self.pidfile) return # not an error in a restart if os.name == "nt": subprocess.call(["taskkill", "/f", "/t", "/pid", str(pid)]) if os.path.exists(self.pidfile): os.remove(self.pidfile) else: # Try killing the daemon process try: os.kill(pid, SIGTERM) except OSError as err: err = str(err) if err.find("No such process") > 0: if os.path.exists(self.pidfile): os.remove(self.pidfile) else: raise
def stop(self)
Stop the daemon
1.87355
1.806767
1.036962
for item in self.server_map: self.member_del(item, reconfig=False) self.server_map.clear()
def cleanup(self)
remove all members without reconfig
9.852913
5.495946
1.79276
for key, value in self.server_map.items(): if value == hostname: return key
def host2id(self, hostname)
return member id by hostname
4.337164
3.605918
1.202791
self.server_map = dict([(member['_id'], member['host']) for member in config['members']])
def update_server_map(self, config)
update server_map ({member_id:hostname})
7.722077
4.245595
1.818845
self.update_server_map(config) # init_server - server which can init replica set init_server = [member['host'] for member in config['members'] if not (member.get('arbiterOnly', False) or member.get('priority', 1) == 0)][0] servers = [member['host'] for member in config['members']] if not self.wait_while_reachable(servers): logger.error("all servers must be reachable") self.cleanup() return False try: result = self.connection(init_server).admin.command("replSetInitiate", config) logger.debug("replica init result: {result}".format(**locals())) except pymongo.errors.PyMongoError: raise if int(result.get('ok', 0)) == 1: # Wait while members come up return self.waiting_member_state() else: self.cleanup() return False
def repl_init(self, config)
create replica set by config return True if replica set created successfuly, else False
4.560389
4.26102
1.070258
# Need to use self.server_map, in case no Servers are left running. for member_id in self.server_map: host = self.member_id_to_host(member_id) server_id = self._servers.host_to_server_id(host) # Reset each member. self._servers.command(server_id, 'reset') # Wait for all members to have a state of 1, 2, or 7. # Note that this also waits for a primary to become available. self.waiting_member_state() # Wait for Server states to match the config from the primary. self.waiting_config_state() return self.info()
def reset(self)
Ensure all members are running and available.
7.315045
6.417456
1.139867
cfg = config.copy() cfg['version'] += 1 try: result = self.run_command("replSetReconfig", cfg) if int(result.get('ok', 0)) != 1: return False except pymongo.errors.AutoReconnect: self.update_server_map(cfg) # use new server_map self.waiting_member_state() self.waiting_config_state() return self.connection() and True
def repl_update(self, config)
Reconfig Replicaset with new config
5.926288
5.277911
1.122847
hosts = ','.join(x['host'] for x in self.members()) mongodb_uri = 'mongodb://' + hosts + '/?replicaSet=' + self.repl_id result = {"id": self.repl_id, "auth_key": self.auth_key, "members": self.members(), "mongodb_uri": mongodb_uri, "orchestration": 'replica_sets'} if self.login: # Add replicaSet URI parameter. uri = ('%s&replicaSet=%s' % (self.mongodb_auth_uri(hosts), self.repl_id)) result['mongodb_auth_uri'] = uri return result
def info(self)
return information about replica set
5.211688
4.623315
1.127262
repl_config = self.config member_id = max([member['_id'] for member in repl_config['members']]) + 1 member_config = self.member_create(params, member_id) repl_config['members'].append(member_config) if not self.repl_update(repl_config): self.member_del(member_id, reconfig=True) raise ReplicaSetError("Could not add member to ReplicaSet.") return member_id
def repl_member_add(self, params)
create a new mongod instance and add it to the replica set. Args: params - mongod params return True if operation succeeds otherwise False
3.812676
3.813761
0.999716
logger.debug("run_command({command}, {arg}, {is_eval}, {member_id})".format(**locals())) mode = is_eval and 'eval' or 'command' hostname = None if isinstance(member_id, int): hostname = self.member_id_to_host(member_id) result = getattr(self.connection(hostname=hostname).admin, mode)(command, arg) logger.debug("command result: {result}".format(result=result)) return result
def run_command(self, command, arg=None, is_eval=False, member_id=None)
run command on replica set if member_id is specified command will be execute on this server if member_id is not specified command will be execute on the primary Args: command - command string arg - command argument is_eval - if True execute command as eval member_id - member id return command's result
3.209944
2.980452
1.076999
try: admin = self.connection().admin config = admin.command('replSetGetConfig')['config'] except pymongo.errors.OperationFailure: # replSetGetConfig was introduced in 2.7.5. config = self.connection().local.system.replset.find_one() return config
def config(self)
return replica set config, use rs.conf() command
4.127492
3.675673
1.122921
member_config = params.get('rsParams', {}) server_id = params.pop('server_id', None) version = params.pop('version', self._version) proc_params = {'replSet': self.repl_id} proc_params.update(params.get('procParams', {})) if self.enable_ipv6: enable_ipv6_single(proc_params) # Make sure that auth isn't set the first time we start the servers. proc_params = self._strip_auth(proc_params) # Don't pass in auth_key the first time we start the servers. server_id = self._servers.create( name='mongod', procParams=proc_params, sslParams=self.sslParams, version=version, server_id=server_id ) member_config.update({"_id": member_id, "host": self._servers.hostname(server_id)}) return member_config
def member_create(self, params, member_id)
start new mongod instances as part of replica set Args: params - member params member_id - member index return member config
5.096691
4.522647
1.126927
def member_del(self, member_id, reconfig=True):
    """Remove a member from the replica set.

    Args:
        member_id - member index
        reconfig - whether to reconfigure the replica set first

    Returns True (the underlying server is always removed).
    """
    server_id = self._servers.host_to_server_id(
        self.member_id_to_host(member_id))
    if reconfig and member_id in [member['_id'] for member in self.members()]:
        config = self.config
        # NOTE(review): pop(member_id) uses the member id as a *positional
        # index* into config['members']; this only removes the member with
        # _id == member_id while ids and list positions stay aligned --
        # TODO confirm ids are never sparse at this call site.
        config['members'].pop(member_id)
        self.repl_update(config)
    self._servers.remove(server_id)
    return True
remove member from replica set Args: member_id - member index reconfig - is need reconfig replica return True if operation success otherwise False
4.409665
4.218329
1.045358
config = self.config config['members'][member_id].update(params.get("rsParams", {})) return self.repl_update(config)
def member_update(self, member_id, params)
update member's values with reconfig replica Args: member_id - member index params - updates member params return True if operation success otherwise False
11.370342
8.78833
1.2938
server_id = self._servers.host_to_server_id( self.member_id_to_host(member_id)) server_info = self._servers.info(server_id) result = {'_id': member_id, 'server_id': server_id, 'mongodb_uri': server_info['mongodb_uri'], 'procInfo': server_info['procInfo'], 'statuses': server_info['statuses']} if self.login: result['mongodb_auth_uri'] = self.mongodb_auth_uri( self._servers.hostname(server_id)) result['rsInfo'] = {} if server_info['procInfo']['alive']: # Can't call serverStatus on arbiter when running with auth enabled. # (SERVER-5479) if self.login or self.auth_key: arbiter_ids = [member['_id'] for member in self.arbiters()] if member_id in arbiter_ids: result['rsInfo'] = { 'arbiterOnly': True, 'secondary': False, 'primary': False} return result repl = self.run_command('serverStatus', arg=None, is_eval=False, member_id=member_id)['repl'] logger.debug("member {member_id} repl info: {repl}".format(**locals())) for key in ('votes', 'tags', 'arbiterOnly', 'buildIndexes', 'hidden', 'priority', 'slaveDelay', 'votes', 'secondary'): if key in repl: result['rsInfo'][key] = repl[key] result['rsInfo']['primary'] = repl.get('ismaster', False) return result
def member_info(self, member_id)
return information about member
4.292181
4.271036
1.004951
server_id = self._servers.host_to_server_id( self.member_id_to_host(member_id)) return self._servers.command(server_id, command)
def member_command(self, member_id, command)
apply command (start/stop/restart) to member instance of replica set Args: member_id - member index command - string command (start/stop/restart) return True if operation success otherwise False
4.828486
5.275161
0.915325
def members(self):
    """Return a list of {_id, host, server_id, state} dicts, one per member."""
    status = self.run_command(command="replSetGetStatus", is_eval=False)
    return [
        {
            "_id": member['_id'],
            "host": member["name"],
            "server_id": self._servers.host_to_server_id(member["name"]),
            "state": member['state'],
        }
        for member in status['members']
    ]
return list of members information
5.035237
4.807412
1.04739
def get_members_in_state(self, state):
    """Return the hostnames of all replica set members currently in *state*."""
    status = self.run_command(command='replSetGetStatus', is_eval=False)
    return [m['name'] for m in status['members'] if m['state'] == state]
return all members of replica set in specific state
6.36074
4.068819
1.563289
if self.login and not self.restart_required: try: db = client[self.auth_source] if self.x509_extra_user: db.authenticate( DEFAULT_SUBJECT, mechanism='MONGODB-X509' ) else: db.authenticate( self.login, self.password) except Exception: logger.exception( "Could not authenticate to %r as %s/%s" % (client, self.login, self.password)) raise
def _authenticate_client(self, client)
Authenticate the client if necessary.
5.313257
5.114601
1.038841
def connection(self, hostname=None,
               read_preference=pymongo.ReadPreference.PRIMARY,
               timeout=300):
    """Return an authenticated client for the replica set (or one member).

    Returns a MongoReplicaSetClient when *hostname* is None, otherwise a
    plain MongoClient connected to that host.  Retries once per second
    until *timeout* seconds have elapsed, then raises AutoReconnect.

    Args:
        hostname - connection uri of one member, or None for the whole set
        read_preference - default PRIMARY
        timeout - seconds to keep retrying before giving up
    """
    logger.debug("connection({hostname}, {read_preference}, {timeout})".format(**locals()))
    t_start = time.time()
    # Single host when given, otherwise a comma-joined seed list of all members.
    servers = hostname or ",".join(self.server_map.values())
    while True:
        try:
            if hostname is None:
                c = pymongo.MongoReplicaSetClient(
                    servers, replicaSet=self.repl_id,
                    read_preference=read_preference,
                    socketTimeoutMS=self.socket_timeout,
                    w=self._write_concern, fsync=True, **self.kwargs)
                connected(c)
                if c.primary:
                    self._authenticate_client(c)
                    return c
                # No primary yet: raise so the retry loop below kicks in.
                raise pymongo.errors.AutoReconnect("No replica set primary available")
            else:
                logger.debug("connection to the {servers}".format(**locals()))
                c = pymongo.MongoClient(
                    servers, socketTimeoutMS=self.socket_timeout,
                    w=self._write_concern, fsync=True, **self.kwargs)
                connected(c)
                self._authenticate_client(c)
                return c
        except (pymongo.errors.PyMongoError):
            # Log the full traceback, then retry until the deadline passes.
            exc_type, exc_value, exc_tb = sys.exc_info()
            err_message = traceback.format_exception(exc_type, exc_value, exc_tb)
            logger.error("Exception {exc_type} {exc_value}".format(**locals()))
            logger.error(err_message)
            if time.time() - t_start > timeout:
                raise pymongo.errors.AutoReconnect("Couldn't connect while timeout {timeout} second".format(**locals()))
            time.sleep(1)
return MongoReplicaSetClient object if hostname specified return MongoClient object if hostname doesn't specified Args: hostname - connection uri read_preference - default PRIMARY timeout - specify how long, in seconds, a command can take before server times out.
2.685024
2.672918
1.004529
return [ { "_id": self.host2id(member), "host": member, "server_id": self._servers.host_to_server_id(member) } for member in self.get_members_in_state(2) ]
def secondaries(self)
return list of secondaries members
6.187929
4.94754
1.250708
return [ { "_id": self.host2id(member), "host": member, "server_id": self._servers.host_to_server_id(member) } for member in self.get_members_in_state(7) ]
def arbiters(self)
return list of arbiters
5.97486
5.513206
1.083736
members = [self.member_info(item["_id"]) for item in self.members()] result = [] for member in members: if member['rsInfo'].get('hidden'): server_id = member['server_id'] result.append({ '_id': member['_id'], 'host': self._servers.hostname(server_id), 'server_id': server_id}) return result
def hidden(self)
return list of hidden members
5.400218
4.725083
1.142883
servers = self.run_command('ismaster').get('passives', []) return [member for member in self.members() if member['host'] in servers]
def passives(self)
return list of passive servers
10.640398
8.314163
1.279792
def wait_while_reachable(self, servers, timeout=60):
    """Wait until every server in *servers* answers an ismaster command.

    Args:
        servers - list of "host:port" strings
        timeout - seconds to keep retrying before giving up

    Returns True if all servers became reachable within *timeout*,
    otherwise False.
    """
    t_start = time.time()
    while True:
        try:
            for server in servers:
                # TODO: use state code to check if server is reachable
                server_info = self.connection(
                    hostname=server, timeout=5).admin.command('ismaster')
                logger.debug("server_info: {server_info}".format(
                    server_info=server_info))
                if int(server_info['ok']) != 1:
                    # BUG FIX: the original used format(**locals) -- the
                    # builtin function object, not its result -- which
                    # raised TypeError instead of this OperationFailure.
                    raise pymongo.errors.OperationFailure(
                        "{server} is not reachable".format(**locals()))
            return True
        except (KeyError, AttributeError, pymongo.errors.AutoReconnect,
                pymongo.errors.OperationFailure):
            if time.time() - t_start > timeout:
                return False
            time.sleep(0.1)
wait until all servers are reachable Args: servers - list of servers
3.085078
3.072927
1.003954
def waiting_member_state(self, timeout=300):
    """Poll until all RS members report an acceptable state.

    Returns True on success, False once *timeout* seconds have elapsed.
    """
    deadline = time.time() + timeout
    while not self.check_member_state():
        if time.time() > deadline:
            return False
        time.sleep(0.1)
    return True
Wait for all RS members to be in an acceptable state.
2.51876
2.502418
1.006531
def waiting_config_state(self, timeout=300):
    """Poll until the live member state matches the configured state.

    Args:
        timeout - seconds before giving up

    Returns True on success, False on timeout.
    """
    deadline = time.time() + timeout
    while not self.check_config_state():
        if time.time() > deadline:
            return False
        time.sleep(0.1)
    return True
waiting while real state equal config state Args: timeout - specify how long, in seconds, a command can take before server times out. return True if operation success otherwise False
2.49663
2.352852
1.061108
bad_states = (0, 3, 4, 5, 6, 9) try: rs_status = self.run_command('replSetGetStatus') bad_members = [member for member in rs_status['members'] if member['state'] in bad_states] if bad_members: return False except pymongo.errors.AutoReconnect: # catch 'No replica set primary available' Exception return False logger.debug("all members in correct state") return True
def check_member_state(self)
Verify that all RS members have an acceptable state.
4.544603
3.946075
1.151677
config = self.config self.update_server_map(config) for member in config['members']: cfg_member_info = self.default_params.copy() cfg_member_info.update(member) # Remove attributes we can't check. for attr in ('priority', 'votes', 'tags', 'buildIndexes'): cfg_member_info.pop(attr, None) cfg_member_info['host'] = cfg_member_info['host'].lower() real_member_info = self.default_params.copy() info = self.member_info(member["_id"]) real_member_info["_id"] = info['_id'] member_hostname = self._servers.hostname(info['server_id']) real_member_info["host"] = member_hostname.lower() real_member_info.update(info['rsInfo']) logger.debug("real_member_info({member_id}): {info}".format(member_id=member['_id'], info=info)) for key in cfg_member_info: if cfg_member_info[key] != real_member_info.get(key, None): logger.debug("{key}: {value1} ! = {value2}".format(key=key, value1=cfg_member_info[key], value2=real_member_info.get(key, None))) return False return True
def check_config_state(self)
Return True if real state equal config state otherwise False.
3.316527
3.167645
1.047001
for member_id in self.server_map: host = self.server_map[member_id] server_id = self._servers.host_to_server_id(host) server = self._servers._storage[server_id] server.restart(timeout, config_callback) self.waiting_member_state()
def restart(self, timeout=300, config_callback=None)
Restart each member of the replica set.
5.318552
4.492019
1.184
super(ReplicaSets, self).set_settings(releases, default_release) Servers().set_settings(releases, default_release)
def set_settings(self, releases=None, default_release=None)
set path to storage
6.188276
6.278191
0.985678
repl_id = rs_params.get('id', None) if repl_id is not None and repl_id in self: raise ReplicaSetError( "replica set with id={id} already exists".format(id=repl_id)) repl = ReplicaSet(rs_params) self[repl.repl_id] = repl return repl.repl_id
def create(self, rs_params)
create new replica set Args: rs_params - replica set configuration Return repl_id which can use to take the replica set
3.198953
2.79117
1.146097
repl = self[repl_id] primary = repl.primary() return repl.member_info(repl.host2id(primary))
def primary(self, repl_id)
find and return primary hostname Args: repl_id - replica set identity
10.573673
11.384813
0.928752
def remove(self, repl_id):
    """Remove the replica set *repl_id*, killing all of its members.

    Args:
        repl_id - replica set identity

    Raises KeyError if *repl_id* is unknown.
    """
    repl = self._storage.pop(repl_id)
    repl.cleanup()
    # The original ended with `del(repl)`, which only unbinds the local
    # name (a no-op for the object itself); dropped.
remove replica set with kill members Args: repl_id - replica set identity return True if operation success otherwise False
8.206385
12.041945
0.681483
def command(self, rs_id, command, *args):
    """Call the ReplicaSet method named *command* on set *rs_id*.

    Raises ValueError when no such method exists.
    """
    replica_set = self._storage[rs_id]
    error_message = ("Cannot issue the command %r to ReplicaSet %s"
                     % (command, rs_id))
    try:
        return getattr(replica_set, command)(*args)
    except AttributeError:
        raise ValueError(error_message)
Call a ReplicaSet method.
4.158048
3.542956
1.17361
repl = self[repl_id] result = repl.member_del(member_id) self[repl_id] = repl return result
def member_del(self, repl_id, member_id)
remove member from replica set (reconfig replica) Args: repl_id - replica set identity member_id - member index
3.512825
4.586907
0.765837
repl = self[repl_id] member_id = repl.repl_member_add(params) self[repl_id] = repl return member_id
def member_add(self, repl_id, params)
create an instance and add it to an existing replica set Args: repl_id - replica set identity params - member params return True if operation succeeds otherwise False
4.2566
4.630814
0.919191
repl = self[repl_id] result = repl.member_command(member_id, command) self[repl_id] = repl return result
def member_command(self, repl_id, member_id, command)
apply command(start, stop, restart) to the member of replica set Args: repl_id - replica set identity member_id - member index command - command: start, stop, restart return True if operation success otherwise False
3.115514
4.241361
0.734555
repl = self[repl_id] result = repl.member_update(member_id, params) self[repl_id] = repl return result
def member_update(self, repl_id, member_id, params)
apply new params to replica set member Args: repl_id - replica set identity member_id - member index params - new member's params return True if operation success otherwise False
3.434345
4.00587
0.857328
if self.auth_key: key_file_path = os.path.join(orchestration_mkdtemp(), 'key') with open(key_file_path, 'w') as fd: fd.write(self.auth_key) os.chmod(key_file_path, stat.S_IRUSR) return key_file_path
def key_file(self)
Get the path to the key file containing our auth key, or None.
2.914724
2.58404
1.127972
params = proc_params.copy() params.pop("auth", None) params.pop("clusterAuthMode", None) return params
def _strip_auth(self, proc_params)
Remove options from parameters that cause auth to be enabled.
4.594841
3.608183
1.27345
def mongodb_auth_uri(self, hosts):
    """Build a ``mongodb://`` URI for *hosts* carrying full auth information."""
    credentials = ''
    options = ''
    if self.login:
        credentials = self.login
        if self.password:
            credentials += ':' + self.password
        credentials += '@'
        options = '?authSource=' + self.auth_source
        if self.x509_extra_user:
            options += '&authMechanism=MONGODB-X509'
    return 'mongodb://' + credentials + hosts + '/' + options
Get a connection string with all info necessary to authenticate.
2.472293
2.386461
1.035966
if self.x509_extra_user: # Build dict of kwargs to pass to add_user. auth_dict = { 'name': DEFAULT_SUBJECT, 'roles': self._user_roles(db.client) } db.add_user(**auth_dict) # Fix kwargs to MongoClient. self.kwargs['ssl_certfile'] = DEFAULT_CLIENT_CERT # Add secondary user given from request. secondary_login = { 'name': self.login, 'roles': self._user_roles(db.client) } if self.password: secondary_login['password'] = self.password if mongo_version >= (3, 7, 2): # Use SCRAM_SHA-1 so that pymongo < 3.7 can authenticate. secondary_login['mechanisms'] = ['SCRAM-SHA-1'] db.add_user(**secondary_login)
def _add_users(self, db, mongo_version)
Add given user, and extra x509 user if necessary.
5.328122
4.874329
1.093099
link = _BASE_LINKS[rel].copy() link['rel'] = 'self' if self_rel else rel return link
def base_link(rel, self_rel=False)
Helper for getting a link document under the API root, given a rel.
4.7829
4.182787
1.143472
links = [ base_link('get-releases'), base_link('service'), server_link('get-servers'), server_link('add-server'), replica_set_link('add-replica-set'), replica_set_link('get-replica-sets'), sharded_cluster_link('add-sharded-cluster'), sharded_cluster_link('get-sharded-clusters') ] for link in links: if link['rel'] == rel_to: link['rel'] = 'self' return links
def all_base_links(rel_to=None)
Get a list of all links to be included to base (/) API requests.
2.887203
2.840647
1.016389
servers_href = '/v1/servers' link = _SERVER_LINKS[rel].copy() link['href'] = link['href'].format(**locals()) link['rel'] = 'self' if self_rel else rel return link
def server_link(rel, server_id=None, self_rel=False)
Helper for getting a Server link document, given a rel.
4.391305
4.224217
1.039555
return [ server_link(rel, server_id, self_rel=(rel == rel_to)) for rel in ('delete-server', 'get-server-info', 'server-command') ]
def all_server_links(server_id, rel_to=None)
Get a list of all links to be included with Servers.
6.343564
6.634352
0.956169
repls_href = '/v1/replica_sets' link = _REPLICA_SET_LINKS[rel].copy() link['href'] = link['href'].format(**locals()) link['rel'] = 'self' if self_rel else rel return link
def replica_set_link(rel, repl_id=None, member_id=None, self_rel=False)
Helper for getting a ReplicaSet link document, given a rel.
4.571668
4.573666
0.999563
return [ replica_set_link(rel, rs_id, self_rel=(rel == rel_to)) for rel in ( 'get-replica-set-info', 'delete-replica-set', 'replica-set-command', 'get-replica-set-members', 'add-replica-set-member', 'get-replica-set-secondaries', 'get-replica-set-primary', 'get-replica-set-arbiters', 'get-replica-set-hidden-members', 'get-replica-set-passive-members', 'get-replica-set-servers' ) ]
def all_replica_set_links(rs_id, rel_to=None)
Get a list of all links to be included with replica sets.
3.096224
3.154579
0.981502
clusters_href = '/v1/sharded_clusters' link = _SHARDED_CLUSTER_LINKS[rel].copy() link['href'] = link['href'].format(**locals()) link['rel'] = 'self' if self_rel else rel return link
def sharded_cluster_link(rel, cluster_id=None, shard_id=None, router_id=None, self_rel=False)
Helper for getting a ShardedCluster link document, given a rel.
4.309642
4.064373
1.060346
return [ sharded_cluster_link(rel, cluster_id, shard_id, router_id, self_rel=(rel == rel_to)) for rel in ( 'get-sharded-clusters', 'get-sharded-cluster-info', 'sharded-cluster-command', 'delete-sharded-cluster', 'add-shard', 'get-shards', 'get-configsvrs', 'get-routers', 'add-router' ) ]
def all_sharded_cluster_links(cluster_id, shard_id=None, router_id=None, rel_to=None)
Get a list of all links to be included with ShardedClusters.
4.20873
4.199787
1.002129
def cleanup_storage(*args):
    """Clean up processes after SIGTERM or SIGINT is received.

    Installed as a signal handler; *args* receives the (signum, frame)
    pair and is ignored.  Tears down every managed topology -- sharded
    clusters first, then replica sets, then standalone servers -- and
    exits the process.
    """
    ShardedClusters().cleanup()
    ReplicaSets().cleanup()
    Servers().cleanup()
    sys.exit(0)
Clean up processes after SIGTERM or SIGINT is received.
19.477207
14.36276
1.356091
def read_env():
    """Parse command-line arguments (plus the optional JSON config file).

    Returns the argparse namespace.  When a config file is supplied, its
    'releases' section is attached as ``cli_args.releases``.  Exits with
    status 1 on missing/corrupt configuration or an unknown release.
    """
    parser = argparse.ArgumentParser(description='mongo-orchestration server')
    parser.add_argument('-f', '--config', action='store', default=None,
                        type=str, dest='config')
    parser.add_argument('-e', '--env', action='store', type=str, dest='env',
                        default=None)
    parser.add_argument(action='store', type=str, dest='command',
                        default='start', choices=('start', 'stop', 'restart'))
    parser.add_argument('--no-fork', action='store_true', dest='no_fork',
                        default=False)
    parser.add_argument('-b', '--bind', action='store', dest='bind', type=str,
                        default=DEFAULT_BIND)
    parser.add_argument('-p', '--port', action='store', dest='port', type=int,
                        default=DEFAULT_PORT)
    parser.add_argument('--enable-majority-read-concern', action='store_true',
                        default=False)
    parser.add_argument('-s', '--server', action='store', dest='server',
                        type=str, default=DEFAULT_SERVER,
                        choices=('cherrypy', 'wsgiref'))
    parser.add_argument('--version', action='version',
                        version='Mongo Orchestration v' + __version__)
    parser.add_argument('--socket-timeout-ms', action='store',
                        dest='socket_timeout', type=int,
                        default=DEFAULT_SOCKET_TIMEOUT)
    parser.add_argument('--pidfile', action='store', type=str, dest='pidfile',
                        default=PID_FILE)

    cli_args = parser.parse_args()

    if cli_args.env and not cli_args.config:
        print("Specified release '%s' without a config file" % cli_args.env)
        sys.exit(1)
    # 'stop' needs no releases; likewise when no config file was given.
    if cli_args.command == 'stop' or not cli_args.config:
        return cli_args
    try:
        # read config (SON preserves key order for downstream consumers)
        with open(cli_args.config, 'r') as fd:
            config = json.loads(fd.read(), object_pairs_hook=SON)
        # Idiom fix: `'releases' not in config` over `not 'releases' in ...`.
        if 'releases' not in config:
            print("No releases defined in %s" % cli_args.config)
            sys.exit(1)
        releases = config['releases']
        if cli_args.env is not None and cli_args.env not in releases:
            print("Release '%s' is not defined in %s"
                  % (cli_args.env, cli_args.config))
            sys.exit(1)
        cli_args.releases = releases
        return cli_args
    except IOError:
        print("config file not found")
        sys.exit(1)
    except ValueError:
        print("config file is corrupted")
        sys.exit(1)
return command-line arguments
2.102037
2.06701
1.016946
from mongo_orchestration import set_releases, cleanup_storage set_releases(releases, default_release) signal.signal(signal.SIGTERM, cleanup_storage) signal.signal(signal.SIGINT, cleanup_storage)
def setup(releases, default_release)
setup storages
4.753806
4.226957
1.12464
from bottle import default_app default_app.push() for module in ("mongo_orchestration.apps.servers", "mongo_orchestration.apps.replica_sets", "mongo_orchestration.apps.sharded_clusters"): __import__(module) app = default_app.pop() return app
def get_app()
return bottle app that includes all sub-apps
4.708878
4.582235
1.027638
for i in range(CONNECT_ATTEMPTS): try: conn = socket.create_connection((host, port), CONNECT_TIMEOUT) conn.close() return True except (IOError, socket.error): time.sleep(1) return False
def await_connection(host, port)
Wait for the mongo-orchestration server to accept connections.
2.409432
2.488177
0.968352
if self.version >= (2, 4): params = config.get('setParameter', {}) # Set enableTestCommands by default but allow enableTestCommands:0. params.setdefault('enableTestCommands', 1) # Reduce transactionLifetimeLimitSeconds for faster driver testing. if self.version >= (4, 1) and not self.is_mongos: params.setdefault('transactionLifetimeLimitSeconds', 3) # Increase transaction lock timeout to reduce the chance that tests # fail with LockTimeout: "Unable to acquire lock {...} within 5ms". if self.version >= (4, 0) and not self.is_mongos: params.setdefault('maxTransactionLockRequestTimeoutMillis', 25) config['setParameter'] = params compressors = config.get('networkMessageCompressors') if compressors is None: if self.version >= (4, 1, 7): # SERVER-38168 added zstd support in 4.1.7. config['networkMessageCompressors'] = 'zstd,zlib,snappy,noop' elif self.version >= (3, 5, 9): # SERVER-27310 added zlib support in 3.5.9. config['networkMessageCompressors'] = 'zlib,snappy,noop' elif self.version >= (3, 4): config['networkMessageCompressors'] = 'snappy,noop'
def __init_config_params(self, config)
Conditionally enable options in the Server's config file.
4.607162
4.517455
1.019858
c = pymongo.MongoClient( self.hostname, fsync=True, socketTimeoutMS=self.socket_timeout, **self.kwargs) connected(c) if not self.is_mongos and self.login and not self.restart_required: db = c[self.auth_source] if self.x509_extra_user: auth_dict = { 'name': DEFAULT_SUBJECT, 'mechanism': 'MONGODB-X509'} else: auth_dict = {'name': self.login, 'password': self.password} try: db.authenticate(**auth_dict) except: logger.exception("Could not authenticate to %s with %r" % (self.hostname, auth_dict)) raise return c
def connection(self)
return authenticated connection
4.331326
4.116457
1.052198
if not self.__version: command = (self.name, '--version') logger.debug(command) stdout, _ = subprocess.Popen( command, stdout=subprocess.PIPE).communicate() version_output = str(stdout) match = re.search(self.version_patt, version_output) if match is None: raise ServersError( 'Could not determine version of %s from string: %s' % (self.name, version_output)) version_string = match.group('version') self.__version = tuple(map(int, version_string.split('.'))) return self.__version
def version(self)
Get the version of MongoDB that this Server runs as a tuple.
3.031488
2.753084
1.101125
def run_command(self, command, arg=None, is_eval=False):
    """Run a command (or eval) against the server's admin database.

    Args:
        command - command string
        arg - command argument: a bare value, or a (value, kwargs) tuple
        is_eval - if True execute command as eval

    Returns the command's result document.
    """
    # Conditional expression instead of the legacy `and ... or ...` idiom
    # (which also silently misbehaves for falsy truth-branch values).
    mode = 'eval' if is_eval else 'command'
    if isinstance(arg, tuple):
        name, options = arg
    else:
        name, options = arg, {}
    return getattr(self.connection.admin, mode)(command, name, **options)
run command on the server Args: command - command string arg - command argument is_eval - if True execute command as eval return command's result
5.241944
5.531235
0.947699
proc_info = {"name": self.name, "params": self.cfg, "alive": self.is_alive, "optfile": self.config_path} if self.is_alive: proc_info['pid'] = self.proc.pid logger.debug("proc_info: {proc_info}".format(**locals())) mongodb_uri = '' server_info = {} status_info = {} if self.hostname and self.cfg.get('port', None): try: c = self.connection server_info = c.server_info() logger.debug("server_info: {server_info}".format(**locals())) mongodb_uri = 'mongodb://' + self.hostname status_info = {"primary": c.is_primary, "mongos": c.is_mongos} logger.debug("status_info: {status_info}".format(**locals())) except (pymongo.errors.AutoReconnect, pymongo.errors.OperationFailure, pymongo.errors.ConnectionFailure): server_info = {} status_info = {} result = {"mongodb_uri": mongodb_uri, "statuses": status_info, "serverInfo": server_info, "procInfo": proc_info, "orchestration": 'servers'} if self.login: result['mongodb_auth_uri'] = self.mongodb_auth_uri(self.hostname) logger.debug("return {result}".format(result=result)) return result
def info(self)
return info about server as dict object
3.283996
3.186856
1.030482
if self.is_alive: return True try: dbpath = self.cfg.get('dbpath') if dbpath and self._is_locked: # repair if needed logger.info("Performing repair on locked dbpath %s", dbpath) process.repair_mongo(self.name, self.cfg['dbpath']) self.proc, self.hostname = process.mprocess( self.name, self.config_path, self.cfg.get('port', None), timeout, self.silence_stdout) self.pid = self.proc.pid logger.debug("pid={pid}, hostname={hostname}".format(pid=self.pid, hostname=self.hostname)) self.host = self.hostname.split(':')[0] self.port = int(self.hostname.split(':')[1]) # Wait for Server to respond to isMaster. # Only try 6 times, each ConnectionFailure is 30 seconds. max_attempts = 6 for i in range(max_attempts): try: self.run_command('isMaster') break except pymongo.errors.ConnectionFailure: logger.exception('isMaster command failed:') else: raise TimeoutError( "Server did not respond to 'isMaster' after %d attempts." % max_attempts) except (OSError, TimeoutError): logpath = self.cfg.get('logpath') if logpath: # Copy the server logs into the mongo-orchestration logs. logger.error( "Could not start Server. Please find server log below.\n" "=====================================================") with open(logpath) as lp: logger.error(lp.read()) else: logger.exception( 'Could not start Server, and no logpath was provided!') reraise(TimeoutError, 'Could not start Server. ' 'Please check server log located in ' + self.cfg.get('logpath', '<no logpath given>') + ' or the mongo-orchestration log in ' + LOG_FILE + ' for more details.') if self.restart_required: if self.login: # Add users to the appropriate database. self._add_users() self.stop() # Restart with keyfile and auth. if self.is_mongos: self.config_path, self.cfg = self.__init_mongos(self.cfg) else: # Add auth options to this Server's config file. self.config_path, self.cfg = self.__init_mongod( self.cfg, add_auth=True) self.restart_required = False self.start() return True
def start(self, timeout=300)
start server return True of False
4.368931
4.342755
1.006028
# Return early if this server has already exited. if not process.proc_alive(self.proc): return logger.info("Attempting to connect to %s", self.hostname) client = self.connection # Attempt the shutdown command twice, the first attempt might fail due # to an election. attempts = 2 for i in range(attempts): logger.info("Attempting to send shutdown command to %s", self.hostname) try: client.admin.command("shutdown", force=True) except ConnectionFailure: # A shutdown succeeds by closing the connection but a # connection error does not necessarily mean that the shutdown # has succeeded. pass # Wait for the server to exit otherwise rerun the shutdown command. try: return process.wait_mprocess(self.proc, 5) except TimeoutError as exc: logger.info("Timed out waiting on process: %s", exc) continue raise ServersError("Server %s failed to shutdown after %s attempts" % (self.hostname, attempts))
def shutdown(self)
Send shutdown command and wait for the process to exit.
5.655403
5.313157
1.064415
def stop(self):
    """Stop the server; fall back to a kill signal if shutdown fails."""
    try:
        self.shutdown()
    except (PyMongoError, ServersError) as exc:
        logger.info("Killing %s with signal, shutdown command failed: %r",
                    self.name, exc)
        return process.kill_mprocess(self.proc)
stop server
12.36088
11.923445
1.036687
self.stop() if config_callback: self.cfg = config_callback(self.cfg.copy()) self.config_path = process.write_config(self.cfg) return self.start(timeout)
def restart(self, timeout=300, config_callback=None)
restart server: stop() and start() return status of start command
4.52153
4.062832
1.112901
name = os.path.split(name)[1] if server_id is None: server_id = str(uuid4()) if server_id in self: raise ServersError("Server with id %s already exists." % server_id) bin_path = self.bin_path(version) server = Server(os.path.join(bin_path, name), procParams, sslParams, auth_key, login, password, auth_source) if autostart: server.start(timeout) self[server_id] = server return server_id
def create(self, name, procParams, sslParams={}, auth_key=None, login=None, password=None, auth_source='admin', timeout=300, autostart=True, server_id=None, version=None)
create new server Args: name - process name or path procParams - dictionary with specific params for instance auth_key - authorization key login - username for the admin collection password - password timeout - specify how long, in seconds, a command can take before times out. autostart - (default: True), autostart instance Return server_id where server_id - id which can use to take the server from servers collection
2.493801
2.70569
0.921688