sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def publish_metric(self, metric_name, metric_value, epoch_seconds=None):
    '''Record a single hit on a given metric.

    Args:
        metric_name: The name of the metric to record with Carbon.
        metric_value: The value to record with Carbon.
        epoch_seconds: Optionally specify the time for the metric hit.

    Returns:
        None
    '''
    # Default to "now" according to the reactor's clock.
    when = self._reactor.seconds() if epoch_seconds is None else epoch_seconds
    self._client_factory.publish_metric(metric_name, metric_value, int(when))
Record a single hit on a given metric. Args: metric_name: The name of the metric to record with Carbon. metric_value: The value to record with Carbon. epoch_seconds: Optionally specify the time for the metric hit. Returns: None
entailment
def register_repeating_metric(self, metric_name, frequency, getter):
    '''Record hits to a metric at a specified interval.

    Args:
        metric_name: The name of the metric to record with Carbon.
        frequency: The frequency with which to poll the getter and
            record the value with Carbon.
        getter: A function which takes no arguments and returns the
            value to record with Carbon.

    Returns:
        RepeatingMetricHandle instance. Call .stop() on it to stop
        recording the metric.
    '''
    loop = task.LoopingCall(self._publish_repeating_metric, metric_name, getter)
    handle = RepeatingMetricHandle(loop, frequency)
    self._repeating_metric_handles.append(handle)
    # Start polling immediately if the service is already running;
    # otherwise the handle is started when the service starts.
    if self.running:
        handle.start()
    return handle
Record hits to a metric at a specified interval. Args: metric_name: The name of the metric to record with Carbon. frequency: The frequency with which to poll the getter and record the value with Carbon. getter: A function which takes no arguments and returns the value to record with Carbon. Returns: RepeatingMetricHandle instance. Call .stop() on it to stop recording the metric.
entailment
def setup(console=False, port=None, menu=True):
    """Setup integration

    Registers Pyblish for Maya plug-ins and appends an item to the
    File-menu.

    Arguments:
        console (bool): Display console with GUI
        port (int, optional): Port from which to start looking for an
            available port to connect with Pyblish QML, default
            provided by Pyblish Integration.
        menu (bool): Whether to append an item to the File-menu.

    """
    # Re-running setup tears down any previous registration first.
    if self._has_been_setup:
        teardown()

    register_plugins()
    register_host()

    if menu:
        add_to_filemenu()
        self._has_menu = True

    self._has_been_setup = True
    print("pyblish: Loaded successfully.")
Setup integration Registers Pyblish for Maya plug-ins and appends an item to the File-menu Arguments: console (bool): Display console with GUI port (int, optional): Port from which to start looking for an available port to connect with Pyblish QML, default provided by Pyblish Integration.
entailment
def show():
    """Try showing the most desirable GUI

    This function cycles through the currently registered graphical
    user interfaces, if any, and presents it to the user.
    """
    # Walk up from the active window to find its top-level ancestor,
    # which becomes the parent of the GUI we show.
    parent = None
    current = QtWidgets.QApplication.activeWindow()
    while current:
        parent = current
        current = parent.parent()

    # Fall back to the "no GUI registered" dialog when discovery fails.
    window = (_discover_gui() or _show_no_gui)(parent)
    return window
Try showing the most desirable GUI This function cycles through the currently registered graphical user interfaces, if any, and presents it to the user.
entailment
def _nuke_set_zero_margins(widget_object):
    """Remove Nuke margins when docked UI

    .. _More info: https://gist.github.com/maty974/4739917

    :param widget_object: The docked widget whose surrounding Nuke
        containers should have their margins zeroed.
    """
    parentApp = QtWidgets.QApplication.allWidgets()
    parentWidgetList = []
    # Find widgets of the same class as ``widget_object`` and collect
    # three levels of their parent widgets (the Nuke dock containers).
    for parent in parentApp:
        for child in parent.children():
            if widget_object.__class__.__name__ == child.__class__.__name__:
                parentWidgetList.append(
                    parent.parentWidget())
                parentWidgetList.append(
                    parent.parentWidget().parentWidget())
                parentWidgetList.append(
                    parent.parentWidget().parentWidget().parentWidget())
    # Zero the content margins of every child of the collected parents.
    # The broad try/except is a deliberate best-effort: many children
    # do not implement setContentsMargins.
    for sub in parentWidgetList:
        for tinychild in sub.children():
            try:
                tinychild.setContentsMargins(0, 0, 0, 0)
            except Exception:
                pass
Remove Nuke margins when docked UI .. _More info: https://gist.github.com/maty974/4739917
entailment
def dock(window): """ Expecting a window to parent into a Nuke panel, that is dockable. """ # Deleting existing dock # There is a bug where existing docks are kept in-memory when closed via UI if self._dock: print("Deleting existing dock...") parent = self._dock dialog = None stacked_widget = None main_windows = [] # Getting dock parents while parent: if isinstance(parent, QtWidgets.QDialog): dialog = parent if isinstance(parent, QtWidgets.QStackedWidget): stacked_widget = parent if isinstance(parent, QtWidgets.QMainWindow): main_windows.append(parent) parent = parent.parent() dialog.deleteLater() if len(main_windows) > 1: # Then it's a floating window if stacked_widget.count() == 1: # Then it's empty and we can close it, # as is native Nuke UI behaviour main_windows[0].deleteLater() # Creating new dock pane = nuke.getPaneFor("Properties.1") widget_path = "pyblish_nuke.lib.pyblish_nuke_dockwidget" panel = nukescripts.panels.registerWidgetAsPanel(widget_path, window.windowTitle(), "pyblish_nuke.dock", True).addToPane(pane) panel_widget = panel.customKnob.getObject().widget panel_widget.layout().addWidget(window) _nuke_set_zero_margins(panel_widget) self._dock = panel_widget return self._dock
Expecting a window to parent into a Nuke panel, that is dockable.
entailment
def remove_index_from_handle(handle_with_index):
    '''
    Returns index and handle separately, in a tuple.

    :handle_with_index: The handle string with an index
        (e.g. 500:prefix/suffix)
    :return: index and handle as a tuple.
    '''
    parts = handle_with_index.split(':')
    if len(parts) == 2:
        # An index is present: convert it to an integer.
        parts[0] = int(parts[0])
        return parts
    if len(parts) == 1:
        # No index given.
        return (None, handle_with_index)
    # More than one colon is a syntax error.
    raise handleexceptions.HandleSyntaxError(
        msg='Too many colons',
        handle=handle_with_index,
        expected_syntax='index:prefix/suffix')
Returns index and handle separately, in a tuple. :handle_with_index: The handle string with an index (e.g. 500:prefix/suffix) :return: index and handle as a tuple.
entailment
def check_handle_syntax(string):
    '''
    Checks the syntax of a handle without an index (are prefix and
    suffix there, are there too many slashes?).

    :string: The handle without index, as string prefix/suffix.
    :raise: :exc:`~b2handle.handleexceptions.handleexceptions.HandleSyntaxError`
    :return: True. If it's not ok, exceptions are raised.
    '''
    expected = 'prefix/suffix'

    try:
        parts = string.split('/')
    except AttributeError:
        # Non-string input (e.g. None) cannot be split.
        raise handleexceptions.HandleSyntaxError(
            msg='The provided handle is None', expected_syntax=expected)

    if len(parts) < 2:
        raise handleexceptions.HandleSyntaxError(
            msg='No slash', handle=string, expected_syntax=expected)
    if len(parts[0]) == 0:
        raise handleexceptions.HandleSyntaxError(
            msg='Empty prefix', handle=string, expected_syntax=expected)
    if len(parts[1]) == 0:
        raise handleexceptions.HandleSyntaxError(
            msg='Empty suffix', handle=string, expected_syntax=expected)

    # Handles containing a colon also get the index syntax checked.
    if ':' in string:
        check_handle_syntax_with_index(string, base_already_checked=True)

    return True
Checks the syntax of a handle without an index (are prefix and suffix there, are there too many slashes?). :string: The handle without index, as string prefix/suffix. :raise: :exc:`~b2handle.handleexceptions.handleexceptions.HandleSyntaxError` :return: True. If it's not ok, exceptions are raised.
entailment
def create_authentication_string(username, password):
    '''
    Creates an authentication string from the username and password.

    :username: Username.
    :password: Password.
    :return: The encoded string.
    '''
    # Percent-encode both parts (after UTF-8 encoding), join with a
    # colon, then base64-encode — the HTTP Basic auth wire format.
    credentials = '%s:%s' % (quote(username.encode('utf-8')),
                             quote(password.encode('utf-8')))
    return base64.b64encode(credentials.encode('utf-8')).decode('utf-8')
Creates an authentication string from the username and password. :username: Username. :password: Password. :return: The encoded string.
entailment
def make_request_log_message(**args):
    '''
    Creates a string containing all relevant information about a
    request made to the Handle System, for logging purposes.

    :handle: The handle that the request is about.
    :url: The url the request is sent to.
    :headers: The headers sent along with the request.
    :verify: Boolean parameter passed to the requests module
        (https verification).
    :resp: The request's response.
    :op: The library operation during which the request was sent.
    :payload: Optional. The payload sent with the request.
    :return: A formatted string.
    '''
    mandatory_args = ['op', 'handle', 'url', 'headers', 'verify', 'resp']
    optional_args = ['payload']
    util.check_presence_of_mandatory_args(args, mandatory_args)
    util.add_missing_optional_args_with_value_none(args, optional_args)

    indent = '\n '
    parts = ['\n' + args['op'] + ' ' + args['handle']]
    parts.append(indent + 'URL: ' + args['url'])
    parts.append(indent + 'HEADERS: ' + str(args['headers']))
    parts.append(indent + 'VERIFY: ' + str(args['verify']))
    # 'payload' is added (as None) by the helper above, so this is
    # normally always logged.
    if 'payload' in args.keys():
        parts.append(indent + 'PAYLOAD:' + indent + str(args['payload']))
    parts.append(indent + 'RESPONSECODE: ' + str(args['resp'].status_code))
    parts.append(indent + 'RESPONSE:' + indent + str(args['resp'].content))
    return ''.join(parts)
Creates a string containing all relevant information about a request made to the Handle System, for logging purposes. :handle: The handle that the request is about. :url: The url the request is sent to. :headers: The headers sent along with the request. :verify: Boolean parameter passed to the requests module (https verification). :resp: The request's response. :op: The library operation during which the request was sent. :payload: Optional. The payload sent with the request. :return: A formatted string.
entailment
def request_xml(url, auth=None):
    '''
    Returns the raw XML body (UTF-8 encoded bytes) loaded from the url.

    Note: despite the old docstring, this does NOT return an etree
    object — it returns the response text encoded as UTF-8 bytes, to be
    parsed by the caller.

    :param str url: URL for the resource to load as an XML
    :param auth: Optional requests auth object.
    :return: UTF-8 encoded body, or None when the request fails.
    '''
    try:
        r = requests.get(url, auth=auth, verify=False)
        return r.text.encode('utf-8')
    except Exception:
        # BUG FIX: was ``except BaseException``, which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        logger.error("Skipping %s (error parsing the XML)" % url)
        return
Returns an etree.XMLRoot object loaded from the url :param str url: URL for the resource to load as an XML
entailment
def _get_catalog_url(self, url): ''' Returns the appropriate catalog URL by replacing html with xml in some cases :param str url: URL to the catalog ''' u = urlparse.urlsplit(url) name, ext = os.path.splitext(u.path) if ext == ".html": u = urlparse.urlsplit(url.replace(".html", ".xml")) url = u.geturl() return url
Returns the appropriate catalog URL by replacing html with xml in some cases :param str url: URL to the catalog
entailment
def _yield_leaves(self, url, tree):
    '''
    Yields a URL corresponding to a leaf dataset for each dataset
    described by the catalog.

    :param str url: URL for the current catalog
    :param lxml.etree.Element tree: Current XML Tree
    '''
    for leaf in tree.findall('.//{%s}dataset[@urlPath]' % INV_NS):
        # Subset by the skips
        name = leaf.get("name")
        if any([x.match(name) for x in self.skip]):
            logger.info("Skipping dataset based on 'skips'. Name: %s" % name)
            continue

        # Subset by before and after
        date_tag = leaf.find('.//{%s}date[@type="modified"]' % INV_NS)
        if date_tag is not None:
            try:
                dt = parse(date_tag.text)
            except ValueError:
                logger.error("Skipping dataset.Wrong date string %s " % date_tag.text)
                continue
            else:
                # Comparison assumes the modified timestamp is UTC —
                # TODO(review) confirm against the catalog format.
                dt = dt.replace(tzinfo=pytz.utc)
                if self.after and dt < self.after:
                    continue
                if self.before and dt > self.before:
                    continue

        # Subset by the Selects defined
        gid = leaf.get('ID')
        if self.select is not None:
            if gid is not None and any([x.match(gid) for x in self.select]):
                logger.debug("Processing %s" % gid)
                yield "%s?dataset=%s" % (url, gid)
            else:
                logger.info("Ignoring dataset based on 'selects'. ID: %s" % gid)
                continue
        else:
            logger.debug("Processing %s" % gid)
            yield "%s?dataset=%s" % (url, gid)
Yields a URL corresponding to a leaf dataset for each dataset described by the catalog :param str url: URL for the current catalog :param lxml.etree.Eleemnt tree: Current XML Tree
entailment
def _compile_references(self, url, tree):
    '''
    Returns a list of catalog reference URLs for the current catalog.

    :param str url: URL for the current catalog
    :param lxml.etree.Element tree: Current XML Tree
    '''
    references = []
    for ref in tree.findall('.//{%s}catalogRef' % INV_NS):
        # Skip references whose title matches any of the skip patterns.
        title = ref.get("{%s}title" % XLINK_NS)
        if any(pattern.match(title) for pattern in self.skip):
            logger.info("Skipping catalogRef based on 'skips'. Title: %s" % title)
            continue
        references.append(construct_url(url, ref.get("{%s}href" % XLINK_NS)))
    return references
Returns a list of catalog reference URLs for the current catalog :param str url: URL for the current catalog :param lxml.etree.Eleemnt tree: Current XML Tree
entailment
def _run(self, url, auth):
    '''
    Performs a multiprocess depth-first-search of the catalog
    references and yields a URL for each leaf dataset found.

    :param str url: URL for the current catalog
    :param requests.auth.AuthBase auth: requests auth object to use
    '''
    # Never crawl the same catalog twice.
    if url in self.visited:
        logger.debug("Skipping %s (already crawled)" % url)
        return
    self.visited.append(url)

    logger.info("Crawling: %s" % url)
    url = self._get_catalog_url(url)

    # Fetch the catalog body and recurse into it.
    xml_content = request_xml(url, auth)
    for ds in self._build_catalog(url, xml_content):
        yield ds
Performs a multiprocess depth-first-search of the catalog references and yields a URL for each leaf dataset found :param str url: URL for the current catalog :param requests.auth.AuthBase auth: requets auth object to use
entailment
def _build_catalog(self, url, xml_content):
    '''
    Recursive function to perform the DFS and yield the leaf datasets.

    :param str url: URL for the current catalog
    :param str xml_content: XML Body returned from HTTP Request
    '''
    try:
        tree = etree.XML(xml_content)
    except Exception:
        # BUG FIX: was ``except BaseException``, which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception. An
        # unparseable (or None) body simply ends this branch of the DFS.
        return

    # Get a list of child-catalog URLs.
    references = self._compile_references(url, tree)

    # Using multiple processes, make HTTP requests for each child catalog.
    jobs = [self.pool.apply_async(request_xml, args=(ref,)) for ref in references]
    responses = [j.get() for j in jobs]

    # This is essentially the graph traversal step.
    for child_url, response in zip(references, responses):
        for ds in self._build_catalog(child_url, response):
            yield ds

    # Yield the leaves of the current catalog.
    for ds in self._yield_leaves(url, tree):
        yield ds
Recursive function to perform the DFS and yield the leaf datasets :param str url: URL for the current catalog :param str xml_content: XML Body returned from HTTP Request
entailment
def find_module(fdr, fqname, path=None):
    '''Find a loader for module or package `fqname`.

    This method will be called with the fully qualified name of the
    module. If the finder is installed on `sys.meta_path`, it will
    receive a second argument, which is `None` for a top-level module,
    or `package.__path__` for submodules or subpackages. It returns a
    loader object if the module was found, or `None` if it wasn't.
    '''
    # Only modules registered in the finder's alias table are handled.
    loader = None
    if fqname in fdr.aliases:
        loader = Loader(fqname, fdr.aliases[fqname])
    return loader
Find a loader for module or package `fqname`. This method will be called with the fully qualified name of the module. If the finder is installed on `sys.meta_path`, it will receive a second argument, which is `None` for a top-level module, or `package.__path__` for submodules or subpackages [5]. It should return a loader object if the module was found, or `None` if it wasn't. If `find_module()` raises an exception, it will be propagated to the caller, aborting the import. [5] The path argument to `finder.find_module()` is there because the `pkg.__path__` variable may be needed at this point. It may either come from the actual parent module or be supplied by `imp.find_module()` or the proposed `imp.get_loader()` function.
entailment
def load_module(ldr, fqname):
    '''Load `fqname` from under `ldr.fspath`.

    Implements the PEP 302 loader protocol. Before executing any module
    code the loader must: reuse an existing `sys.modules` entry (so
    `reload()` keeps working) or insert a fresh module object into
    `sys.modules` *before* exec (the module may import itself); set
    `__file__`, `__name__`, `__loader__`, `__package__`, and — for
    packages — `__path__`. See PEP 366 for `__package__` semantics:
    http://www.python.org/dev/peps/pep-0366/

    :param ldr: The loader instance (provides scope, fspath, path_to).
    :param fqname: Fully qualified module name, e.g. "spam.eggs.ham".
    :return: The loaded module object.
    :raise AssertionError: If `fqname` is outside this loader's scope.
    '''
    # Sanity check: this loader only serves modules under its scope.
    scope = ldr.scope.split('.')
    modpath = fqname.split('.')
    if scope != modpath[0:len(scope)]:
        raise AssertionError(
            "%s responsible for %s got request for %s" % (
                ldr.__class__.__name__,
                ldr.scope,
                fqname,
            )
        )
    # Reuse any existing module object; otherwise register a new one in
    # sys.modules BEFORE executing its code (self-import safety).
    if fqname in sys.modules:
        mod = sys.modules[fqname]
    else:
        mod = sys.modules.setdefault(fqname, types.ModuleType(fqname))
    mod.__loader__ = ldr
    fspath = ldr.path_to(fqname)
    mod.__file__ = str(fspath)
    if fs.is_package(fspath):
        # Packages need __path__; __package__ is the package itself.
        mod.__path__ = [ldr.fspath]
        mod.__package__ = str(fqname)
    else:
        # Plain modules: __package__ is the parent package name.
        mod.__package__ = str(fqname.rpartition('.')[0])
    exec(fs.get_code(fspath), mod.__dict__)
    return mod
Load `fqname` from under `ldr.fspath`. The `fqname` argument is the fully qualified module name, eg. "spam.eggs.ham". As explained above, when :: finder.find_module("spam.eggs.ham") is called, "spam.eggs" has already been imported and added to `sys.modules`. However, the `find_module()` method isn't necessarily always called during an actual import: meta tools that analyze import dependencies (such as freeze, Installer or py2exe) don't actually load modules, so a finder shouldn't depend on the parent package being available in `sys.modules`. The `load_module()` method has a few responsibilities that it must fulfill before it runs any code: * If there is an existing module object named 'fullname' in `sys.modules`, the loader must use that existing module. (Otherwise, the `reload()` builtin will not work correctly.) If a module named 'fullname' does not exist in `sys.modules`, the loader must create a new module object and add it to `sys.modules`. Note that the module object must be in `sys.modules` before the loader executes the module code. This is crucial because the module code may (directly or indirectly) import itself; adding it to `sys.modules` beforehand prevents unbounded recursion in the worst case and multiple loading in the best. If the load fails, the loader needs to remove any module it may have inserted into `sys.modules`. If the module was already in `sys.modules` then the loader should leave it alone. * The `__file__` attribute must be set. This must be a string, but it may be a dummy value, for example "<frozen>". The privilege of not having a `__file__` attribute at all is reserved for built-in modules. * The `__name__` attribute must be set. If one uses `imp.new_module()` then the attribute is set automatically. * If it's a package, the __path__ variable must be set. This must be a list, but may be empty if `__path__` has no further significance to the importer (more on this later). * The `__loader__` attribute must be set to the loader object. 
This is mostly for introspection and reloading, but can be used for importer-specific extras, for example getting data associated with an importer. The `__package__` attribute [8] must be set. If the module is a Python module (as opposed to a built-in module or a dynamically loaded extension), it should execute the module's code in the module's global name space (`module.__dict__`). [8] PEP 366: Main module explicit relative imports http://www.python.org/dev/peps/pep-0366/
entailment
def zthread_fork(ctx, func, *args, **kwargs):
    """
    Create an attached thread. An attached thread gets a ctx and a PAIR
    pipe back to its parent. It must monitor its pipe, and exit if the
    pipe becomes unreadable. Returns pipe, or NULL if there was an error.

    :param ctx: zmq context used to create both PAIR sockets.
    :param func: Thread target; invoked as func(ctx, pipe, *args, **kwargs).
    :return: The parent end of the PAIR pipe.
    """
    def _configure(sock):
        # Identical options for both ends of the inproc pipe.
        sock.setsockopt(zmq.LINGER, 0)
        sock.setsockopt(zmq.RCVHWM, 100)
        sock.setsockopt(zmq.SNDHWM, 100)
        sock.setsockopt(zmq.SNDTIMEO, 5000)
        sock.setsockopt(zmq.RCVTIMEO, 5000)

    a = ctx.socket(zmq.PAIR)
    _configure(a)
    b = ctx.socket(zmq.PAIR)
    # BUG FIX: the original configured RCVTIMEO on ``a`` a second time
    # instead of on ``b`` (copy-paste error), leaving the child end of
    # the pipe with an infinite receive timeout.
    _configure(b)

    iface = "inproc://%s" % binascii.hexlify(os.urandom(8))
    a.bind(iface)
    b.connect(iface)

    thread = threading.Thread(target=func, args=((ctx, b) + args), kwargs=kwargs)
    thread.daemon = False
    thread.start()

    return a
Create an attached thread. An attached thread gets a ctx and a PAIR pipe back to its parent. It must monitor its pipe, and exit if the pipe becomes unreadable. Returns pipe, or NULL if there was an error.
entailment
def _remap(object, name, value, safe=True):
    """Prevent accidental assignment of existing members

    Arguments:
        object (object): Parent of new attribute
        name (str): Name of new attribute
        value (object): Value of new attribute
        safe (bool): Whether or not to guarantee that the new attribute
            was not overwritten. Can be set to False under condition
            that it is superseded by extensive testing.
    """
    # Safety checks are active only while testing (QT_TESTING env var).
    if os.getenv("QT_TESTING") is not None and safe:
        # Cannot alter original binding.
        if hasattr(object, name):
            raise AttributeError("Cannot override existing name: "
                                 "%s.%s" % (object.__name__, name))
        # Cannot alter classes of functions
        if type(object).__name__ != "module":
            raise AttributeError("%s != 'module': Cannot alter "
                                 "anything but modules" % object)
    elif hasattr(object, name):
        # Keep track of modifications
        # NOTE(review): ``self`` appears to be the module shim object
        # that replaces this module in sys.modules — confirm.
        self.__modified__.append(name)
    self.__remapped__.append(name)
    setattr(object, name, value)
Prevent accidental assignment of existing members Arguments: object (object): Parent of new attribute name (str): Name of new attribute value (object): Value of new attribute safe (bool): Whether or not to guarantee that the new attribute was not overwritten. Can be set to False under condition that it is superseded by extensive testing.
entailment
def init():
    """Try loading each binding in turn

    Please note: the entire Qt module is replaced with this code:
        sys.modules["Qt"] = binding()

    This means no functions or variables can be called after this has
    executed.

    For debugging and testing, this module may be accessed through
    `Qt.__shim__`.
    """
    preferred = os.getenv("QT_PREFERRED_BINDING")
    verbose = os.getenv("QT_VERBOSE") is not None
    bindings = (_pyside2, _pyqt5, _pyside, _pyqt4)

    if preferred:
        # Internal flag (used in installer)
        if preferred == "None":
            self.__wrapper_version__ = self.__version__
            return

        preferred = preferred.split(os.pathsep)
        available = {
            "PySide2": _pyside2,
            "PyQt5": _pyqt5,
            "PySide": _pyside,
            "PyQt4": _pyqt4
        }

        try:
            bindings = [available[binding] for binding in preferred]
        except KeyError:
            # BUG FIX: the original used implicit string concatenation
            # ("...: " "\n".join(preferred)), which called .join on the
            # label string itself and mangled the error message.
            raise ImportError(
                "Available preferred Qt bindings: " +
                "\n".join(preferred)
            )

    for binding in bindings:
        _log("Trying %s" % binding.__name__, verbose)

        try:
            binding = binding()
        except ImportError as e:
            _log(" - ImportError(\"%s\")" % e, verbose)
            continue
        else:
            # Reference to this module
            binding.__shim__ = self
            binding.QtCompat = self

            sys.modules.update({
                __name__: binding,

                # Fix #133, `from Qt.QtWidgets import QPushButton`
                __name__ + ".QtWidgets": binding.QtWidgets
            })

            return

    # If no binding was found, throw this error
    raise ImportError("No Qt binding were found.")
Try loading each binding in turn Please note: the entire Qt module is replaced with this code: sys.modules["Qt"] = binding() This means no functions or variables can be called after this has executed. For debugging and testing, this module may be accessed through `Qt.__shim__`.
entailment
def log_instantiation(LOGGER, classname, args, forbidden, with_date=False):
    '''
    Log the instantiation of an object to the given logger.

    :LOGGER: A logger to log to. Please see module "logging".
    :classname: The name of the class that is being instantiated.
    :args: A dictionary of arguments passed to the instantiation,
        which will be logged on debug level.
    :forbidden: A list of arguments whose values should not be logged,
        e.g. "password".
    :with_date: Optional. Boolean. Indicates whether the instantiation
        date and time should be logged.
    '''
    # Info:
    if with_date:
        LOGGER.info('Instantiating '+classname+' at '+datetime.datetime.now().strftime('%Y-%m-%d_%H:%M'))
    else:
        LOGGER.info('Instantiating '+classname)

    # Debug: log each non-None argument, masking forbidden values.
    for argname in args:
        if args[argname] is not None:
            if argname in forbidden:
                # BUG FIX: the original logged 'Param <name>*******'
                # (missing '='), inconsistent with the unmasked branch.
                LOGGER.debug('Param '+argname+'=*******')
            else:
                LOGGER.debug('Param '+argname+'='+str(args[argname]))
Log the instantiation of an object to the given logger. :LOGGER: A logger to log to. Please see module "logging". :classname: The name of the class that is being instantiated. :args: A dictionary of arguments passed to the instantiation, which will be logged on debug level. :forbidden: A list of arguments whose values should not be logged, e.g. "password". :with_date: Optional. Boolean. Indicated whether the initiation date and time should be logged.
entailment
def filter_params(params):
    """
    Normalise a query-parameter dict.

    Drops keys whose value is None and converts bool values to strings:
    False -> "false", True -> "true".

    :param params: dict or None.
    :return: A new dict, or None when ``params`` is None.
    """
    if params is None:
        return None
    # Deep-copy first so nested mutable values are not shared with the
    # caller's dict.
    cleaned = copy.deepcopy(params)
    cleaned = {k: v for k, v in cleaned.items() if v is not None}
    for key in cleaned:
        if isinstance(cleaned[key], bool):
            cleaned[key] = "true" if cleaned[key] else "false"
    return cleaned
convert dict value if value is bool type, False -> "false" True -> "true"
entailment
def _handler_response(self, response, data=None):
    """
    Parse a Weibo API response, raising on transport or API errors.

    error code response:
    {
        "request": "/statuses/home_timeline.json",
        "error_code": "20502",
        "error": "Need you follow uid."
    }

    :param response: requests response object from the API call.
    :param data: Optional request payload, echoed in error messages.
    :return: Decoded JSON body on success.
    :raise WeiboAPIError: On a 200 response carrying an error_code.
    :raise WeiboRequestError: On any non-200 status code.
    """
    if response.status_code == 200:
        # NOTE(review): rebinds the ``data`` parameter — the original
        # payload is no longer available past this point.
        data = response.json()
        # A 200 response may still carry an application-level error.
        if isinstance(data, dict) and data.get("error_code"):
            raise WeiboAPIError(data.get("request"),
                                data.get("error_code"),
                                data.get("error"))
        else:
            return data
    else:
        raise WeiboRequestError(
            "Weibo API request error: status code: {code} url:{url} ->"
            " method:{method}: data={data}".format(
                code=response.status_code,
                url=response.url,
                method=response.request.method,
                data=data
            )
        )
error code response: { "request": "/statuses/home_timeline.json", "error_code": "20502", "error": "Need you follow uid." } :param response: :return:
entailment
def get(self, suffix, params=None):
    """
    Perform a GET request against the weibo api.

    :param suffix: str, path appended to the base URL.
    :param params: dict, url query parameters
    :return: parsed response body via ``_handler_response``.
    """
    response = self.session.get(url=self.base + suffix,
                                params=filter_params(params))
    return self._handler_response(response)
request weibo api :param suffix: str, :param params: dict, url query parameters :return:
entailment
def post(self, suffix, params=None, data=None, files=None):
    """
    Perform a POST request against the weibo api.

    :param suffix: str, path appended to the base URL.
    :param params: dict, url query parameters.
    :param data: optional form payload.
    :param files: optional file uploads.
    :return: parsed response body via ``_handler_response``.
    """
    response = self.session.post(url=self.base + suffix,
                                 params=filter_params(params),
                                 data=data,
                                 files=files)
    return self._handler_response(response, data=data)
:return:
entailment
def search_handle(self, **args):
    '''
    Search for handles containing the specified key with the specified
    value. The search terms are passed on to the reverse lookup servlet
    as-is. The servlet is supposed to be case-insensitive, but if it
    isn't, the wrong case will cause a
    :exc:`~b2handle.handleexceptions.ReverseLookupException`.

    *Note:* If allowed search keys are configured, only these are used.
    If no allowed search keys are specified, all key-value pairs are
    passed on to the reverse lookup servlet, possibly causing a
    :exc:`~b2handle.handleexceptions.ReverseLookupException`.

    Example calls:
      * list_of_handles = search_handle('http://www.foo.com')
      * list_of_handles = search_handle('http://www.foo.com', CHECKSUM=99999)
      * list_of_handles = search_handle(URL='http://www.foo.com', CHECKSUM=99999)

    :param URL: Optional. The URL to search for (reverse lookup). [This
        is NOT the URL of the search servlet!]
    :param prefix: Optional. The Handle prefix to which the search
        should be limited to. If unspecified, the method will search
        across all prefixes present at the server given to the
        constructor.
    :param key_value_pairs: Optional. Several search fields and values
        can be specified as key-value-pairs, e.g. CHECKSUM=123456,
        URL=www.foo.com
    :raise: :exc:`~b2handle.handleexceptions.ReverseLookupException`:
        If a search field is specified that cannot be used, or if
        something else goes wrong.
    :return: A list of all Handles (strings) that bear the given key
        with given value of given prefix or server. The list may be
        empty and may also contain more than one element. Returns None
        when no search access is available.
    '''
    LOGGER.debug('search_handle...')

    # Without search access there is nothing to do; log and bail out.
    if self.__has_search_access:
        return self.__search_handle(**args)
    else:
        LOGGER.error(
            'Searching not possible. Reason: No access '+
            'to search system (endpoint: '+
            str(self.__search_url)+').'
        )
        return None
Search for handles containing the specified key with the specified value. The search terms are passed on to the reverse lookup servlet as-is. The servlet is supposed to be case-insensitive, but if it isn't, the wrong case will cause a :exc:`~b2handle.handleexceptions.ReverseLookupException`. *Note:* If allowed search keys are configured, only these are used. If no allowed search keys are specified, all key-value pairs are passed on to the reverse lookup servlet, possibly causing a :exc:`~b2handle.handleexceptions.ReverseLookupException`. Example calls: * list_of_handles = search_handle('http://www.foo.com') * list_of_handles = search_handle('http://www.foo.com', CHECKSUM=99999) * list_of_handles = search_handle(URL='http://www.foo.com', CHECKSUM=99999) :param URL: Optional. The URL to search for (reverse lookup). [This is NOT the URL of the search servlet!] :param prefix: Optional. The Handle prefix to which the search should be limited to. If unspecified, the method will search across all prefixes present at the server given to the constructor. :param key_value_pairs: Optional. Several search fields and values can be specified as key-value-pairs, e.g. CHECKSUM=123456, URL=www.foo.com :raise: :exc:`~b2handle.handleexceptions.ReverseLookupException`: If a search field is specified that cannot be used, or if something else goes wrong. :return: A list of all Handles (strings) that bear the given key with given value of given prefix or server. The list may be empty and may also contain more than one element.
entailment
def create_revlookup_query(self, *fulltext_searchterms, **keyvalue_searchterms): ''' Create the part of the solr request that comes after the question mark, e.g. ?URL=*dkrz*&CHECKSUM=*abc*. If allowed search keys are configured, only these are used. If no'allowed search keys are specified, all key-value pairs are passed on to the reverse lookup servlet. :param fulltext_searchterms: Optional. Any term specified will be used as search term. Not implemented yet, so will be ignored. :param keyvalue_searchterms: Optional. Key-value pairs. Any key-value pair will be used to search for the value in the field "key". Wildcards accepted (refer to the documentation of the reverse lookup servlet for syntax.) :return: The query string, after the "?". If no valid search terms were specified, None is returned. ''' LOGGER.debug('create_revlookup_query...') allowed_search_keys = self.__allowed_search_keys only_search_for_allowed_keys = False if len(allowed_search_keys) > 0: only_search_for_allowed_keys = True fulltext_searchterms_given = True fulltext_searchterms = b2handle.util.remove_value_none_from_list(fulltext_searchterms) if len(fulltext_searchterms) == 0: fulltext_searchterms_given = False if fulltext_searchterms_given: msg = 'Full-text search is not implemented yet.'+\ ' The provided searchterms '+str(fulltext_searchterms)+\ ' can not be used.' raise ReverseLookupException(msg=msg) keyvalue_searchterms_given = True keyvalue_searchterms = b2handle.util.remove_value_none_from_dict(keyvalue_searchterms) if len(keyvalue_searchterms) == 0: keyvalue_searchterms_given = False if not keyvalue_searchterms_given and not fulltext_searchterms_given: msg = 'No search terms have been specified. Please specify'+\ ' at least one key-value-pair.' raise ReverseLookupException(msg=msg) counter = 0 query = '?' for key, value in keyvalue_searchterms.items(): if only_search_for_allowed_keys and key not in allowed_search_keys: msg = 'Cannot search for key "'+key+'". 
Only searches '+\ 'for keys '+str(allowed_search_keys)+' are implemented.' raise ReverseLookupException(msg=msg) else: query = query+'&'+key+'='+value counter += 1 query = query.replace('?&', '?') LOGGER.debug('create_revlookup_query: query: '+query) if counter == 0: # unreachable? msg = 'No valid search terms have been specified.' raise ReverseLookupException(msg=msg) return query
Create the part of the solr request that comes after the question mark, e.g. ?URL=*dkrz*&CHECKSUM=*abc*. If allowed search keys are configured, only these are used. If no'allowed search keys are specified, all key-value pairs are passed on to the reverse lookup servlet. :param fulltext_searchterms: Optional. Any term specified will be used as search term. Not implemented yet, so will be ignored. :param keyvalue_searchterms: Optional. Key-value pairs. Any key-value pair will be used to search for the value in the field "key". Wildcards accepted (refer to the documentation of the reverse lookup servlet for syntax.) :return: The query string, after the "?". If no valid search terms were specified, None is returned.
entailment
def __set_revlookup_auth_string(self, username, password): ''' Creates and sets the authentication string for accessing the reverse lookup servlet. No return, the string is set as an attribute to the client instance. :param username: Username. :param password: Password. ''' auth = b2handle.utilhandle.create_authentication_string(username, password) self.__revlookup_auth_string = auth
Creates and sets the authentication string for accessing the reverse lookup servlet. No return, the string is set as an attribute to the client instance. :param username: Username. :param password: Password.
entailment
def load_from_JSON(json_filename): ''' Create a new instance of a PIDClientCredentials with information read from a local JSON file. :param json_filename: The path to the json credentials file. The json file should have the following format: .. code:: json { "handle_server_url": "https://url.to.your.handle.server", "username": "index:prefix/suffix", "password": "ZZZZZZZ", "prefix": "prefix_to_use_for_writing_handles", "handleowner": "username_to_own_handles" } Any additional key-value-pairs are stored in the instance as config. :raises: :exc:`~b2handle.handleexceptions.CredentialsFormatError` :raises: :exc:`~b2handle.handleexceptions.HandleSyntaxError` :return: An instance. ''' try: jsonfilecontent = json.loads(open(json_filename, 'r').read()) except ValueError as exc: raise CredentialsFormatError(msg="Invalid JSON syntax: "+str(exc)) instance = PIDClientCredentials(credentials_filename=json_filename,**jsonfilecontent) return instance
Create a new instance of a PIDClientCredentials with information read from a local JSON file. :param json_filename: The path to the json credentials file. The json file should have the following format: .. code:: json { "handle_server_url": "https://url.to.your.handle.server", "username": "index:prefix/suffix", "password": "ZZZZZZZ", "prefix": "prefix_to_use_for_writing_handles", "handleowner": "username_to_own_handles" } Any additional key-value-pairs are stored in the instance as config. :raises: :exc:`~b2handle.handleexceptions.CredentialsFormatError` :raises: :exc:`~b2handle.handleexceptions.HandleSyntaxError` :return: An instance.
entailment
def fixture(app, fixtures, fixtures_dir='fixtures', raise_does_not_exist=False, reversible=True, models=[]): """ Load fixtures using a data migration. The migration will by default provide a rollback, deleting items by primary key. This is not always what you want ; you may set reversible=False to prevent rolling back. Usage: import myapp import anotherapp operations = [ migrations.RunPython(**fixture(myapp, 'eggs.yaml')), migrations.RunPython(**fixture(anotherapp, ['sausage.json', 'walks.yaml'])) migrations.RunPython(**fixture(yap, ['foo.json'], reversible=False)) ] """ fixture_path = os.path.join(app.__path__[0], fixtures_dir) if isinstance(fixtures, string_types): fixtures = [fixtures] def get_format(fixture): return os.path.splitext(fixture)[1][1:] def get_objects(): for fixture in fixtures: with open(os.path.join(fixture_path, fixture), 'rb') as f: objects = serializers.deserialize(get_format(fixture), f, ignorenonexistent=True) for obj in objects: yield obj def patch_apps(func): """ Patch the app registry. Note that this is necessary so that the Deserializer does not use the current version of the model, which may not necessarily be representative of the model the fixture was created for. 
""" @wraps(func) def inner(apps, schema_editor): try: # Firstly patch the serializers registry original_apps = django.core.serializers.python.apps django.core.serializers.python.apps = apps return func(apps, schema_editor) finally: # Ensure we always unpatch the serializers registry django.core.serializers.python.apps = original_apps return inner @patch_apps def load_fixture(apps, schema_editor): for obj in get_objects(): obj.save() @patch_apps def unload_fixture(apps, schema_editor): for obj in get_objects(): model = apps.get_model(app.__name__, obj.object.__class__.__name__) kwargs = dict() if 'id' in obj.object.__dict__: kwargs.update(id=obj.object.__dict__.get('id')) elif 'slug' in obj.object.__dict__: kwargs.update(slug=obj.object.__dict__.get('slug')) else: kwargs.update(**obj.object.__dict__) try: model.objects.get(**kwargs).delete() except model.DoesNotExist: if not raise_does_not_exist: raise FixtureObjectDoesNotExist(("Model %s instance with " "kwargs %s does not exist." % (model, kwargs))) kwargs = dict(code=load_fixture) if reversible: kwargs['reverse_code'] = unload_fixture return kwargs
Load fixtures using a data migration. The migration will by default provide a rollback, deleting items by primary key. This is not always what you want ; you may set reversible=False to prevent rolling back. Usage: import myapp import anotherapp operations = [ migrations.RunPython(**fixture(myapp, 'eggs.yaml')), migrations.RunPython(**fixture(anotherapp, ['sausage.json', 'walks.yaml'])) migrations.RunPython(**fixture(yap, ['foo.json'], reversible=False)) ]
entailment
def nonzero(self): """ Get all non-zero bits """ return [i for i in xrange(self.size()) if self.test(i)]
Get all non-zero bits
entailment
def tohexstring(self): """ Returns a hexadecimal string """ val = self.tostring() st = "{0:0x}".format(int(val, 2)) return st.zfill(len(self.bitmap)*2)
Returns a hexadecimal string
entailment
def fromhexstring(cls, hexstring): """ Construct BitMap from hex string """ bitstring = format(int(hexstring, 16), "0" + str(len(hexstring)/4) + "b") return cls.fromstring(bitstring)
Construct BitMap from hex string
entailment
def fromstring(cls, bitstring): """ Construct BitMap from string """ nbits = len(bitstring) bm = cls(nbits) for i in xrange(nbits): if bitstring[-i-1] == '1': bm.set(i) elif bitstring[-i-1] != '0': raise Exception("Invalid bit string!") return bm
Construct BitMap from string
entailment
def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree"} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass _version_path = os.path.join(os.path.dirname(__file__), '_version') if os.path.exists(_version_path): with open(_version_path) as f: l = f.readline().strip() return { 'version': l, 'error': None, 'dirty': None, 'full-revisionid': l } return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version"}
Get version information or return default if unable to do so.
entailment
def get_valid_https_verify(value): ''' Get a value that can be the boolean representation of a string or a boolean itself and returns It as a boolean. If this is not the case, It returns a string. :value: The HTTPS_verify input value. A string can be passed as a path to a CA_BUNDLE certificate :returns: True, False or a string. ''' http_verify_value = value bool_values = {'false': False, 'true': True} if isinstance(value, bool): http_verify_value = value elif (isinstance(value, str) or isinstance(value, unicode)) and value.lower() in bool_values.keys(): http_verify_value = bool_values[value.lower()] return http_verify_value
Get a value that can be the boolean representation of a string or a boolean itself and returns It as a boolean. If this is not the case, It returns a string. :value: The HTTPS_verify input value. A string can be passed as a path to a CA_BUNDLE certificate :returns: True, False or a string.
entailment
def setup(self): """Setup filter (only called when filter is actually used).""" super(RequireJSFilter, self).setup() excluded_files = [] for bundle in self.excluded_bundles: excluded_files.extend( map(lambda f: os.path.splitext(f)[0], bundle.contents) ) if excluded_files: self.argv.append( 'exclude={0}'.format(','.join(excluded_files)) )
Setup filter (only called when filter is actually used).
entailment
def setup(self): """Initialize filter just before it will be used.""" super(CleanCSSFilter, self).setup() self.root = current_app.config.get('COLLECT_STATIC_ROOT')
Initialize filter just before it will be used.
entailment
def rebase_opt(self): """Determine which option name to use.""" if not hasattr(self, '_rebase_opt'): # out = b"MAJOR.MINOR.REVISION" // b"3.4.19" or b"4.0.0" out, err = Popen( ['cleancss', '--version'], stdout=PIPE).communicate() ver = int(out[:out.index(b'.')]) self._rebase_opt = ['--root', self.root] if ver == 3 else [] return self._rebase_opt
Determine which option name to use.
entailment
def input(self, _in, out, **kw): """Input filtering.""" args = [self.binary or 'cleancss'] + self.rebase_opt if self.extra_args: args.extend(self.extra_args) self.subprocess(args, out, _in)
Input filtering.
entailment
def output(self, _in, out, **kwargs): """Wrap translation in Angular module.""" out.write( 'angular.module("{0}", ["gettext"]).run(' '["gettextCatalog", function (gettextCatalog) {{'.format( self.catalog_name ) ) out.write(_in.read()) out.write('}]);')
Wrap translation in Angular module.
entailment
def input(self, _in, out, **kwargs): """Process individual translation file.""" language_code = _re_language_code.search(_in.read()).group( 'language_code' ) _in.seek(0) # move at the begining after matching the language catalog = read_po(_in) out.write('gettextCatalog.setStrings("{0}", '.format(language_code)) out.write(json.dumps({ key: value.string for key, value in catalog._messages.items() if key and value.string })) out.write(');')
Process individual translation file.
entailment
def get_user_and_check_auth(self, username, password): """Check the combination username/password that is valid on the database. """ constraint = sql.or_( models.USERS.c.name == username, models.USERS.c.email == username ) user = self.identity_from_db(models.USERS, constraint) if user is None: raise dci_exc.DCIException('User %s does not exists.' % username, status_code=401) return user, auth.check_passwords_equal(password, user.password)
Check the combination username/password that is valid on the database.
entailment
def retrieve_info(self): """Query the Github API to retrieve the needed infos.""" path = urlparse(self.url).path path = path.split('/')[1:] sanity_filter = re.compile('[\da-z-_]+', re.IGNORECASE) self.product = sanity_filter.match(path[0]).group(0) self.component = sanity_filter.match(path[1]).group(0) self.issue_id = int(path[3]) github_url = '%s/%s/%s/issues/%s' % (_URL_BASE, self.product, self.component, self.issue_id) result = requests.get(github_url) self.status_code = result.status_code if result.status_code == 200: result = result.json() self.title = result['title'] self.reporter = result['user']['login'] if result['assignee'] is not None: self.assignee = result['assignee']['login'] self.status = result['state'] self.created_at = result['created_at'] self.updated_at = result['updated_at'] self.closed_at = result['closed_at'] elif result.status_code == 404: self.title = 'private issue'
Query the Github API to retrieve the needed infos.
entailment
def disk_cache(cls, basename, function, *args, method=True, **kwargs): """ Cache the return value in the correct cache directory. Set 'method' to false for static methods. """ @utility.disk_cache(basename, cls.directory(), method=method) def wrapper(*args, **kwargs): return function(*args, **kwargs) return wrapper(*args, **kwargs)
Cache the return value in the correct cache directory. Set 'method' to false for static methods.
entailment
def download(cls, url, filename=None): """ Download a file into the correct cache directory. """ return utility.download(url, cls.directory(), filename)
Download a file into the correct cache directory.
entailment
def directory(cls, prefix=None): """ Path that should be used for caching. Different for all subclasses. """ prefix = prefix or utility.read_config().directory name = cls.__name__.lower() directory = os.path.expanduser(os.path.join(prefix, name)) utility.ensure_directory(directory) return directory
Path that should be used for caching. Different for all subclasses.
entailment
def get_last_rconfiguration_id(topic_id, remoteci_id, db_conn=None): """Get the rconfiguration_id of the last job run by the remoteci. :param topic_id: the topic :param remoteci_id: the remoteci id :return: last rconfiguration_id of the remoteci """ db_conn = db_conn or flask.g.db_conn __TABLE = models.JOBS query = sql.select([__TABLE.c.rconfiguration_id]). \ order_by(sql.desc(__TABLE.c.created_at)). \ where(sql.and_(__TABLE.c.topic_id == topic_id, __TABLE.c.remoteci_id == remoteci_id)). \ limit(1) rconfiguration_id = db_conn.execute(query).fetchone() if rconfiguration_id is not None: return str(rconfiguration_id[0]) else: return None
Get the rconfiguration_id of the last job run by the remoteci. :param topic_id: the topic :param remoteci_id: the remoteci id :return: last rconfiguration_id of the remoteci
entailment
def get_remoteci_configuration(topic_id, remoteci_id, db_conn=None): """Get a remoteci configuration. This will iterate over each configuration in a round robin manner depending on the last rconfiguration used by the remoteci.""" db_conn = db_conn or flask.g.db_conn last_rconfiguration_id = get_last_rconfiguration_id( topic_id, remoteci_id, db_conn=db_conn) _RCONFIGURATIONS = models.REMOTECIS_RCONFIGURATIONS _J_RCONFIGURATIONS = models.JOIN_REMOTECIS_RCONFIGURATIONS query = sql.select([_RCONFIGURATIONS]). \ select_from(_J_RCONFIGURATIONS. join(_RCONFIGURATIONS)). \ where(_J_RCONFIGURATIONS.c.remoteci_id == remoteci_id) query = query.where(sql.and_(_RCONFIGURATIONS.c.state != 'archived', _RCONFIGURATIONS.c.topic_id == topic_id)) query = query.order_by(sql.desc(_RCONFIGURATIONS.c.created_at)) query = query.order_by(sql.asc(_RCONFIGURATIONS.c.name)) all_rconfigurations = db_conn.execute(query).fetchall() if len(all_rconfigurations) > 0: for i in range(len(all_rconfigurations)): if str(all_rconfigurations[i]['id']) == last_rconfiguration_id: # if i==0, then indice -1 is the last element return all_rconfigurations[i - 1] return all_rconfigurations[0] else: return None
Get a remoteci configuration. This will iterate over each configuration in a round robin manner depending on the last rconfiguration used by the remoteci.
entailment
def send(self, request: Request) -> None: """ Dispatches a request. Expects one and one only target handler :param request: The request to dispatch :return: None, will throw a ConfigurationException if more than one handler factor is registered for the command """ handler_factories = self._registry.lookup(request) if len(handler_factories) != 1: raise ConfigurationException("There is no handler registered for this request") handler = handler_factories[0]() handler.handle(request)
Dispatches a request. Expects one and one only target handler :param request: The request to dispatch :return: None, will throw a ConfigurationException if more than one handler factor is registered for the command
entailment
def publish(self, request: Request) -> None: """ Dispatches a request. Expects zero or more target handlers :param request: The request to dispatch :return: None. """ handler_factories = self._registry.lookup(request) for factory in handler_factories: handler = factory() handler.handle(request)
Dispatches a request. Expects zero or more target handlers :param request: The request to dispatch :return: None.
entailment
def post(self, request: Request) -> None: """ Dispatches a request over middleware. Returns when message put onto outgoing channel by producer, does not wait for response from a consuming application i.e. is fire-and-forget :param request: The request to dispatch :return: None """ if self._producer is None: raise ConfigurationException("Command Processor requires a BrightsideProducer to post to a Broker") if self._message_mapper_registry is None: raise ConfigurationException("Command Processor requires a BrightsideMessage Mapper Registry to post to a Broker") message_mapper = self._message_mapper_registry.lookup(request) message = message_mapper(request) self._message_store.add(message) self._producer.send(message)
Dispatches a request over middleware. Returns when message put onto outgoing channel by producer, does not wait for response from a consuming application i.e. is fire-and-forget :param request: The request to dispatch :return: None
entailment
def ignore_whitespace_text_nodes(cls, wrapped_node): """ Find and delete any text nodes containing nothing but whitespace in in the given node and its descendents. This is useful for cleaning up excess low-value text nodes in a document DOM after parsing a pretty-printed XML document. """ for child in wrapped_node.children: if child.is_text and child.value.strip() == '': child.delete() else: cls.ignore_whitespace_text_nodes(child)
Find and delete any text nodes containing nothing but whitespace in in the given node and its descendents. This is useful for cleaning up excess low-value text nodes in a document DOM after parsing a pretty-printed XML document.
entailment
def get_ns_info_from_node_name(self, name, impl_node): """ Return a three-element tuple with the prefix, local name, and namespace URI for the given element/attribute name (in the context of the given node's hierarchy). If the name has no associated prefix or namespace information, None is return for those tuple members. """ if '}' in name: ns_uri, name = name.split('}') ns_uri = ns_uri[1:] prefix = self.get_ns_prefix_for_uri(impl_node, ns_uri) elif ':' in name: prefix, name = name.split(':') ns_uri = self.get_ns_uri_for_prefix(impl_node, prefix) if ns_uri is None: raise exceptions.UnknownNamespaceException( "Prefix '%s' does not have a defined namespace URI" % prefix) else: prefix, ns_uri = None, None return prefix, name, ns_uri
Return a three-element tuple with the prefix, local name, and namespace URI for the given element/attribute name (in the context of the given node's hierarchy). If the name has no associated prefix or namespace information, None is return for those tuple members.
entailment
def up(self, count=1, to_name=None): """ :return: a builder representing an ancestor of the current element, by default the parent element. :param count: return the n'th ancestor element; defaults to 1 which means the immediate parent. If *count* is greater than the number of number of ancestors return the document's root element. :type count: integer >= 1 or None :param to_name: return the nearest ancestor element with the matching name, or the document's root element if there are no matching elements. This argument trumps the ``count`` argument. :type to_name: string or None """ elem = self._element up_count = 0 while True: # Don't go up beyond the document root if elem.is_root or elem.parent is None: break elem = elem.parent if to_name is None: up_count += 1 if up_count >= count: break else: if elem.name == to_name: break return Builder(elem)
:return: a builder representing an ancestor of the current element, by default the parent element. :param count: return the n'th ancestor element; defaults to 1 which means the immediate parent. If *count* is greater than the number of number of ancestors return the document's root element. :type count: integer >= 1 or None :param to_name: return the nearest ancestor element with the matching name, or the document's root element if there are no matching elements. This argument trumps the ``count`` argument. :type to_name: string or None
entailment
def element(self, *args, **kwargs): """ Add a child element to the :class:`xml4h.nodes.Element` node represented by this Builder. :return: a new Builder that represents the child element. Delegates to :meth:`xml4h.nodes.Element.add_element`. """ child_element = self._element.add_element(*args, **kwargs) return Builder(child_element)
Add a child element to the :class:`xml4h.nodes.Element` node represented by this Builder. :return: a new Builder that represents the child element. Delegates to :meth:`xml4h.nodes.Element.add_element`.
entailment
def attributes(self, *args, **kwargs): """ Add one or more attributes to the :class:`xml4h.nodes.Element` node represented by this Builder. :return: the current Builder. Delegates to :meth:`xml4h.nodes.Element.set_attributes`. """ self._element.set_attributes(*args, **kwargs) return self
Add one or more attributes to the :class:`xml4h.nodes.Element` node represented by this Builder. :return: the current Builder. Delegates to :meth:`xml4h.nodes.Element.set_attributes`.
entailment
def processing_instruction(self, target, data): """ Add a processing instruction node to the :class:`xml4h.nodes.Element` node represented by this Builder. :return: the current Builder. Delegates to :meth:`xml4h.nodes.Element.add_instruction`. """ self._element.add_instruction(target, data) return self
Add a processing instruction node to the :class:`xml4h.nodes.Element` node represented by this Builder. :return: the current Builder. Delegates to :meth:`xml4h.nodes.Element.add_instruction`.
entailment
def ns_prefix(self, prefix, ns_uri): """ Set the namespace prefix of the :class:`xml4h.nodes.Element` node represented by this Builder. :return: the current Builder. Delegates to :meth:`xml4h.nodes.Element.set_ns_prefix`. """ self._element.set_ns_prefix(prefix, ns_uri) return self
Set the namespace prefix of the :class:`xml4h.nodes.Element` node represented by this Builder. :return: the current Builder. Delegates to :meth:`xml4h.nodes.Element.set_ns_prefix`.
entailment
def createCertRequest(pkey, digest="sha256"): """ Create a certificate request. Arguments: pkey - The key to associate with the request digest - Digestion method to use for signing, default is sha256 **name - The name of the subject of the request, possible arguments are: C - Country name ST - State or province name L - Locality name O - Organization name OU - Organizational unit name CN - Common name emailAddress - E-mail address Returns: The certificate request in an X509Req object """ req = crypto.X509Req() req.get_subject().C = "FR" req.get_subject().ST = "IDF" req.get_subject().L = "Paris" req.get_subject().O = "RedHat" # noqa req.get_subject().OU = "DCI" req.get_subject().CN = "DCI-remoteCI" req.set_pubkey(pkey) req.sign(pkey, digest) return req
Create a certificate request. Arguments: pkey - The key to associate with the request digest - Digestion method to use for signing, default is sha256 **name - The name of the subject of the request, possible arguments are: C - Country name ST - State or province name L - Locality name O - Organization name OU - Organizational unit name CN - Common name emailAddress - E-mail address Returns: The certificate request in an X509Req object
entailment
def verify_existence_and_get(id, table, name=None, get_id=False): """Verify the existence of a resource in the database and then return it if it exists, according to the condition, or raise an exception. :param id: id of the resource :param table: the table object :param name: the name of the row to look for :param get_id: if True, return only the ID :return: """ where_clause = table.c.id == id if name: where_clause = table.c.name == name if 'state' in table.columns: where_clause = sql.and_(table.c.state != 'archived', where_clause) query = sql.select([table]).where(where_clause) result = flask.g.db_conn.execute(query).fetchone() if result is None: raise dci_exc.DCIException('Resource "%s" not found.' % id, status_code=404) if get_id: return result.id return result
Verify the existence of a resource in the database and then return it if it exists, according to the condition, or raise an exception. :param id: id of the resource :param table: the table object :param name: the name of the row to look for :param get_id: if True, return only the ID :return:
entailment
def user_topic_ids(user): """Retrieve the list of topics IDs a user has access to.""" if user.is_super_admin() or user.is_read_only_user(): query = sql.select([models.TOPICS]) else: query = (sql.select([models.JOINS_TOPICS_TEAMS.c.topic_id]) .select_from( models.JOINS_TOPICS_TEAMS.join( models.TOPICS, sql.and_(models.JOINS_TOPICS_TEAMS.c.topic_id == models.TOPICS.c.id, # noqa models.TOPICS.c.state == 'active')) # noqa ).where( sql.or_(models.JOINS_TOPICS_TEAMS.c.team_id.in_(user.teams_ids), # noqa models.JOINS_TOPICS_TEAMS.c.team_id.in_(user.child_teams_ids)))) # noqa rows = flask.g.db_conn.execute(query).fetchall() return [str(row[0]) for row in rows]
Retrieve the list of topics IDs a user has access to.
entailment
def verify_team_in_topic(user, topic_id): """Verify that the user's team does belongs to the given topic. If the user is an admin or read only user then it belongs to all topics. """ if user.is_super_admin() or user.is_read_only_user(): return if str(topic_id) not in user_topic_ids(user): raise dci_exc.Unauthorized()
Verify that the user's team does belongs to the given topic. If the user is an admin or read only user then it belongs to all topics.
entailment
def _format_level_1(rows, root_table_name): """ Transform sqlalchemy source: [{'a_id' : 'id1', 'a_name' : 'name1, 'b_id' : 'id2', 'b_name' : 'name2}, {'a_id' : 'id3', 'a_name' : 'name3, 'b_id' : 'id4', 'b_name' : 'name4} ] to [{'id' : 'id1', 'name': 'name2', 'b' : {'id': 'id2', 'name': 'name2'}, {'id' : 'id3', 'name': 'name3', 'b' : {'id': 'id4', 'name': 'name4'} ] """ result_rows = [] for row in rows: row = dict(row) result_row = {} prefixes_to_remove = [] for field in row: if field.startswith('next_topic'): prefix = 'next_topic' suffix = field[11:] if suffix == 'id_1': suffix = 'id' else: prefix, suffix = field.split('_', 1) if suffix == 'id' and row[field] is None: prefixes_to_remove.append(prefix) if prefix not in result_row: result_row[prefix] = {suffix: row[field]} else: result_row[prefix].update({suffix: row[field]}) # remove field with id == null for prefix_to_remove in prefixes_to_remove: result_row.pop(prefix_to_remove) root_table_fields = result_row.pop(root_table_name) result_row.update(root_table_fields) result_rows.append(result_row) return result_rows
Transform sqlalchemy source: [{'a_id' : 'id1', 'a_name' : 'name1, 'b_id' : 'id2', 'b_name' : 'name2}, {'a_id' : 'id3', 'a_name' : 'name3, 'b_id' : 'id4', 'b_name' : 'name4} ] to [{'id' : 'id1', 'name': 'name2', 'b' : {'id': 'id2', 'name': 'name2'}, {'id' : 'id3', 'name': 'name3', 'b' : {'id': 'id4', 'name': 'name4'} ]
entailment
def _format_level_2(rows, list_embeds, embed_many): """ From the _format_level_1 function we have a list of rows. Because of using joins, we have as many rows as join result. For example: [{'id' : 'id1', 'name' : 'name1, 'b' : {'id': 'id2, 'name': 'name2'} } {'id' : 'id1', 'name' : 'name1, 'b' : {'id' : 'id4', 'name' : 'name4} } ] Here there is two elements which correspond to one rows because of the embed field 'b'. So we should transform it to: [{'id' : 'id1', 'name' : 'name1, 'b' : [{'id': 'id2, 'name': 'name2'}, {'id' : 'id4', 'name' : 'name4}] } ] This is the purpose of this function. """ def _uniqify_list(list_of_dicts): # list() for py34 result = [] set_ids = set() for v in list_of_dicts: if v['id'] in set_ids: continue set_ids.add(v['id']) result.append(v) return result row_ids_to_embed_values = {} for row in rows: # for each row, associate rows's id -> {all embeds values} if row['id'] not in row_ids_to_embed_values: row_ids_to_embed_values[row['id']] = {} # add embeds values to the current row for embd in list_embeds: if embd not in row: continue if embd not in row_ids_to_embed_values[row['id']]: # create a list or a dict depending on embed_many if embed_many[embd]: row_ids_to_embed_values[row['id']][embd] = [row[embd]] else: row_ids_to_embed_values[row['id']][embd] = row[embd] else: if embed_many[embd]: row_ids_to_embed_values[row['id']][embd].append(row[embd]) # uniqify each embed list for embd in list_embeds: if embd in row_ids_to_embed_values[row['id']]: embed_values = row_ids_to_embed_values[row['id']][embd] if isinstance(embed_values, list): row_ids_to_embed_values[row['id']][embd] = _uniqify_list(embed_values) # noqa else: row_ids_to_embed_values[row['id']][embd] = {} if embed_many[embd]: row_ids_to_embed_values[row['id']][embd] = [] # last loop over the initial rows in order to keep the ordering result = [] # if row id in seen set then it means the row has been completely processed seen = set() for row in rows: if row['id'] in seen: continue 
seen.add(row['id']) new_row = {} # adds level 1 fields for field in row: if field not in list_embeds: new_row[field] = row[field] # adds all level 2 fields # list() for py34 row_ids_to_embed_values_keys = list(row_ids_to_embed_values[new_row['id']].keys()) # noqa row_ids_to_embed_values_keys.sort() # adds the nested fields if there is somes for embd in list_embeds: if embd in row_ids_to_embed_values_keys: if '.' in embd: prefix, suffix = embd.split('.', 1) new_row[prefix][suffix] = row_ids_to_embed_values[new_row['id']][embd] # noqa else: new_row[embd] = row_ids_to_embed_values[new_row['id']][embd] # noqa else: new_row_embd_value = {} if embed_many[embd]: new_row_embd_value = [] if '.' in embd: prefix, suffix = embd.split('.', 1) new_row[prefix][suffix] = new_row_embd_value else: new_row[embd] = new_row_embd_value # row is complete ! result.append(new_row) return result
From the _format_level_1 function we have a list of rows. Because of using joins, we have as many rows as join result. For example: [{'id' : 'id1', 'name' : 'name1, 'b' : {'id': 'id2, 'name': 'name2'} } {'id' : 'id1', 'name' : 'name1, 'b' : {'id' : 'id4', 'name' : 'name4} } ] Here there is two elements which correspond to one rows because of the embed field 'b'. So we should transform it to: [{'id' : 'id1', 'name' : 'name1, 'b' : [{'id': 'id2, 'name': 'name2'}, {'id' : 'id4', 'name' : 'name4}] } ] This is the purpose of this function.
entailment
def common_values_dict(): """Build a basic values object used in every create method. All our resources contain a same subset of value. Instead of redoing this code everytime, this method ensures it is done only at one place. """ now = datetime.datetime.utcnow().isoformat() etag = utils.gen_etag() values = { 'id': utils.gen_uuid(), 'created_at': now, 'updated_at': now, 'etag': etag } return values
Build a basic values object used in every create method. All our resources contain a same subset of value. Instead of redoing this code everytime, this method ensures it is done only at one place.
entailment
def execute(self, fetchall=False, fetchone=False, use_labels=True): """ :param fetchall: get all rows :param fetchone: get only one row :param use_labels: prefix row columns names by the table name :return: """ query = self.get_query(use_labels=use_labels) if fetchall: return flask.g.db_conn.execute(query).fetchall() elif fetchone: return flask.g.db_conn.execute(query).fetchone()
:param fetchall: get all rows :param fetchone: get only one row :param use_labels: prefix row columns names by the table name :return:
entailment
def get_identity(identity): """Returns some information about the currently authenticated identity""" return flask.Response( json.dumps( { 'identity': { 'id': identity.id, 'etag': identity.etag, 'name': identity.name, 'fullname': identity.fullname, 'email': identity.email, 'timezone': identity.timezone, 'teams': _encode_dict(identity.teams) } } ), 200, headers={'ETag': identity.etag}, content_type='application/json' )
Returns some information about the currently authenticated identity
entailment
def get_issues_by_resource(resource_id, table): """Get all issues for a specific job.""" v1_utils.verify_existence_and_get(resource_id, table) # When retrieving the issues for a job, we actually retrieve # the issues attach to the job itself + the issues attached to # the components the job has been run with. if table.name == 'jobs': JJI = models.JOIN_JOBS_ISSUES JJC = models.JOIN_JOBS_COMPONENTS JCI = models.JOIN_COMPONENTS_ISSUES # Get all the issues attach to all the components attach to a job j1 = sql.join( _TABLE, sql.join( JCI, JJC, sql.and_( JCI.c.component_id == JJC.c.component_id, JJC.c.job_id == resource_id, ), ), _TABLE.c.id == JCI.c.issue_id, ) query = sql.select([_TABLE]).select_from(j1) rows = flask.g.db_conn.execute(query) rows = [dict(row) for row in rows] # Get all the issues attach to a job j2 = sql.join( _TABLE, JJI, sql.and_( _TABLE.c.id == JJI.c.issue_id, JJI.c.job_id == resource_id ) ) query2 = sql.select([_TABLE]).select_from(j2) rows2 = flask.g.db_conn.execute(query2) rows += [dict(row) for row in rows2] # When retrieving the issues for a component, we only retrieve the # issues attached to the specified component. else: JCI = models.JOIN_COMPONENTS_ISSUES query = (sql.select([_TABLE]) .select_from(JCI.join(_TABLE)) .where(JCI.c.component_id == resource_id)) rows = flask.g.db_conn.execute(query) rows = [dict(row) for row in rows] for row in rows: if row['tracker'] == 'github': l_tracker = github.Github(row['url']) elif row['tracker'] == 'bugzilla': l_tracker = bugzilla.Bugzilla(row['url']) row.update(l_tracker.dump()) return flask.jsonify({'issues': rows, '_meta': {'count': len(rows)}})
Get all issues for a specific job.
entailment
def unattach_issue(resource_id, issue_id, table): """Unattach an issue from a specific job.""" v1_utils.verify_existence_and_get(issue_id, _TABLE) if table.name == 'jobs': join_table = models.JOIN_JOBS_ISSUES where_clause = sql.and_(join_table.c.job_id == resource_id, join_table.c.issue_id == issue_id) else: join_table = models.JOIN_COMPONENTS_ISSUES where_clause = sql.and_(join_table.c.component_id == resource_id, join_table.c.issue_id == issue_id) query = join_table.delete().where(where_clause) result = flask.g.db_conn.execute(query) if not result.rowcount: raise dci_exc.DCIConflict('%s_issues' % table.name, issue_id) return flask.Response(None, 204, content_type='application/json')
Unattach an issue from a specific job.
entailment
def attach_issue(resource_id, table, user_id): """Attach an issue to a specific job.""" data = schemas.issue.post(flask.request.json) issue = _get_or_create_issue(data) # Second, insert a join record in the JOIN_JOBS_ISSUES or # JOIN_COMPONENTS_ISSUES database. if table.name == 'jobs': join_table = models.JOIN_JOBS_ISSUES else: join_table = models.JOIN_COMPONENTS_ISSUES key = '%s_id' % table.name[0:-1] query = join_table.insert().values({ 'user_id': user_id, 'issue_id': issue['id'], key: resource_id }) try: flask.g.db_conn.execute(query) except sa_exc.IntegrityError: raise dci_exc.DCICreationConflict(join_table.name, '%s, issue_id' % key) result = json.dumps({'issue': dict(issue)}) return flask.Response(result, 201, content_type='application/json')
Attach an issue to a specific job.
entailment
def collect_staticroot_removal(app, blueprints): """Remove collect's static root folder from list.""" collect_root = app.extensions['collect'].static_root return [bp for bp in blueprints if ( bp.has_static_folder and bp.static_folder != collect_root)]
Remove collect's static root folder from list.
entailment
def get_all_jobstates(user, job_id): """Get all jobstates. """ args = schemas.args(flask.request.args.to_dict()) job = v1_utils.verify_existence_and_get(job_id, models.JOBS) if user.is_not_super_admin() and user.is_not_read_only_user(): if (job['team_id'] not in user.teams_ids and job['team_id'] not in user.child_teams_ids): raise dci_exc.Unauthorized() query = v1_utils.QueryBuilder(_TABLE, args, _JS_COLUMNS) query.add_extra_condition(_TABLE.c.job_id == job_id) # get the number of rows for the '_meta' section nb_rows = query.get_number_of_rows() rows = query.execute(fetchall=True) rows = v1_utils.format_result(rows, _TABLE.name, args['embed'], _EMBED_MANY) return flask.jsonify({'jobstates': rows, '_meta': {'count': nb_rows}})
Get all jobstates.
entailment
def create_client(access_token): """Create the dci client in the master realm.""" url = 'http://keycloak:8080/auth/admin/realms/dci-test/clients' r = requests.post(url, data=json.dumps(client_data), headers=get_auth_headers(access_token)) if r.status_code in (201, 409): print('Keycloak client dci created successfully.') else: raise Exception( 'Error while creating Keycloak client dci:\nstatus code %s\n' 'error: %s' % (r.status_code, r.content) )
Create the dci client in the master realm.
entailment
def create_user_dci(access_token): """Create the a dci user. username=dci, password=dci, email=dci@distributed-ci.io""" user_data = {'username': 'dci', 'email': 'dci@distributed-ci.io', 'enabled': True, 'emailVerified': True, 'credentials': [{'type': 'password', 'value': 'dci'}]} r = requests.post('http://keycloak:8080/auth/admin/realms/dci-test/users', data=json.dumps(user_data), headers=get_auth_headers(access_token)) if r.status_code in (201, 409): print('Keycloak user dci created successfully.') else: raise Exception('Error while creating user dci:\nstatus code %s\n' 'error: %s' % (r.status_code, r.content))
Create the a dci user. username=dci, password=dci, email=dci@distributed-ci.io
entailment
def initialize(self, name=None, dbname=None, base=None, generator=None, case=None, namespaces=None): self.name = none_or(name, str) """ The name of the site. : str | `None` """ self.dbname = none_or(dbname, str) """ The dbname of the site. : str | `None` """ self.base = none_or(base, str) """ TODO: ??? : str | `None` """ self.generator = none_or(generator, str) """ TODO: ??? : str | `None` """ self.case = none_or(case, str) """ TODO: ??? : str | `None` """ self.namespaces = none_or(namespaces, list) """ A list of :class:`mwtypes.Namespace` | `None` """
The name of the site. : str | `None`
entailment
def _serializeBooleans(params): """"Convert all booleans to lowercase strings""" serialized = {} for name, value in params.items(): if value is True: value = 'true' elif value is False: value = 'false' serialized[name] = value return serialized for k, v in params.items(): if isinstance(v, bool): params[k] = str(v).lower()
Convert all booleans to lowercase strings
entailment
def request(self, method, url, parameters=dict()): """Requests wrapper function""" # The requests library uses urllib, which serializes to "True"/"False" while Pingdom requires lowercase parameters = self._serializeBooleans(parameters) headers = {'App-Key': self.apikey} if self.accountemail: headers.update({'Account-Email': self.accountemail}) # Method selection handling if method.upper() == 'GET': response = requests.get(self.url + url, params=parameters, auth=(self.username, self.password), headers=headers) elif method.upper() == 'POST': response = requests.post(self.url + url, data=parameters, auth=(self.username, self.password), headers=headers) elif method.upper() == 'PUT': response = requests.put(self.url + url, data=parameters, auth=(self.username, self.password), headers=headers) elif method.upper() == 'DELETE': response = requests.delete(self.url + url, params=parameters, auth=(self.username, self.password), headers=headers) else: raise Exception("Invalid method in pingdom request") # Store pingdom api limits self.shortlimit = response.headers.get( 'Req-Limit-Short', self.shortlimit) self.longlimit = response.headers.get( 'Req-Limit-Long', self.longlimit) # Verify OK response if response.status_code != 200: sys.stderr.write('ERROR from %s: %d' % (response.url, response.status_code)) sys.stderr.write('Returned data: %s\n' % response.json()) response.raise_for_status() return response
Requests wrapper function
entailment
def actions(self, **parameters): """Returns a list of actions (alerts) that have been generated for your account. Optional Parameters: * from -- Only include actions generated later than this timestamp. Format is UNIX time. Type: Integer Default: None * to -- Only include actions generated prior to this timestamp. Format is UNIX time. Type: Integer Default: None * limit -- Limits the number of returned results to the specified quantity. Type: Integer (max 300) Default: 100 * offset -- Offset for listing. Type: Integer Default: 0 * checkids -- Comma-separated list of check identifiers. Limit results to actions generated from these checks. Type: String Default: All * contactids -- Comma-separated list of contact identifiers. Limit results to actions sent to these contacts. Type: String Default: All * status -- Comma-separated list of statuses. Limit results to actions with these statuses. Type: String ['sent', 'delivered', 'error', 'not_delivered', 'no_credits'] Default: All * via -- Comma-separated list of via mediums. Limit results to actions with these mediums. Type: String ['email', 'sms', 'twitter', 'iphone', 'android'] Default: All Returned structure: { 'alerts' : [ { 'contactname' : <String> Name of alerted contact 'contactid' : <String> Identifier of alerted contact 'checkid' : <String> Identifier of check 'time' : <Integer> Time of alert generation. Format UNIX time 'via' : <String> Alert medium ['email', 'sms', 'twitter', 'iphone', 'android'] 'status' : <String> Alert status ['sent', 'delivered', 'error', 'notdelivered', 'nocredits'] 'messageshort': <String> Short description of message 'messagefull' : <String> Full message body 'sentto' : <String> Target address, phone number, etc 'charged' : <Boolean> True if your account was charged for this message }, ... 
] } """ # Warn user about unhandled parameters for key in parameters: if key not in ['from', 'to', 'limit', 'offset', 'checkids', 'contactids', 'status', 'via']: sys.stderr.write('%s not a valid argument for actions()\n' % key) response = self.request('GET', 'actions', parameters) return response.json()['actions']
Returns a list of actions (alerts) that have been generated for your account. Optional Parameters: * from -- Only include actions generated later than this timestamp. Format is UNIX time. Type: Integer Default: None * to -- Only include actions generated prior to this timestamp. Format is UNIX time. Type: Integer Default: None * limit -- Limits the number of returned results to the specified quantity. Type: Integer (max 300) Default: 100 * offset -- Offset for listing. Type: Integer Default: 0 * checkids -- Comma-separated list of check identifiers. Limit results to actions generated from these checks. Type: String Default: All * contactids -- Comma-separated list of contact identifiers. Limit results to actions sent to these contacts. Type: String Default: All * status -- Comma-separated list of statuses. Limit results to actions with these statuses. Type: String ['sent', 'delivered', 'error', 'not_delivered', 'no_credits'] Default: All * via -- Comma-separated list of via mediums. Limit results to actions with these mediums. Type: String ['email', 'sms', 'twitter', 'iphone', 'android'] Default: All Returned structure: { 'alerts' : [ { 'contactname' : <String> Name of alerted contact 'contactid' : <String> Identifier of alerted contact 'checkid' : <String> Identifier of check 'time' : <Integer> Time of alert generation. Format UNIX time 'via' : <String> Alert medium ['email', 'sms', 'twitter', 'iphone', 'android'] 'status' : <String> Alert status ['sent', 'delivered', 'error', 'notdelivered', 'nocredits'] 'messageshort': <String> Short description of message 'messagefull' : <String> Full message body 'sentto' : <String> Target address, phone number, etc 'charged' : <Boolean> True if your account was charged for this message }, ... ] }
entailment
def getChecks(self, **parameters): """Pulls all checks from pingdom Optional Parameters: * limit -- Limits the number of returned probes to the specified quantity. Type: Integer (max 25000) Default: 25000 * offset -- Offset for listing (requires limit.) Type: Integer Default: 0 * tags -- Filter listing by tag/s Type: String Default: None """ # Warn user about unhandled parameters for key in parameters: if key not in ['limit', 'offset', 'tags']: sys.stderr.write('%s not a valid argument for getChecks()\n' % key) response = self.request('GET', 'checks', parameters) return [PingdomCheck(self, x) for x in response.json()['checks']]
Pulls all checks from pingdom Optional Parameters: * limit -- Limits the number of returned probes to the specified quantity. Type: Integer (max 25000) Default: 25000 * offset -- Offset for listing (requires limit.) Type: Integer Default: 0 * tags -- Filter listing by tag/s Type: String Default: None
entailment
def getCheck(self, checkid): """Returns a detailed description of a specified check.""" check = PingdomCheck(self, {'id': checkid}) check.getDetails() return check
Returns a detailed description of a specified check.
entailment
def probes(self, **kwargs): """Returns a list of all Pingdom probe servers Parameters: * limit -- Limits the number of returned probes to the specified quantity Type: Integer * offset -- Offset for listing (requires limit). Type: Integer Default: 0 * onlyactive -- Return only active probes Type: Boolean Default: False * includedeleted -- Include old probes that are no longer in use Type: Boolean Default: False Returned structure: [ { 'id' : <Integer> Unique probe id 'country' : <String> Country 'city' : <String> City 'name' : <String> Name 'active' : <Boolean> True if probe is active 'hostname' : <String> DNS name 'ip' : <String> IP address 'countryiso': <String> Country ISO code }, ... ] """ # Warn user about unhandled parameters for key in kwargs: if key not in ['limit', 'offset', 'onlyactive', 'includedeleted']: sys.stderr.write("'%s'" % key + ' is not a valid argument ' + 'of probes()\n') return self.request("GET", "probes", kwargs).json()['probes']
Returns a list of all Pingdom probe servers Parameters: * limit -- Limits the number of returned probes to the specified quantity Type: Integer * offset -- Offset for listing (requires limit). Type: Integer Default: 0 * onlyactive -- Return only active probes Type: Boolean Default: False * includedeleted -- Include old probes that are no longer in use Type: Boolean Default: False Returned structure: [ { 'id' : <Integer> Unique probe id 'country' : <String> Country 'city' : <String> City 'name' : <String> Name 'active' : <Boolean> True if probe is active 'hostname' : <String> DNS name 'ip' : <String> IP address 'countryiso': <String> Country ISO code }, ... ]
entailment
def traceroute(self, host, probeid): """Perform a traceroute to a specified target from a specified Pingdom probe. Provide hostname to check and probeid to check from Returned structure: { 'result' : <String> Traceroute output 'probeid' : <Integer> Probe identifier 'probedescription' : <String> Probe description } """ response = self.request('GET', 'traceroute', {'host': host, 'probeid': probeid}) return response.json()['traceroute']
Perform a traceroute to a specified target from a specified Pingdom probe. Provide hostname to check and probeid to check from Returned structure: { 'result' : <String> Traceroute output 'probeid' : <Integer> Probe identifier 'probedescription' : <String> Probe description }
entailment
def getContacts(self, **kwargs): """Returns a list of all contacts. Optional Parameters: * limit -- Limits the number of returned contacts to the specified quantity. Type: Integer Default: 100 * offset -- Offset for listing (requires limit.) Type: Integer Default: 0 Returned structure: [ 'id' : <Integer> Contact identifier 'name' : <String> Contact name 'email' : <String> Contact email 'cellphone' : <String> Contact telephone 'countryiso' : <String> Cellphone country ISO code 'defaultsmsprovider' : <String> Default SMS provider 'directtwitter' : <Boolean> Send Tweets as direct messages 'twitteruser' : <String> Twitter username 'paused' : <Boolean> True if contact is pasued 'iphonetokens' : <String list> iPhone tokens 'androidtokens' : <String list> android tokens ] """ # Warn user about unhandled parameters for key in kwargs: if key not in ['limit', 'offset']: sys.stderr.write("'%s'" % key + ' is not a valid argument ' + 'of getContacts()\n') return [PingdomContact(self, x) for x in self.request("GET", "notification_contacts", kwargs).json()['contacts']]
Returns a list of all contacts. Optional Parameters: * limit -- Limits the number of returned contacts to the specified quantity. Type: Integer Default: 100 * offset -- Offset for listing (requires limit.) Type: Integer Default: 0 Returned structure: [ 'id' : <Integer> Contact identifier 'name' : <String> Contact name 'email' : <String> Contact email 'cellphone' : <String> Contact telephone 'countryiso' : <String> Cellphone country ISO code 'defaultsmsprovider' : <String> Default SMS provider 'directtwitter' : <Boolean> Send Tweets as direct messages 'twitteruser' : <String> Twitter username 'paused' : <Boolean> True if contact is pasued 'iphonetokens' : <String list> iPhone tokens 'androidtokens' : <String list> android tokens ]
entailment
def newContact(self, name, **kwargs): """Create a new contact. Provide new contact name and any optional arguments. Returns new PingdomContact instance Optional Parameters: * email -- Contact email address Type: String * cellphone -- Cellphone number, without the country code part. In some countries you are supposed to exclude leading zeroes. (Requires countrycode and countryiso) Type: String * countrycode -- Cellphone country code (Requires cellphone and countryiso) Type: String * countryiso -- Cellphone country ISO code. For example: US (USA), GB (Britain) or SE (Sweden) (Requires cellphone and countrycode) Type: String * defaultsmsprovider -- Default SMS provider Type: String ['clickatell', 'bulksms', 'esendex', 'cellsynt'] * directtwitter -- Send tweets as direct messages Type: Boolean Default: True * twitteruser -- Twitter user Type: String """ # Warn user about unhandled parameters for key in kwargs: if key not in ['email', 'cellphone', 'countrycode', 'countryiso', 'defaultsmsprovider', 'directtwitter', 'twitteruser']: sys.stderr.write("'%s'" % key + ' is not a valid argument ' + 'of newContact()\n') kwargs['name'] = name contactinfo = self.request("POST", "notification_contacts", kwargs).json()['contact'] return PingdomContact(self, contactinfo)
Create a new contact. Provide new contact name and any optional arguments. Returns new PingdomContact instance Optional Parameters: * email -- Contact email address Type: String * cellphone -- Cellphone number, without the country code part. In some countries you are supposed to exclude leading zeroes. (Requires countrycode and countryiso) Type: String * countrycode -- Cellphone country code (Requires cellphone and countryiso) Type: String * countryiso -- Cellphone country ISO code. For example: US (USA), GB (Britain) or SE (Sweden) (Requires cellphone and countrycode) Type: String * defaultsmsprovider -- Default SMS provider Type: String ['clickatell', 'bulksms', 'esendex', 'cellsynt'] * directtwitter -- Send tweets as direct messages Type: Boolean Default: True * twitteruser -- Twitter user Type: String
entailment
def modifyContacts(self, contactids, paused): """Modifies a list of contacts. Provide comma separated list of contact ids and desired paused state Returns status message """ response = self.request("PUT", "notification_contacts", {'contactids': contactids, 'paused': paused}) return response.json()['message']
Modifies a list of contacts. Provide comma separated list of contact ids and desired paused state Returns status message
entailment
def getEmailReports(self): """Returns a list of PingdomEmailReport instances.""" reports = [PingdomEmailReport(self, x) for x in self.request('GET', 'reports.email').json()['subscriptions']] return reports
Returns a list of PingdomEmailReport instances.
entailment
def newEmailReport(self, name, **kwargs): """Creates a new email report Returns status message for operation Optional parameters: * checkid -- Check identifier. If omitted, this will be an overview report Type: Integer * frequency -- Report frequency Type: String ['monthly', 'weekly', 'daily'] * contactids -- Comma separated list of receiving contact identifiers Type: String * additionalemails -- Comma separated list of additional receiving emails Type: String """ # Warn user about unhandled parameters for key in kwargs: if key not in ['checkid', 'frequency', 'contactids', 'additionalemails']: sys.stderr.write("'%s'" % key + ' is not a valid argument ' + 'of newEmailReport()\n') parameters = {'name': name} for key, value in kwargs.iteritems(): parameters[key] = value return self.request('POST', 'reports.email', parameters).json()['message']
Creates a new email report Returns status message for operation Optional parameters: * checkid -- Check identifier. If omitted, this will be an overview report Type: Integer * frequency -- Report frequency Type: String ['monthly', 'weekly', 'daily'] * contactids -- Comma separated list of receiving contact identifiers Type: String * additionalemails -- Comma separated list of additional receiving emails Type: String
entailment
def getSharedReports(self): """Returns a list of PingdomSharedReport instances""" response = self.request('GET', 'reports.shared').json()['shared']['banners'] reports = [PingdomSharedReport(self, x) for x in response] return reports
Returns a list of PingdomSharedReport instances
entailment
def newSharedReport(self, checkid, **kwargs): """Create a shared report (banner). Returns status message for operation Optional parameters: * auto -- Automatic period (If false, requires: fromyear, frommonth, fromday, toyear, tomonth, today) Type: Boolean * type -- Banner type Type: String ['uptime', 'response'] * fromyear -- Period start: year Type: Integer * frommonth -- Period start: month Type: Integer * fromday -- Period start: day Type: Integer * toyear -- Period end: year Type: Integer * tomonth -- Period end: month Type: Integer * today -- Period end: day Type: Integer """ # Warn user about unhandled parameters for key in kwargs: if key not in ['auto', 'type', 'fromyear', 'frommonth', 'fromday', 'toyear', 'tomonth', 'today', 'sharedtype']: sys.stderr.write("'%s'" % key + ' is not a valid argument ' + 'of newSharedReport()\n') parameters = {'checkid': checkid, 'sharedtype': 'banner'} for key, value in kwargs.iteritems(): parameters[key] = value return self.request('POST', 'reports.shared', parameters).json()['message']
Create a shared report (banner). Returns status message for operation Optional parameters: * auto -- Automatic period (If false, requires: fromyear, frommonth, fromday, toyear, tomonth, today) Type: Boolean * type -- Banner type Type: String ['uptime', 'response'] * fromyear -- Period start: year Type: Integer * frommonth -- Period start: month Type: Integer * fromday -- Period start: day Type: Integer * toyear -- Period end: year Type: Integer * tomonth -- Period end: month Type: Integer * today -- Period end: day Type: Integer
entailment
def encry_decry_cascade(data, masterkey, bool_encry, assoc_data): """ When bool_encry is True, encrypt the data with master key. When it is False, the function extract the three nonce from the encrypted data (first 3*21 bytes), and decrypt the data. :param data: the data to encrypt or decrypt. :param masterkey: a 32 bytes key in bytes. :param bool_encry: if bool_encry is True, data is encrypted. Else, it will be decrypted. :param assoc_data: Additional data added to GCM authentication. :return: if bool_encry is True, corresponding nonce + encrypted data. Else, the decrypted data. """ engine1 = botan.cipher(algo="Serpent/GCM", encrypt=bool_encry) engine2 = botan.cipher(algo="AES-256/GCM", encrypt=bool_encry) engine3 = botan.cipher(algo="Twofish/GCM", encrypt=bool_encry) hash1 = botan.hash_function(algo="SHA-256") hash1.update(masterkey) hashed1 = hash1.final() hash2 = botan.hash_function(algo="SHA-256") hash2.update(hashed1) hashed2 = hash2.final() engine1.set_key(key=masterkey) engine1.set_assoc_data(assoc_data) engine2.set_key(key=hashed1) engine2.set_assoc_data(assoc_data) engine3.set_key(key=hashed2) engine3.set_assoc_data(assoc_data) if bool_encry is True: nonce1 = generate_nonce_timestamp() nonce2 = generate_nonce_timestamp() nonce3 = generate_nonce_timestamp() engine1.start(nonce=nonce1) engine2.start(nonce=nonce2) engine3.start(nonce=nonce3) cipher1 = engine1.finish(data) cipher2 = engine2.finish(cipher1) cipher3 = engine3.finish(cipher2) return nonce1 + nonce2 + nonce3 + cipher3 else: nonce1 = data[:__nonce_length__] nonce2 = data[__nonce_length__:__nonce_length__ * 2] nonce3 = data[__nonce_length__ * 2:__nonce_length__ * 3] encrypteddata = data[__nonce_length__ * 3:] engine1.start(nonce=nonce1) engine2.start(nonce=nonce2) engine3.start(nonce=nonce3) decrypteddata1 = engine3.finish(encrypteddata) if decrypteddata1 == b"": raise Exception("Integrity failure: Invalid passphrase or corrupted data") decrypteddata2 = engine2.finish(decrypteddata1) if 
decrypteddata2 == b"": raise Exception("Integrity failure: Invalid passphrase or corrupted data") decrypteddata3 = engine1.finish(decrypteddata2) if decrypteddata3 == b"": raise Exception("Integrity failure: Invalid passphrase or corrupted data") else: return decrypteddata3
When bool_encry is True, encrypt the data with master key. When it is False, the function extract the three nonce from the encrypted data (first 3*21 bytes), and decrypt the data. :param data: the data to encrypt or decrypt. :param masterkey: a 32 bytes key in bytes. :param bool_encry: if bool_encry is True, data is encrypted. Else, it will be decrypted. :param assoc_data: Additional data added to GCM authentication. :return: if bool_encry is True, corresponding nonce + encrypted data. Else, the decrypted data.
entailment
def register(self, request_class: Request, handler_factory: Callable[[], Handler]) -> None: """ Register the handler for the command :param request_class: The command or event to dispatch. It must implement getKey() :param handler_factory: A factory method to create the handler to dispatch to :return: """ key = request_class.__name__ is_command = request_class.is_command() is_event = request_class.is_event() is_present = key in self._registry if is_command and is_present: raise ConfigurationException("A handler for this request has already been registered") elif is_event and is_present: self._registry[key].append(handler_factory) elif is_command or is_event: self._registry[key] = [handler_factory]
Register the handler for the command :param request_class: The command or event to dispatch. It must implement getKey() :param handler_factory: A factory method to create the handler to dispatch to :return:
entailment
def lookup(self, request: Request) -> List[Callable[[], Handler]]:
    """
    Find the handler factories registered for a request, keyed by the
    request's class name.

    A command with no registered handler is a configuration error; an
    event with no registered handler is simply unobserved.

    :param request: The request we want to find a handler for
    :return: the registered handler factories (empty list for an
        unhandled event)
    """
    key = request.__class__.__name__
    if key in self._registry:
        return self._registry[key]
    if request.is_command():
        raise ConfigurationException("There is no handler registered for this request")
    if request.is_event():
        return []  # type: Callable[[] Handler]
    # Neither command nor event: fall through to the same KeyError the
    # registry would raise.
    return self._registry[key]
Looks up the handler associated with a request - matches the key on the request to a registered handler :param request: The request we want to find a handler for :return:
entailment
def register(self, request_class: Request, mapper_func: Callable[[Request], BrightsideMessage]) -> None:
    """
    Associate a message-mapper callback with a request type, keyed by
    the request class name.  Only one mapper may exist per key.

    :param mapper_func: A callback that creates a BrightsideMessage from a Request
    :param request_class: A request type
    :raises ConfigurationException: if a mapper already exists for the key
    """
    key = request_class.__name__
    if key in self._registry:
        raise ConfigurationException("There is already a message mapper defined for this key; there can be only one")
    self._registry[key] = mapper_func
Adds a message mapper to a factory, using the request's key :param mapper_func: A callback that creates a BrightsideMessage from a Request :param request_class: A request type
entailment
def lookup(self, request_class: Request) -> Callable[[Request], BrightsideMessage]:
    """
    Look up the message-mapper function for a request.  The mapper takes
    a Request-derived object and returns a BrightsideMessage for the wire.

    NOTE(review): despite the parameter name, the key is taken from
    ``request_class.__class__.__name__``, so callers appear to pass an
    *instance*, not a class — confirm against call sites.

    :param request_class:
    :return: the registered mapper callback
    :raises ConfigurationException: if no mapper is registered for the key
    """
    key = request_class.__class__.__name__
    if key in self._registry:
        return self._registry[key]
    raise ConfigurationException("There is no message mapper associated with this key; we require a mapper")
Looks up the message mapper function associated with this class. Function should take in a Request derived class and return a BrightsideMessage derived class, for sending on the wire :param request_class: :return:
entailment
def instance_of(*args):
    """
    This type validation function can be used in two modes:

    * providing two arguments (x, ref_type), it returns `True` if
      isinstance(x, ref_type) and raises a HasWrongType error if not.
      If ref_type is a set of types, any match with one of the included
      types will do.
    * providing a single argument (ref_type), this is a function
      generator. It returns a validation function to check that
      `instance_of(x, ref_type)`.

    :param args: (value, ref_type) in standard mode, or (ref_type,) in
        function-generator mode.
    :return: True on success, or the generated validator function.
    :raises HasWrongType: when the value fails the check.
    :raises TypeError: when called with an unsupported argument count.
    """
    if len(args) == 2:
        # Standard mode
        value, ref_type = args
        return _instance_of_check(value, ref_type)
    elif len(args) == 1:
        # Function generator mode
        ref_type = args[0]

        def instance_of_ref(x):
            return _instance_of_check(x, ref_type)

        instance_of_ref.__name__ = 'instance_of_{}'.format(ref_type)
        return instance_of_ref
    else:
        raise TypeError('instance_of expected 2 (normal) or 1 (function generator) arguments, got ' + str(len(args)))


def _instance_of_check(value, ref_type):
    # Shared check for both modes: returns True or raises HasWrongType.
    if isinstance(ref_type, set):
        # A set of types: any single match is enough.
        if any(isinstance(value, ref) for ref in ref_type):
            return True
        raise HasWrongType(wrong_value=value, ref_type=ref_type,
                           help_msg='Value should be an instance of any of {ref_type}')
    if isinstance(value, ref_type):
        return True
    raise HasWrongType(wrong_value=value, ref_type=ref_type)
This type validation function can be used in two modes: * providing two arguments (x, ref_type), it returns `True` if isinstance(x, ref_type) and raises a HasWrongType error if not. If ref_type is a set of types, any match with one of the included types will do * providing a single argument (ref_type), this is a function generator. It returns a validation function to check that `instance_of(x, ref_type)`. :param args: :return:
entailment
def subclass_of(*args):
    """
    This type validation function can be used in two modes:

    * providing two arguments (c, ref_type), it returns `True` if
      issubclass(c, ref_type) and raises a IsWrongType error if not.
      If ref_type is a set of types, any match with one of the included
      types will do.
    * providing a single argument (ref_type), this is a function
      generator. It returns a validation function to check that
      `subclass_of(c, ref_type)`.

    :param args: (typ, ref_type) in standard mode, or (ref_type,) in
        function-generator mode.
    :return: True on success, or the generated validator function.
    :raises IsWrongType: when the type fails the check.
    :raises TypeError: when called with an unsupported argument count.
    """
    if len(args) == 2:
        # Standard mode
        typ, ref_type = args
        return _subclass_of_check(typ, ref_type)
    elif len(args) == 1:
        # Function generator mode
        ref_type = args[0]

        def subclass_of_ref(x):
            return _subclass_of_check(x, ref_type)

        subclass_of_ref.__name__ = 'subclass_of_{}'.format(ref_type)
        return subclass_of_ref
    else:
        raise TypeError('subclass_of expected 2 (normal) or 1 (function generator) arguments, got ' + str(len(args)))


def _subclass_of_check(typ, ref_type):
    # Shared check for both modes: returns True or raises IsWrongType.
    if isinstance(ref_type, set):
        # A set of types: any single match is enough.
        if any(issubclass(typ, ref) for ref in ref_type):
            return True
        raise IsWrongType(wrong_value=typ, ref_type=ref_type,
                          help_msg='Value should be a subclass of any of {ref_type}')
    if issubclass(typ, ref_type):
        return True
    raise IsWrongType(wrong_value=typ, ref_type=ref_type)
This type validation function can be used in two modes: * providing two arguments (c, ref_type), it returns `True` if issubclass(c, ref_type) and raises a IsWrongType error if not. If ref_type is a set of types, any match with one of the included types will do * providing a single argument (ref_type), this is a function generator. It returns a validation function to check that `subclass_of(c, ref_type)`. :param args: :return:
entailment
def disk_cache(basename, directory, method=False):
    """
    Function decorator for caching pickleable return values on disk.
    Uses a stable digest computed from the function arguments for
    invalidation. If 'method', skip the first argument, usually being
    self or cls. The cache filepath is 'directory/basename-hash.pickle'.
    """
    # Local import keeps the decorator self-contained.
    import hashlib
    directory = os.path.expanduser(directory)
    ensure_directory(directory)

    def wrapper(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            # Don't use self or cls for the invalidation key.  (The old
            # code did `key = key[1:]` on the (args, kwargs) pair, which
            # dropped ALL positional args instead of just the first.)
            relevant = args[1:] if method and args else args
            # Sort kwargs so the key does not depend on call-site
            # ordering, and use a stable digest: built-in hash() is
            # salt-randomized per process for strings, which made cache
            # filenames differ between runs and defeated the disk cache.
            key = (relevant, tuple(sorted(kwargs.items())))
            digest = hashlib.sha1(repr(key).encode('utf-8')).hexdigest()
            filename = '{}-{}.pickle'.format(basename, digest)
            filepath = os.path.join(directory, filename)
            if os.path.isfile(filepath):
                with open(filepath, 'rb') as handle:
                    return pickle.load(handle)
            result = func(*args, **kwargs)
            with open(filepath, 'wb') as handle:
                pickle.dump(result, handle)
            return result
        return wrapped
    return wrapper
Function decorator for caching pickleable return values on disk. Uses a hash computed from the function arguments for invalidation. If 'method', skip the first argument, usually being self or cls. The cache filepath is 'directory/basename-hash.pickle'.
entailment
def download(url, directory, filename=None):
    """
    Download *url* into *directory* and return the local filepath.

    A file that already exists locally is not downloaded again.  When no
    filename is given it defaults to the last path component of the url.
    """
    directory = os.path.expanduser(directory)
    ensure_directory(directory)
    local_name = filename if filename else os.path.basename(url)
    filepath = os.path.join(directory, local_name)
    if not os.path.isfile(filepath):
        print('Download', filepath)
        # Stream the response straight into the file.
        with urlopen(url) as response, open(filepath, 'wb') as file_:
            shutil.copyfileobj(response, file_)
    return filepath
Download a file and return its filename on the local file system. If the file is already there, it will not be downloaded again. The filename is derived from the url if not provided. Return the filepath.
entailment