code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def deserialize(self, data, columns=None):
    """Deserialize SON into a pandas DataFrame or Series.

    Parameters
    ----------
    data: SON data (a single document, or a list of document chunks)
    columns: None, or list of strings
        optionally deserialize only a subset of the columns. Index
        columns are ALWAYS deserialized and should not be specified.

    Returns
    -------
    pandas DataFrame or Series
    """
    if not data:
        return pd.DataFrame()

    # Chunked payloads arrive as a list of documents; metadata is taken
    # from the first chunk (all chunks share it).
    chunked = isinstance(data, list)
    meta = data[0][METADATA] if chunked else data[METADATA]
    has_index = INDEX in meta

    if columns:
        if has_index:
            # Index columns are always materialized alongside the request.
            columns = list(columns) + list(meta[INDEX])
        if len(set(columns)) != len(columns):
            raise Exception("Duplicate columns specified, cannot de-serialize")

    if chunked:
        frames = [self.converter.objify(chunk, columns) for chunk in data]
        df = pd.concat(frames, ignore_index=not has_index)
    else:
        df = self.converter.objify(data, columns)

    if has_index:
        df = df.set_index(meta[INDEX])

    # A stored Series round-trips through a one-column frame.
    return df[df.columns[0]] if meta[TYPE] == 'series' else df
Deserializes SON to a DataFrame Parameters ---------- data: SON data columns: None, or list of strings optionally you can deserialize a subset of the data in the SON. Index columns are ALWAYS deserialized, and should not be specified Returns ------- pandas dataframe or series
def __buildDomainRanges(self, aProp):
    """Extract domain/range details for a property and attach them as Python objects.

    Collects domain and range assertions from both the schema.org vocabulary
    (``domainIncludes``/``rangeIncludes``) and standard RDFS
    (``rdfs:domain``/``rdfs:range``), then links each to the matching
    ``OntoClass`` from the model index, or wraps unknown/blank nodes.

    :param aProp: the property entity whose ``domains``/``ranges`` lists are populated.
    """
    domains = chain(
        aProp.rdflib_graph.objects(
            None, rdflib.term.URIRef(u'http://schema.org/domainIncludes')),
        aProp.rdflib_graph.objects(None, rdflib.RDFS.domain))
    ranges = chain(
        aProp.rdflib_graph.objects(
            None, rdflib.term.URIRef(u'http://schema.org/rangeIncludes')),
        aProp.rdflib_graph.objects(None, rdflib.RDFS.range))

    for x in domains:
        if isBlankNode(x):
            aProp.domains += [RDF_Entity(x, None, self.namespaces, is_Bnode=True)]
        else:
            aClass = self.get_class(uri=str(x))
            if aClass:
                aProp.domains += [aClass]
                aClass.domain_of += [aProp]
            else:
                # edge case: it's not an OntoClass instance known to the model
                aProp.domains += [OntoClass(x, None, self.namespaces, ext_model=True)]

    for x in ranges:
        if isBlankNode(x):
            # BUG FIX: this previously appended the blank-node range to
            # ``aProp.domains``, corrupting the domains list and dropping
            # the range information entirely.
            aProp.ranges += [RDF_Entity(x, None, self.namespaces, is_Bnode=True)]
        else:
            aClass = self.get_class(uri=str(x))
            if aClass:
                aProp.ranges += [aClass]
                aClass.range_of += [aProp]
            else:
                # eg a DataType property has xsd:STRING
                # here we're storing an ontospy entity but not adding it to
                # the main index
                aProp.ranges += [OntoClass(x, None, self.namespaces, ext_model=True)]
extract domain/range details and add to Python objects
def force_bytes(s, encoding='utf-8', errors='strict'):
    """Turn ``s`` into a ``bytes`` object, similar to
    django.utils.encoding.force_bytes.

    Bytes input is assumed to be UTF-8 encoded and is transcoded only when
    a different target encoding is requested; text input is simply encoded.
    """
    if isinstance(s, bytes):
        # Fast path: already bytes in the requested encoding.
        if encoding == 'utf-8':
            return s
        # Transcode: interpret the bytes as UTF-8, re-encode to the target.
        return s.decode('utf-8', errors).encode(encoding, errors)
    return s.encode(encoding, errors)
A function that turns "s" into a bytes object, similar to django.utils.encoding.force_bytes
def schema(ctx, schema):
    """Load schema definitions from a YAML file.

    :param ctx: click context; ``ctx.obj['grano']`` holds the API client.
    :param schema: open file handle to the YAML schema definitions.
    """
    # safe_load avoids arbitrary Python object construction from the YAML
    # stream (yaml.load without an explicit Loader is unsafe and deprecated).
    data = yaml.safe_load(schema)
    if not isinstance(data, (list, tuple)):
        data = [data]
    with click.progressbar(data, label=schema.name) as bar:
        # Loop variable renamed so it no longer shadows the ``schema``
        # file-handle parameter.
        for entry in bar:
            ctx.obj['grano'].schemata.upsert(entry)
Load schema definitions from a YAML file.
def check_profile_id(self, profile_name: str) -> Profile:
    """
    Consult locally stored ID of profile with given name, check whether ID
    matches and whether name has changed and return current name of the
    profile, and store ID of profile.

    :param profile_name: Profile name
    :return: Instance of current profile
    """
    # Look the profile up by name; a missing profile is tolerated here
    # because it may simply have been renamed (resolved below via its ID).
    profile = None
    with suppress(ProfileNotExistsException):
        profile = Profile.from_username(self.context, profile_name)
    profile_exists = profile is not None
    id_filename = self._get_id_filename(profile_name)
    try:
        # The stored unique ID is the source of truth for "same profile".
        with open(id_filename, 'rb') as id_file:
            profile_id = int(id_file.read())
        if (not profile_exists) or \
                (profile_id != profile.userid):
            if profile_exists:
                self.context.log("Profile {0} does not match the stored unique ID {1}.".format(profile_name, profile_id))
            else:
                self.context.log("Trying to find profile {0} using its unique ID {1}.".format(profile_name, profile_id))
            # Resolve the profile by its immutable numeric ID to discover
            # its current (possibly changed) username.
            profile_from_id = Profile.from_id(self.context, profile_id)
            newname = profile_from_id.username
            self.context.log("Profile {0} has changed its name to {1}.".format(profile_name, newname))
            if ((format_string_contains_key(self.dirname_pattern, 'profile') or
                 format_string_contains_key(self.dirname_pattern, 'target'))):
                # Download directory is keyed by profile name: rename the
                # whole directory to the new name.
                os.rename(self.dirname_pattern.format(profile=profile_name.lower(),
                                                      target=profile_name.lower()),
                          self.dirname_pattern.format(profile=newname.lower(),
                                                      target=newname.lower()))
            else:
                # Shared directory: only the per-profile ID file is renamed.
                os.rename('{0}/{1}_id'.format(self.dirname_pattern.format(),
                                              profile_name.lower()),
                          '{0}/{1}_id'.format(self.dirname_pattern.format(),
                                              newname.lower()))
            return profile_from_id
        return profile
    except (FileNotFoundError, ValueError):
        # No stored ID (or unreadable ID file): fall through to first-time
        # handling below.
        pass
    if profile_exists:
        # First encounter: persist the profile's unique ID for future runs.
        self.save_profile_id(profile)
        return profile
    raise ProfileNotExistsException("Profile {0} does not exist.".format(profile_name))
Consult locally stored ID of profile with given name, check whether ID matches and whether name has changed and return current name of the profile, and store ID of profile. :param profile_name: Profile name :return: Instance of current profile
def copyPropList(self, cur):
    """Do a copy of an attribute list. """
    # Auto-generated libxml2 binding: unwrap the Python-level wrapper
    # (``_o`` is the underlying C object) before calling into the C API.
    if cur is None: cur__o = None
    else: cur__o = cur._o
    ret = libxml2mod.xmlCopyPropList(self._o, cur__o)
    # The C function returns NULL on failure; surface that as a treeError.
    if ret is None:raise treeError('xmlCopyPropList() failed')
    # Re-wrap the returned C pointer in a Python-level xmlAttr object.
    __tmp = xmlAttr(_obj=ret)
    return __tmp
Do a copy of an attribute list.
def associate_failure_node(self, parent, child=None, **kwargs):
    """Add a node to run on failure.

    =====API DOCS=====
    Add a node to run on failure.

    :param parent: Primary key of parent node to associate failure node to.
    :type parent: int
    :param child: Primary key of child node to be associated.
    :type child: int
    :param `**kwargs`: Fields used to create child node if ``child`` is not provided.
    :returns: Dictionary of only one key "changed", which indicates whether the association succeeded.
    :rtype: dict

    =====API DOCS=====
    """
    # Thin wrapper: delegate to the shared associate-or-create helper with
    # the 'failure' relationship type.
    return self._assoc_or_create('failure', parent, child, **kwargs)
Add a node to run on failure. =====API DOCS===== Add a node to run on failure. :param parent: Primary key of parent node to associate failure node to. :type parent: int :param child: Primary key of child node to be associated. :type child: int :param `**kwargs`: Fields used to create child node if ``child`` is not provided. :returns: Dictionary of only one key "changed", which indicates whether the association succeeded. :rtype: dict =====API DOCS=====
def spl_json(self):
    """Private method. May be removed at any time."""
    # Serialize this object's type/value pair as a plain dict.
    return {"type": self._type, "value": self._value}
Private method. May be removed at any time.
def delete_dependency(self, from_task_name, to_task_name):
    """ Delete a dependency between two tasks.

    :param from_task_name: name of the upstream (source) task.
    :param to_task_name: name of the downstream (target) task.
    :raises DagobahError: if the job's graph cannot be modified in its
        current state.
    """
    logger.debug('Deleting dependency from {0} to {1}'.format(from_task_name, to_task_name))
    # Graph edits are only allowed in states that permit changes
    # (e.g. not while the job is mid-run).
    if not self.state.allow_change_graph:
        raise DagobahError("job's graph is immutable in its current state: %s" % self.state.status)
    self.delete_edge(from_task_name, to_task_name)
    # Persist the updated graph.
    self.commit()
Delete a dependency between two tasks.
def cached_value(self, source_file, configuration): """Return the cached declarations or None. :param source_file: Header file name :type source_file: str :param configuration: Configuration object :type configuration: :class:`parser.xml_generator_configuration_t` :rtype: Cached declarations or None """ # Check if the cache contains an entry for source_file key = self._create_cache_key(source_file) entry = self.__index.get(key) if entry is None: # print "CACHE: %s: Not cached"%source_file return None # Check if the entry is still valid. It is not valid if: # - the source_file has been updated # - the configuration object has changed (i.e. the header is parsed # by gccxml with different settings which may influence the # declarations) # - the included files have been updated # (this list is part of the cache entry as it cannot be known # by the caller when cached_value() is called. It was instead # passed to update()) # Check if the config is different... configsig = self._create_config_signature(configuration) if configsig != entry.configsig: # print "CACHE: %s: Config mismatch"%source_file return None # Check if any of the dependent files has been modified... for id_, sig in entry.filesigs: if self.__filename_rep.is_file_modified(id_, sig): # print "CACHE: %s: Entry not up to date"%source_file return None # Load and return the cached declarations cachefilename = self._create_cache_filename(source_file) decls = self._read_file(cachefilename) # print "CACHE: Using cached decls for",source_file return decls
Return the cached declarations or None. :param source_file: Header file name :type source_file: str :param configuration: Configuration object :type configuration: :class:`parser.xml_generator_configuration_t` :rtype: Cached declarations or None
def restore(self, fade=False):
    """Restore the state of a device to that which was previously saved.

    For coordinator devices restore everything. For slave devices only
    restore volume etc., not transport info (transport info comes from
    the slave's coordinator).

    Args:
        fade (bool): Whether volume should be faded up on restore.
    """
    if self.is_coordinator:
        # Start by ensuring that the speaker is paused as we don't want
        # things all rolling back when we are changing them, as this could
        # include things like audio
        transport_info = self.device.get_current_transport_info()
        if transport_info is not None:
            if transport_info['current_transport_state'] == 'PLAYING':
                self.device.pause()

        # Check if the queue should be restored
        self._restore_queue()

        # Reinstate what was playing
        if self.is_playing_queue and self.playlist_position > 0:
            # was playing from playlist
            if self.playlist_position is not None:
                # The position in the playlist returned by
                # get_current_track_info starts at 1, but when
                # playing from playlist, the index starts at 0
                self.playlist_position -= 1

            self.device.play_from_queue(self.playlist_position, False)

            if self.track_position is not None:
                if self.track_position != "":
                    self.device.seek(self.track_position)

            # reinstate track, position, play mode, cross fade
            # Need to make sure there is a proper track selected first
            self.device.play_mode = self.play_mode
            self.device.cross_fade = self.cross_fade

        elif self.is_playing_cloud_queue:
            # was playing a cloud queue started by Alexa
            # No way yet to re-start this so prevent it throwing an error!
            pass

        else:
            # was playing a stream (radio station, file, or nothing)
            # reinstate uri and meta data
            if self.media_uri != "":
                self.device.play_uri(
                    self.media_uri, self.media_metadata, start=False)

    # For all devices:
    # Reinstate all the properties that are pretty easy to do
    self.device.mute = self.mute
    self.device.bass = self.bass
    self.device.treble = self.treble
    self.device.loudness = self.loudness

    # Reinstate volume
    # Can only change volume on device with fixed volume set to False
    # otherwise get uPnP error, so check first. Before issuing a network
    # command to check, fixed volume always has volume set to 100.
    # So only checked fixed volume if volume is 100.
    if self.volume == 100:
        fixed_vol = self.device.renderingControl.GetOutputFixed(
            [('InstanceID', 0)])['CurrentFixed']
    else:
        fixed_vol = False

    # now set volume if not fixed
    if not fixed_vol:
        if fade:
            # if fade requested in restore
            # set volume to 0 then fade up to saved volume (non blocking)
            self.device.volume = 0
            self.device.ramp_to_volume(self.volume)
        else:
            # set volume
            self.device.volume = self.volume

    # Now everything is set, see if we need to be playing, stopped
    # or paused ( only for coordinators)
    if self.is_coordinator:
        if self.transport_state == 'PLAYING':
            self.device.play()
        elif self.transport_state == 'STOPPED':
            self.device.stop()
Restore the state of a device to that which was previously saved. For coordinator devices restore everything. For slave devices only restore volume etc., not transport info (transport info comes from the slave's coordinator). Args: fade (bool): Whether volume should be faded up on restore.
def decode_chain_list(in_bytes):
    """Convert a byte blob into a list of chain-id strings.

    Each string occupies a fixed-width slot of mmtf.CHAIN_LEN bytes,
    right-padded with NULL bytes.

    :param in_bytes: the input bytes
    :return: the decoded list of strings
    """
    width = mmtf.utils.constants.CHAIN_LEN
    padding = mmtf.utils.constants.NULL_BYTE
    total = len(in_bytes) // width
    # Slice fixed-width chunks, decode as ASCII, and strip the pad bytes.
    return [
        in_bytes[i * width:(i + 1) * width].decode("ascii").strip(padding)
        for i in range(total)
    ]
Convert a list of bytes to a list of strings. Each string is of length mmtf.CHAIN_LEN :param in_bytes: the input bytes :return the decoded list of strings
def fit_model(ts, sc=None):
    """
    Fits an AR(1) + GARCH(1, 1) model to the given time series.

    Parameters
    ----------
    ts: the time series to which we want to fit a AR+GARCH model as a
        Numpy array
    sc: SparkContext, required (keyword for API symmetry with siblings)

    Returns an ARGARCH model

    Raises
    ------
    ValueError: if no SparkContext is supplied.
    """
    # Explicit check instead of `assert`, which is silently stripped when
    # Python runs with optimizations (-O); also use `is None`, not `!= None`.
    if sc is None:
        raise ValueError("Missing SparkContext")

    jvm = sc._jvm
    # Delegate the actual fit to the JVM-side implementation.
    jmodel = jvm.com.cloudera.sparkts.models.ARGARCH.fitModel(
        _py2java(sc, Vectors.dense(ts)))
    return ARGARCHModel(jmodel=jmodel, sc=sc)
Fits an AR(1) + GARCH(1, 1) model to the given time series. Parameters ---------- ts: the time series to which we want to fit a AR+GARCH model as a Numpy array Returns an ARGARCH model
def create_producer(self):
    """Context manager that yields an instance of ``Producer``."""
    # NOTE(review): this is a generator function; presumably it is wrapped
    # with ``contextlib.contextmanager`` at its definition or call site
    # (decorator not visible in this chunk) -- confirm before using it
    # directly as a ``with`` target.
    # Borrow a connection from the pool (blocking until one is available)
    # and keep it checked out for the lifetime of the yielded producer.
    with self.connection_pool.acquire(block=True) as conn:
        yield self.producer(conn)
Context manager that yields an instance of ``Producer``.
def install_vendored(cls, prefix, root=None, expose=None):
    """Install an importer for all vendored code with the given import prefix.

    All distributions listed in ``expose`` will also be made available for
    import in direct, un-prefixed form.

    :param str prefix: The import prefix the installed importer will be
        responsible for.
    :param str root: The root path of the distribution containing the
        vendored code. NB: This is the the path to the pex code, which serves
        as the root under which code is vendored at ``pex/vendor/_vendored``.
    :param expose: Optional names of distributions to expose for direct,
        un-prefixed import.
    :type expose: list of str
    :raise: :class:`ValueError` if any distributions to expose cannot be
        found.
    """
    from pex import vendor

    root = cls._abs_root(root)
    vendored_path_items = [spec.relpath for spec in vendor.iter_vendor_specs()]

    # At most one importer should ever be managing this prefix/root pair.
    installed = list(cls._iter_installed_vendor_importers(prefix, root, vendored_path_items))
    assert len(installed) <= 1, (
        'Unexpected extra importers installed for vendored code:\n\t{}'
        .format('\n\t'.join(map(str, installed)))
    )
    if installed:
        vendor_importer = installed[0]
    else:
        # Install all vendored code for pex internal access to it through
        # the vendor import `prefix`.
        vendor_importer = cls.install(uninstallable=True,
                                      prefix=prefix,
                                      path_items=vendored_path_items,
                                      root=root)

    if expose:
        # But only expose the bits needed.
        exposed_paths = []
        for path in cls.expose(expose, root):
            # Prepend so exposed distributions win over anything already
            # on sys.path.
            sys.path.insert(0, path)
            exposed_paths.append(os.path.relpath(path, root))

        vendor_importer._expose(exposed_paths)
Install an importer for all vendored code with the given import prefix. All distributions listed in ``expose`` will also be made available for import in direct, un-prefixed form. :param str prefix: The import prefix the installed importer will be responsible for. :param str root: The root path of the distribution containing the vendored code. NB: This is the the path to the pex code, which serves as the root under which code is vendored at ``pex/vendor/_vendored``. :param expose: Optional names of distributions to expose for direct, un-prefixed import. :type expose: list of str :raise: :class:`ValueError` if any distributions to expose cannot be found.
def reference_preprocessing(job, config):
    """
    Creates a genome fasta index and sequence dictionary file if not already
    present in the pipeline config.

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param Namespace config: Pipeline configuration options and shared files.
        Requires FileStoreID for genome fasta file as config.genome_fasta
    :return: Updated config with reference index files
    :rtype: Namespace
    """
    job.fileStore.logToMaster('Preparing Reference Files')
    fasta_id = config.genome_fasta
    # Only schedule the indexing children for files the config is missing.
    if getattr(config, 'genome_fai', None) is None:
        config.genome_fai = job.addChildJobFn(run_samtools_faidx,
                                              fasta_id,
                                              cores=config.cores).rv()
    if getattr(config, 'genome_dict', None) is None:
        config.genome_dict = job.addChildJobFn(run_picard_create_sequence_dictionary,
                                               fasta_id,
                                               cores=config.cores,
                                               memory=config.xmx).rv()
    return config
Creates a genome fasta index and sequence dictionary file if not already present in the pipeline config. :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Pipeline configuration options and shared files. Requires FileStoreID for genome fasta file as config.genome_fasta :return: Updated config with reference index files :rtype: Namespace
def remove(name, path):
    '''
    Removes installed alternative for defined <name> and <path>
    or fallback to default alternative, if some defined before.

    name
        is the master name for this link group
        (e.g. pager)

    path
        is the location of one of the alternative target files.
        (e.g. /usr/bin/less)
    '''
    ret = {'name': name,
           'path': path,
           'result': True,
           'changes': {},
           'comment': ''}

    isinstalled = __salt__['alternatives.check_exists'](name, path)

    # Nothing installed for this name/path pair: report the current default
    # (if any) and stop.
    if not isinstalled:
        current = __salt__['alternatives.show_current'](name)
        if current:
            ret['result'] = True
            ret['comment'] = (
                'Alternative for {0} is set to it\'s default path {1}'
            ).format(name, current)
            return ret
        ret['result'] = False
        ret['comment'] = (
            'Alternative for {0} doesn\'t exist'
        ).format(name)
        return ret

    # Dry run: report what would happen without touching anything.
    if __opts__['test']:
        ret['comment'] = ('Alternative for {0} will be removed'
                          .format(name))
        ret['result'] = None
        return ret

    __salt__['alternatives.remove'](name, path)
    current = __salt__['alternatives.show_current'](name)
    if current:
        # Removal fell back to another registered alternative.
        ret['result'] = True
        ret['comment'] = (
            'Alternative for {0} removed. Falling back to path {1}'
        ).format(name, current)
        ret['changes'] = {'path': current}
        return ret

    ret['comment'] = 'Alternative for {0} removed'.format(name)
    ret['changes'] = {}
    return ret
Removes installed alternative for defined <name> and <path> or fallback to default alternative, if some defined before. name is the master name for this link group (e.g. pager) path is the location of one of the alternative target files. (e.g. /usr/bin/less)
def _call_function(name, returner=None, **kwargs):
    '''
    Calls a function from the specified module.

    :param name: dotted module.function name to look up in ``__salt__``.
    :param returner: optional returner name to ship the result to.
    :param kwargs: ``func_args`` holds the mixed positional/keyword argument
        list supplied by the state.
    :return: whatever the called module function returns.
    '''
    argspec = salt.utils.args.get_function_argspec(__salt__[name])

    # func_kw is initialized to a dictionary of keyword arguments the
    # function to be run accepts
    func_kw = dict(zip(argspec.args[-len(argspec.defaults or []):],  # pylint: disable=incompatible-py3-code
                       argspec.defaults or []))

    # func_args is initialized to a list of positional arguments that the
    # function to be run accepts
    func_args = argspec.args[:len(argspec.args or []) - len(argspec.defaults or [])]
    # NOTE(review): ``na_type`` is initialized to an empty dict and never
    # populated, so the "missing arguments" branch below can never trigger;
    # the elif count check is what actually fires. Left as-is here.
    arg_type, kw_to_arg_type, na_type, kw_type = [], {}, {}, False
    for funcset in reversed(kwargs.get('func_args') or []):
        if not isinstance(funcset, dict):
            # We are just receiving a list of args to the function to be
            # run, so just append those to the arg list that we will pass
            # to the func.
            arg_type.append(funcset)
        else:
            for kwarg_key in six.iterkeys(funcset):
                # We are going to pass in a keyword argument. The trick here
                # is to make certain that if we find that in the *args* list
                # that we pass it there and not as a kwarg
                if kwarg_key in func_args:
                    kw_to_arg_type[kwarg_key] = funcset[kwarg_key]
                    continue
                else:
                    # Otherwise, we're good and just go ahead and pass the
                    # keyword/value pair into the kwargs list to be run.
                    func_kw.update(funcset)
    # Input was consumed in reverse; restore the caller's ordering.
    arg_type.reverse()
    # Route keyword-supplied values that name positional parameters into the
    # positional list, in declaration order.
    for arg in func_args:
        if arg in kw_to_arg_type:
            arg_type.append(kw_to_arg_type[arg])
    _exp_prm = len(argspec.args or []) - len(argspec.defaults or [])
    _passed_prm = len(arg_type)
    missing = []
    if na_type and _exp_prm > _passed_prm:
        for arg in argspec.args:
            if arg not in func_kw:
                missing.append(arg)
    if missing:
        raise SaltInvocationError('Missing arguments: {0}'.format(', '.join(missing)))
    elif _exp_prm > _passed_prm:
        raise SaltInvocationError('Function expects {0} parameters, got only {1}'.format(
            _exp_prm, _passed_prm))

    mret = __salt__[name](*arg_type, **func_kw)
    if returner is not None:
        # Ship the result through the configured returner, if it exists.
        returners = salt.loader.returners(__opts__, __salt__)
        if returner in returners:
            returners[returner]({'id': __opts__['id'],
                                 'ret': mret,
                                 'fun': name,
                                 'jid': salt.utils.jid.gen_jid(__opts__)})

    return mret
Calls a function from the specified module. :param name: :param kwargs: :return:
def show_history(self, status=None, nids=None, full_history=False, metadata=False):
    """
    Print the history of the flow to stdout.

    Args:
        status: if not None, only the tasks with this status are select
        full_history: Print full info set, including nodes with an empty history.
        nids: optional list of node identifiers used to filter the tasks.
        metadata: print history metadata (experimental)
    """
    nrows, ncols = get_terminal_size()

    works_done = []
    # Loop on the tasks and show the history of the work if not in works_done
    for task in self.iflat_tasks(status=status, nids=nids):
        work = task.work

        if work not in works_done:
            works_done.append(work)
            # Banner + history for the work, colored by its status.
            if work.history or full_history:
                cprint(make_banner(str(work), width=ncols, mark="="),
                       **work.status.color_opts)
                print(work.history.to_string(metadata=metadata))

        if task.history or full_history:
            cprint(make_banner(str(task), width=ncols, mark="="),
                   **task.status.color_opts)
            print(task.history.to_string(metadata=metadata))

    # Print the history of the flow.
    if self.history or full_history:
        cprint(make_banner(str(self), width=ncols, mark="="),
               **self.status.color_opts)
        print(self.history.to_string(metadata=metadata))
Print the history of the flow to stdout. Args: status: if not None, only the tasks with this status are select full_history: Print full info set, including nodes with an empty history. nids: optional list of node identifiers used to filter the tasks. metadata: print history metadata (experimental)
def unpack_response(file_information_class, buffer):
    """
    Pass in the buffer value from the response object to unpack it and
    return a list of query response structures for the request.

    :param buffer: The raw bytes value of the SMB2QueryDirectoryResponse
        buffer field.
    :return: List of query_info.* structures based on the
        FileInformationClass used in the initial query request.
    """
    structs = smbprotocol.query_info
    # Map the requested information class to its parsing structure.
    struct_map = {
        FileInformationClass.FILE_DIRECTORY_INFORMATION:
            structs.FileDirectoryInformation,
        FileInformationClass.FILE_NAMES_INFORMATION:
            structs.FileNamesInformation,
        FileInformationClass.FILE_BOTH_DIRECTORY_INFORMATION:
            structs.FileBothDirectoryInformation,
        FileInformationClass.FILE_ID_BOTH_DIRECTORY_INFORMATION:
            structs.FileIdBothDirectoryInformation,
        FileInformationClass.FILE_FULL_DIRECTORY_INFORMATION:
            structs.FileFullDirectoryInformation,
        FileInformationClass.FILE_ID_FULL_DIRECTORY_INFORMATION:
            structs.FileIdFullDirectoryInformation,
    }
    resp_structure = struct_map[file_information_class]

    # Walk the entry chain: each record stores the offset of the next one;
    # an offset of 0 marks the final entry.
    query_results = []
    offset = 0
    while True:
        entry = resp_structure()
        entry.unpack(buffer[offset:])
        query_results.append(entry)
        step = entry['next_entry_offset'].get_value()
        if step == 0:
            break
        offset += step

    return query_results
Pass in the buffer value from the response object to unpack it and return a list of query response structures for the request. :param buffer: The raw bytes value of the SMB2QueryDirectoryResponse buffer field. :return: List of query_info.* structures based on the FileInformationClass used in the initial query request.
def _float(text): """Fonction to convert the 'decimal point assumed' format of TLE to actual float >>> _float('0000+0') 0.0 >>> _float('+0000+0') 0.0 >>> _float('34473-3') 0.00034473 >>> _float('-60129-4') -6.0129e-05 >>> _float('+45871-4') 4.5871e-05 """ text = text.strip() if text[0] in ('-', '+'): text = "%s.%s" % (text[0], text[1:]) else: text = "+.%s" % text if "+" in text[1:] or "-" in text[1:]: value, exp_sign, expo = text.rpartition('+') if '+' in text[1:] else text.rpartition('-') v = float('{value}e{exp_sign}{expo}'.format(value=value, exp_sign=exp_sign, expo=expo)) else: v = float(text) return v
Function to convert the 'decimal point assumed' format of TLE to actual float >>> _float('0000+0') 0.0 >>> _float('+0000+0') 0.0 >>> _float('34473-3') 0.00034473 >>> _float('-60129-4') -6.0129e-05 >>> _float('+45871-4') 4.5871e-05
def sym(self, nested_scope=None):
    """Return the corresponding symbolic number.

    :param nested_scope: optional scope forwarded to the operand's ``sym``
        evaluation (semantics defined by the child node classes).
    """
    # children[0] supplies the unary operation; children[1] is the operand.
    operation = self.children[0].operation()
    expr = self.children[1].sym(nested_scope)
    return operation(expr)
Return the corresponding symbolic number.
def dns_get_conf(self, domainName, environment):
    """
    Returns the existing domain configuration and token from the ADNS

    :param domainName: domain whose configuration is requested.
    :param environment: ADNS environment to query.
    :return: a ``CotendoDNS`` object wrapping the service response.
    """
    # SOAP call through the service client configured on this instance.
    response = self.client.service.dns_get_conf(domainName, environment)
    dns_config = CotendoDNS(response)
    return dns_config
Returns the existing domain configuration and token from the ADNS
def validate(self, value, model=None, context=None):
    """
    Validate
    Perform value validation and return result

    :param value: value to check
    :param model: parent model being validated
    :param context: object or None, validation context
    :return: shiftschema.results.SimpleResult
    """
    regex = self.regex()
    match = regex.match(value)
    if not match:
        return Error(self.not_email)

    # success otherwise
    # NOTE(review): an ``Error()`` constructed with no message appears to
    # represent a passing result in this framework -- confirm against the
    # shiftschema result semantics.
    return Error()
Validate Perform value validation and return result :param value: value to check :param model: parent model being validated :param context: object or None, validation context :return: shiftschema.results.SimpleResult
def setProduct(self, cache=False, *args, **kwargs):
    """Adds the product for this loan to a 'product' field.

    Product is a MambuProduct object.

    cache argument allows to use AllMambuProducts singleton to
    retrieve the products. See mambuproduct.AllMambuProducts code
    and pydoc for further information.

    Returns the number of requests done to Mambu.
    """
    if cache:
        # Lazily import/memoize the AllMambuProducts class on first use
        # (avoids a circular import at module load time).
        try:
            prods = self.allmambuproductsclass(*args, **kwargs)
        except AttributeError as ae:
            from .mambuproduct import AllMambuProducts
            self.allmambuproductsclass = AllMambuProducts
            prods = self.allmambuproductsclass(*args, **kwargs)

        # Find this loan's product among the cached products by key.
        for prod in prods:
            if prod['encodedKey'] == self['productTypeKey']:
                self['product'] = prod
        try:
            # asked for cache, but cache was originally empty
            prods.noinit
        except AttributeError:
            # The singleton actually hit Mambu to populate itself.
            return 1
        return 0

    # Non-cached path: fetch the single product directly from Mambu,
    # lazily importing/memoizing MambuProduct the first time.
    try:
        product = self.mambuproductclass(entid=self['productTypeKey'], *args, **kwargs)
    except AttributeError as ae:
        from .mambuproduct import MambuProduct
        self.mambuproductclass = MambuProduct
        product = self.mambuproductclass(entid=self['productTypeKey'], *args, **kwargs)

    self['product'] = product
    return 1
Adds the product for this loan to a 'product' field. Product is a MambuProduct object. cache argument allows to use AllMambuProducts singleton to retrieve the products. See mambuproduct.AllMambuProducts code and pydoc for further information. Returns the number of requests done to Mambu.
def _edit_tags(self, tag, items, locked=True, remove=False):
    """Helper to edit tags on this item and refresh it.

    Parameters:
        tag (str): tag name
        items (list): list of tags to add (or remove)
        locked (bool): lock this field.
        remove (bool): when True, remove the tags in items instead of adding.
    """
    if not isinstance(items, list):
        items = [items]
    # Current tag objects live on the plural attribute (e.g. 'genres').
    current = getattr(self, tag + 's')
    # When adding, keep the existing tags so they aren't clobbered;
    # when removing, start from an empty baseline.
    existing = [entry.tag for entry in current if entry and remove is False]
    payload = tag_helper(tag, existing + items, locked, remove)
    self.edit(**payload)
    self.refresh()
Helper to edit and refresh a tags. Parameters: tag (str): tag name items (list): list of tags to add locked (bool): lock this field. remove (bool): If this is active remove the tags in items.
def add_edge(self, source, target):
    """Create, register, and return a new edge from source to target.

    Args:
        source: The source Vertex.
        target: The target Vertex.

    Returns:
        A new Edge linking source to target.
    """
    # The edge's id is its position in the graph's edge list.
    new_edge = Edge(len(self.edges))
    self.edges.append(new_edge)

    # Record endpoint indices on the edge and cross-link the vertices.
    new_edge.source = source.idx
    new_edge.target = target.idx
    source.out_edges.append(new_edge.idx)
    target.in_edges.append(new_edge.idx)
    return new_edge
Returns a new edge connecting source and target vertices. Args: source: The source Vertex. target: The target Vertex. Returns: A new Edge linking source to target.
def encrypt(api_context, request_bytes, custom_headers):
    """
    Encrypt the request body, recording the crypto material in headers.

    :type api_context: bunq.sdk.context.ApiContext
    :type request_bytes: bytes
    :type custom_headers: dict[str, str]

    :rtype: bytes
    """
    # Fresh AES key and IV generated per request.
    key = Random.get_random_bytes(_AES_KEY_SIZE)
    iv = Random.get_random_bytes(_BLOCK_SIZE)

    # Helpers attach the key and IV to the outgoing headers (presumably
    # protecting the key for transport -- see the helper implementations).
    _add_header_client_encryption_key(api_context, key, custom_headers)
    _add_header_client_encryption_iv(iv, custom_headers)

    request_bytes = _encrypt_request_bytes(request_bytes, key, iv)

    # HMAC over the encrypted payload goes in the headers as well.
    _add_header_client_encryption_hmac(request_bytes, key, iv, custom_headers)

    return request_bytes
:type api_context: bunq.sdk.context.ApiContext :type request_bytes: bytes :type custom_headers: dict[str, str] :rtype: bytes
def absorb(self, trits, offset=0, length=None):
    # type: (Sequence[int], Optional[int], Optional[int]) -> None
    """
    Absorb trits into the sponge.

    :param trits: Sequence of trits to absorb.
    :param offset: Starting offset in ``trits``.
    :param length: Number of trits to absorb. Defaults to ``len(trits)``
        (measured after padding to a whole number of hashes).
    """
    # Pad the input out to a whole multiple of HASH_LENGTH.
    # BUG FIX: the original used ``trits += [...]``, which extends the
    # caller's list in place -- a surprising side effect on shared input.
    # Building a fresh list avoids the mutation and also accepts any
    # sequence type (e.g. tuples), not just lists.
    pad = ((len(trits) % HASH_LENGTH) or HASH_LENGTH)
    trits = list(trits) + [0] * (HASH_LENGTH - pad)

    if length is None:
        length = len(trits)

    if length < 1:
        raise with_context(
            exc=ValueError('Invalid length passed to ``absorb``.'),

            context={
                'trits': trits,
                'offset': offset,
                'length': length,
            },
        )

    # Copy trits from ``trits`` into internal state, one hash at a
    # time, transforming internal state in between hashes.
    while offset < length:
        start = offset
        stop = min(start + HASH_LENGTH, length)

        # Copy the next hash worth of trits to internal state.
        #
        # Note that we always copy the trits to the start of the
        # state. ``self._state`` is 3 hashes long, but only the
        # first hash is "public"; the other 2 are only accessible to
        # :py:meth:`_transform`.
        self._state[0:stop - start] = trits[start:stop]

        # Transform.
        self._transform()

        # Move on to the next hash.
        offset += HASH_LENGTH
Absorb trits into the sponge. :param trits: Sequence of trits to absorb. :param offset: Starting offset in ``trits``. :param length: Number of trits to absorb. Defaults to ``len(trits)``.
def check_api_key(request, key, hproPk):
    """Check if an API key is valid"""
    # Standalone deployments skip key checking entirely.
    if settings.PIAPI_STANDALONE:
        return True

    (_, _, hproject) = getPlugItObject(hproPk)
    if not hproject:
        return False

    api_key = hproject.plugItApiKey
    # A project with no key configured never authenticates.
    if api_key is None or api_key == '':
        return False

    return api_key == key
Check if an API key is valid
def create(self, project, title, href, **attrs):
    """
    Create a new :class:`WikiLink`

    :param project: :class:`Project` id
    :param title: title of the wiki link
    :param href: href for the wiki link
    :param attrs: optional attributes for the :class:`WikiLink`
    """
    # Required fields take precedence over anything passed in attrs.
    payload = dict(attrs, project=project, title=title, href=href)
    return self._new_resource(payload=payload)
Create a new :class:`WikiLink` :param project: :class:`Project` id :param title: title of the wiki link :param href: href for the wiki link :param attrs: optional attributes for the :class:`WikiLink`
def s2p(self):
    """Return 2 proton separation energy"""
    M_P = 7.28897050  # proton mass excess in MeV

    # S2p = -M(parent) + M(daughter) + 2 * M_P
    def two_proton_separation(parent, daughter):
        return -parent + daughter + 2 * M_P

    return self.derived('s2p', (-2, 0), two_proton_separation)
Return 2 proton separation energy
def convertDict2Attrs(self, *args, **kwargs):
    """The trick for iterable Mambu Objects comes here:

    You iterate over each element of the responded List from Mambu,
    and create a Mambu Loan (or your own itemclass) object for each
    one, initializing them one at a time, and changing the attrs
    attribute (which just holds a list of plain dictionaries) with a
    MambuLoan (or your own itemclass) just created.

.. todo:: pass a valid (perhaps default) urlfunc, and its
          corresponding id to entid to each itemclass, telling
          MambuStruct not to connect() by default. It's desirable to
          connect at any other further moment to refresh some element
          in the list.
    """
    for n,l in enumerate(self.attrs):
        # ok ok, I'm modifying elements of a list while iterating it. BAD PRACTICE!
        try:
            params = self.params
        except AttributeError as aerr:
            # No stored request params on this instance; use none.
            params = {}
        # NOTE(review): instance params override caller-supplied kwargs
        # here -- confirm this precedence is intended.
        kwargs.update(params)
        try:
            # urlfunc=None / entid=None means "build without connecting".
            loan = self.mambuloanclass(urlfunc=None, entid=None, *args, **kwargs)
        except AttributeError as ae:
            # Lazily resolve the item class on first use, then retry.
            self.mambuloanclass = self.itemclass
            loan = self.mambuloanclass(urlfunc=None, entid=None, *args, **kwargs)
        # Seed the new object from the already-fetched plain dict ``l``.
        loan.init(l, *args, **kwargs)
        self.attrs[n] = loan
The trick for iterable Mambu Objects comes here: You iterate over each element of the responded List from Mambu, and create a Mambu Loan (or your own itemclass) object for each one, initializing them one at a time, and changing the attrs attribute (which just holds a list of plain dictionaries) with a MambuLoan (or your own itemclass) just created. .. todo:: pass a valid (perhaps default) urlfunc, and its corresponding id to entid to each itemclass, telling MambuStruct not to connect() by default. It's desirable to connect at any other further moment to refresh some element in the list.
def intersect(self, other):
    """
    Makes a striplog of all intersections.

    Args:
        Striplog. The striplog instance to intersect with.

    Returns:
        Striplog. The result of the intersection.
    """
    if not isinstance(other, self.__class__):
        m = "You can only intersect striplogs with each other."
        raise StriplogError(m)

    pieces = []
    for mine in self:
        for theirs in other:
            try:
                pieces.append(mine.intersect(theirs))
            except IntervalError:
                # Non-overlapping pair; nothing to record.
                continue

    return Striplog(pieces)
Makes a striplog of all intersections. Args: Striplog. The striplog instance to intersect with. Returns: Striplog. The result of the intersection.
def fetchMore(self, index):
    '''Fetch additional data under *index*.'''
    source = self.sourceModel()
    # Without a source model there is nothing to fetch.
    if not source:
        return False
    return source.fetchMore(self.mapToSource(index))
Fetch additional data under *index*.
def parse_response(response):
    """
    Parse an HTTP response body.

    When the response's Content-Type is application/json, the body is
    decoded and wrapped in a ``ResponseObject``; otherwise the raw body
    is returned unchanged.

    :param response: HTTPRequest
    :return: ``ResponseObject`` for JSON content, otherwise the response body
    """
    # Missing Content-Type is treated as JSON, matching the default.
    content_type = response.headers.get('Content-Type', JSON_TYPE)
    if not content_type.startswith(JSON_TYPE):
        return response.body
    return ResponseObject(json.loads(response.body))
parse the response and return a dictionary if the content type is application/json. :param response: HTTPRequest :return dictionary for json content type otherwise response body
def rm_r(sftp, path):
    """Recursively delete the contents of ``path`` on an SFTP server.

    https://stackoverflow.com/a/23256181

    :param sftp: paramiko-style SFTP client exposing ``listdir`` and
        ``remove``.
    :param path: remote directory whose contents should be removed.
    """
    # Remote SFTP paths are always POSIX, so join with posixpath rather
    # than os.path: os.path.join would insert backslashes on Windows and
    # target the wrong remote files.
    import posixpath

    files = sftp.listdir(path)

    for f in files:
        filepath = posixpath.join(path, f)
        logger.info('Deleting: %s' % (filepath))
        try:
            sftp.remove(filepath)
        except IOError:
            # ``remove`` fails on directories; recurse into them.
            rm_r(sftp, filepath)
    # NOTE(review): as in the original, empty directories are left
    # behind -- call sftp.rmdir(path) after the loop if full removal
    # is desired.
Recursively delete contents of path https://stackoverflow.com/a/23256181
def shell(command, *args):
    '''Pass a command into the shell.

    :param command: shell command, optionally containing ``str.format``
        placeholders
    :param args: positional values substituted into ``command``
    :return: the command's output on success, or the
        ``subprocess.CalledProcessError`` instance on failure
    '''
    if args:
        command = command.format(*args)
    # Python 3 syntax: the original used the Python 2 ``print`` statement
    # and ``except ..., ex`` form, both syntax errors on Python 3.
    print(LOCALE['shell'].format(command))
    try:
        return subprocess.check_output(command, shell=True)
    except subprocess.CalledProcessError as ex:
        # Best-effort contract: the error object is returned, not raised.
        return ex
Pass a command into the shell.
def get_typecast(self):
    """Returns the typecast or ``None`` of this object as a string."""
    # Locate the '::' punctuation, then take the token right after it.
    marker_idx, _marker = self.token_next_by(m=(T.Punctuation, '::'))
    _next_idx, token = self.token_next(marker_idx, skip_ws=False)
    return token.value if token else None
Returns the typecast or ``None`` of this object as a string.
def cumprod_to_tensor_axis(self, cumprod):
    """Maximum tensor axis i such that self.cumprod[i] == cumprod, or None."""
    # Search the reversed list so the *last* matching axis wins.
    try:
        reversed_pos = self.cumprod[::-1].index(cumprod)
    except ValueError:
        return None
    return len(self) - 1 - reversed_pos
Maximum tensor axis i such that self.cumprod[i] == cumprod, or None.
def get_file_info(hash, context=None):
    """Returns information about the file, identified by ``hash``.

    If the `context` (an ident-hash) is supplied, the information
    returned will be specific to that context.

    """
    # Pick the context-specific query when an ident-hash was given.
    if context is None:
        stmt = _get_sql('get-file-info.sql')
        args = dict(hash=hash)
    else:
        id, version = get_id_n_version(context)
        stmt = _get_sql('get-file-info-in-context.sql')
        args = dict(hash=hash, id=id, version=version)

    with db_connect() as db_conn:
        with db_conn.cursor() as cursor:
            cursor.execute(stmt, args)
            row = cursor.fetchone()
            # No matching row means the hash is unknown.
            if row is None:
                raise FileNotFound(hash)
            filename, media_type = row
    return filename, media_type
Returns information about the file, identified by ``hash``. If the `context` (an ident-hash) is supplied, the information returned will be specific to that context.
def load_xml(self, xmlfile, **kwargs):
    """Load sources from an XML file.

    Point sources inside the ROI selection are loaded into the model
    (and returned); diffuse sources are loaded unconditionally.

    Parameters
    ----------
    xmlfile : str
        Path to the XML model definition.  If the path does not exist
        it is looked up in the bundled ``catalogs`` package data.
    **kwargs
        ``extdir`` (extended-source template directory) and
        ``coordsys`` (``'CEL'`` or ``'GAL'``) override the defaults.
    """
    extdir = kwargs.get('extdir', self.extdir)
    coordsys = kwargs.get('coordsys', 'CEL')
    # Fall back to the package's bundled catalogs directory.
    if not os.path.isfile(xmlfile):
        xmlfile = os.path.join(fermipy.PACKAGE_DATA, 'catalogs', xmlfile)

    root = ElementTree.ElementTree(file=xmlfile).getroot()
    diffuse_srcs = []
    srcs = []
    ra, dec = [], []

    # Split point sources (with sky coordinates) from diffuse sources.
    for s in root.findall('source'):
        src = Source.create_from_xml(s, extdir=extdir)
        if src.diffuse:
            diffuse_srcs += [src]
        else:
            srcs += [src]
            ra += [src['RAJ2000']]
            dec += [src['DEJ2000']]

    src_skydir = SkyCoord(ra=np.array(ra) * u.deg, dec=np.array(dec) * u.deg)
    radec = np.vstack((src_skydir.ra.deg, src_skydir.dec.deg)).T
    glonlat = np.vstack((src_skydir.galactic.l.deg, src_skydir.galactic.b.deg)).T

    # Angular offsets of each source from the ROI center, in celestial
    # and galactic projections.
    offset = self.skydir.separation(src_skydir).deg
    offset_cel = wcs_utils.sky_to_offset(self.skydir, radec[:, 0], radec[:, 1], 'CEL')
    offset_gal = wcs_utils.sky_to_offset(self.skydir, glonlat[:, 0], glonlat[:, 1], 'GAL')

    # Selection: circular cut (src_radius) AND square cut (src_radius_roi).
    m0 = get_skydir_distance_mask(src_skydir, self.skydir, self.config['src_radius'])
    m1 = get_skydir_distance_mask(src_skydir, self.skydir, self.config['src_radius_roi'], square=True, coordsys=coordsys)
    m = (m0 & m1)
    srcs = np.array(srcs)[m]
    for i, s in enumerate(srcs):
        s.data['offset'] = offset[m][i]
        s.data['offset_ra'] = offset_cel[:, 0][m][i]
        s.data['offset_dec'] = offset_cel[:, 1][m][i]
        s.data['offset_glon'] = offset_gal[:, 0][m][i]
        s.data['offset_glat'] = offset_gal[:, 1][m][i]
        self.load_source(s, False, merge_sources=self.config['merge_sources'])

    for i, s in enumerate(diffuse_srcs):
        self.load_source(s, False, merge_sources=self.config['merge_sources'])

    self._build_src_index()

    return srcs
Load sources from an XML file.
def build_kernel_to_data(self, Y, knn=None, bandwidth=None,
                         bandwidth_scale=None):
    """Build a kernel from new input data `Y` to the `self.data`

    Parameters
    ----------

    Y: array-like, [n_samples_y, n_features]
        new data for which an affinity matrix is calculated
        to the existing data. `n_features` must match either the
        ambient or PCA dimensions

    knn : `int` or `None`, optional (default: `None`)
        If `None`, defaults to `self.knn`

    bandwidth : `float`, `callable`, or `None`,
        optional (default: `None`)
        If `None`, defaults to `self.bandwidth`

    bandwidth_scale : `float`, optional (default : `None`)
        Rescaling factor for bandwidth. If `None`, defaults to
        self.bandwidth_scale

    Returns
    -------

    K_yx: array-like, [n_samples_y, n_samples]
        kernel matrix where each row represents affinities of a single
        sample in `Y` to all samples in `self.data`.

    Raises
    ------

    ValueError: if the supplied data is the wrong shape
    """
    if knn is None:
        knn = self.knn
    if bandwidth is None:
        bandwidth = self.bandwidth
    if bandwidth_scale is None:
        bandwidth_scale = self.bandwidth_scale
    if knn > self.data.shape[0]:
        # Fix: this warning message contained a literal newline inside a
        # single-quoted string (a syntax error); rejoined onto one
        # logical string.
        warnings.warn("Cannot set knn ({k}) to be greater than "
                      "n_samples ({n}). Setting knn={n}".format(
                          k=knn, n=self.data.shape[0]))

    Y = self._check_extension_shape(Y)
    tasklogger.log_start("KNN search")
    if self.decay is None or self.thresh == 1:
        # binary connectivity matrix
        K = self.knn_tree.kneighbors_graph(
            Y, n_neighbors=knn,
            mode='connectivity')
        tasklogger.log_complete("KNN search")
    else:
        # sparse fast alpha decay
        knn_tree = self.knn_tree
        search_knn = min(knn * 20, self.data_nu.shape[0])
        distances, indices = knn_tree.kneighbors(
            Y, n_neighbors=search_knn)
        self._check_duplicates(distances, indices)
        tasklogger.log_complete("KNN search")
        tasklogger.log_start("affinities")
        if bandwidth is None:
            # Adaptive bandwidth: distance to the knn-th neighbor.
            bandwidth = distances[:, knn - 1]

        bandwidth = bandwidth * bandwidth_scale

        # Radius beyond which affinities fall below ``thresh``.
        radius = bandwidth * np.power(-1 * np.log(self.thresh),
                                      1 / self.decay)
        update_idx = np.argwhere(
            np.max(distances, axis=1) < radius).reshape(-1)
        tasklogger.log_debug("search_knn = {}; {} remaining".format(
            search_knn, len(update_idx)))
        if len(update_idx) > 0:
            distances = [d for d in distances]
            indices = [i for i in indices]
        # Grow the search until (almost) every row's neighborhood
        # covers its radius, or the search spans half the data.
        while len(update_idx) > Y.shape[0] // 10 and \
                search_knn < self.data_nu.shape[0] / 2:
            # increase the knn search
            search_knn = min(search_knn * 20, self.data_nu.shape[0])
            dist_new, ind_new = knn_tree.kneighbors(
                Y[update_idx], n_neighbors=search_knn)
            for i, idx in enumerate(update_idx):
                distances[idx] = dist_new[i]
                indices[idx] = ind_new[i]
            update_idx = [i for i, d in enumerate(distances)
                          if np.max(d) < (radius
                                          if isinstance(bandwidth,
                                                        numbers.Number)
                                          else radius[i])]
            tasklogger.log_debug("search_knn = {}; {} remaining".format(
                search_knn, len(update_idx)))
        if search_knn > self.data_nu.shape[0] / 2:
            knn_tree = NearestNeighbors(
                search_knn, algorithm='brute',
                n_jobs=self.n_jobs).fit(self.data_nu)
        if len(update_idx) > 0:
            tasklogger.log_debug(
                "radius search on {}".format(len(update_idx)))
            # give up - radius search
            dist_new, ind_new = knn_tree.radius_neighbors(
                Y[update_idx, :],
                radius=radius
                if isinstance(bandwidth, numbers.Number)
                else np.max(radius[update_idx]))
            for i, idx in enumerate(update_idx):
                distances[idx] = dist_new[i]
                indices[idx] = ind_new[i]
        if isinstance(bandwidth, numbers.Number):
            data = np.concatenate(distances) / bandwidth
        else:
            data = np.concatenate([distances[i] / bandwidth[i]
                                   for i in range(len(distances))])

        indices = np.concatenate(indices)
        indptr = np.concatenate(
            [[0], np.cumsum([len(d) for d in distances])])
        K = sparse.csr_matrix((data, indices, indptr),
                              shape=(Y.shape[0], self.data_nu.shape[0]))
        K.data = np.exp(-1 * np.power(K.data, self.decay))
        # handle nan
        K.data = np.where(np.isnan(K.data), 1, K.data)
        # TODO: should we zero values that are below thresh?
        K.data[K.data < self.thresh] = 0
        K = K.tocoo()
        K.eliminate_zeros()
        K = K.tocsr()
        tasklogger.log_complete("affinities")
    return K
Build a kernel from new input data `Y` to the `self.data` Parameters ---------- Y: array-like, [n_samples_y, n_features] new data for which an affinity matrix is calculated to the existing data. `n_features` must match either the ambient or PCA dimensions knn : `int` or `None`, optional (default: `None`) If `None`, defaults to `self.knn` bandwidth : `float`, `callable`, or `None`, optional (default: `None`) If `None`, defaults to `self.bandwidth` bandwidth_scale : `float`, optional (default : `None`) Rescaling factor for bandwidth. If `None`, defaults to self.bandwidth_scale Returns ------- K_yx: array-like, [n_samples_y, n_samples] kernel matrix where each row represents affinities of a single sample in `Y` to all samples in `self.data`. Raises ------ ValueError: if the supplied data is the wrong shape
def isPartitionMarkedForEvent(self, db_name, tbl_name, part_vals, eventType):
    """
    Parameters:
     - db_name
     - tbl_name
     - part_vals
     - eventType
    """
    # Thrift client pattern: send the request, then block on the reply.
    self.send_isPartitionMarkedForEvent(db_name, tbl_name, part_vals,
                                        eventType)
    return self.recv_isPartitionMarkedForEvent()
Parameters: - db_name - tbl_name - part_vals - eventType
def create_client_from_env(username=None,
                           api_key=None,
                           endpoint_url=None,
                           timeout=None,
                           auth=None,
                           config_file=None,
                           proxy=None,
                           user_agent=None,
                           transport=None,
                           verify=True):
    """Creates a SoftLayer API client using your environment.

    Settings are loaded via keyword arguments, environmental variables
    and config file.

    :param username: an optional API username if you wish to bypass the
        package's built-in username
    :param api_key: an optional API key if you wish to bypass the
        package's built in API key
    :param endpoint_url: the API endpoint base URL you wish to connect
        to. Set this to API_PRIVATE_ENDPOINT to connect via SoftLayer's
        private network.
    :param proxy: proxy to be used to make API calls
    :param integer timeout: timeout for API requests
    :param auth: an object which responds to get_headers() to be
        inserted into the xml-rpc headers. Example:
        `BasicAuthentication`
    :param config_file: A path to a configuration file used to load
        settings
    :param user_agent: an optional User Agent to report when making API
        calls if you wish to bypass the packages built in User Agent
        string
    :param transport: An object that's callable with this signature:
        transport(SoftLayer.transports.Request)
    :param bool verify: decide to verify the server's SSL/TLS cert. DO
        NOT SET TO FALSE WITHOUT UNDERSTANDING THE IMPLICATIONS.

    Usage:

        >>> import SoftLayer
        >>> client = SoftLayer.create_client_from_env()
        >>> resp = client.call('Account', 'getObject')
        >>> resp['companyName']
        'Your Company'

    """
    # Merge explicit kwargs, environment variables, and the config file.
    settings = config.get_client_settings(username=username,
                                          api_key=api_key,
                                          endpoint_url=endpoint_url,
                                          timeout=timeout,
                                          proxy=proxy,
                                          verify=verify,
                                          config_file=config_file)

    if transport is None:
        url = settings.get('endpoint_url')
        if url is not None and '/rest' in url:
            # If this looks like a rest endpoint, use the rest transport
            transport = transports.RestTransport(
                endpoint_url=settings.get('endpoint_url'),
                proxy=settings.get('proxy'),
                timeout=settings.get('timeout'),
                user_agent=user_agent,
                verify=verify,
            )
        else:
            # Default the transport to use XMLRPC
            transport = transports.XmlRpcTransport(
                endpoint_url=settings.get('endpoint_url'),
                proxy=settings.get('proxy'),
                timeout=settings.get('timeout'),
                user_agent=user_agent,
                verify=verify,
            )

    # If we have enough information to make an auth driver, let's do it
    if auth is None and settings.get('username') and settings.get('api_key'):
        # NOTE(kmcdonald): some transports mask other transports, so this is
        # a way to find the 'real' one
        real_transport = getattr(transport, 'transport', transport)

        if isinstance(real_transport, transports.XmlRpcTransport):
            auth = slauth.BasicAuthentication(
                settings.get('username'),
                settings.get('api_key'),
            )

        elif isinstance(real_transport, transports.RestTransport):
            auth = slauth.BasicHTTPAuthentication(
                settings.get('username'),
                settings.get('api_key'),
            )

    return BaseClient(auth=auth, transport=transport)
Creates a SoftLayer API client using your environment. Settings are loaded via keyword arguments, environemtal variables and config file. :param username: an optional API username if you wish to bypass the package's built-in username :param api_key: an optional API key if you wish to bypass the package's built in API key :param endpoint_url: the API endpoint base URL you wish to connect to. Set this to API_PRIVATE_ENDPOINT to connect via SoftLayer's private network. :param proxy: proxy to be used to make API calls :param integer timeout: timeout for API requests :param auth: an object which responds to get_headers() to be inserted into the xml-rpc headers. Example: `BasicAuthentication` :param config_file: A path to a configuration file used to load settings :param user_agent: an optional User Agent to report when making API calls if you wish to bypass the packages built in User Agent string :param transport: An object that's callable with this signature: transport(SoftLayer.transports.Request) :param bool verify: decide to verify the server's SSL/TLS cert. DO NOT SET TO FALSE WITHOUT UNDERSTANDING THE IMPLICATIONS. Usage: >>> import SoftLayer >>> client = SoftLayer.create_client_from_env() >>> resp = client.call('Account', 'getObject') >>> resp['companyName'] 'Your Company'
def splice(self, mark, newdata):
    """Replace the data after the marked location with the specified
    data."""
    self.jump_to(mark)
    # Keep everything up to the mark, then append the replacement.
    head = self._data[:self._offset]
    self._data = head + bytearray(newdata)
Replace the data after the marked location with the specified data.
def _verify_run(out, cmd=None): ''' Crash to the log if command execution was not successful. ''' if out.get('retcode', 0) and out['stderr']: if cmd: log.debug('Command: \'%s\'', cmd) log.debug('Return code: %s', out.get('retcode')) log.debug('Error output:\n%s', out.get('stderr', 'N/A')) raise CommandExecutionError(out['stderr'])
Crash to the log if command execution was not successful.
def boolmask(indices, maxval=None):
    """
    Constructs a list of booleans where an item is True if its position is in
    `indices` otherwise it is False.

    Args:
        indices (list): list of integer indices

        maxval (int): length of the returned list. If not specified this is
            inferred from `indices`

    Note:
        In the future the arg `maxval` may change its name to `shape`

    Returns:
        list: mask: list of booleans. mask[idx] is True if idx in indices

    Example:
        >>> import ubelt as ub
        >>> indices = [0, 1, 4]
        >>> mask = ub.boolmask(indices, maxval=6)
        >>> assert mask == [True, True, False, False, True, False]
        >>> mask = ub.boolmask(indices)
        >>> assert mask == [True, True, False, False, True]
    """
    # Materialize once so generators survive both the max() and the scan.
    index_set = set(indices)
    if maxval is None:
        maxval = max(index_set) + 1
    return [pos in index_set for pos in range(maxval)]
Constructs a list of booleans where an item is True if its position is in `indices` otherwise it is False. Args: indices (list): list of integer indices maxval (int): length of the returned list. If not specified this is inferred from `indices` Note: In the future the arg `maxval` may change its name to `shape` Returns: list: mask: list of booleans. mask[idx] is True if idx in indices Example: >>> import ubelt as ub >>> indices = [0, 1, 4] >>> mask = ub.boolmask(indices, maxval=6) >>> assert mask == [True, True, False, False, True, False] >>> mask = ub.boolmask(indices) >>> assert mask == [True, True, False, False, True]
def build_image_list(config, image, imagefile, all_local, include_allanchore, dockerfile=None, exclude_file=None):
    """Given option inputs from the cli, construct a list of image ids. Includes all found with no exclusion logic"""

    # Exactly one image source is required: <image>, <imagefile>, or <all>.
    if not image and not (imagefile or all_local):
        raise click.BadOptionUsage('No input found for image source. One of <image>, <imagefile>, or <all> must be specified')

    if image and imagefile:
        raise click.BadOptionUsage('Only one of <image> and <imagefile> can be specified')

    # Images named in the exclude file are removed again at the end.
    filter_images = []
    if exclude_file:
        with open(exclude_file) as f:
            for line in f.readlines():
                filter_images.append(line.strip())

    imagelist = {}
    if image:
        imagelist[image] = {'dockerfile':dockerfile}

    if imagefile:
        filelist = anchore_utils.read_kvfile_tolist(imagefile)
        for i in range(len(filelist)):
            l = filelist[i]
            imageId = l[0]
            try:
                dfile = l[1]
            # NOTE(review): bare except also swallows KeyboardInterrupt;
            # ``except IndexError`` would be the targeted form here.
            except:
                dfile = None
            imagelist[imageId] = {'dockerfile':dfile}

    if all_local:
        docker_cli = contexts['docker_cli']
        if docker_cli:
            # Non-dangling local images only, honoring the exclude list.
            for f in docker_cli.images(all=True, quiet=True, filters={'dangling': False}):
                if f not in imagelist and f not in filter_images:
                    imagelist[f] = {'dockerfile':None}
        else:
            raise Exception("Could not load any images from local docker host - is docker running?")

    if include_allanchore:
        # Union in every image already known to the anchore DB.
        ret = contexts['anchore_db'].load_all_images().keys()
        if ret and len(ret) > 0:
            for l in list(set(imagelist.keys()) | set(ret)):
                imagelist[l] = {'dockerfile':None}

    # Remove excluded items
    for excluded in filter_images:
        docker_cli = contexts['docker_cli']
        if not docker_cli:
            raise Exception("Could not query docker - is docker running?")
        for img in docker_cli.images(name=excluded, quiet=True):
            imagelist.pop(img, None)

    return imagelist
Given option inputs from the cli, construct a list of image ids. Includes all found with no exclusion logic
def get_bool(self, key, default=UndefinedKey):
    """Return boolean representation of value found at key

    :param key: key to use (dot separated). E.g., a.b.c
    :type key: basestring
    :param default: default value if key not found
    :type default: bool
    :return: boolean value
    :type return: bool
    :raises ConfigException: if the value cannot be interpreted as a boolean
    """
    # String conversions as per API-recommendations:
    # https://github.com/typesafehub/config/blob/master/HOCON.md#automatic-type-conversions
    bool_conversions = {
        None: None,
        'true': True, 'yes': True, 'on': True,
        'false': False, 'no': False, 'off': False
    }
    string_value = self.get_string(key, default)
    # Bug fix: a boolean ``default`` (as documented above) used to crash
    # on ``.lower()`` when the key was missing; pass bools through as-is.
    if isinstance(string_value, bool):
        return string_value
    if string_value is not None:
        string_value = string_value.lower()
    try:
        return bool_conversions[string_value]
    except KeyError:
        raise ConfigException(
            u"{key} does not translate to a Boolean value".format(key=key))
Return boolean representation of value found at key :param key: key to use (dot separated). E.g., a.b.c :type key: basestring :param default: default value if key not found :type default: bool :return: boolean value :type return: bool
def get_n_cluster_per_event_hist(cluster_table):
    '''Calculates the number of cluster in every event.

    Parameters
    ----------
    cluster_table : pytables.table

    Returns
    -------
    numpy.Histogram
    '''
    logging.info("Histogram number of cluster per event")
    # Column 1 holds the per-event cluster count.
    n_cluster = analysis_utils.get_n_cluster_in_events(cluster_table)[:, 1]
    bin_edges = range(0, np.max(n_cluster) + 2)
    return np.histogram(n_cluster, bins=bin_edges)
Calculates the number of cluster in every event. Parameters ---------- cluster_table : pytables.table Returns ------- numpy.Histogram
def integer(self, x):
    """ returns a plain integer """
    # Hex strings are decoded big-endian; avoid shadowing builtin `hex`.
    if type(x) is str:
        raw = binascii.unhexlify(x)
        return int.from_bytes(raw, 'big')
    if isinstance(x, FiniteField.Value):
        return x.value
    return x
returns a plain integer
def get_profile(self, ann_el_demand_per_sector):
    """
    Get the profiles for the given annual demand

    Parameters
    ----------
    ann_el_demand_per_sector : dictionary
        Key: sector, value: annual value

    Returns
    -------
    pandas.DataFrame : Table with all profiles
    """
    annual = pd.Series(ann_el_demand_per_sector)
    # Scale each sector's normalised profile by its annual demand,
    # drop sectors with no demand, and apply the factor of 4.
    scaled = self.slp_frame.multiply(annual, axis=1)
    return scaled.dropna(how='all', axis=1) * 4
Get the profiles for the given annual demand Parameters ---------- ann_el_demand_per_sector : dictionary Key: sector, value: annual value Returns ------- pandas.DataFrame : Table with all profiles
def iob2json(input_data, n_sents=10, *args, **kwargs):
    """
    Convert IOB files into JSON format for use with train cli.
    """
    # NOTE(review): ``docs`` starts empty and ``input_data`` is never
    # read, so ``minibatch`` iterates over nothing and this always
    # returns [] -- the IOB parsing step appears to be missing.  TODO:
    # confirm against the upstream implementation.
    docs = []
    for group in minibatch(docs, n_sents):
        group = list(group)
        first = group.pop(0)
        to_extend = first["paragraphs"][0]["sentences"]
        # NOTE(review): ``group[1:]`` skips one element since ``pop(0)``
        # already removed the first item -- likely should be ``group``.
        for sent in group[1:]:
            to_extend.extend(sent["paragraphs"][0]["sentences"])
        docs.append(first)
    return docs
Convert IOB files into JSON format for use with train cli.
def getmember(self, name):
    """Return a TarInfo object for member `name'. If `name' can not be
    found in the archive, KeyError is raised. If a member occurs more
    than once in the archive, its last occurrence is assumed to be the
    most up-to-date version.
    """
    info = self._getmember(name)
    if info is not None:
        return info
    raise KeyError("filename %r not found" % name)
Return a TarInfo object for member `name'. If `name' can not be found in the archive, KeyError is raised. If a member occurs more than once in the archive, its last occurrence is assumed to be the most up-to-date version.
def state(self, abbr: bool = False) -> str:
    """Get a random administrative district of country.

    :param abbr: Return ISO 3166-2 code.
    :return: Administrative district.
    """
    key = 'abbr' if abbr else 'name'
    return self.random.choice(self._data['state'][key])
Get a random administrative district of country. :param abbr: Return ISO 3166-2 code. :return: Administrative district.
def write_min_max(self, file):
    """ Writes minimum and maximum values to a table.

    Renders a three-column ReST-style table (Attribute / Minimum /
    Maximum) of voltage magnitude and angle extrema to ``file``.
    """
    report = CaseReport(self.case)
    # Column layout for the fixed-width table.
    col1_header = "Attribute"
    col1_width = 19
    col2_header = "Minimum"
    col3_header = "Maximum"
    col_width = 22
    sep = "="*col1_width +" "+ "="*col_width +" "+ "="*col_width + "\n"
    # Row headers
    file.write(sep)
    file.write("%s" % col1_header.center(col1_width))
    file.write(" ")
    file.write("%s" % col2_header.center(col_width))
    file.write(" ")
    file.write("%s" % col3_header.center(col_width))
    file.write("\n")
    file.write(sep)
    # Rows
    # Each report attribute yields a (value, bus index) pair.
    min_val, min_i = getattr(report, "min_v_magnitude")
    max_val, max_i = getattr(report, "max_v_magnitude")
    file.write("%s %7.3f p.u. @ bus %2d %7.3f p.u. @ bus %2d\n" % ("Voltage Amplitude".ljust(col1_width), min_val, min_i, max_val, max_i))
    min_val, min_i = getattr(report, "min_v_angle")
    max_val, max_i = getattr(report, "max_v_angle")
    file.write("%s %16.3f %16.3f\n" % ("Voltage Phase Angle".ljust(col1_width), min_val, max_val))
    file.write(sep)
    file.write("\n")
    del report
Writes minimum and maximum values to a table.
def langids(self):
    """ Return the USB device's supported language ID codes.

    These are 16-bit codes familiar to Windows developers, where for
    example instead of en-US you say 0x0409. USB_LANGIDS.pdf on the
    usb.org developer site for more info. String requests using a
    LANGID not in this array should not be sent to the device.

    This property will cause some USB traffic the first time it is
    accessed and cache the resulting value for future use.
    """
    # Return the cached value when we already fetched it.
    if self._langids is not None:
        return self._langids
    try:
        self._langids = util.get_langids(self)
    except USBError:
        # Device refused the request; cache an empty tuple so we
        # don't retry on every access.
        self._langids = ()
    return self._langids
Return the USB device's supported language ID codes. These are 16-bit codes familiar to Windows developers, where for example instead of en-US you say 0x0409. USB_LANGIDS.pdf on the usb.org developer site for more info. String requests using a LANGID not in this array should not be sent to the device. This property will cause some USB traffic the first time it is accessed and cache the resulting value for future use.
def _serialize_json(obj, fp):
    """ Serialize ``obj`` as a JSON formatted stream to ``fp`` """
    # ``serialize`` handles types the stock encoder can't.
    json.dump(obj, fp, default=serialize, indent=4)
Serialize ``obj`` as a JSON formatted stream to ``fp``
def legacy_signature(**kwargs_mapping):
    """
    This decorator makes it possible to call a function using old argument
    names when they are passed as keyword arguments.

    @legacy_signature(old_arg1='arg1', old_arg2='arg2')
    def func(arg1, arg2=1):
        return arg1 + arg2

    func(old_arg1=1) == 2
    func(old_arg1=1, old_arg2=2) == 3
    """
    def signature_decorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            # Rename any legacy keyword to its new name; pass the rest
            # through untouched.
            renamed = {}
            for key, value in kwargs.items():
                renamed[kwargs_mapping.get(key, key)] = value
            return f(*args, **renamed)
        return wrapper
    return signature_decorator
This decorator makes it possible to call a function using old argument names when they are passed as keyword arguments. @legacy_signature(old_arg1='arg1', old_arg2='arg2') def func(arg1, arg2=1): return arg1 + arg2 func(old_arg1=1) == 2 func(old_arg1=1, old_arg2=2) == 3
def get_versions():
    """Return the list of supported PDF versions.
    See :meth:`restrict_to_version`.

    :return: A list of :ref:`PDF_VERSION` strings.

    *New in cairo 1.10.*

    """
    # Out-parameters for cairo: an array pointer and its length.
    versions_ptr = ffi.new('cairo_pdf_version_t const **')
    count_ptr = ffi.new('int *')
    cairo.cairo_pdf_get_versions(versions_ptr, count_ptr)
    supported = versions_ptr[0]
    return [supported[i] for i in range(count_ptr[0])]
Return the list of supported PDF versions. See :meth:`restrict_to_version`. :return: A list of :ref:`PDF_VERSION` strings. *New in cairo 1.10.*
def start_auth(self, context, internal_req):
    """
    See super class method satosa.backends.base.BackendModule#start_auth
    :type context: satosa.context.Context
    :type internal_req: satosa.internal.InternalData
    :rtype: satosa.response.Response
    """
    # A target IdP pinned on the context wins outright.
    entity_id = context.get_decoration(Context.KEY_TARGET_ENTITYID)
    if entity_id:
        return self.authn_request(context, entity_id)

    # With exactly one IdP in static metadata (no MDQ lookup), skip the
    # discovery service and go straight to it.
    idps = self.sp.metadata.identity_providers()
    if len(idps) == 1 and "mdq" not in self.config["sp_config"]["metadata"]:
        return self.authn_request(context, idps[0])

    return self.disco_query()
See super class method satosa.backends.base.BackendModule#start_auth :type context: satosa.context.Context :type internal_req: satosa.internal.InternalData :rtype: satosa.response.Response
def pop_first_arg(argv):
    """
    find first positional arg (does not start with -),
    take it out of array and return it separately

    returns (arg, array)
    """
    # First entry that is not an option flag, if any.
    positional = next((a for a in argv if not a.startswith('-')), None)
    if positional is not None:
        # Mutates the caller's list, matching the original contract.
        argv.remove(positional)
    return (positional, argv)
find first positional arg (does not start with -), take it out of array and return it separately returns (arg, array)
def rpush(self, name, *values):
    """
    Push the value into the list from the *right* side

    :param name: str     the name of the redis key
    :param values: a list of values or single value to push
    :return: Future()
    """
    with self.pipe as pipe:
        # Encode every value before handing them to the pipeline.
        encoded = [self.valueparse.encode(v)
                   for v in self._parse_values(values)]
        return pipe.rpush(self.redis_key(name), *encoded)
Push the value into the list from the *right* side :param name: str the name of the redis key :param values: a list of values or single value to push :return: Future()
def responses_of(self, request):
    """
    Find the responses corresponding to a request.
    This function isn't actually used by VCR internally, but is
    provided as an external API.
    """
    matches = [resp for _index, resp in self._responses(request)]
    if not matches:
        # The cassette doesn't contain the request asked for.
        raise UnhandledHTTPRequestError(
            "The cassette (%r) doesn't contain the request (%r) asked for"
            % (self._path, request)
        )
    return matches
Find the responses corresponding to a request. This function isn't actually used by VCR internally, but is provided as an external API.
def mean_field(self):
    """Calculates mean field"""
    fields = []
    for operators in (self.oper['O'], self.oper['O_d']):
        expectations = np.array([self.expected(op) for op in operators])
        # Suppress numerical noise below 1e-10.
        expectations[abs(expectations) < 1e-10] = 0.
        fields.append(expectations * self.param['ekin'])
    return np.array(fields)
Calculates mean field
def raise_on_errors(errors, level=logging.CRITICAL):
    """Raise a CoTError if errors.

    Helper function because I had this code block everywhere.

    Args:
        errors (list): the error messages
        level (int, optional): the log level to use.  Defaults to
            logging.CRITICAL

    Raises:
        CoTError: if errors is non-empty

    """
    if not errors:
        return
    message = "\n".join(errors)
    log.log(level, message)
    raise CoTError(message)
Raise a CoTError if errors. Helper function because I had this code block everywhere. Args: errors (list): the error errors level (int, optional): the log level to use. Defaults to logging.CRITICAL Raises: CoTError: if errors is non-empty
def _get_algorithm_info(self, algorithm_info): '''Get algorithm info''' if algorithm_info['algorithm'] not in self.ALGORITHMS: raise Exception('Algorithm not supported: %s' % algorithm_info['algorithm']) algorithm = self.ALGORITHMS[algorithm_info['algorithm']] algorithm_info.update(algorithm) return algorithm_info
Get algorithm info
def clear_dns_cache(self,
                    host: Optional[str]=None,
                    port: Optional[int]=None) -> None:
    """Remove specified host/port or clear all dns local cache."""
    both_given = host is not None and port is not None
    neither_given = host is None and port is None
    if both_given:
        self._cached_hosts.remove((host, port))
    elif neither_given:
        self._cached_hosts.clear()
    else:
        # Supplying only one of the pair is ambiguous.
        raise ValueError("either both host and port "
                         "or none of them are allowed")
Remove specified host/port or clear all dns local cache.
def update_and_transform(self, y, exogenous, **kwargs):
    """Update the params and return the transformed arrays.

    No parameters actually change in the Fourier featurizer; forecasts are
    composed for ``n_periods=len(y)`` and then ``n_`` is advanced.

    Parameters
    ----------
    y : array-like or None, shape=(n_samples,)
        The endogenous (time-series) array.

    exogenous : array-like or None, shape=(n_samples, n_features)
        The exogenous array of additional covariates.

    **kwargs : keyword args
        Keyword arguments required by the transform function.
    """
    check_is_fitted(self, "p_")
    self._check_endog(y)

    n_obs = len(y)
    _, Xt = self.transform(y, exogenous, n_periods=n_obs, **kwargs)

    # Advance the sample counter only *after* computing the exog features.
    self.n_ += n_obs

    return y, Xt
Update the params and return the transformed arrays Since no parameters really get updated in the Fourier featurizer, all we do is compose forecasts for ``n_periods=len(y)`` and then update ``n_``. Parameters ---------- y : array-like or None, shape=(n_samples,) The endogenous (time-series) array. exogenous : array-like or None, shape=(n_samples, n_features) The exogenous array of additional covariates. **kwargs : keyword args Keyword arguments required by the transform function.
def as_dict(self):
    """Return a dict representation of this Xmu object."""
    serialized = MSONable.as_dict(self)
    # numpy arrays are not serialization-friendly; store nested lists instead.
    serialized["data"] = self.data.tolist()
    return serialized
Returns dict representations of Xmu object
def in_stroke(self, x, y):
    """Test whether (x, y) lies inside the area that a :meth:`stroke`
    operation would affect, given the current path and stroking parameters.

    Surface dimensions and clipping are not taken into account.

    See :meth:`stroke`, :meth:`set_line_width`, :meth:`set_line_join`,
    :meth:`set_line_cap`, :meth:`set_dash`, and :meth:`stroke_preserve`.

    :param x: X coordinate of the point to test
    :param y: Y coordinate of the point to test
    :type x: float
    :type y: float
    :returns: A boolean.
    """
    hit = cairo.cairo_in_stroke(self._pointer, x, y)
    # The C call returns an int; normalize to a Python bool.
    return bool(hit)
Tests whether the given point is inside the area that would be affected by a :meth:`stroke` operation given the current path and stroking parameters. Surface dimensions and clipping are not taken into account. See :meth:`stroke`, :meth:`set_line_width`, :meth:`set_line_join`, :meth:`set_line_cap`, :meth:`set_dash`, and :meth:`stroke_preserve`. :param x: X coordinate of the point to test :param y: Y coordinate of the point to test :type x: float :type y: float :returns: A boolean.
def add_source(self, evidence_line, source, label=None, src_type=None):
    """Attach *source* to *evidence_line* and describe the source node.

    Applies the triples:
    <evidence> <dc:source> <source>
    <source> <rdf:type> <type>
    <source> <rdfs:label> "label"

    TODO this should belong in a higher level class

    :param evidence_line: str curie
    :param source: str source as curie
    :param label: optional, str label
    :param src_type: optional, str type as curie

    :return: None
    """
    self.graph.addTriple(evidence_line, self.globaltt['source'], source)
    # Also materialize the source itself as an individual with label/type.
    self.model.addIndividualToGraph(source, label, src_type)
    return
Applies the triples: <evidence> <dc:source> <source> <source> <rdf:type> <type> <source> <rdfs:label> "label" TODO this should belong in a higher level class :param evidence_line: str curie :param source: str source as curie :param label: optional, str type as curie :param type: optional, str type as curie :return: None
def keyword(self, text): """Push a keyword onto the token queue.""" cls = self.KEYWORDS[text] self.push_token(cls(text, self.lineno, self.offset))
Push a keyword onto the token queue.
def connect(self):
    """Return a psycopg2 connection, creating and caching it on first use.

    The connection is configured with dict-style rows, autocommit, and
    hstore support.

    :return: psycopg2.connect instance
    """
    if not self._conn:
        conn = psycopg2.connect(
            self.config,
            cursor_factory=psycopg2.extras.RealDictCursor,
        )
        conn.set_session(autocommit=True)
        psycopg2.extras.register_hstore(conn)
        self._conn = conn
    return self._conn
Construct the psycopg2 connection instance :return: psycopg2.connect instance
def read_namespaced_ingress_status(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_ingress_status  # noqa: E501

    read status of the specified Ingress  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_ingress_status(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Ingress (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1beta1Ingress
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous: hand the request thread straight back to the caller.
        return self.read_namespaced_ingress_status_with_http_info(name, namespace, **kwargs)  # noqa: E501
    # Synchronous: unwrap the (data, status, headers) tuple to data only.
    (data) = self.read_namespaced_ingress_status_with_http_info(name, namespace, **kwargs)  # noqa: E501
    return data
read_namespaced_ingress_status # noqa: E501 read status of the specified Ingress # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_ingress_status(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Ingress (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1beta1Ingress If the method is called asynchronously, returns the request thread.
def poisson_ll(data, means):
    """Calculate the Poisson log-likelihood of each cell under each cluster.

    Args:
        data (array): 2d numpy array of genes x cells
        means (array): 2d numpy array of genes x k

    Returns:
        cells x k array of log-likelihood for each cell/cluster pair
    """
    if sparse.issparse(data):
        return sparse_poisson_ll(data, means)
    n_cells = data.shape[1]
    n_clusters = means.shape[1]
    ll = np.zeros((n_cells, n_clusters))
    for k in range(n_clusters):
        # Broadcast cluster k's mean vector to genes x cells; eps guards log(0).
        cluster_means = np.tile(means[:, k], (n_cells, 1)).transpose() + eps
        # Log-likelihood up to terms constant in the cluster assignment
        # (the gammaln(data+1) normalizer is deliberately omitted).
        ll[:, k] = np.sum(xlogy(data, cluster_means) - cluster_means, 0)
    return ll
Calculates the Poisson log-likelihood. Args: data (array): 2d numpy array of genes x cells means (array): 2d numpy array of genes x k Returns: cells x k array of log-likelihood for each cell/cluster pair
def is_volatile(self):
    """
    True if combination of field access properties result in a field that
    should be interpreted as volatile.
    (Any hardware-writable field is inherently volatile)
    """
    hw = self.get_property('hw')
    hw_writable = hw in (
        rdltypes.AccessType.rw,
        rdltypes.AccessType.rw1,
        rdltypes.AccessType.w,
        rdltypes.AccessType.w1,
    )
    # Any of these mechanisms lets hardware change the field underneath software.
    return (
        hw_writable
        or self.get_property('counter')
        or (self.get_property('next') is not None)
        or self.get_property('hwset')
        or self.get_property('hwclr')
    )
True if combination of field access properties result in a field that should be interpreted as volatile. (Any hardware-writable field is inherently volatile)
def paths_wanted(self):
    """The set of paths where we expect to find missing nodes.

    Builds one address per missing node, targeting all shards.
    """
    # Set comprehension instead of set(generator): same result, clearer
    # and avoids the intermediate generator wrapper (ruff C401).
    return {address.new(b, target='all') for b in self.missing_nodes}
The set of paths where we expect to find missing nodes.
def extract(self, item, list_article_candidate): """Compares how often any language was detected. :param item: The corresponding NewscrawlerItem :param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted :return: A string, the language which was most frequently detected """ # Save extracted languages in list languages_extracted = [] # Save the extracted language of newspaper in extra variable, because newspaper extract meta-language # which is very accurate. language_newspaper = None for article_candidate in list_article_candidate: if article_candidate.language is not None: languages_extracted.append(article_candidate.language) if article_candidate.extractor == "newspaper": language_newspaper = article_candidate.language if not languages_extracted: return None # Create a set of the extracted languages, so every lang appears once languages_extracted_set = set(languages_extracted) # Count how often every language has been extracted languages_extracted_number = [] for language in languages_extracted_set: languages_extracted_number.append((languages_extracted.count(language), language)) if not (languages_extracted_number): return None # If there is no favorite language, return the language extracted by newspaper if max(languages_extracted_number)[0] == min(languages_extracted_number)[0] and language_newspaper is not None: return language_newspaper if languages_extracted_number: return (max(languages_extracted_number))[1] else: return None
Compares how often any language was detected. :param item: The corresponding NewscrawlerItem :param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted :return: A string, the language which was most frequently detected
def add_to_class(self, cls, name):
    '''
    Hook that replaces the `Field` attribute on a class with a named
    ``FieldDescriptor``. Called by the metaclass during construction of the
    ``Model``.

    :param cls: the model class this field is being attached to
    :param name: the attribute name under which the field was declared
    '''
    # Remember where this field lives so it can report its identity later.
    self._name = name
    self._container_model_class = cls
    # Replace the raw Field attribute with a descriptor so that instance
    # attribute access is mediated by FieldDescriptor.
    setattr(cls, name, FieldDescriptor(self))
    # Mark the field as bound to a model.
    self._bound = True
Hook that replaces the `Field` attribute on a class with a named ``FieldDescriptor``. Called by the metaclass during construction of the ``Model``.
def cmd_position(self, args):
    '''position x-m y-m z-m'''
    # Guard clause: exactly three coordinates are required.
    # (The original re-checked len(args) == 3 immediately after this
    # guard-return; that second check was redundant and has been removed.)
    if len(args) != 3:
        print("Usage: position x y z (meters)")
        return

    x_m = float(args[0])
    y_m = float(args[1])
    z_m = float(args[2])
    print("x:%f, y:%f, z:%f" % (x_m, y_m, z_m))

    self.master.mav.set_position_target_local_ned_send(
        0,  # system time in milliseconds
        1,  # target system
        0,  # target component
        8,  # coordinate frame MAV_FRAME_BODY_NED
        3576,  # type mask (pos only)
        x_m, y_m, z_m,  # position x,y,z
        0, 0, 0,  # velocity x,y,z
        0, 0, 0,  # accel x,y,z
        0, 0)  # yaw, yaw rate
position x-m y-m z-m
def render(self, rect, data): """ Displays the elements according to the align properties. """ # Make sure we're aligned correctly if self.horizontal_align not in VerticalLM._VALID_ALIGN_HORIZONTAL: raise ValueError('Horizontal align is not valid.') if self.vertical_align not in VerticalLM._VALID_ALIGN_VERTICAL: raise ValueError('Vertical align is not valid.') # Work out the extra height we have to distribute extra_height = rect.h - self.get_minimum_size(data).y num_elements = len(self.elements) if num_elements == 0: return elif num_elements > 1: per_margin = 1.0 / float(num_elements-1) else: per_margin = 0.0 per_element = 1.0 / float(num_elements) # Work out the starting y coordinate y = rect.y if self.vertical_align == VerticalLM.ALIGN_MIDDLE: y = rect.y + extra_height*0.5 elif self.vertical_align == VerticalLM.ALIGN_TOP: y = rect.y + extra_height # Render each child element for element in reversed(self.elements): size = element.get_minimum_size(data) # Work out the x-coordinates if self.horizontal_align == VerticalLM.ALIGN_LEFT: x = rect.x w = size.x elif self.horizontal_align == VerticalLM.ALIGN_CENTER: x = rect.center - size.x*0.5 w = size.x elif self.horizontal_align == VerticalLM.ALIGN_RIGHT: x = rect.right - size.x w = size.x else: assert self.horizontal_align == VerticalLM.ALIGN_GROW x = rect.x w = rect.w # Work out the y-coordinates if self.vertical_align in VerticalLM._ALIGN_SIMPLE_SET: h = size.y next_y = y + size.y + self.margin elif self.vertical_align == VerticalLM.ALIGN_EQUAL_SPACING: h = size.y next_y = y + size.y + self.margin + extra_height*per_margin else: assert self.vertical_align == VerticalLM.ALIGN_EQUAL_GROWTH h = size.y + extra_height*per_element next_y = y + h + self.margin # Render and move on. element.render(datatypes.Rectangle(x, y, w, h), data) y = next_y
Displays the elements according to the align properties.
def _check_no_current_table(new_obj, current_table):
    """
    Raises exception if we try to add a relation or a column with no
    current table.
    """
    if current_table is not None:
        return
    msg = 'Cannot add {} before adding table'
    # Relations and columns must always hang off an existing table.
    if isinstance(new_obj, Relation):
        raise NoCurrentTableException(msg.format('relation'))
    if isinstance(new_obj, Column):
        raise NoCurrentTableException(msg.format('column'))
Raises exception if we try to add a relation or a column with no current table.
def set_policy(name, table='filter', family='ipv4', **kwargs):
    '''
    .. versionadded:: 2014.1.0

    Sets the default policy for iptables firewall tables

    table
        The table that owns the chain that should be modified

    family
        Networking family, either ipv4 or ipv6

    policy
        The requested table policy

    '''
    # Standard Salt state return structure.
    ret = {'name': name,
        'changes': {},
        'result': None,
        'comment': ''}

    # Strip Salt-internal keywords (require, watch, ...) before passing
    # kwargs on to the execution module.
    for ignore in _STATE_INTERNAL_KEYWORDS:
        if ignore in kwargs:
            del kwargs[ignore]

    # Already in the desired state: nothing to do.
    if __salt__['iptables.get_policy'](
            table,
            kwargs['chain'],
            family) == kwargs['policy']:
        ret['result'] = True
        ret['comment'] = ('iptables default policy for chain {0} on table {1} for {2} already set to {3}'
                          .format(kwargs['chain'], table, family, kwargs['policy']))
        return ret
    # Test mode: report what would change without touching the firewall.
    if __opts__['test']:
        ret['comment'] = 'iptables default policy for chain {0} on table {1} for {2} needs to be set to {3}'.format(
            kwargs['chain'],
            table,
            family,
            kwargs['policy']
        )
        return ret
    # NOTE(review): iptables.set_policy appears to return falsy output on
    # success (empty command output), hence `if not ...` — confirm against
    # the execution module.
    if not __salt__['iptables.set_policy'](
            table,
            kwargs['chain'],
            kwargs['policy'],
            family):
        ret['changes'] = {'locale': name}
        ret['result'] = True
        ret['comment'] = 'Set default policy for {0} to {1} family {2}'.format(
            kwargs['chain'],
            kwargs['policy'],
            family
        )
        # Optionally persist the new policy to the saved ruleset.
        if 'save' in kwargs:
            if kwargs['save']:
                __salt__['iptables.save'](filename=None, family=family)
                ret['comment'] = 'Set and saved default policy for {0} to {1} family {2}'.format(
                    kwargs['chain'],
                    kwargs['policy'],
                    family
                )
        return ret
    else:
        ret['result'] = False
        ret['comment'] = 'Failed to set iptables default policy'
        return ret
.. versionadded:: 2014.1.0 Sets the default policy for iptables firewall tables table The table that owns the chain that should be modified family Networking family, either ipv4 or ipv6 policy The requested table policy
def _run_grid_multithread(self, func, iterables): ''' running case with mutil process to support selenium grid-mode(multiple web) and appium grid-mode(multiple devices). @param func: function object @param iterables: iterable objects ''' f = lambda x: threading.Thread(target = func,args = (x,)) threads = map(f, iterables) for thread in threads: thread.setDaemon(True) thread.start() thread.join()
running cases with multiple threads to support selenium grid-mode (multiple web) and appium grid-mode (multiple devices). @param func: function object @param iterables: iterable objects
def create(self, image_path, size=1024, sudo=False):
    '''Create a new image at *image_path*.

    Parameters
    ==========
    image_path: full path to the image to create
    size: image size in MiB, default is 1024 MiB
    sudo: run the creation command with sudo
    '''
    from spython.utils import check_install
    check_install()

    cmd = self.init_command('image.create') + ['--size', str(size), image_path]
    output = self.run_command(cmd, sudo=sudo)
    self.println(output)

    # The command can fail quietly; verify the image file actually exists.
    if not os.path.exists(image_path):
        bot.exit("Could not create image %s" %image_path)

    return image_path
create will create a new image Parameters ========== image_path: full path to image size: image size in MiB, default is 1024 MiB filesystem: supported file systems ext3/ext4 (ext2/ext3; default ext3)
def node_inclusion_predicate_builder(nodes: Iterable[BaseEntity]) -> NodePredicate:
    """Build a function that returns true for the given nodes."""
    allowed = set(nodes)

    @node_predicate
    def node_inclusion_predicate(node: BaseEntity) -> bool:
        """Return true if the node is in the given set of nodes."""
        return node in allowed

    return node_inclusion_predicate
Build a function that returns true for the given nodes.
def get_buckets(min_length, max_length, bucket_count):
    '''Return *bucket_count* ascending length boundaries ending at *max_length*.

    A non-positive *bucket_count* yields a single bucket covering everything
    up to *max_length*.
    '''
    if bucket_count <= 0:
        return [max_length]
    step = int((max_length - min_length) // bucket_count)
    boundaries = [min_length + step * i for i in range(1, bucket_count + 1)]
    # Integer division can undershoot; pin the last boundary to max_length.
    boundaries[-1] = max_length
    return boundaries
Get bucket by length.
def process_response(self, request_id=None): """ Process the SAML Response sent by the IdP. :param request_id: Is an optional argument. Is the ID of the AuthNRequest sent by this SP to the IdP. :type request_id: string :raises: OneLogin_Saml2_Error.SAML_RESPONSE_NOT_FOUND, when a POST with a SAMLResponse is not found """ self.__errors = [] self.__error_reason = None if 'post_data' in self.__request_data and 'SAMLResponse' in self.__request_data['post_data']: # AuthnResponse -- HTTP_POST Binding response = OneLogin_Saml2_Response(self.__settings, self.__request_data['post_data']['SAMLResponse']) self.__last_response = response.get_xml_document() if response.is_valid(self.__request_data, request_id): self.__attributes = response.get_attributes() self.__nameid = response.get_nameid() self.__nameid_format = response.get_nameid_format() self.__session_index = response.get_session_index() self.__session_expiration = response.get_session_not_on_or_after() self.__last_message_id = response.get_id() self.__last_assertion_id = response.get_assertion_id() self.__last_authn_contexts = response.get_authn_contexts() self.__authenticated = True self.__last_assertion_not_on_or_after = response.get_assertion_not_on_or_after() else: self.__errors.append('invalid_response') self.__error_reason = response.get_error() else: self.__errors.append('invalid_binding') raise OneLogin_Saml2_Error( 'SAML Response not found, Only supported HTTP_POST Binding', OneLogin_Saml2_Error.SAML_RESPONSE_NOT_FOUND )
Process the SAML Response sent by the IdP. :param request_id: Is an optional argument. Is the ID of the AuthNRequest sent by this SP to the IdP. :type request_id: string :raises: OneLogin_Saml2_Error.SAML_RESPONSE_NOT_FOUND, when a POST with a SAMLResponse is not found
def add_torques(self, torques): '''Add torques for each degree of freedom in the skeleton. Parameters ---------- torques : list of float A list of the torques to add to each degree of freedom in the skeleton. ''' j = 0 for joint in self.joints: joint.add_torques( list(torques[j:j+joint.ADOF]) + [0] * (3 - joint.ADOF)) j += joint.ADOF
Add torques for each degree of freedom in the skeleton. Parameters ---------- torques : list of float A list of the torques to add to each degree of freedom in the skeleton.
def _value_with_fmt(self, val):
    """Convert numpy types to Python types for the Excel writers.

    Parameters
    ----------
    val : object
        Value to be written into cells

    Returns
    -------
    Tuple with the first element being the converted value and the second
        being an optional format
    """
    cell_format = None

    # Order matters: the pandas is_* checks distinguish numpy scalars from
    # bools/datetimes before falling back to plain string conversion.
    if is_integer(val):
        converted = int(val)
    elif is_float(val):
        converted = float(val)
    elif is_bool(val):
        converted = bool(val)
    elif isinstance(val, datetime):
        converted = val
        cell_format = self.datetime_format
    elif isinstance(val, date):
        converted = val
        cell_format = self.date_format
    elif isinstance(val, timedelta):
        # Excel stores durations as fractional days.
        converted = val.total_seconds() / float(86400)
        cell_format = '0'
    else:
        converted = compat.to_str(val)

    return converted, cell_format
Convert numpy types to Python types for the Excel writers. Parameters ---------- val : object Value to be written into cells Returns ------- Tuple with the first element being the converted value and the second being an optional format
def delete_component(self, id):
    """Delete component by id.

    :param id: ID of the component to use
    :type id: str
    :rtype: Response
    """
    endpoint = 'component/' + str(id)
    return self._session.delete(self._get_url(endpoint))
Delete component by id. :param id: ID of the component to use :type id: str :rtype: Response
def _exclude_pattern(self, pattern, anchor=True, prefix=None, is_regex=False): """Remove strings (presumably filenames) from 'files' that match 'pattern'. Other parameters are the same as for 'include_pattern()', above. The list 'self.files' is modified in place. Return True if files are found. This API is public to allow e.g. exclusion of SCM subdirs, e.g. when packaging source distributions """ found = False pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) for f in list(self.files): if pattern_re.search(f): self.files.remove(f) found = True return found
Remove strings (presumably filenames) from 'files' that match 'pattern'. Other parameters are the same as for 'include_pattern()', above. The list 'self.files' is modified in place. Return True if files are found. This API is public to allow e.g. exclusion of SCM subdirs, e.g. when packaging source distributions
def list_current_filter_set(self,raw=False):
    """Used to list the currently selected filter set.

    :param raw: if True, return the device's raw response line instead of
        the organized (parsed) filter list.
    :raises Dam1021Error: code 16, when the device does not answer with
        the expected umanager prompt within the timeout.
    """
    buf = []
    # Enter the device's umanager mode, then ask for the current filter list.
    self.open_umanager()
    self.ser.write(''.join((self.cmd_current_filter_list,self.cr)))
    # Read until the umanager prompt reappears, accumulating the stripped
    # response line (the trailing character is dropped by [:-1]).
    if self.read_loop(lambda x: x.endswith(self.umanager_prompt),self.timeout,lambda x,y,z: buf.append(y.rstrip()[:-1])):
        if raw:
            rv = buf = buf[0]
        else:
            # Parse the raw line into (return value, printable listing).
            rv, buf = self.filter_organizer(buf[0])
    else:
        # NOTE(review): raising here skips close_umanager(), leaving the
        # device in umanager mode — confirm whether that is intended.
        raise Dam1021Error(16,"Failed to list currently selected filter set")
    self.close_umanager()
    log.info(buf)
    return rv
Used to list the currently selected filter set
def update_launch_metadata(self, scaling_group, metadata):
    """
    Adds the given metadata dict to the existing metadata for the scaling
    group's launch configuration.
    """
    if not isinstance(scaling_group, ScalingGroup):
        # Accept an ID and resolve it to the full ScalingGroup object.
        scaling_group = self.get(scaling_group)
    launch_args = scaling_group.launchConfiguration.get("args", {})
    existing = launch_args.get("server", {}).get("metadata", {})
    existing.update(metadata)
    return self.update_launch_config(scaling_group, metadata=existing)
Adds the given metadata dict to the existing metadata for the scaling group's launch configuration.
def ban_show(self, ban_id, **kwargs):
    """https://developer.zendesk.com/rest_api/docs/chat/bans#get-ban"""
    path = "/api/v2/bans/{ban_id}".format(ban_id=ban_id)
    return self.call(path, **kwargs)
https://developer.zendesk.com/rest_api/docs/chat/bans#get-ban
def _compute_f1(self, C, mag, rrup): """ Compute f1 term (eq.4, page 105) """ r = np.sqrt(rrup ** 2 + C['c4'] ** 2) f1 = ( C['a1'] + C['a12'] * (8.5 - mag) ** C['n'] + (C['a3'] + C['a13'] * (mag - C['c1'])) * np.log(r) ) if mag <= C['c1']: f1 += C['a2'] * (mag - C['c1']) else: f1 += C['a4'] * (mag - C['c1']) return f1
Compute f1 term (eq.4, page 105)
def is_dict_equal(d1, d2, keys=None, ignore_none_values=True):
    """
    Compares two dictionaries to see if they are equal

    :param d1: the first dictionary
    :param d2: the second dictionary
    :param keys: the keys to limit the comparison to (optional)
    :param ignore_none_values: whether to ignore none values
    :return: true if the dictionaries are equal, else false
    """
    if not keys and not ignore_none_values:
        # Nothing to filter out; compare directly.
        return d1 == d2

    def _filtered(d):
        # Keep only the requested keys and (optionally) non-None values.
        return {k: v for k, v in d.items()
                if (keys is None or k in keys)
                and (v is not None or not ignore_none_values)}

    return _filtered(d1) == _filtered(d2)
Compares two dictionaries to see if they are equal :param d1: the first dictionary :param d2: the second dictionary :param keys: the keys to limit the comparison to (optional) :param ignore_none_values: whether to ignore none values :return: true if the dictionaries are equal, else false
def plot_and_save(self, **kwargs):
    """Create a figure, delegate drawing to self.plot(), then save and close.

    Used when the plot method defined does not create a figure nor calls
    save_plot; in that case plot() has to draw onto self.fig.

    :param kwargs: forwarded to save_plot()
    """
    # Store the figure on self so that plot() can draw onto it.
    self.fig = pyplot.figure()
    self.plot()
    # Whatever axes plot() left current are the ones saved.
    self.axes = pyplot.gca()
    self.save_plot(self.fig, self.axes, **kwargs)
    # Close to release the figure from matplotlib's registry.
    pyplot.close(self.fig)
Used when the plot method defined does not create a figure nor calls save_plot Then the plot method has to use self.fig