code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
async def linsert(self, name, where, refvalue, value):
    """Insert ``value`` into list ``name``, positioned relative to
    ``refvalue`` as directed by ``where`` (before or after it).

    Returns the new length of the list on success, or -1 if
    ``refvalue`` is not present in the list.
    """
    command_args = ('LINSERT', name, where, refvalue, value)
    return await self.execute_command(*command_args)
Insert ``value`` in list ``name`` either immediately before or after [``where``] ``refvalue`` Returns the new length of the list on success or -1 if ``refvalue`` is not in the list.
def index(self, item, **kwargs):
    # type: (Any, dict) -> int
    """Return the position of *item* within the WeakList.

    :param item: Item whose index is looked up; it is first wrapped via
        ``self.ref`` so the stored weak reference is what gets compared.
    :return: Index of the item's reference in the WeakList.
    """
    wrapped = self.ref(item)
    return list.index(self, wrapped, **kwargs)
Get index of the parameter. :param item: Item for which get the index. :return: Index of the parameter in the WeakList.
def transform(self, translation, theta, method='opencv'):
    """Create a new image by translating and rotating the current image.

    Parameters
    ----------
    translation : :obj:`numpy.ndarray` of float
        The XY translation vector.
    theta : float
        Rotation angle in radians, with positive meaning counter-clockwise.
    method : :obj:`str`
        Method to use for image transformations (opencv or scipy).

    Returns
    -------
    :obj:`Image`
        An image of the same type that has been rotated and translated.
    """
    # apply the same transform to each channel, then recombine
    tf_color = self.color.transform(translation, theta, method=method)
    tf_depth = self.depth.transform(translation, theta, method=method)
    return RgbdImage.from_color_and_depth(tf_color, tf_depth)
Create a new image by translating and rotating the current image. Parameters ---------- translation : :obj:`numpy.ndarray` of float The XY translation vector. theta : float Rotation angle in radians, with positive meaning counter-clockwise. method : :obj:`str` Method to use for image transformations (opencv or scipy) Returns ------- :obj:`Image` An image of the same type that has been rotated and translated.
def get_all(self):
    # type: () -> typing.Iterable[typing.Tuple[str, str]]
    """Yield every (name, value) header pair.

    A header with multiple values produces one pair per value, all
    sharing the same name.
    """
    # dict.items() replaces six.iteritems: behaviorally identical on
    # Python 3 and drops the py2-compat shim dependency for this block.
    for name, values in self._as_list.items():
        for value in values:
            yield (name, value)
Returns an iterable of all (name, value) pairs. If a header has multiple values, multiple pairs will be returned with the same name.
def get_monitoring_problems(self):
    """Get the schedulers satellites problems list

    :return: problems dictionary
    :rtype: dict
    """
    res = self.get_id()
    res['problems'] = {}

    # Report our schedulers information, but only if a dispatcher exists
    dispatcher = getattr(self, 'dispatcher', None)
    if dispatcher is None:
        return res

    # Only active scheduler links contribute to the report
    schedulers = (link for link in dispatcher.all_daemons_links
                  if link.type in ['scheduler'] and link.active)
    for satellite in schedulers:
        stats = satellite.statistics
        if stats and 'problems' in stats:
            res['problems'][satellite.name] = {
                '_freshness': stats['_freshness'],
                'problems': stats['problems']
            }

    return res
Get the schedulers satellites problems list :return: problems dictionary :rtype: dict
def get_value_by_version(d):
    """Find the value matching the current eplus version.

    Parameters
    ----------
    d: dict
        {(0, 0): value, (x, x): value, ...}
        for current version (cv), current value is the value of version v
        such as v <= cv < v+1
    """
    from oplus import CONF  # touchy import
    current = CONF.eplus_version[:2]
    # walk versions from newest to oldest; the first one not above the
    # current version wins (returns None if nothing matches)
    for version, value in sorted(d.items(), reverse=True):
        if version <= current:
            return value
Finds the value depending in current eplus version. Parameters ---------- d: dict {(0, 0): value, (x, x): value, ...} for current version (cv), current value is the value of version v such as v <= cv < v+1
def fixup_for_packaged():
    ''' If we are installing FROM an sdist, then a pre-built BokehJS is
    already installed in the python source tree.

    The command line options ``--build-js`` or ``--install-js`` are removed
    from ``sys.argv``, with a warning.

    Also adds ``--existing-js`` to ``sys.argv`` to signal that BokehJS is
    already packaged.

    Returns:
        None

    '''
    # PKG-INFO only exists inside an unpacked sdist
    if not exists(join(ROOT, 'PKG-INFO')):
        return

    present = [flag for flag in ('--build-js', '--install-js')
               if flag in sys.argv]
    if present:
        # warn once, regardless of how many build flags were passed
        print(SDIST_BUILD_WARNING)
        for flag in present:
            sys.argv.remove(flag)

    if "--existing-js" not in sys.argv:
        sys.argv.append('--existing-js')
If we are installing FROM an sdist, then a pre-built BokehJS is already installed in the python source tree. The command line options ``--build-js`` or ``--install-js`` are removed from ``sys.argv``, with a warning. Also adds ``--existing-js`` to ``sys.argv`` to signal that BokehJS is already packaged. Returns: None
def list_pkgs(installed=True, attributes=True):
    '''
    Lists installed packages. Due to how nix works, it defaults to just
    doing a ``nix-env -q``.

    :param bool installed:
        list only installed packages. This can be a very long list
        (12,000+ elements), so caution is advised.
        Default: True

    :param bool attributes:
        show the attributes of the packages when listing all packages.
        Default: True

    :return: Packages installed or available, along with their attributes.
    :rtype: list(list(str))

    .. code-block:: bash

        salt '*' nix.list_pkgs
        salt '*' nix.list_pkgs installed=False
    '''
    # We don't use -Q here, as it obfuscates the attribute names on full
    # package listings.
    cmd = _nix_env()
    cmd.append('--query')
    # '--installed' is nix-env's default, but adding it keeps the call explicit
    cmd.append('--installed' if installed else '--available')
    # The output of `nix-env -qaP` and `nix-env -qP` are vastly different:
    #  `nix-env -qaP` returns a list such as 'attr.path name-version'
    #  `nix-env -qP` returns a list of 'installOrder name-version'
    # Install order is useful to unambiguously select packages on a single
    # machine, but on more than one it can be a bad thing to specify.
    if attributes:
        cmd.append('--attr-path')

    out = _run(cmd)
    return [s.split() for s in salt.utils.itertools.split(out['stdout'], '\n')]
Lists installed packages. Due to how nix works, it defaults to just doing a ``nix-env -q``. :param bool installed: list only installed packages. This can be a very long list (12,000+ elements), so caution is advised. Default: True :param bool attributes: show the attributes of the packages when listing all packages. Default: True :return: Packages installed or available, along with their attributes. :rtype: list(list(str)) .. code-block:: bash salt '*' nix.list_pkgs salt '*' nix.list_pkgs installed=False
def get_referenced_object(self):
    """
    :rtype: core.BunqModel
    :raise: BunqException
    """
    referenced = self._DeviceServer
    if referenced is None:
        # no referenced model was populated on this instance
        raise exception.BunqException(self._ERROR_NULL_FIELDS)
    return referenced
:rtype: core.BunqModel :raise: BunqException
def _parse_csv_header(header):
    '''This parses a CSV header from a K2 CSV LC.

    Returns a dict that can be used to update an existing lcdict with the
    relevant metadata info needed to form a full LC.

    '''
    # first, break into lines and strip the leading '# ' comment markers
    headerlines = header.split('\n')
    headerlines = [x.lstrip('# ') for x in headerlines]

    # next, find the indices of the '# COLUMNS' line and '# LIGHTCURVE' line
    # (raises ValueError if any section marker is missing from the header)
    metadatastart = headerlines.index('METADATA')
    columnstart = headerlines.index('COLUMNS')
    lcstart = headerlines.index('LIGHTCURVE')

    # get the lines for the metadata and columndefs
    metadata = headerlines[metadatastart+1:columnstart-1]
    columndefs = headerlines[columnstart+1:lcstart-1]

    # parse the metadata: each line is comma-separated 'key = value' pairs;
    # the last metadata line is the aperture pixel radius list
    metainfo = [x.split(',') for x in metadata][:-1]
    aperpixradius = metadata[-1]

    # first metadata row: object identifiers and magnitude
    objectid, kepid, ucac4id, kepmag = metainfo[0]
    objectid, kepid, ucac4id, kepmag = (objectid.split(' = ')[-1],
                                        kepid.split(' = ')[-1],
                                        ucac4id.split(' = ')[-1],
                                        kepmag.split(' = ')[-1])
    # kepmag may be blank in the header -> store None instead of failing
    kepmag = float(kepmag) if kepmag else None

    # second metadata row: coordinates, number of detections, campaign
    ra, decl, ndet, k2campaign = metainfo[1]
    ra, decl, ndet, k2campaign = (ra.split(' = ')[-1],
                                  decl.split(' = ')[-1],
                                  int(ndet.split(' = ')[-1]),
                                  int(k2campaign.split(' = ')[-1]))

    # third metadata row: Kepler field-of-view position info
    fovccd, fovchannel, fovmodule = metainfo[2]
    fovccd, fovchannel, fovmodule = (int(fovccd.split(' = ')[-1]),
                                     int(fovchannel.split(' = ')[-1]),
                                     int(fovmodule.split(' = ')[-1]))

    # fourth metadata row: has either 3 or 4 fields depending on whether a
    # kernelspec entry is present; try the 3-field form first and fall back
    try:
        qualflag, bjdoffset, napertures = metainfo[3]
        qualflag, bjdoffset, napertures = (int(qualflag.split(' = ')[-1]),
                                           float(bjdoffset.split(' = ')[-1]),
                                           int(napertures.split(' = ')[-1]))
        kernelspec = None
    except Exception as e:
        qualflag, bjdoffset, napertures, kernelspec = metainfo[3]
        qualflag, bjdoffset, napertures, kernelspec = (
            int(qualflag.split(' = ')[-1]),
            float(bjdoffset.split(' = ')[-1]),
            int(napertures.split(' = ')[-1]),
            str(kernelspec.split(' = ')[-1])
        )

    # aperture radii: 'key = r1,r2,...' -> list of floats
    aperpixradius = aperpixradius.split(' = ')[-1].split(',')
    aperpixradius = [float(x) for x in aperpixradius]

    # parse the columndefs: each line looks like '<idx> - <colname>'
    columns = [x.split(' - ')[1] for x in columndefs]

    # assemble the lcdict-compatible metadata dict
    metadict = {'objectid':objectid,
                'objectinfo':{
                    'objectid':objectid,
                    'kepid':kepid,
                    'ucac4id':ucac4id,
                    'kepmag':kepmag,
                    'ra':ra,
                    'decl':decl,
                    'ndet':ndet,
                    'k2campaign':k2campaign,
                    'fovccd':fovccd,
                    'fovchannel':fovchannel,
                    'fovmodule':fovmodule,
                    'qualflag':qualflag,
                    'bjdoffset':bjdoffset,
                    'napertures':napertures,
                    'kernelspec':kernelspec,
                    'aperpixradius':aperpixradius,
                },
                'columns':columns}

    return metadict
This parses a CSV header from a K2 CSV LC. Returns a dict that can be used to update an existing lcdict with the relevant metadata info needed to form a full LC.
def _write_version1(self,new_filename,update_regul=False):
    """write a version 1 pest control file

    Parameters
    ----------
    new_filename : str
        name of the new pest control file
    update_regul : (boolean)
        flag to update zero-order Tikhonov prior information
        equations to prefer the current parameter values

    """
    self.new_filename = new_filename
    # normalize internal state before writing: fix parameter groups,
    # prior-information equations, and the control section counters
    self.rectify_pgroups()
    self.rectify_pi()
    self._update_control_section()
    self.sanity_checks()

    f_out = open(new_filename, 'w')
    if self.with_comments:
        for line in self.comments.get("initial",[]):
            f_out.write(line+'\n')
    f_out.write("pcf\n* control data\n")
    self.control_data.write(f_out)

    # for line in self.other_lines:
    #     f_out.write(line)
    if self.with_comments:
        for line in self.comments.get("* singular value decompisition",[]):
            f_out.write(line)
    self.svd_data.write(f_out)

    #f_out.write("* parameter groups\n")

    # to catch the byte code ugliness in python 3: pre-format the group
    # names, write the section, then restore the unformatted names
    pargpnme = self.parameter_groups.loc[:,"pargpnme"].copy()
    self.parameter_groups.loc[:,"pargpnme"] = \
        self.parameter_groups.pargpnme.apply(self.pargp_format["pargpnme"])

    self._write_df("* parameter groups", f_out, self.parameter_groups,
                   self.pargp_format, self.pargp_fieldnames)
    self.parameter_groups.loc[:,"pargpnme"] = pargpnme

    self._write_df("* parameter data",f_out, self.parameter_data,
                   self.par_format, self.par_fieldnames)

    if self.tied is not None:
        self._write_df("tied parameter data", f_out, self.tied,
                       self.tied_format, self.tied_fieldnames)

    f_out.write("* observation groups\n")
    # group names may be bytes (py2/py3 round-trip artifact); decode when
    # possible and fall through otherwise
    for group in self.obs_groups:
        try:
            group = group.decode()
        except:
            pass
        f_out.write(pst_utils.SFMT(str(group))+'\n')
    for group in self.prior_groups:
        try:
            group = group.decode()
        except:
            pass
        f_out.write(pst_utils.SFMT(str(group))+'\n')

    self._write_df("* observation data", f_out, self.observation_data,
                   self.obs_format, self.obs_fieldnames)

    f_out.write("* model command line\n")
    for cline in self.model_command:
        f_out.write(cline+'\n')

    # template/input and instruction/output file pairs, one pair per line
    f_out.write("* model input/output\n")
    for tplfle,infle in zip(self.template_files,self.input_files):
        f_out.write(tplfle+' '+infle+'\n')
    for insfle,outfle in zip(self.instruction_files,self.output_files):
        f_out.write(insfle+' '+outfle+'\n')

    if self.nprior > 0:
        if self.prior_information.isnull().values.any():
            #print("WARNING: NaNs in prior_information dataframe")
            warnings.warn("NaNs in prior_information dataframe",PyemuWarning)
        f_out.write("* prior information\n")
        #self.prior_information.index = self.prior_information.pop("pilbl")
        # pad every equation to the width of the longest one so the
        # section lines up in fixed columns
        max_eq_len = self.prior_information.equation.apply(lambda x:len(x)).max()
        eq_fmt_str = " {0:<" + str(max_eq_len) + "s} "
        eq_fmt_func = lambda x:eq_fmt_str.format(x)
        # 17/9/2016 - had to go with a custom writer loop b/c pandas doesn't
        # want to output strings longer than 100, even with display.max_colwidth
        #f_out.write(self.prior_information.to_string(col_space=0,
        #                                  columns=self.prior_fieldnames,
        #                                  formatters=pi_formatters,
        #                                  justify="right",
        #                                  header=False,
        #                                  index=False) + '\n')
        #self.prior_information["pilbl"] = self.prior_information.index
        # for idx,row in self.prior_information.iterrows():
        #     f_out.write(pst_utils.SFMT(row["pilbl"]))
        #     f_out.write(eq_fmt_func(row["equation"]))
        #     f_out.write(pst_utils.FFMT(row["weight"]))
        #     f_out.write(pst_utils.SFMT(row["obgnme"]) + '\n')
        for idx, row in self.prior_information.iterrows():
            f_out.write(pst_utils.SFMT(row["pilbl"]))
            f_out.write(eq_fmt_func(row["equation"]))
            f_out.write(pst_utils.FFMT(row["weight"]))
            f_out.write(pst_utils.SFMT(row["obgnme"]))
            if self.with_comments and 'extra' in row:
                f_out.write(" # {0}".format(row['extra']))
            f_out.write('\n')

    if self.control_data.pestmode.startswith("regul"):
        #f_out.write("* regularisation\n")
        #if update_regul or len(self.regul_lines) == 0:
        #    f_out.write(self.regul_section)
        #else:
        #    [f_out.write(line) for line in self.regul_lines]
        self.reg_data.write(f_out)

    # any unrecognized lines carried over from the original control file
    for line in self.other_lines:
        f_out.write(line+'\n')

    # PEST++ options go last, as '++key(value)' lines; list values are
    # joined with commas
    for key,value in self.pestpp_options.items():
        if isinstance(value,list):
            value = ','.join([str(v) for v in value])
        f_out.write("++{0}({1})\n".format(str(key),str(value)))

    if self.with_comments:
        for line in self.comments.get("final",[]):
            f_out.write(line+'\n')

    f_out.close()
write a version 1 pest control file Parameters ---------- new_filename : str name of the new pest control file update_regul : (boolean) flag to update zero-order Tikhonov prior information equations to prefer the current parameter values
def from_file(cls, filename, directory=None, format=None, engine=None,
              encoding=File._encoding):
    """Return an instance with the source string read from the given file.

    Args:
        filename: Filename for loading/saving the source.
        directory: (Sub)directory for source loading/saving and rendering.
        format: Rendering output format (``'pdf'``, ``'png'``, ...).
        engine: Layout command used (``'dot'``, ``'neato'``, ...).
        encoding: Encoding for loading/saving the source.
    """
    if encoding is None:
        # fall back to the platform's preferred text encoding
        encoding = locale.getpreferredencoding()
    filepath = os.path.join(directory or '', filename)
    with io.open(filepath, encoding=encoding) as fd:
        source = fd.read()
    return cls(source, filename, directory, format, engine, encoding)
Return an instance with the source string read from the given file. Args: filename: Filename for loading/saving the source. directory: (Sub)directory for source loading/saving and rendering. format: Rendering output format (``'pdf'``, ``'png'``, ...). engine: Layout command used (``'dot'``, ``'neato'``, ...). encoding: Encoding for loading/saving the source.
def Analyze(self, source_path, output_writer):
    """Analyzes the source.

    Args:
        source_path (str): the source path.
        output_writer (StdoutWriter): the output writer.

    Raises:
        RuntimeError: if the source path does not exists, or if the source path
            is not a file or directory, or if the format of or within
            the source file is not supported.
    """
    if not os.path.exists(source_path):
        raise RuntimeError('No such source: {0:s}.'.format(source_path))

    scan_context = source_scanner.SourceScannerContext()
    scan_path_spec = None

    scan_step = 0

    scan_context.OpenSourcePath(source_path)

    # Repeatedly scan until the scanner stops producing updates; each pass
    # may descend one level (non-recursive mode) or fully recurse.
    while True:
        self._source_scanner.Scan(
            scan_context, auto_recurse=self._auto_recurse,
            scan_path_spec=scan_path_spec)
        if not scan_context.updated:
            break

        if not self._auto_recurse:
            # in step-by-step mode, report the context after every pass
            output_writer.WriteScanContext(scan_context, scan_step=scan_step)
        scan_step += 1

        # The source is a directory or file.
        if scan_context.source_type in [
                definitions.SOURCE_TYPE_DIRECTORY, definitions.SOURCE_TYPE_FILE]:
            break

        # The source scanner found a locked volume, e.g. an encrypted volume,
        # and we need a credential to unlock the volume.
        for locked_scan_node in scan_context.locked_scan_nodes:
            self._PromptUserForEncryptedVolumeCredential(
                scan_context, locked_scan_node, output_writer)

        if not self._auto_recurse:
            scan_node = scan_context.GetUnscannedScanNode()
            if not scan_node:
                # nothing left to scan in step mode; context was already
                # written above, so just stop
                return
            scan_path_spec = scan_node.path_spec

    if self._auto_recurse:
        # in recursive mode the full context is written once, at the end
        output_writer.WriteScanContext(scan_context)
Analyzes the source. Args: source_path (str): the source path. output_writer (StdoutWriter): the output writer. Raises: RuntimeError: if the source path does not exists, or if the source path is not a file or directory, or if the format of or within the source file is not supported.
def patch(self, patch):
    """Merge the information from a ParsedNodePatch into this node."""
    # explicitly pick out the parts to update so we never clobber the
    # model name or anything else already in the contents
    updates = {
        'patch_path': patch.original_file_path,
        'description': patch.description,
        'columns': patch.columns,
        'docrefs': patch.docrefs,
    }
    self._contents.update(updates)
    # patches always trigger re-validation
    self.validate()
Given a ParsedNodePatch, add the new information to the node.
def patch_custom_resource_definition(self, name, body, **kwargs):  # noqa: E501
    """patch_custom_resource_definition  # noqa: E501

    partially update the specified CustomResourceDefinition  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_custom_resource_definition(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the CustomResourceDefinition (required)
    :param UNKNOWN_BASE_TYPE body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1beta1CustomResourceDefinition
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the *_with_http_info
    # variant; with _return_http_data_only set it yields just the
    # deserialized data (or the request thread when async_req=True).
    return self.patch_custom_resource_definition_with_http_info(
        name, body, **kwargs)  # noqa: E501
patch_custom_resource_definition # noqa: E501 partially update the specified CustomResourceDefinition # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_custom_resource_definition(name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the CustomResourceDefinition (required) :param UNKNOWN_BASE_TYPE body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1beta1CustomResourceDefinition If the method is called asynchronously, returns the request thread.
def _removeGlyph(self, name, **kwargs): """ This is the environment implementation of :meth:`BaseFont.removeGlyph`. **name** will be a :ref:`type-string` representing an existing glyph in the default layer. The value will have been normalized with :func:`normalizers.normalizeGlyphName`. Subclasses may override this method. """ layer = self.defaultLayer layer.removeGlyph(name)
This is the environment implementation of :meth:`BaseFont.removeGlyph`. **name** will be a :ref:`type-string` representing an existing glyph in the default layer. The value will have been normalized with :func:`normalizers.normalizeGlyphName`. Subclasses may override this method.
def get_cut_prefix(value, max_len):
    """Drop leading characters so the rendered width fits in *max_len*.

    Operates on unicode characters, not bytes; a ``bytes`` input is
    decoded, trimmed, and re-encoded so the return type matches the
    input type.
    """
    should_convert = isinstance(value, bytes)
    if should_convert:
        value = value.decode("utf8", "ignore")
    # Bug fix: initialize i so an empty value no longer raises
    # NameError (the for loop body never runs for "").
    i = 0
    for i in range(len(value)):
        # keep the longest suffix whose terminal width fits max_len;
        # NOTE(review): if even the last character is too wide, this
        # returns that single character (original behavior preserved)
        if terminal_width(value[i:]) <= max_len:
            break
    result = value[i:]
    return result.encode("utf8", "ignore") if should_convert else result
Drops Characters by unicode not by bytes.
def _execCmd(self, cmd, args): """Execute command and return result body as list of lines. @param cmd: Command string. @param args: Comand arguments string. @return: Result dictionary. """ output = self._eslconn.api(cmd, args) if output: body = output.getBody() if body: return body.splitlines() return None
Execute command and return result body as list of lines. @param cmd: Command string. @param args: Comand arguments string. @return: Result dictionary.
def GetRosettaResidueMap(self, ConvertMSEToAtom = False, RemoveIncompleteFinalResidues = False, RemoveIncompleteResidues = False):
    '''Note: This function ignores any DNA.

    Deprecated: this function unconditionally raises; use
    construct_pdb_to_rosetta_residue_map instead.
    '''
    # The original body contained ~100 lines of residue-map construction
    # code after this raise, all of it unreachable (and partly Python-2
    # only, e.g. dict.iteritems). The dead code has been removed; the
    # observable behavior - always raising this exception - is unchanged.
    raise Exception('This code looks to be deprecated. Use construct_pdb_to_rosetta_residue_map instead.')
Note: This function ignores any DNA.
def notify_server_ready(self, language, config):
    """Notify language server availability to code editors."""
    target = language.lower()
    # push the LSP config to every open editor tab for this language
    for index in range(self.get_stack_count()):
        editor = self.tabs.widget(index)
        if editor.language.lower() == target:
            editor.start_lsp_services(config)
Notify language server availability to code editors.
def _stripe_object_to_refunds(cls, target_cls, data, charge): """ Retrieves Refunds for a charge :param target_cls: The target class to instantiate per invoice item. :type target_cls: ``Refund`` :param data: The data dictionary received from the Stripe API. :type data: dict :param charge: The charge object that refunds are for. :type invoice: ``djstripe.models.Refund`` :return: """ refunds = data.get("refunds") if not refunds: return [] refund_objs = [] for refund_data in refunds.get("data", []): item, _ = target_cls._get_or_create_from_stripe_object(refund_data, refetch=False) refund_objs.append(item) return refund_objs
Retrieves Refunds for a charge :param target_cls: The target class to instantiate per invoice item. :type target_cls: ``Refund`` :param data: The data dictionary received from the Stripe API. :type data: dict :param charge: The charge object that refunds are for. :type invoice: ``djstripe.models.Refund`` :return:
def _adjust_axis(self, axis):
    """Return raw axis/axes corresponding to apparent axis/axes.

    This method adjusts user provided 'axis' parameter, for some of the
    cube operations, mainly 'margin'. The user never sees the MR
    selections dimension, and treats all MRs as single dimensions. Thus
    we need to adjust the values of axis (to sum across) to what the
    user would've specified if he were aware of the existence of the MR
    selections dimension.

    The reason for this adjustment is that all of the operations
    performed troughout the margin calculations will be carried on an
    internal array, containing all the data (together with all
    selections). For more info on how it needs to operate, check the
    unit tests.
    """
    if not self._is_axis_allowed(axis):
        # summing across an items dimension (e.g. CA items) is invalid
        ca_error_msg = "Direction {} not allowed (items dimension)"
        raise ValueError(ca_error_msg.format(axis))

    if isinstance(axis, int):
        # If single axis was provided, create a list out of it, so that
        # we can do the subsequent iteration.
        axis = list([axis])
    elif axis is None:
        # If axis was None, create what user would expect in terms of
        # finding out the Total(s). In case of 2D cube, this will be the
        # axis of all the dimensions that the user can see, that is (0, 1),
        # because the selections dimension is invisible to the user. In
        # case of 3D cube, this will be the "total" across each slice, so
        # we need to drop the 0th dimension, and only take last two (1, 2).
        axis = range(self.ndim)[-2:]
    else:
        # In case of a tuple, just keep it as a list.
        axis = list(axis)
    axis = np.array(axis)

    # Create new array for storing updated values of axis. It's necessary
    # because it's hard to update the values in place (`axis` must stay
    # unmodified so the `axis >= i` comparisons below keep referring to
    # the user's original values).
    new_axis = np.array(axis)

    # Iterate over user-visible dimensions, and update axis when MR is
    # detected. For each detected MR, we need to increment all subsequent
    # axis (that were provided by the user). But we don't need to update
    # the axis that are "behind" the current MR.
    for i, dim in enumerate(self.dimensions):
        if dim.dimension_type == DT.MR_SUBVAR:
            # This formula updates only the axis that come "after" the
            # current MR (items) dimension.
            new_axis[axis >= i] += 1

    return tuple(new_axis)
Return raw axis/axes corresponding to apparent axis/axes. This method adjusts user provided 'axis' parameter, for some of the cube operations, mainly 'margin'. The user never sees the MR selections dimension, and treats all MRs as single dimensions. Thus we need to adjust the values of axis (to sum across) to what the user would've specified if he were aware of the existence of the MR selections dimension. The reason for this adjustment is that all of the operations performed troughout the margin calculations will be carried on an internal array, containing all the data (together with all selections). For more info on how it needs to operate, check the unit tests.
def delta13c_craig(r45sam, r46sam, d13cstd, r45std, r46std, ks='Craig', d18ostd=23.5): """ Algorithm from Craig 1957. From the original Craig paper, we can set up a pair of equations and solve for d13C and d18O simultaneously: d45 * r45 = r13 * d13 + 0.5 * r17 * d18 d46 = r13 * ((r17**2 + r17 - r18) / a) * d13 + 1 - 0.5 * r17 * ((r13**2 + r13 - r18) / a) * d18 where a = r18 + r13 * r17 and b = 1 + r13 + r17 """ # the constants for the calculations # originally r13, r17, r18 = 1123.72e-5, 759.9e-6, 415.8e-5 k = delta13c_constants()[ks] # TODO: not clear why need to multiply by 2? r13, r18 = k['S13'], 2 * k['S18'] r17 = 2 * (k['K'] * k['S18'] ** k['A']) a = (r18 + r13 * r17) * (1. + r13 + r17) # the coefficients for the calculations eqn_mat = np.array([[r13, 0.5 * r17], [r13 * ((r17 ** 2 + r17 - r18) / a), 1 - 0.5 * r17 * ((r13 ** 2 + r13 - r18) / a)]]) # precalculate the d45 and d46 of the standard versus PDB r45d45std = (eqn_mat[0, 0] * d13cstd + eqn_mat[0, 1] * d18ostd) d46std = eqn_mat[1, 0] * d13cstd + eqn_mat[1, 1] * d18ostd # calculate the d45 and d46 of our sample versus PDB # in r45d45, r45 of PDB = r13 + r17 of PDB r45d45 = 1000. * (r45sam / r45std - 1.) * \ (r13 + r17 + 0.001 * r45d45std) + r45d45std d46 = 1000. * (r46sam / r46std - 1.) * (1. + 0.001 * d46std) + d46std # solve the system of equations x = np.linalg.solve(eqn_mat, np.array([r45d45, d46])) return x[0]
Algorithm from Craig 1957. From the original Craig paper, we can set up a pair of equations and solve for d13C and d18O simultaneously: d45 * r45 = r13 * d13 + 0.5 * r17 * d18 d46 = r13 * ((r17**2 + r17 - r18) / a) * d13 + 1 - 0.5 * r17 * ((r13**2 + r13 - r18) / a) * d18 where a = r18 + r13 * r17 and b = 1 + r13 + r17
def extractDates(inp, tz=None, now=None):
    """Extract semantic date information from an input string.

    Convenience wrapper that builds a throwaway DateService so callers
    need not instantiate one themselves.

    Args:
        inp (str): The input string to be parsed.
        tz: An optional pytz timezone. All datetime objects returned will
            be relative to the supplied timezone, or timezone-less if none
            is supplied.
        now: The time to which all returned datetime objects should be
            relative. For example, if the text is "In 5 hours", the
            datetime returned will be now + datetime.timedelta(hours=5).
            Uses datetime.datetime.now() if none is supplied.

    Returns:
        A list of datetime objects extracted from the input.
    """
    return DateService(tz=tz, now=now).extractDates(inp)
Extract semantic date information from an input string. This is a convenience method which would only be used if you'd rather not initialize a DateService object. Args: inp (str): The input string to be parsed. tz: An optional Pytz timezone. All datetime objects returned will be relative to the supplied timezone, or timezone-less if none is supplied. now: The time to which all returned datetime objects should be relative. For example, if the text is "In 5 hours", the datetime returned will be now + datetime.timedelta(hours=5). Uses datetime.datetime.now() if none is supplied. Returns: A list of datetime objects extracted from input.
def get_context(line, pos):
    """Locate the command under a cursor position.

    Computes the start and end positions of the substring of *line* that is
    the command string containing position *pos*.

    :param line: full command line string
    :param pos: cursor offset within *line*
    :return: (start, end) offsets of the enclosing command
    """
    # Trailing '' guards the lookahead when pos falls past the last command.
    commands = split_commandline(line) + ['']
    index = 0
    start, end = 0, len(commands[index])
    while pos > end:
        index += 1
        start = end + 1               # skip the separator character
        end += 1 + len(commands[index])
    return start, end
computes start and end position of substring of line that is the command string under given position
def main():
    """
    Starts the Application.

    :return: Definition success.
    :rtype: bool
    """
    # Collect only the component directories that actually exist on disk.
    components_paths = []
    for path in (os.path.join(umbra.__path__[0], Constants.factory_components_directory),
                 os.path.join(umbra.__path__[0], Constants.factory_addons_components_directory)):
        if os.path.exists(path):
            components_paths.append(path)

    # Decode the raw command line arguments (Python 2 style) before parsing.
    arguments = [unicode(argument, Constants.default_codec, Constants.codec_error)
                 for argument in sys.argv]
    parameters = umbra.engine.get_command_line_parameters_parser().parse_args(arguments)

    return umbra.engine.run(umbra.engine.Umbra,
                            parameters,
                            components_paths,
                            ("factory.script_editor",
                             "factory.preferences_manager",
                             "factory.components_manager_ui"))
Starts the Application. :return: Definition success. :rtype: bool
def parseAddress(address):
    """
    Parse the given RFC 2821 email address into a structured object.

    @type address: C{str}
    @param address: The address to parse.

    @rtype: L{Address}

    @raise xmantissa.error.ArgumentError: The given string was not a valid
        RFC 2821 address.
    """
    parsed = []
    consumed = _AddressParser()(parsed, address)
    # Any bytes left over after the grammar stops are an error.
    if consumed != len(address):
        raise InvalidTrailingBytes()
    return parsed[0]
Parse the given RFC 2821 email address into a structured object. @type address: C{str} @param address: The address to parse. @rtype: L{Address} @raise xmantissa.error.ArgumentError: The given string was not a valid RFC 2821 address.
def loads(string, *transformers, **kwargs):
    """
    Deserializes Java objects and primitive data serialized using
    ObjectOutputStream from a string.

    :param string: A Java data string
    :param transformers: Custom transformers to use
    :param ignore_remaining_data: If True, don't log an error when unused
                                  trailing bytes are remaining
    :return: The deserialized object
    """
    # Delegate to load() so the parsing logic lives in one place.
    return load(
        BytesIO(string),
        *transformers,
        ignore_remaining_data=kwargs.get("ignore_remaining_data", False)
    )
Deserializes Java objects and primitive data serialized using ObjectOutputStream from a string. :param string: A Java data string :param transformers: Custom transformers to use :param ignore_remaining_data: If True, don't log an error when unused trailing bytes are remaining :return: The deserialized object
def run(self, scenario):
    """Train a new classifier set on the given scenario and return it.

    A classifier set is created, then repeatedly chooses the most
    appropriate action for each situation produced by the scenario,
    improving its situation/action mapping on each reward cycle to
    maximize reward.

    Usage:
        scenario = MUXProblem()
        model = algorithm.run(scenario)

    Arguments:
        scenario: A Scenario instance.
    Return:
        A new classifier set, trained on the given scenario.
    """
    assert isinstance(scenario, scenarios.Scenario)
    classifier_set = self.new_model(scenario)
    classifier_set.run(scenario, learn=True)
    return classifier_set
Run the algorithm, utilizing a classifier set to choose the most appropriate action for each situation produced by the scenario. Improve the situation/action mapping on each reward cycle to maximize reward. Return the classifier set that was created. Usage: scenario = MUXProblem() model = algorithm.run(scenario) Arguments: scenario: A Scenario instance. Return: A new classifier set, trained on the given scenario.
def val_to_mrc(code, val):
    """
    Convert one single `val` to MRC.

    This function may be used for control fields in MARC records.

    Args:
        code (str): Code of the field.
        val (str): Value of the field.

    Returns:
        str: Correctly padded MRC line with field.
    """
    # Field codes are space-padded to a minimum width of three characters;
    # str.ljust replaces the previous manual padding loop.
    code = str(code).ljust(3)
    return "%s L %s" % (code, val)
Convert one single `val` to MRC. This function may be used for control fields in MARC records. Args: code (str): Code of the field. val (str): Value of the field. Returns: str: Correctly padded MRC line with field.
def on_ctcp(self, connection, event):
    """Default handler for CTCP events.

    Replies to VERSION and PING requests and relays DCC CHAT requests to
    the on_dccchat method.
    """
    nick = event.source.nick
    request = event.arguments[0]
    if request == "VERSION":
        connection.ctcp_reply(nick, "VERSION " + self.get_version())
    elif request == "PING":
        # Only echo the payload back when one was actually supplied.
        if len(event.arguments) > 1:
            connection.ctcp_reply(nick, "PING " + event.arguments[1])
    elif request == "DCC" and event.arguments[1].split(" ", 1)[0] == "CHAT":
        self.on_dccchat(connection, event)
Default handler for ctcp events. Replies to VERSION and PING requests and relays DCC requests to the on_dccchat method.
def from_name(clz, name):
    """Instantiate the object from a known color name."""
    # Historical quirk: a list containing "green" is mapped to "teal".
    if isinstance(name, list) and "green" in name:
        name = "teal"
    assert name in COLOR_NAMES, 'Unknown color name'
    # NOTE(review): the stored tuple was previously unpacked as (r, b, g)
    # and passed to clz in that same order; splatting preserves behavior.
    return clz(*COLOR_NAMES[name])
Instantiates the object from a known name
def finalize(self):
    """Connect the wires, marking this object as final."""
    self._check_finalized()
    self._final = True
    for dest_wire, values in self.dest_instrs_info.items():
        # Map each instruction to its value for this destination wire.
        selection = dict(zip(self.instructions, values))
        dest_wire <<= sparse_mux(self.signal_wire, selection)
Connects the wires.
def profile(event_type, extra_data=None):
    """Profile a span of time so that it appears in the timeline visualization.

    Note that this only works in the raylet code path.  It can be used on
    the driver or within a task:

    .. code-block:: python

        with ray.profile("custom event", extra_data={'key': 'value'}):
            # Do some computation here.

    Optionally, a dictionary can be passed as the "extra_data" argument;
    its "name" and "cname" keys override the default timeline display text
    and box color, and other values appear at the bottom of the chrome
    tracing GUI when the box is clicked.

    Args:
        event_type: A string describing the type of the event.
        extra_data: A dictionary mapping strings to strings, merged into
            the json objects used to populate the timeline.

    Returns:
        An object that can profile a span of time via a "with" statement.
    """
    profiler = ray.worker.global_worker.profiler
    return RayLogSpanRaylet(profiler, event_type, extra_data=extra_data)
Profile a span of time so that it appears in the timeline visualization. Note that this only works in the raylet code path. This function can be used as follows (both on the driver or within a task). .. code-block:: python with ray.profile("custom event", extra_data={'key': 'value'}): # Do some computation here. Optionally, a dictionary can be passed as the "extra_data" argument, and it can have keys "name" and "cname" if you want to override the default timeline display text and box color. Other values will appear at the bottom of the chrome tracing GUI when you click on the box corresponding to this profile span. Args: event_type: A string describing the type of the event. extra_data: This must be a dictionary mapping strings to strings. This data will be added to the json objects that are used to populate the timeline, so if you want to set a particular color, you can simply set the "cname" attribute to an appropriate color. Similarly, if you set the "name" attribute, then that will set the text displayed on the box in the timeline. Returns: An object that can profile a span of time via a "with" statement.
def nucmer(args):
    # NOTE: this docstring doubles as the OptionParser usage text below,
    # so its wording is user-visible help output.
    """
    %prog nucmer mappings.bed MTR.fasta assembly.fasta chr1 3

    Select specific chromosome region based on MTR mapping. The above
    command will extract chr1:2,000,001-3,000,000.
    """
    p = OptionParser(nucmer.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 5:
        sys.exit(not p.print_help())

    mapbed, mtrfasta, asmfasta, chr, idx = args
    idx = int(idx)
    # 1-based megabase window: extract chr:(idx-1)Mb..idx Mb.
    m1 = 1000000
    bedfile = "sample.bed"
    bed = Bed()
    bed.add("\t".join(str(x) for x in (chr, (idx - 1) * m1, idx * m1)))
    bed.print_to_file(bedfile)

    # Collect the ids of mapping features that overlap the window.
    cmd = "intersectBed -a {0} -b {1} -nonamecheck -sorted | cut -f4".\
        format(mapbed, bedfile)
    idsfile = "query.ids"
    sh(cmd, outfile=idsfile)

    # Reference slice from MTR, query records from the assembly.
    sfasta = fastaFromBed(bedfile, mtrfasta)
    qfasta = "query.fasta"
    cmd = "faSomeRecords {0} {1} {2}".format(asmfasta, idsfile, qfasta)
    sh(cmd)

    # Align query against the reference slice with nucmer, then plot.
    cmd = "nucmer {0} {1}".format(sfasta, qfasta)
    sh(cmd)

    mummerplot_main(["out.delta", "--refcov=0"])
    sh("mv out.pdf {0}.{1}.pdf".format(chr, idx))
%prog nucmer mappings.bed MTR.fasta assembly.fasta chr1 3 Select specific chromosome region based on MTR mapping. The above command will extract chr1:2,000,001-3,000,000.
def p_static_non_empty_array_pair_list_item(p):
    # PLY grammar action: the docstring below IS the grammar rule and is
    # parsed by PLY at table-build time — do not edit it casually.
    '''static_non_empty_array_pair_list : static_non_empty_array_pair_list COMMA static_scalar
                                        | static_scalar'''
    if len(p) == 4:
        # list COMMA scalar: extend the accumulated element list.
        p[0] = p[1] + [ast.ArrayElement(None, p[3], False, lineno=p.lineno(2))]
    else:
        # single scalar: start a fresh one-element list.
        p[0] = [ast.ArrayElement(None, p[1], False, lineno=p.lineno(1))]
static_non_empty_array_pair_list : static_non_empty_array_pair_list COMMA static_scalar | static_scalar
def _process_pathway_disease(self, limit):
    """
    We make a link between the pathway identifiers, and any diseases
    associated with them.  Since we model diseases as processes, we make a
    triple saying that the pathway may be causally upstream of or within
    the disease process.

    :param limit: maximum number of rows to process (None for all)
    :return: None
    """
    LOG.info("Processing KEGG pathways to disease ids")
    graph = self.testgraph if self.test_mode else self.graph
    line_counter = 0
    raw = '/'.join((self.rawdir, self.files['pathway_disease']['file']))
    with open(raw, 'r', encoding="iso-8859-1") as csvfile:
        reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        for (disease_id, kegg_pathway_num) in reader:
            line_counter += 1
            if self.test_mode and \
                    kegg_pathway_num not in self.test_ids['pathway']:
                continue
            disease_id = 'KEGG-' + disease_id
            # will look like KEGG-path:map04130 or KEGG-path:hsa04130
            pathway_id = 'KEGG-' + kegg_pathway_num
            graph.addTriple(
                pathway_id,
                self.globaltt['causally upstream of or within'],
                disease_id)
            if not self.test_mode and limit is not None \
                    and line_counter > limit:
                break
    return
We make a link between the pathway identifiers, and any diseases associated with them. Since we model diseases as processes, we make a triple saying that the pathway may be causally upstream of or within the disease process. :param limit: :return:
def get_size(self, m):
    """Return the 2-D size of the Jacobian matrix named *m* as a tuple.

    The first character ('F' or 'G') selects the row count (self.n or
    self.m); the second ('x' or 'y') selects the column count.  Any other
    character yields 0 for that dimension.
    """
    row_sizes = {'F': self.n, 'G': self.m}
    col_sizes = {'x': self.n, 'y': self.m}
    return row_sizes.get(m[0], 0), col_sizes.get(m[1], 0)
Return the 2-D size of a Jacobian matrix in tuple
def plot_map(fignum, lats, lons, Opts):
    """
    makes a cartopy map with lats/lons
    Requires installation of cartopy

    Parameters:
    _______________
    fignum : matplotlib figure number
    lats : array or list of latitudes
    lons : array or list of longitudes
    Opts : dictionary of plotting options:
        Opts.keys=
            proj : projection [supported cartopy projections:
                pc = Plate Carree
                aea = Albers Equal Area
                aeqd = Azimuthal Equidistant
                lcc = Lambert Conformal
                lcyl = Lambert Cylindrical
                merc = Mercator
                mill = Miller Cylindrical
                moll = Mollweide [default]
                ortho = Orthographic
                robin = Robinson
                sinu = Sinusoidal
                stere = Stereographic
                tmerc = Transverse Mercator
                utm = UTM [set zone and south keys in Opts]
                laea = Lambert Azimuthal Equal Area
                geos = Geostationary
                npstere = North-Polar Stereographic
                spstere = South-Polar Stereographic]
            latmin : minimum latitude for plot
            latmax : maximum latitude for plot
            lonmin : minimum longitude for plot
            lonmax : maximum longitude
            lat_0 : central latitude
            lon_0 : central longitude
            sym : matplotlib symbol
            symsize : symbol size in pts
            edge : markeredgecolor
            cmap : matplotlib color map
            res : resolution [c,l,i,h] for low/crude, intermediate, high
            boundinglat : bounding latitude
            names : list of names for lats/lons (if empty, none will be plotted)
            pltgrid : if True, put on grid lines
            padlat : padding of latitudes
            padlon : padding of longitudes
            gridspace : grid line spacing
            global : global projection [default is True]
            oceancolor : 'azure'
            landcolor : 'bisque' [choose any of the valid color names for
                matplotlib; see
                https://matplotlib.org/examples/color/named_colors.html]
            details : dictionary with keys:
                coasts : if True, plot coastlines
                rivers : if True, plot rivers
                states : if True, plot states
                countries : if True, plot countries
                ocean : if True, plot ocean
                fancy : if True, plot etopo 20 grid
                    NB: etopo must be installed
        if Opts keys not set, these are the defaults:
            Opts={'latmin':-90,'latmax':90,'lonmin':0,'lonmax':360,'lat_0':0,
                  'lon_0':0,'proj':'moll','sym':'ro','symsize':5,
                  'edge':'black','pltgrid':1,'res':'c','boundinglat':0.,
                  'padlon':0,'padlat':0,'gridspace':30,'details':all False,
                  'edge':None,'cmap':'jet','fancy':0,'zone':'','south':False,
                  'oceancolor':'azure','landcolor':'bisque'}
    """
    if not has_cartopy:
        print('This function requires installation of cartopy')
        return
    from matplotlib import cm
    # draw meridian labels on the bottom [left,right,top,bottom]
    mlabels = [0, 0, 0, 1]
    # draw parallel labels on the left
    plabels = [1, 0, 0, 0]
    # Fill in any missing Opts keys (and missing 'details' sub-keys) with
    # the documented defaults, without overwriting caller-supplied values.
    Opts_defaults = {'latmin': -90, 'latmax': 90, 'lonmin': 0, 'lonmax': 360,
                     'lat_0': 0, 'lon_0': 0, 'proj': 'moll', 'sym': 'ro',
                     'symsize': 5, 'edge': None, 'pltgrid': 1, 'res': 'c',
                     'boundinglat': 0., 'padlon': 0, 'padlat': 0,
                     'gridspace': 30, 'global': 1, 'cmap': 'jet',
                     'oceancolor': 'azure', 'landcolor': 'bisque',
                     'details': {'fancy': 0, 'coasts': 0, 'rivers': 0,
                                 'states': 0, 'countries': 0, 'ocean': 0},
                     'edgecolor': 'face'}
    for key in Opts_defaults.keys():
        if key not in Opts.keys() and key != 'details':
            Opts[key] = Opts_defaults[key]
        if key == 'details':
            if key not in Opts.keys():
                Opts[key] = Opts_defaults[key]
            for detail_key in Opts_defaults[key].keys():
                if detail_key not in Opts[key].keys():
                    Opts[key][detail_key] = Opts_defaults[key][detail_key]
    # --- build the axes for the requested projection ---
    if Opts['proj'] == 'pc':
        ax = plt.axes(projection=ccrs.PlateCarree())
        ax.set_extent([Opts['lonmin'], Opts['lonmax'],
                       Opts['latmin'], Opts['latmax']],
                      crs=ccrs.PlateCarree())
    if Opts['proj'] == 'aea':
        ax = plt.axes(projection=ccrs.AlbersEqualArea(
            central_longitude=Opts['lon_0'],
            central_latitude=Opts['lat_0'],
            false_easting=0.0, false_northing=0.0,
            standard_parallels=(20.0, 50.0), globe=None))
    if Opts['proj'] == 'lcc':
        proj = ccrs.LambertConformal(
            central_longitude=Opts['lon_0'],
            central_latitude=Opts['lat_0'],
            false_easting=0.0, false_northing=0.0,
            standard_parallels=(20.0, 50.0), globe=None)
        fig = plt.figure(fignum, figsize=(6, 6), frameon=True)
        ax = plt.axes(projection=proj)
        ax.set_extent([Opts['lonmin'], Opts['lonmax'],
                       Opts['latmin'], Opts['latmax']],
                      crs=ccrs.PlateCarree())
    if Opts['proj'] == 'lcyl':
        ax = plt.axes(projection=ccrs.LambertCylindrical(
            central_longitude=Opts['lon_0']))
    if Opts['proj'] == 'merc':
        ax = plt.axes(projection=ccrs.Mercator(
            central_longitude=Opts['lon_0'],
            min_latitude=Opts['latmin'],
            max_latitude=Opts['latmax'],
            latitude_true_scale=0.0, globe=None))
        ax.set_extent([Opts['lonmin'], Opts['lonmax'],
                       Opts['latmin'], Opts['latmax']])
    if Opts['proj'] == 'mill':
        ax = plt.axes(projection=ccrs.Miller(
            central_longitude=Opts['lon_0']))
    if Opts['proj'] == 'moll':
        # NOTE(review): passes Opts['lat_0'] as central_longitude —
        # looks like it should be Opts['lon_0']; confirm before changing.
        ax = plt.axes(projection=ccrs.Mollweide(
            central_longitude=Opts['lat_0'], globe=None))
    if Opts['proj'] == 'ortho':
        ax = plt.axes(projection=ccrs.Orthographic(
            central_longitude=Opts['lon_0'],
            central_latitude=Opts['lat_0']))
    if Opts['proj'] == 'robin':
        ax = plt.axes(projection=ccrs.Robinson(
            central_longitude=Opts['lon_0'], globe=None))
    if Opts['proj'] == 'sinu':
        ax = plt.axes(projection=ccrs.Sinusoidal(
            central_longitude=Opts['lon_0'],
            false_easting=0.0, false_northing=0.0, globe=None))
    if Opts['proj'] == 'stere':
        ax = plt.axes(projection=ccrs.Stereographic(
            central_longitude=Opts['lon_0'],
            false_easting=0.0, false_northing=0.0,
            true_scale_latitude=None, scale_factor=None, globe=None))
    if Opts['proj'] == 'tmerc':
        ax = plt.axes(projection=ccrs.TransverseMercator(
            central_longitude=Opts['lon_0'],
            central_latitude=Opts['lat_0'],
            false_easting=0.0, false_northing=0.0,
            scale_factor=None, globe=None))
    if Opts['proj'] == 'utm':
        ax = plt.axes(projection=ccrs.UTM(
            zone=Opts['zone'],
            southern_hemisphere=Opts['south'], globe=None))
    if Opts['proj'] == 'geos':
        ax = plt.axes(projection=ccrs.Geostationary(
            central_longitude=Opts['lon_0'],
            false_easting=0.0, false_northing=0.0,
            satellite_height=35785831, sweep_axis='y', globe=None))
    if Opts['proj'] == 'laea':
        ax = plt.axes(projection=ccrs.LambertAzimuthalEqualArea(
            central_longitude=Opts['lon_0'],
            central_latitude=Opts['lat_0'],
            false_easting=0.0, false_northing=0.0, globe=None))
    if Opts['proj'] == 'npstere':
        ax = plt.axes(projection=ccrs.NorthPolarStereo(
            central_longitude=Opts['lon_0'],
            true_scale_latitude=None, globe=None))
    if Opts['proj'] == 'spstere':
        ax = plt.axes(projection=ccrs.SouthPolarStereo(
            central_longitude=Opts['lon_0'],
            true_scale_latitude=None, globe=None))
    # --- optional map decorations ---
    if 'details' in list(Opts.keys()):
        if Opts['details']['fancy'] == 1:
            # shaded etopo20 relief background bundled with PmagPy
            pmag_data_dir = find_pmag_dir.get_data_files_dir()
            EDIR = os.path.join(pmag_data_dir, "etopo20")
            etopo_path = os.path.join(EDIR, 'etopo20data.gz')
            etopo = np.loadtxt(os.path.join(EDIR, 'etopo20data.gz'))
            elons = np.loadtxt(os.path.join(EDIR, 'etopo20lons.gz'))
            elats = np.loadtxt(os.path.join(EDIR, 'etopo20lats.gz'))
            xx, yy = np.meshgrid(elons, elats)
            levels = np.arange(-10000, 8000, 500)  # define contour intervals
            m = ax.contourf(xx, yy, etopo, levels,
                            transform=ccrs.PlateCarree(),
                            cmap=Opts['cmap'])
            cbar = plt.colorbar(m)
        if Opts['details']['coasts'] == 1:
            if Opts['res'] == 'c' or Opts['res'] == 'l':
                ax.coastlines(resolution='110m')
            elif Opts['res'] == 'i':
                ax.coastlines(resolution='50m')
            elif Opts['res'] == 'h':
                ax.coastlines(resolution='10m')
        if Opts['details']['rivers'] == 1:
            ax.add_feature(cfeature.RIVERS)
        if Opts['details']['states'] == 1:
            states_provinces = cfeature.NaturalEarthFeature(
                category='cultural',
                name='admin_1_states_provinces_lines',
                scale='50m', edgecolor='black', facecolor='none',
                linestyle='dotted')
            ax.add_feature(states_provinces)
        if Opts['details']['countries'] == 1:
            ax.add_feature(BORDERS, linestyle='-', linewidth=2)
        if Opts['details']['ocean'] == 1:
            ax.add_feature(OCEAN, color=Opts['oceancolor'])
            ax.add_feature(LAND, color=Opts['landcolor'])
            ax.add_feature(LAKES, color=Opts['oceancolor'])
    # --- grid lines (only supported for a subset of projections) ---
    if Opts['proj'] in ['merc', 'pc', 'lcc']:
        if Opts['pltgrid']:
            if Opts['proj'] == 'lcc':
                fig.canvas.draw()
                #xticks=list(np.arange(Opts['lonmin'],Opts['lonmax']+Opts['gridspace'],Opts['gridspace']))
                #yticks=list(np.arange(Opts['latmin'],Opts['latmax']+Opts['gridspace'],Opts['gridspace']))
                xticks = list(np.arange(-180, 180, Opts['gridspace']))
                yticks = list(np.arange(-90, 90, Opts['gridspace']))
                ax.gridlines(ylocs=yticks, xlocs=xticks,
                             linewidth=2, linestyle='dotted')
                ax.xaxis.set_major_formatter(LONGITUDE_FORMATTER)  # you need this here
                ax.yaxis.set_major_formatter(LATITUDE_FORMATTER)  # you need this here, too
                try:
                    # tick labelling on Lambert Conformal needs shapely
                    import pmagpy.lcc_ticks as lcc_ticks
                    lcc_ticks.lambert_xticks(ax, xticks)
                    lcc_ticks.lambert_yticks(ax, yticks)
                except:
                    print('plotting of tick marks on Lambert Conformal requires the package "shapely".\n Try importing with "conda install -c conda-forge shapely"')
            else:
                gl = ax.gridlines(crs=ccrs.PlateCarree(), linewidth=2,
                                  linestyle='dotted', draw_labels=True)
                gl.ylocator = mticker.FixedLocator(
                    np.arange(-80, 81, Opts['gridspace']))
                gl.xlocator = mticker.FixedLocator(
                    np.arange(-180, 181, Opts['gridspace']))
                gl.xformatter = LONGITUDE_FORMATTER
                gl.yformatter = LATITUDE_FORMATTER
                gl.xlabels_top = False
        else:
            gl = ax.gridlines(crs=ccrs.PlateCarree(), linewidth=2,
                              linestyle='dotted')
    elif Opts['pltgrid']:
        print('gridlines only supported for PlateCarree, Lambert Conformal, and Mercator plots currently')
    prn_name, symsize = 0, 5
    # if 'names' in list(Opts.keys()) > 0:
    #    names = Opts['names']
    #    if len(names) > 0:
    #        prn_name = 1
    ##    X, Y, T, k = [], [], [], 0
    if 'symsize' in list(Opts.keys()):
        symsize = Opts['symsize']
    if Opts['sym'][-1] != '-':  # just plot points
        color, symbol = Opts['sym'][0], Opts['sym'][1]
        ax.scatter(lons, lats, s=Opts['symsize'], c=color, marker=symbol,
                   transform=ccrs.Geodetic(),
                   edgecolors=Opts['edgecolor'])
        if prn_name == 1:
            print('labels not yet implemented in plot_map')
            # for pt in range(len(lats)):
            #    T.append(plt.text(X[pt] + 5000, Y[pt] - 5000, names[pt]))
    else:
        # for lines, need to separate chunks using lat==100.
        ax.plot(lons, lats, Opts['sym'], transform=ccrs.Geodetic())
    if Opts['global']:
        ax.set_global()
makes a cartopy map with lats/lons Requires installation of cartopy Parameters: _______________ fignum : matplotlib figure number lats : array or list of latitudes lons : array or list of longitudes Opts : dictionary of plotting options: Opts.keys= proj : projection [supported cartopy projections: pc = Plate Carree aea = Albers Equal Area aeqd = Azimuthal Equidistant lcc = Lambert Conformal lcyl = Lambert Cylindrical merc = Mercator mill = Miller Cylindrical moll = Mollweide [default] ortho = Orthographic robin = Robinson sinu = Sinusoidal stere = Stereographic tmerc = Transverse Mercator utm = UTM [set zone and south keys in Opts] laea = Lambert Azimuthal Equal Area geos = Geostationary npstere = North-Polar Stereographic spstere = South-Polar Stereographic latmin : minimum latitude for plot latmax : maximum latitude for plot lonmin : minimum longitude for plot lonmax : maximum longitude lat_0 : central latitude lon_0 : central longitude sym : matplotlib symbol symsize : symbol size in pts edge : markeredgecolor cmap : matplotlib color map res : resolution [c,l,i,h] for low/crude, intermediate, high boundinglat : bounding latitude sym : matplotlib symbol for plotting symsize : matplotlib symbol size for plotting names : list of names for lats/lons (if empty, none will be plotted) pltgrd : if True, put on grid lines padlat : padding of latitudes padlon : padding of longitudes gridspace : grid line spacing global : global projection [default is True] oceancolor : 'azure' landcolor : 'bisque' [choose any of the valid color names for matplotlib see https://matplotlib.org/examples/color/named_colors.html details : dictionary with keys: coasts : if True, plot coastlines rivers : if True, plot rivers states : if True, plot states countries : if True, plot countries ocean : if True, plot ocean fancy : if True, plot etopo 20 grid NB: etopo must be installed if Opts keys not set :these are the defaults: 
Opts={'latmin':-90,'latmax':90,'lonmin':0,'lonmax':360,'lat_0':0,'lon_0':0,'proj':'moll','sym':'ro','symsize':5,'edge':'black','pltgrid':1,'res':'c','boundinglat':0.,'padlon':0,'padlat':0,'gridspace':30,'details':all False,'edge':None,'cmap':'jet','fancy':0,'zone':'','south':False,'oceancolor':'azure','landcolor':'bisque'}
def unset(config, section, opt=None):
    """Unsets specified option and/or section in the config.

    Args:
        config (configobj.ConfigObj): config to work on.
        section (str): section name.
        opt (str): optional option name; when omitted the whole section
            is removed.

    Raises:
        ConfigError: if the section (or option) does not exist.
    """
    # Membership tests directly on the mapping instead of the
    # ``in config.keys()`` anti-idiom.
    if section not in config:
        raise ConfigError("section '{}' doesn't exist".format(section))

    if opt is None:
        del config[section]
        return

    if opt not in config[section]:
        raise ConfigError(
            "option '{}.{}' doesn't exist".format(section, opt)
        )
    del config[section][opt]

    # Drop the section entirely once its last option has been removed.
    if not config[section]:
        del config[section]
Unsets specified option and/or section in the config. Args: config (configobj.ConfigObj): config to work on. section (str): section name. opt (str): optional option name.
def _get_params(self, rdata): """ Returns a list of jsonrpc request's method parameters. """ if 'params' in rdata: if isinstance(rdata['params'], dict) \ or isinstance(rdata['params'], list) \ or rdata['params'] is None: return rdata['params'] else: # wrong type raise InvalidRequestError else: return None
Returns a list of jsonrpc request's method parameters.
def reset(self):
    """Clear the internal statistics to their initial state.

    Scalar counters are used when ``self.num`` is unset/None; otherwise
    one counter slot is kept per sub-metric.
    """
    num = getattr(self, 'num', None)
    if num is None:
        self.num_inst = 0
        self.sum_metric = 0.0
    else:
        self.num_inst = [0] * num
        self.sum_metric = [0.0] * num
    self.records = {}
    self.counts = {}
Clear the internal statistics to initial state.
def aux_insertTree(childTree, parentTree):
    """Recursively insert *childTree* (and all of its children) into
    *parentTree*.

    This is a private helper (you shouldn't have to call it).  Nodes whose
    x1/x2 bounds are unset (None) are skipped, but their children are still
    descended into.
    """
    # Identity comparison with None instead of the '!= None' anti-idiom.
    if childTree.x1 is not None and childTree.x2 is not None:
        parentTree.insert(childTree.x1, childTree.x2, childTree.name,
                          childTree.referedObject)

    for c in childTree.children:
        aux_insertTree(c, parentTree)
This is a private recursive function (you shouldn't have to call it directly) that inserts a child tree into a parent tree.
def extract_full_summary_from_signature(operation):
    """Extract the summary from the docstring of the command.

    Everything before the first ``:param`` marker (or the whole docstring
    when there is none) is returned with newlines flattened to spaces.
    """
    doc = inspect.getdoc(operation)
    if not doc:
        return ''
    param_match = re.search(r'\s*(:param)\s+(.+?)\s*:(.*)', doc)
    summary = doc[:param_match.start()] if param_match else doc
    return summary.replace('\n', ' ').replace('\r', '')
Extract the summary from the docstring of the command.
def parse_cookie(name, seed, kaka):
    """Parses and verifies a cookie value.

    :param name: Name of the cookie to look up
    :param seed: A seed used for the HMAC signature
    :param kaka: The cookie header value
    :return: A tuple of (payload, timestamp), or None when the cookie is
        missing or malformed
    :raises SAMLError: if the signature does not verify
    """
    if not kaka:
        return None

    morsel = SimpleCookie(kaka).get(name)
    if not morsel:
        return None

    parts = morsel.value.split("|")
    if len(parts) != 3:
        return None

    # verify the cookie signature
    # NOTE(review): '!=' is not a constant-time comparison; consider
    # hmac.compare_digest if timing attacks are a concern here.
    sig = cookie_signature(seed, parts[0], parts[1])
    if sig != parts[2]:
        raise SAMLError("Invalid cookie signature")

    # The previous 'try/except KeyError' around this return was dead code:
    # indexing a length-checked list can never raise KeyError.
    return parts[0].strip(), parts[1]
Parses and verifies a cookie value :param seed: A seed used for the HMAC signature :param kaka: The cookie :return: A tuple consisting of (payload, timestamp)
def strip(self, text, *args, **kwargs):
    """
    Remove from *text* every URL that extract would have handled.

    Reuses the same extraction logic as extract so that strip stays in
    parity with what extract finds.
    """
    if OEMBED_DEFAULT_PARSE_HTML:
        extracted = self.extract_oembeds_html(text, *args, **kwargs)
    else:
        extracted = self.extract_oembeds(text, *args, **kwargs)

    stripped_urls = [r['original_url'] for r in extracted]

    def handler(match):
        url = match.group()
        return '' if url in stripped_urls else url

    return re.sub(URL_RE, handler, text)
Try to maintain parity with what is extracted by extract since strip will most likely be used in conjunction with extract
def load_simdata(self, idx: int) -> None:
    """Load the next sim sequence value (of the given index).

    Reads from the in-memory array when RAM logging is active; otherwise
    reads the next 8-byte double from the open sequence file.
    """
    if self._sim_ramflag:
        self.sim[0] = self._sim_array[idx]
    elif self._sim_diskflag:
        raw = self._sim_file.read(8)
        # struct.unpack always returns a tuple; previously the 1-tuple
        # itself was assigned instead of the unpacked scalar value.
        self.sim[0] = struct.unpack('d', raw)[0]
Load the next sim sequence value (of the given index).
def natural_keys(text: str) -> List[Union[int, str]]:
    """
    Sort key function for natural ("human") ordering.

    Returns *text* split into alternating string/number parts so that,
    for example, 'something2' sorts before 'something12'; as per
    http://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside

    Example (as per the source above):

    .. code-block:: python

        >>> from cardinal_pythonlib.sort import natural_keys
        >>> alist=[
        ...     "something1",
        ...     "something12",
        ...     "something17",
        ...     "something2",
        ...     "something25",
        ...     "something29"
        ... ]
        >>> alist.sort(key=natural_keys)
        >>> alist
        ['something1', 'something2', 'something12', 'something17', 'something25', 'something29']
    """  # noqa
    chunks = re.split(r'(\d+)', text)
    return [atoi(chunk) for chunk in chunks]
Sort key function. Returns text split into string/number parts, for natural sorting; as per http://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside Example (as per the source above): .. code-block:: python >>> from cardinal_pythonlib.sort import natural_keys >>> alist=[ ... "something1", ... "something12", ... "something17", ... "something2", ... "something25", ... "something29" ... ] >>> alist.sort(key=natural_keys) >>> alist ['something1', 'something2', 'something12', 'something17', 'something25', 'something29']
def Open(self, file_object):
    """Opens the database file object.

    Args:
        file_object (FileIO): file-like object.

    Raises:
        IOError: if the SQLite database signature does not match.
        OSError: if the SQLite database signature does not match.
        ValueError: if the file-like object is invalid.
    """
    if not file_object:
        raise ValueError('Missing file-like object.')

    # pysqlite provides no exclusive read-only mode and cannot read from a
    # file-like object directly, so the data is copied into a temporary
    # file.  The header signature is checked before copying anything.
    file_object.seek(0, os.SEEK_SET)
    chunk = file_object.read(len(self._HEADER_SIGNATURE))
    if chunk != self._HEADER_SIGNATURE:
        file_object.close()
        raise IOError('Unsupported SQLite database signature.')

    with tempfile.NamedTemporaryFile(delete=False) as temp_file:
        self._temp_file_path = temp_file.name
        while chunk:
            temp_file.write(chunk)
            chunk = file_object.read(self._COPY_BUFFER_SIZE)

    self._connection = sqlite3.connect(self._temp_file_path)
    self._connection.text_factory = bytes
    self._cursor = self._connection.cursor()
Opens the database file object. Args: file_object (FileIO): file-like object. Raises: IOError: if the SQLite database signature does not match. OSError: if the SQLite database signature does not match. ValueError: if the file-like object is invalid.
def getMdriztabParameters(files):
    """ Gets entry in MDRIZTAB where task parameters live.
        This method returns a record array mapping the selected
        row.

        The MDRIZTAB table name is read from the primary header of the
        *first* input file only; no consistency check is performed across
        the remaining files.
    """
    # Get the MDRIZTAB table file name from the primary header.
    _fileName = files[0]
    _header = fileutil.getHeader(_fileName)
    if 'MDRIZTAB' in _header:
        _tableName = _header['MDRIZTAB']
    else:
        raise KeyError("No MDRIZTAB found in file " + _fileName)
    _tableName = fileutil.osfn(_tableName)

    # Now get the filters from the primary header.
    _filters = fileutil.getFilterNames(_header)

    # Specifically check to see whether the MDRIZTAB file can be found.
    mtab_path = os.path.split(_tableName)[0]  # protect against no path given
    if mtab_path and not os.path.exists(mtab_path):  # check path first, if given
        raise IOError("Directory for MDRIZTAB '%s' could not be accessed!" % mtab_path)
    if not os.path.exists(_tableName):  # then check for the table itself
        raise IOError("MDRIZTAB table '%s' could not be found!" % _tableName)

    # Open MDRIZTAB file; chain the underlying error instead of hiding it
    # behind a bare ``except`` as before.
    try:
        _mdriztab = fits.open(_tableName, memmap=False)
    except Exception as exc:
        raise IOError("MDRIZTAB table '%s' not valid!" % _tableName) from exc

    # Look for matching rows based on filter name. If no match, pick up
    # rows for the default filter.
    _rows = _getRowsByFilter(_mdriztab, _filters)
    if not _rows:
        _rows = _getRowsByFilter(_mdriztab, 'ANY')

    # Now look for the row that matches the number of images.
    # The logic below assumes that rows for a given filter are arranged
    # in ascending order of the 'numimages' field.
    _nimages = len(files)
    _row = 0
    for i in _rows:
        _numimages = _mdriztab[1].data.field('numimages')[i]
        if _nimages >= _numimages:
            _row = i
    print('- MDRIZTAB: AstroDrizzle parameters read from row %s.' % (_row + 1))

    mpars = _mdriztab[1].data[_row]
    _mdriztab.close()

    interpreted = _interpretMdriztabPars(mpars)
    # 'staticfile' is stripped from the returned parameter set.
    interpreted.pop("staticfile", None)
    return interpreted
Gets entry in MDRIZTAB where task parameters live. This method returns a record array mapping the selected row.
def _markerlib_evaluate(cls, text):
    """
    Evaluate a PEP 426 environment marker using markerlib.
    Return a boolean indicating the marker result in this environment.
    Raise SyntaxError if marker is invalid.
    """
    from pip._vendor import _markerlib
    # markerlib implements Metadata 1.2 (PEP 345) environment markers.
    # Translate the variables to Metadata 2.0 (PEP 426).
    env = _markerlib.default_environment()
    # Iterate over a snapshot of the keys: popping/re-inserting while
    # iterating a live keys view is an error on Python 3.
    for key in list(env):
        new_key = key.replace('.', '_')
        env[new_key] = env.pop(key)
    try:
        result = _markerlib.interpret(text, env)
    except NameError as e:
        raise SyntaxError(e.args[0])
    return result
Evaluate a PEP 426 environment marker using markerlib. Return a boolean indicating the marker result in this environment. Raise SyntaxError if marker is invalid.
def Nusselt_laminar(Tsat, Tw, rhog, rhol, kl, mul, Hvap, L, angle=90.):
    r'''Calculates heat transfer coefficient for laminar film condensation
    of a pure chemical on a flat plate, as presented in [1]_ according to an
    analysis performed by Nusselt in 1916.

    .. math::
        h=0.943\left[\frac{g\sin(\theta)\rho_{liq}(\rho_l-\rho_v)k_{l}^3
        \Delta H_{vap}}{\mu_l(T_{sat}-T_w)L}\right]^{0.25}

    Parameters
    ----------
    Tsat : float
        Saturation temperature at operating pressure [Pa]
    Tw : float
        Wall temperature, [K]
    rhog : float
        Density of the gas [kg/m^3]
    rhol : float
        Density of the liquid [kg/m^3]
    kl : float
        Thermal conductivity of liquid [W/m/K]
    mul : float
        Viscosity of liquid [Pa*s]
    Hvap : float
        Heat of vaporization of the fluid at P, [J/kg]
    L : float
        Length of the plate [m]
    angle : float, optional
        Angle of inclination of the plate [degrees]

    Returns
    -------
    h : float
        Heat transfer coefficient [W/m^2/K]

    Notes
    -----
    Optionally, the plate may be inclined. The leading constant 0.943 is
    really :math:`2\sqrt{2}/3`, and that exact form is used here.

    Examples
    --------
    p. 578 in [1]_, matches exactly.

    >>> Nusselt_laminar(Tsat=370, Tw=350, rhog=7.0, rhol=585., kl=0.091,
    ... mul=158.9E-6, Hvap=776900, L=0.1)
    1482.206403453679

    References
    ----------
    .. [1] Hewitt, G. L. Shires T. Reg Bott G. F., George L. Shires, and
       T. R. Bott. Process Heat Transfer. 1E. Boca Raton: CRC Press, 1994.
    '''
    prefactor = 2.*2.**0.5/3.
    # Gravitational driving force resolved along the (inclined) plate.
    buoyancy = g*sin(angle/180.*pi)*rhol*(rhol - rhog)
    numerator = kl**3*buoyancy*Hvap
    denominator = mul*(Tsat - Tw)*L
    return prefactor*(numerator/denominator)**0.25
r'''Calculates heat transfer coefficient for laminar film condensation of a pure chemical on a flat plate, as presented in [1]_ according to an analysis performed by Nusselt in 1916. .. math:: h=0.943\left[\frac{g\sin(\theta)\rho_{liq}(\rho_l-\rho_v)k_{l}^3 \Delta H_{vap}}{\mu_l(T_{sat}-T_w)L}\right]^{0.25} Parameters ---------- Tsat : float Saturation temperature at operating pressure [Pa] Tw : float Wall temperature, [K] rhog : float Density of the gas [kg/m^3] rhol : float Density of the liquid [kg/m^3] kl : float Thermal conductivity of liquid [W/m/K] mul : float Viscosity of liquid [Pa*s] Hvap : float Heat of vaporization of the fluid at P, [J/kg] L : float Length of the plate [m] angle : float, optional Angle of inclination of the plate [degrees] Returns ------- h : float Heat transfer coefficient [W/m^2/K] Notes ----- Optionally, the plate may be inclined. The constant 0.943 is actually: .. math:: 2\sqrt{2}/3 Examples -------- p. 578 in [1]_, matches exactly. >>> Nusselt_laminar(Tsat=370, Tw=350, rhog=7.0, rhol=585., kl=0.091, ... mul=158.9E-6, Hvap=776900, L=0.1) 1482.206403453679 References ---------- .. [1] Hewitt, G. L. Shires T. Reg Bott G. F., George L. Shires, and T. R. Bott. Process Heat Transfer. 1E. Boca Raton: CRC Press, 1994.
def abort_request(self, request):
    """Cancel *request* because its deadline passed, recording the timeout.

    A request that was already cancelled elsewhere is silently ignored.
    """
    self.timedout = True
    try:
        request.cancel()
    except error.AlreadyCancelled:
        pass
Called to abort request on timeout
def Campbell_Thodos(T, Tb, Tc, Pc, M, dipole=None, hydroxyl=False):
    r'''Calculate saturation liquid density using the Campbell-Thodos [1]_
    CSP method. An old and uncommon estimation method.

    .. math::
        V_s = \frac{RT_c}{P_c}{Z_{RA}}^{[1+(1-T_r)^{2/7}]}

        Z_{RA} = \alpha + \beta(1-T_r)

        \alpha = 0.3883-0.0179s

        s = T_{br} \frac{\ln P_c}{(1-T_{br})}

        \beta = 0.00318s-0.0211+0.625\Lambda^{1.35}

        \Lambda = \frac{P_c^{1/3}} { M^{1/2} T_c^{5/6}}

    For polar compounds:

    .. math::
        \theta = P_c \mu^2/T_c^2

        \alpha = 0.3883 - 0.0179s - 130540\theta^{2.41}

        \beta = 0.00318s - 0.0211 + 0.625\Lambda^{1.35} + 9.74\times
        10^6 \theta^{3.38}

    Polar compounds with hydroxyl groups (water, alcohols):

    .. math::
        \alpha = \left[0.690T_{br} -0.3342 + \frac{5.79\times 10^{-10}}
        {T_{br}^{32.75}}\right] P_c^{0.145}

        \beta = 0.00318s - 0.0211 + 0.625 \Lambda^{1.35} + 5.90\Theta^{0.835}

    Parameters
    ----------
    T : float
        Temperature of fluid [K]
    Tb : float
        Boiling temperature of the fluid [K]
    Tc : float
        Critical temperature of fluid [K]
    Pc : float
        Critical pressure of fluid [Pa]
    M : float
        Molecular weight of the fluid [g/mol]
    dipole : float, optional
        Dipole moment of the fluid [debye]
    hydroxyl : bool, optional
        Switch to use the hydroxyl variant for polar fluids

    Returns
    -------
    Vs : float
        Saturation liquid volume

    Raises
    ------
    ValueError
        If ``hydroxyl`` is set without providing ``dipole`` (the hydroxyl
        correlation requires the polar parameter :math:`\theta`).

    Notes
    -----
    If a dipole is provided, the polar chemical method is used.
    The paper is an excellent read.
    Pc is internally converted to atm.

    Examples
    --------
    Ammonia, from [1]_.

    >>> Campbell_Thodos(T=405.45, Tb=239.82, Tc=405.45, Pc=111.7*101325, M=17.03, dipole=1.47)
    7.347363635885525e-05

    References
    ----------
    .. [1] Campbell, Scott W., and George Thodos. "Prediction of Saturated
       Liquid Densities and Critical Volumes for Polar and Nonpolar
       Substances." Journal of Chemical & Engineering Data 30, no. 1
       (January 1, 1985): 102-11. doi:10.1021/je00039a032.
    '''
    # The hydroxyl branch below uses theta, which is only computed when a
    # dipole is given; previously this raised an opaque NameError.
    if hydroxyl and not dipole:
        raise ValueError('The hydroxyl variant requires a dipole moment')
    Tr = T/Tc
    Tbr = Tb/Tc
    Pc = Pc/101325.  # correlation is parameterized in atm
    s = Tbr * log(Pc)/(1-Tbr)
    Lambda = Pc**(1/3.)/(M**0.5*Tc**(5/6.))
    alpha = 0.3883 - 0.0179*s
    beta = 0.00318*s - 0.0211 + 0.625*Lambda**(1.35)
    if dipole:
        theta = Pc*dipole**2/Tc**2
        alpha -= 130540 * theta**2.41
        beta += 9.74E6 * theta**3.38
    if hydroxyl:
        # Hydroxyl variant fully replaces both alpha and beta.
        beta = 0.00318*s - 0.0211 + 0.625*Lambda**(1.35) + 5.90*theta**0.835
        alpha = (0.69*Tbr - 0.3342 + 5.79E-10/Tbr**32.75)*Pc**0.145
    Zra = alpha + beta*(1-Tr)
    Vs = R*Tc/(Pc*101325)*Zra**(1+(1-Tr)**(2/7.))
    return Vs
r'''Calculate saturation liquid density using the Campbell-Thodos [1]_ CSP method. An old and uncommon estimation method. .. math:: V_s = \frac{RT_c}{P_c}{Z_{RA}}^{[1+(1-T_r)^{2/7}]} Z_{RA} = \alpha + \beta(1-T_r) \alpha = 0.3883-0.0179s s = T_{br} \frac{\ln P_c}{(1-T_{br})} \beta = 0.00318s-0.0211+0.625\Lambda^{1.35} \Lambda = \frac{P_c^{1/3}} { M^{1/2} T_c^{5/6}} For polar compounds: .. math:: \theta = P_c \mu^2/T_c^2 \alpha = 0.3883 - 0.0179s - 130540\theta^{2.41} \beta = 0.00318s - 0.0211 + 0.625\Lambda^{1.35} + 9.74\times 10^6 \theta^{3.38} Polar Combounds with hydroxyl groups (water, alcohols) .. math:: \alpha = \left[0.690T_{br} -0.3342 + \frac{5.79\times 10^{-10}} {T_{br}^{32.75}}\right] P_c^{0.145} \beta = 0.00318s - 0.0211 + 0.625 \Lambda^{1.35} + 5.90\Theta^{0.835} Parameters ---------- T : float Temperature of fluid [K] Tb : float Boiling temperature of the fluid [K] Tc : float Critical temperature of fluid [K] Pc : float Critical pressure of fluid [Pa] M : float Molecular weight of the fluid [g/mol] dipole : float, optional Dipole moment of the fluid [debye] hydroxyl : bool, optional Swith to use the hydroxyl variant for polar fluids Returns ------- Vs : float Saturation liquid volume Notes ----- If a dipole is provided, the polar chemical method is used. The paper is an excellent read. Pc is internally converted to atm. Examples -------- Ammonia, from [1]_. >>> Campbell_Thodos(T=405.45, Tb=239.82, Tc=405.45, Pc=111.7*101325, M=17.03, dipole=1.47) 7.347363635885525e-05 References ---------- .. [1] Campbell, Scott W., and George Thodos. "Prediction of Saturated Liquid Densities and Critical Volumes for Polar and Nonpolar Substances." Journal of Chemical & Engineering Data 30, no. 1 (January 1, 1985): 102-11. doi:10.1021/je00039a032.
def bytes_cast(maybe_str, encoding='utf-8'):
    """
    Converts any string-like input to a bytes-like output, with respect to
    python version.

    Parameters
    ----------
    maybe_str :
        if this is a string-like object, it will be converted to bytes;
        anything else is returned unchanged
    encoding : str, default='utf-8'
        encoding to be used when encoding string
    """
    # Non-text values pass straight through.
    if not isinstance(maybe_str, unicode_):
        return maybe_str
    return maybe_str.encode(encoding)
Converts any string-like input to a bytes-like output, with respect to python version Parameters ---------- maybe_str : if this is a string-like object, it will be converted to bytes encoding : str, default='utf-8' encoding to be used when encoding string
def description(self):
    """Extract a description for a release.

    Prefers the release body (rendered from Markdown to HTML), then the
    repository description, then a fixed fallback string.
    """
    body = self.release.get('body')
    if body:
        return markdown(body)
    repo_description = self.repository.get('description')
    if repo_description:
        return repo_description
    return 'No description provided.'
Extract description from a release.
def add_reply_comment(self, msg_data_id, index, user_comment_id, content):
    """
    Reply to a user comment (WeChat ``comment/reply/add`` endpoint).
    """
    payload = {
        'msg_data_id': msg_data_id,
        'index': index,
        'user_comment_id': user_comment_id,
        'content': content,
    }
    return self._post('comment/reply/add', data=payload)
回复评论
def _cleanup(self, dry_run=False):
    """
    Cleans up (removes) intermediate files.

    You can register intermediate files, which will be deleted
    automatically when the pipeline completes. This function deletes them,
    either absolutely or conditionally. It is run automatically when the
    pipeline succeeds, so you shouldn't need to call it from a pipeline.

    :param bool dry_run: Set to True if you want to build a cleanup script, but
        not actually delete the files.
    """
    if dry_run:
        # Move all unconditional cleans into the conditional list, so the
        # dry run produces a script instead of deleting anything.
        if len(self.cleanup_list) > 0:
            combined_list = self.cleanup_list_conditional + self.cleanup_list
            self.cleanup_list_conditional = combined_list
            self.cleanup_list = []

    # --- Unconditional cleanup: delete every registered glob right away. ---
    if len(self.cleanup_list) > 0:
        print("\nCleaning up flagged intermediate files. . .")
        for expr in self.cleanup_list:
            print("\nRemoving glob: " + expr)
            try:
                # Expand regular expression
                files = glob.glob(expr)
                # Remove entry from cleanup list
                # NOTE(review): this compares the *list* `files` against
                # entries of cleanup_list (which holds glob strings), so it
                # likely never matches -- confirm intent.
                while files in self.cleanup_list:
                    self.cleanup_list.remove(files)
                # and delete the files
                for file in files:
                    if os.path.isfile(file):
                        print("`rm " + file + "`")
                        os.remove(os.path.join(file))
                    elif os.path.isdir(file):
                        print("`rmdir " + file + "`")
                        os.rmdir(os.path.join(file))
            except:
                # Best-effort cleanup: a failure to delete one glob must not
                # abort the rest of the pipeline teardown.
                pass

    # --- Conditional cleanup: only performed when no other pipeline flag
    # files (besides completion and this pipeline's own run flag) remain. ---
    if len(self.cleanup_list_conditional) > 0:
        run_flag = flag_name(RUN_FLAG)
        flag_files = [fn for fn in glob.glob(self.outfolder + flag_name("*"))
                      if COMPLETE_FLAG not in os.path.basename(fn)
                      and not "{}_{}".format(self.name, run_flag) == os.path.basename(fn)]
        if len(flag_files) == 0 and not dry_run:
            print("\nCleaning up conditional list. . .")
            for expr in self.cleanup_list_conditional:
                print("\nRemoving glob: " + expr)
                try:
                    files = glob.glob(expr)
                    while files in self.cleanup_list_conditional:
                        self.cleanup_list_conditional.remove(files)
                    for file in files:
                        if os.path.isfile(file):
                            print("`rm " + file + "`")
                            os.remove(os.path.join(file))
                        elif os.path.isdir(file):
                            print("`rmdir " + file + "`")
                            os.rmdir(os.path.join(file))
                except:
                    # Best-effort, as above.
                    pass
        else:
            print("\nConditional flag found: " + str([os.path.basename(i) for i in flag_files]))
            print("\nThese conditional files were left in place:\n\n- " +
                  "\n- ".join(self.cleanup_list_conditional))
            # Produce a cleanup script instead of deleting, so the user (or a
            # later run) can remove the conditional items manually.
            no_cleanup_script = []
            for cleandir in self.cleanup_list_conditional:
                try:
                    items_to_clean = glob.glob(cleandir)
                    for clean_item in items_to_clean:
                        # Append (mode "a") one rm/rmdir line per item.
                        with open(self.cleanup_file, "a") as clean_script:
                            if os.path.isfile(clean_item):
                                clean_script.write("rm " + clean_item + "\n")
                            elif os.path.isdir(clean_item):
                                clean_script.write("rmdir " + clean_item + "\n")
                except Exception as e:
                    # Track globs we could not script so they can be reported.
                    no_cleanup_script.append(cleandir)
            if no_cleanup_script:
                print('\n\nCould not produce cleanup script for item(s):\n\n- ' +
                      '\n- '.join(no_cleanup_script))
Cleans up (removes) intermediate files. You can register intermediate files, which will be deleted automatically when the pipeline completes. This function deletes them, either absolutely or conditionally. It is run automatically when the pipeline succeeds, so you shouldn't need to call it from a pipeline. :param bool dry_run: Set to True if you want to build a cleanup script, but not actually delete the files.
def _delete_duplicates(l, keep_last): """Delete duplicates from a sequence, keeping the first or last.""" seen=set() result=[] if keep_last: # reverse in & out, then keep first l.reverse() for i in l: try: if i not in seen: result.append(i) seen.add(i) except TypeError: # probably unhashable. Just keep it. result.append(i) if keep_last: result.reverse() return result
Delete duplicates from a sequence, keeping the first or last.
def trigger_event(self, event, *args):
    """Dispatch an event to the proper handler method.

    Events named ``foo`` are routed to a method named ``on_foo`` if one
    exists; otherwise the event is silently dropped and ``None`` returned.
    Subclasses may override this to install custom dispatching rules or a
    single catch-all handler.
    """
    handler_name = 'on_' + event
    if not hasattr(self, handler_name):
        return None
    return getattr(self, handler_name)(*args)
Dispatch an event to the proper handler method. In the most common usage, this method is not overloaded by subclasses, as it performs the routing of events to methods. However, this method can be overriden if special dispatching rules are needed, or if having a single method that catches all events is desired.
def delete_dev_vlans(vlanid, auth, url, devid=None, devip=None):
    """
    function takes devid and vlanid of specific device and 802.1q VLAN tag
    and issues a RESTFUL call to remove the specified VLAN from the target
    device.

    :param vlanid: int or str value of target 802.1q VLAN
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :param devid: str requires devid of the target device
    :param devip: str of ipv4 address of the target device
    :return: HTTP status code (204 on success, 409 if the VLAN does not
        exist or is unsupported), or an error string if the request failed.
    :rtype: requests.models.Response

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.vlanm import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> create_dev_vlan = create_dev_vlan('350', '200', 'test vlan', auth.creds, auth.url)
    """
    if devip is not None:
        devid = get_dev_details(devip, auth, url)['id']
    remove_dev_vlan_url = "/imcrs/vlan/delvlan?devId=" + str(devid) + "&vlanId=" + str(vlanid)
    f_url = url + remove_dev_vlan_url
    # The HTTP call must live inside the try block: previously it was
    # outside, so the RequestException handler could never fire.
    try:
        response = requests.delete(f_url, auth=auth, headers=HEADERS)
        if response.status_code == 204:
            print('Vlan deleted')
            return response.status_code
        elif response.status_code == 409:
            print('Unable to delete VLAN.\nVLAN does not Exist\nDevice does not support VLAN '
                  'function')
            return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " delete_dev_vlans: An Error has occured"
function takes devid and vlanid of specific device and 802.1q VLAN tag and issues a RESTFUL call to remove the specified VLAN from the target device. :param vlanid:int or str value of target 802.1q VLAN :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: HTTP response object from requests library. Status code should be 204 if Successful :param devid: str requires devid of the target device :param devip: str of ipv4 address of the target device :rtype: requests.models.Response >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.vlanm import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> create_dev_vlan = create_dev_vlan('350', '200', 'test vlan', auth.creds, auth.url)
def _merge_retry_options(retry_options, overrides): """Helper for ``construct_settings()``. Takes two retry options, and merges them into a single RetryOption instance. Args: retry_options (RetryOptions): The base RetryOptions. overrides (RetryOptions): The RetryOptions used for overriding ``retry``. Use the values if it is not None. If entire ``overrides`` is None, ignore the base retry and return None. Returns: RetryOptions: The merged options, or None if it will be canceled. """ if overrides is None: return None if overrides.retry_codes is None and overrides.backoff_settings is None: return retry_options codes = retry_options.retry_codes if overrides.retry_codes is not None: codes = overrides.retry_codes backoff_settings = retry_options.backoff_settings if overrides.backoff_settings is not None: backoff_settings = overrides.backoff_settings return gax.RetryOptions( backoff_settings=backoff_settings, retry_codes=codes, )
Helper for ``construct_settings()``. Takes two retry options, and merges them into a single RetryOption instance. Args: retry_options (RetryOptions): The base RetryOptions. overrides (RetryOptions): The RetryOptions used for overriding ``retry``. Use the values if it is not None. If entire ``overrides`` is None, ignore the base retry and return None. Returns: RetryOptions: The merged options, or None if it will be canceled.
def counts_to_dicts(df, column): """ convert (values, counts) as returned by aggregate.aggregate_counts() to dicts makes expand_counts much faster """ # index where there are counts and they aren't null d = df[column].apply(lambda c: pd.notnull(c) and len(c[0]) > 0) return df.loc[d, column].apply(lambda c: {k: v for k, v in zip(*c)})
convert (values, counts) as returned by aggregate.aggregate_counts() to dicts makes expand_counts much faster
def file_exists_on_unit(self, sentry_unit, file_name):
    """Check if a file exists on a unit.

    Returns True when the stat succeeds, False when the file is missing
    (IOError); any other failure aborts the amulet run with FAIL status.
    """
    try:
        sentry_unit.file_stat(file_name)
    except IOError:
        return False
    except Exception as e:
        msg = 'Error checking file {}: {}'.format(file_name, e)
        amulet.raise_status(amulet.FAIL, msg=msg)
    else:
        return True
Check if a file exists on a unit.
def delete_cli(access_token, project_member_id, base_url=OH_BASE_URL,
               file_basename=None, file_id=None, all_files=False):
    """
    Command line function for deleting files. For more information visit
    :func:`delete_file<ohapi.api.delete_file>`.
    """
    response = delete_file(access_token, project_member_id, base_url,
                           file_basename, file_id, all_files)
    if response.status_code == 200:
        print("File deleted successfully")
    else:
        print("Bad response while deleting file.")
Command line function for deleting files. For more information visit :func:`delete_file<ohapi.api.delete_file>`.
def get_objective_requisite_assignment_session_for_objective_bank(self, objective_bank_id, proxy):
    """Gets the ``OsidSession`` associated with the objective sequencing
    service for the given objective bank.

    arg:    objective_bank_id (osid.id.Id): the ``Id`` of the objective bank
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.learning.ObjectiveRequisiteAssignmentSession) - an
            ``ObjectiveRequisiteAssignmentSession``
    raise:  NotFound - ``objective_bank_id`` not found
    raise:  NullArgument - ``objective_bank_id`` or ``proxy`` is ``null``
    raise:  OperationFailed - ``unable to complete request``
    raise:  Unimplemented - ``supports_objective_requisite_assignment()``
            or ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_objective_requisite_assignment()`` and
    ``supports_visible_federation()`` are ``true``.*
    """
    if not self.supports_objective_requisite_assignment():
        raise errors.Unimplemented()
    # TODO(review): also verify that the catalog Id exists and raise
    # errors.NotFound otherwise.
    # pylint: disable=no-member
    return sessions.ObjectiveRequisiteAssignmentSession(
        objective_bank_id, proxy, self._runtime)
Gets the ``OsidSession`` associated with the objective sequencing service for the given objective bank. arg: objective_bank_id (osid.id.Id): the ``Id`` of the objective bank arg: proxy (osid.proxy.Proxy): a proxy return: (osid.learning.ObjectiveRequisiteAssignmentSession) - an ``ObjectiveRequisiteAssignmentSession`` raise: NotFound - ``objective_bank_id`` not found raise: NullArgument - ``objective_bank_id`` or ``proxy`` is ``null`` raise: OperationFailed - ``unable to complete request`` raise: Unimplemented - ``supports_objective_requisite_assignment()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_objective_requisite_assignment()`` and ``supports_visible_federation()`` are ``true``.*
def find(self, *args, **kwargs):
    """Same as :meth:`pymongo.collection.Collection.find`, except it returns
    the right document class: results are wrapped in this collection's
    ``document_class``.
    """
    cursor = Cursor(self, *args, wrap=self.document_class, **kwargs)
    return cursor
Same as :meth:`pymongo.collection.Collection.find`, except it returns the right document class.
async def fetch_transaction(self, request):
    """Fetches a specific transaction from the validator, specified by id.

    Request:
        path:
            - transaction_id: The 128-character id of the txn to be fetched

    Response:
        data: A JSON object with the data from the expanded Transaction
        link: The link to this exact query
    """
    requested_id = request.match_info.get('transaction_id', '')
    self._validate_id(requested_id)

    # Translate a not-found response into an HTTP error.
    traps = [error_handlers.TransactionNotFoundTrap]
    validator_response = await self._query_validator(
        Message.CLIENT_TRANSACTION_GET_REQUEST,
        client_transaction_pb2.ClientTransactionGetResponse,
        client_transaction_pb2.ClientTransactionGetRequest(
            transaction_id=requested_id),
        traps)

    return self._wrap_response(
        request,
        data=self._expand_transaction(validator_response['transaction']),
        metadata=self._get_metadata(request, validator_response))
Fetches a specific transaction from the validator, specified by id. Request: path: - transaction_id: The 128-character id of the txn to be fetched Response: data: A JSON object with the data from the expanded Transaction link: The link to this exact query
def interval_wait(self):
    """Pause so that successive calls are spaced ``self.interval``
    milliseconds apart, measured from the previous call's ``last_tick``.

    Updates ``self.last_tick`` to the current time before returning.
    (Dead ``elif``/commented-out debug code from the original removed.)
    """
    now = time.time()
    # Elapsed time since the last call, in milliseconds.
    elapsed = (now - self.last_tick) * 1000
    if elapsed < self.interval:
        # Sleep off the remainder of the interval (sleep takes seconds).
        time.sleep((self.interval - elapsed) / 1000)
        now = time.time()
    self.last_tick = now
Pauses the correct amount of time according to this acquisition object's interval setting, and the last time this function was called
def deliver_hook(instance, target, payload_override=None):
    """
    Deliver the payload to the target URL.

    By default it serializes to JSON and POSTs. A custom deliverer can be
    configured via the ``HOOK_DELIVERER`` setting.
    """
    payload = payload_override or serialize_hook(instance)
    if hasattr(settings, 'HOOK_DELIVERER'):
        # Hand off to the project-configured deliverer.
        get_module(settings.HOOK_DELIVERER)(target, payload, instance=instance)
    else:
        client.post(
            url=target,
            data=json.dumps(payload, cls=serializers.json.DjangoJSONEncoder),
            headers={'Content-Type': 'application/json'},
        )
    return None
Deliver the payload to the target URL. By default it serializes to JSON and POSTs.
def decrypt_data(self, name, ciphertext, context="", nonce="", batch_input=None,
                 mount_point=DEFAULT_MOUNT_POINT):
    """Decrypt the provided ciphertext using the named key.

    Supported methods:
        POST: /{mount_point}/decrypt/{name}. Produces: 200 application/json

    :param name: Specifies the name of the encryption key to decrypt
        against. This is specified as part of the URL.
    :type name: str | unicode
    :param ciphertext: the ciphertext to decrypt.
    :type ciphertext: str | unicode
    :param context: Specifies the base64 encoded context for key
        derivation. This is required if key derivation is enabled.
    :type context: str | unicode
    :param nonce: Specifies a base64 encoded nonce value used during
        encryption. Must be provided if convergent encryption is enabled
        for this key and the key was generated with Vault 0.6.1. Not
        required for keys created in 0.6.2+.
    :type nonce: str | unicode
    :param batch_input: Specifies a list of items to be decrypted in a
        single batch. When this parameter is set, the 'ciphertext',
        'context' and 'nonce' parameters are ignored. Format:
        [dict(context="b64_context", ciphertext="b64_plaintext"), ...]
    :type batch_input: List[dict]
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: requests.Response
    """
    api_path = '/v1/{mount_point}/decrypt/{name}'.format(
        mount_point=mount_point,
        name=name,
    )
    payload = {
        'ciphertext': ciphertext,
        'context': context,
        'nonce': nonce,
        'batch_input': batch_input,
    }
    return self._adapter.post(url=api_path, json=payload).json()
Decrypt the provided ciphertext using the named key. Supported methods: POST: /{mount_point}/decrypt/{name}. Produces: 200 application/json :param name: Specifies the name of the encryption key to decrypt against. This is specified as part of the URL. :type name: str | unicode :param ciphertext: the ciphertext to decrypt. :type ciphertext: str | unicode :param context: Specifies the base64 encoded context for key derivation. This is required if key derivation is enabled. :type context: str | unicode :param nonce: Specifies a base64 encoded nonce value used during encryption. Must be provided if convergent encryption is enabled for this key and the key was generated with Vault 0.6.1. Not required for keys created in 0.6.2+. :type nonce: str | unicode :param batch_input: Specifies a list of items to be decrypted in a single batch. When this parameter is set, if the parameters 'ciphertext', 'context' and 'nonce' are also set, they will be ignored. Format for the input goes like this: [dict(context="b64_context", ciphertext="b64_plaintext"), ...] :type batch_input: List[dict] :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The JSON response of the request. :rtype: requests.Response
def load_module(self, fullmodname):
    """PEP-302-compliant load_module() method.

    Args:
      fullmodname: The dot-separated full module name, e.g. 'django.core.mail'.

    Returns:
      The module object constructed from the source code.

    Raises:
      SyntaxError if the module's source code is syntactically incorrect.
      ImportError if there was a problem accessing the source code.
      Whatever else can be raised by executing the module's source code.
    """
    submodname, is_package, fullpath, source = self._get_source(fullmodname)
    code = compile(source, fullpath, 'exec')
    module = sys.modules.get(fullmodname)
    try:
        if module is None:
            module = types.ModuleType(fullmodname)
            sys.modules[fullmodname] = module
        module.__loader__ = self
        module.__file__ = fullpath
        module.__name__ = fullmodname
        if is_package:
            module.__path__ = [os.path.dirname(module.__file__)]
        exec(code, module.__dict__)
    except:
        # Roll back a partially-initialized module, then re-raise.
        sys.modules.pop(fullmodname, None)
        raise
    return module
PEP-302-compliant load_module() method. Args: fullmodname: The dot-separated full module name, e.g. 'django.core.mail'. Returns: The module object constructed from the source code. Raises: SyntaxError if the module's source code is syntactically incorrect. ImportError if there was a problem accessing the source code. Whatever else can be raised by executing the module's source code.
def _convert_to_dataset_class(df, dataset_class, expectations_config=None, autoinspect_func=None): """ Convert a (pandas) dataframe to a great_expectations dataset, with (optional) expectations_config """ if expectations_config is not None: # Cast the dataframe into the new class, and manually initialize expectations according to the provided configuration df.__class__ = dataset_class df._initialize_expectations(expectations_config) else: # Instantiate the new Dataset with default expectations try: df = dataset_class(df, autoinspect_func=autoinspect_func) except: raise NotImplementedError( "read_csv requires a Dataset class that can be instantiated from a Pandas DataFrame") return df
Convert a (pandas) dataframe to a great_expectations dataset, with (optional) expectations_config
def get_index_declaration_sql(self, name, index):
    """
    Obtains DBMS specific SQL code portion needed to set an index
    declaration to be used in statements like CREATE TABLE.

    :param name: The name of the index.
    :type name: str

    :param index: The index definition
    :type index: Index

    :return: DBMS specific SQL code portion needed to set an index.
    :rtype: str
    """
    columns = index.get_quoted_columns(self)
    if not columns:
        raise DBALException('Incomplete definition. "columns" required.')
    quoted_name = Identifier(name).get_quoted_name(self)
    return "%sINDEX %s (%s)%s" % (
        self.get_create_index_sql_flags(index),
        quoted_name,
        self.get_index_field_declaration_list_sql(columns),
        self.get_partial_index_sql(index),
    )
Obtains DBMS specific SQL code portion needed to set an index declaration to be used in statements like CREATE TABLE. :param name: The name of the index. :type name: str :param index: The index definition :type index: Index :return: DBMS specific SQL code portion needed to set an index. :rtype: str
def dot(a, b):
    r"""Dot product of two 3d vectors.

    Distributes over ``Mul`` factors on either argument, builds a symbolic
    ``DotProduct`` for two ``Vector3D`` operands, and falls back to
    ``cartesian_dot_product`` for array-like (shaped) operands.

    :raises NotImplementedError: if neither operand form is recognized.
    """
    if isinstance(a, Mul):
        a = a.expand()
        avect = 1
        aivect = -1
        # Pull the first Vector3D factor out of the product; the remaining
        # factors become a scalar coefficient.
        for ai, fact in enumerate(a.args):
            if isinstance(fact, Vector3D):
                avect = fact
                aivect = ai
                break
        acoef = a.args[:aivect] + a.args[aivect+1:]
        acoef = Mul(*acoef)
        return acoef*dot(avect, b)
    if isinstance(b, Mul):
        b = b.expand()
        bvect = 1
        bivect = -1
        for bi, fact in enumerate(b.args):
            if isinstance(fact, Vector3D):
                bvect = fact
                bivect = bi
                break
        bcoef = b.args[:bivect] + b.args[bivect+1:]
        bcoef = Mul(*bcoef)
        return bcoef*dot(a, bvect)
    if isinstance(a, Vector3D) and isinstance(b, Vector3D):
        return DotProduct(a, b)
    if hasattr(a, "shape") and hasattr(b, "shape"):
        return cartesian_dot_product(a, b)
    # Previously the diagnostics were emitted via Python-2 ``print``
    # statements (a syntax error under Python 3); fold the same
    # information into the exception message instead.
    raise NotImplementedError(
        "could not catch these instances in dot: %r (%s), %r (%s)"
        % (a, type(a), b, type(b)))
r"""Dot product of two 3d vectors.
def get_program_duration(program):
    """Return a program's duration in whole seconds.

    :param program: mapping with ``start_time`` and ``end_time`` datetime
        values
    :return: duration in seconds as an int, or ``None`` when either endpoint
        is missing
    """
    program_start = program.get('start_time')
    program_end = program.get('end_time')
    if not program_start or not program_end:
        _LOGGER.error('Could not determine program start and/or end times.')
        _LOGGER.debug('Program data: %s', program)
        return None
    program_duration = program_end - program_start
    # ``timedelta.seconds`` ignores the ``days`` component, which
    # under-reported any duration of 24 hours or more; ``total_seconds``
    # accounts for the full span.
    return int(program_duration.total_seconds())
Get a program's duration in seconds
def render_image(self, rgbobj, dst_x, dst_y):
    """Render the image represented by (rgbobj) at dst_x, dst_y
    in the offscreen pixmap.
    """
    pixmap = self.pixmap
    self.logger.debug("redraw pixmap=%s" % (pixmap,))
    if pixmap is None:
        return
    self.logger.debug("drawing to pixmap")

    # Prepare the RGB array for rendering in this widget's channel order.
    data = rgbobj.get_array(self.rgb_order, dtype=np.uint8)
    height, width = data.shape[:2]

    return self._render_offscreen(pixmap, data, dst_x, dst_y,
                                  width, height)
Render the image represented by (rgbobj) at dst_x, dst_y in the offscreen pixmap.
def listen(self, *, host, port, override=False, forever=False, **kwargs):
    """Listen on TCP/IP socket.

    Parameters
    ----------
    host: str
        Host like '127.0.0.1'
    port:
        Port like 80.
    override: bool
        When True, host/port may be overridden by ``sys.argv[1]``/``[2]``.
    forever: bool
        When True, block in ``run_forever`` until interrupted.
    """
    if override:
        # Positional command-line arguments take precedence when present.
        args = sys.argv
        host = args[1] if len(args) > 1 else host
        port = int(args[2] if len(args) > 2 else port)

    create = self.loop.create_server(
        self.__handler.fork, host, port, **kwargs)
    server = self.loop.run_until_complete(create)

    self.log('info', 'Start listening host="{host}" port="{port}"'.
             format(host=host, port=port))

    if forever:
        try:
            self.loop.run_forever()
        except KeyboardInterrupt:
            pass

    return server
Listen on TCP/IP socket. Parameters ---------- host: str Host like '127.0.0.1' port: Port like 80.
def parser(self): "Load the configuration file." if not hasattr(self, '_conf'): cfile = self.config_file logger.debug('loading config %s' % cfile) if os.path.isfile(cfile): conf = self._create_config_parser() conf.read(os.path.expanduser(cfile)) else: if self.robust: logger.debug('no default config file %s--skipping' % cfile) else: raise IOError('no such file: %s' % cfile) conf = None self._conf = conf return self._conf
Load the configuration file.
def conjugate(self):
    """Quaternion conjugate, encapsulated in a new instance.

    For a unit quaternion, this is the same as the inverse.

    Returns:
        A new Quaternion object clone with its vector part negated
    """
    negated_vector = -self.vector
    return type(self)(scalar=self.scalar, vector=negated_vector)
Quaternion conjugate, encapsulated in a new instance. For a unit quaternion, this is the same as the inverse. Returns: A new Quaternion object clone with its vector part negated
def without_edge(self, edge: Edge) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]':
    """Returns a copy of this bipartite graph with the given edge removed."""
    surviving = ((other, value)
                 for other, value in self._edges.items()
                 if edge != other)
    return BipartiteGraph(surviving)
Returns a copy of this bipartite graph with the given edge removed.
def gpu_iuwt_recomposition(in1, scale_adjust, store_on_gpu, smoothed_array):
    """
    This function calls the a trous algorithm code to recompose the input into a single array. This is the
    implementation of the isotropic undecimated wavelet transform recomposition for a GPU.

    INPUTS:
    in1             (no default):   Array containing wavelet coefficients.
    scale_adjust    (no default):   Indicates the number of omitted array pages.
    store_on_gpu    (no default):   Boolean specifier for whether the decomposition is stored on the gpu or not.
    smoothed_array  (no default):   Seed array of smoothed coefficients; when None the recomposition starts
                                    from zeros.

    OUTPUTS:
    recomposition                   Array containing the reconstructed array.
    """
    wavelet_filter = (1./16)*np.array([1,4,6,4,1], dtype=np.float32)    # Filter-bank for use in the a trous algorithm.
    wavelet_filter = gpuarray.to_gpu_async(wavelet_filter)

    # Determines scale with adjustment and creates a zero array on the GPU to store the output,unless smoothed_array
    # is given.

    max_scale = in1.shape[0] + scale_adjust

    if smoothed_array is None:
        recomposition = gpuarray.zeros([in1.shape[1], in1.shape[2]], np.float32)
    else:
        recomposition = gpuarray.to_gpu(smoothed_array.astype(np.float32))

    # Determines whether the array is already on the GPU or not. If not, moves it to the GPU.
    # NOTE(review): the bare ``except`` assumes any transfer failure means in1 is already a
    # GPU array; other failures are silently swallowed here.
    try:
        gpu_in1 = gpuarray.to_gpu_async(in1.astype(np.float32))
    except:
        gpu_in1 = in1

    # Creates a working array on the GPU.
    gpu_tmp = gpuarray.empty_like(recomposition)

    # Creates and fills an array with the appropriate scale value.
    gpu_scale = gpuarray.zeros([1], np.int32)
    gpu_scale += max_scale-1

    # Fetches the a trous kernels.
    gpu_a_trous_row_kernel, gpu_a_trous_col_kernel = gpu_a_trous()

    # Kernel launch geometry: 32x32 thread blocks tiled over the image plane.
    grid_rows = int(in1.shape[1]//32)
    grid_cols = int(in1.shape[2]//32)

    # The following loops call the a trous algorithm code to recompose the input. The first loop assumes that there are
    # non-zero wavelet coefficients at scales above scale_adjust, while the second loop completes the recomposition
    # on the scales less than scale_adjust.

    for i in range(max_scale-1, scale_adjust-1, -1):
        gpu_a_trous_row_kernel(recomposition, gpu_tmp, wavelet_filter, gpu_scale,
                               block=(32,32,1), grid=(grid_cols, grid_rows))
        gpu_a_trous_col_kernel(gpu_tmp, recomposition, wavelet_filter, gpu_scale,
                               block=(32,32,1), grid=(grid_cols, grid_rows))
        # Smooth, then add back the wavelet coefficients for this scale.
        recomposition = recomposition[:,:] + gpu_in1[i-scale_adjust,:,:]
        gpu_scale -= 1

    if scale_adjust>0:
        # Omitted scales carry no coefficients: smooth only, nothing to add.
        for i in range(scale_adjust-1, -1, -1):
            gpu_a_trous_row_kernel(recomposition, gpu_tmp, wavelet_filter, gpu_scale,
                                   block=(32,32,1), grid=(grid_cols, grid_rows))
            gpu_a_trous_col_kernel(gpu_tmp, recomposition, wavelet_filter, gpu_scale,
                                   block=(32,32,1), grid=(grid_cols, grid_rows))
            gpu_scale -= 1

    # Return values depend on mode.
    if store_on_gpu:
        return recomposition
    else:
        return recomposition.get()
This function calls the a trous algorithm code to recompose the input into a single array. This is the implementation of the isotropic undecimated wavelet transform recomposition for a GPU. INPUTS: in1 (no default): Array containing wavelet coefficients. scale_adjust (no default): Indicates the number of omitted array pages. store_on_gpu (no default): Boolean specifier for whether the decomposition is stored on the gpu or not. OUTPUTS: recomposition Array containing the reconstructed array.
def block_header_to_hex( block_data, prev_hash ):
    """
    Calculate the hex form of a block's header, given its getblock
    information from bitcoind.
    """
    header_info = dict(
        version=block_data['version'],
        prevhash=prev_hash,
        merkle_root=block_data['merkleroot'],
        timestamp=block_data['time'],
        # 'bits' is a hex string in getblock output; the serializer wants an int.
        bits=int(block_data['bits'], 16),
        nonce=block_data['nonce'],
        hash=block_data['hash'],
    )
    return block_header_serialize(header_info)
Calculate the hex form of a block's header, given its getblock information from bitcoind.
def update_json(self, fields=None):
    """Update the current entity and return the decoded JSON response.

    Call :meth:`update_raw`, check the response status code and decode
    the body as JSON.

    :param fields: See :meth:`update`.
    :return: A dict consisting of the decoded JSON in the server's
        response.
    :raises: ``requests.exceptions.HTTPError`` if the response has an
        HTTP 4XX or 5XX status code.
    :raises: ``ValueError`` If the response JSON can not be decoded.
    """
    raw_response = self.update_raw(fields)
    # Surface HTTP errors before attempting to decode the body.
    raw_response.raise_for_status()
    return raw_response.json()
Update the current entity. Call :meth:`update_raw`. Check the response status code, decode JSON and return the decoded JSON as a dict. :param fields: See :meth:`update`. :return: A dict consisting of the decoded JSON in the server's response. :raises: ``requests.exceptions.HTTPError`` if the response has an HTTP 4XX or 5XX status code. :raises: ``ValueError`` If the response JSON can not be decoded.
def agg_multi_func(values, agg_field, agg_funcs, group_by=None):
    """
    Aggregates several functions
    :param values: list of objects (dict)
    :param agg_field: target field to calculate aggregates
    :param agg_funcs: list of aggregate functions
    :param group_by: field used to determine group
    :return: dict {agg_func0: value, agg_func1: agg_value, ...}, nested
        per group when ``group_by`` is given; ``None`` for empty input
    """
    if len(values) == 0:
        return None

    frame = pd.DataFrame(values)

    if not group_by:
        aggregated = frame.agg({agg_field: agg_funcs})
        return {func: aggregated[agg_field][func] for func in agg_funcs}

    grouped = frame.groupby(group_by).agg({agg_field: agg_funcs})
    result = {}
    # itertuples yields (group_key, value_for_func0, value_for_func1, ...)
    for row in grouped.itertuples():
        result[row[0]] = {func: row[pos + 1]
                          for pos, func in enumerate(agg_funcs)}
    return result
Aggregates several functions :param values: list of objects (dict) :param agg_field: target field to calculate aggregates :param agg_funcs: list of aggregate functions :param group_by: field used to determine group :return: dict {agg_func0: value, agg_func1: agg_value, ...}
def get(self, filetype, **kwargs):
    """Returns file name, downloading if remote access configured.

    Parameters
    ----------
    filetype : str
        type of file

    keyword arguments :
        keywords to fully specify path

    Returns
    -------
    str or None
        the resolved local path, or ``None`` when no path template matches

    Notes
    -----
    Path templates are defined in $DIMAGE_DIR/data/dimage_paths.ini
    """
    path = self.full(filetype, **kwargs)
    if path:
        if self._remote:
            self.download_url_to_path(self.url(filetype, **kwargs), path)
        # Previously the path was resolved but never returned, despite the
        # docstring promising a file name; return it so callers get a value.
        return path
    print("There is no file with filetype=%r to access in the tree module loaded" % filetype)
    return None
Returns file name, downloading if remote access configured. Parameters ---------- filetype : str type of file keyword arguments : keywords to fully specify path Notes ----- Path templates are defined in $DIMAGE_DIR/data/dimage_paths.ini
def upstream_url(self, uri):
    """Return the URL to the upstream data source for the given URI,
    based on configuration.

    NOTE(review): the ``uri`` parameter is unused; the path is taken from
    ``self.request.uri`` instead -- confirm this is intentional.
    """
    upstream_base = self.application.options.upstream
    return upstream_base + self.request.uri
Returns the URL to the upstream data source for the given URI based on configuration
def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=''):
    """Generates a help string for the key flags of a given module.

    Args:
      module: A module object or a module name (a string).
      output_lines: A list of strings.  The generated help message lines will
        be appended to this list.
      prefix: A string that is prepended to each generated help line.
    """
    key_flags = self._GetKeyFlagsForModule(module)
    if not key_flags:
        # Nothing to render for modules without key flags.
        return
    self.__RenderModuleFlags(module, key_flags, output_lines, prefix)
Generates a help string for the key flags of a given module. Args: module: A module object or a module name (a string). output_lines: A list of strings. The generated help message lines will be appended to this list. prefix: A string that is prepended to each generated help line.
def date_in_past(self):
    """Is the block's date in the past? (Has it already happened?)

    Returns True only when ``self.date`` is strictly before today's
    local date; a block dated today is not considered past.
    """
    now = datetime.datetime.now()
    return (now.date() > self.date)
Is the block's date in the past? (Has it already happened?)
def get_season(self, season_key, card_type="micro_card"):
    """
    Calling Season API.

    Arg:
       season_key: key of the season
       card_type: optional, default to micro_card. Accepted values are
       micro_card & summary_card

    Return:
       json data
    """
    season_url = "{}season/{}/".format(self.api_path, season_key)
    params = {"card_type": card_type}
    return self.get_response(season_url, params)
Calling Season API. Arg: season_key: key of the season card_type: optional, default to micro_card. Accepted values are micro_card & summary_card Return: json data
def config_examples(dest, user_dir):
    """ Copy the example workflows to a directory.

    \b
    DEST: Path to which the examples should be copied.
    """
    examples_path = Path(lightflow.__file__).parents[1] / 'examples'
    if not examples_path.exists():
        click.echo('The examples source path does not exist')
        return

    dest_path = Path(dest).resolve()
    if not user_dir:
        dest_path = dest_path / 'examples'

    if dest_path.exists():
        # confirm(abort=True) raises click.Abort on a negative answer.
        if not click.confirm('Directory already exists. Overwrite existing files?',
                             default=True, abort=True):
            return
    else:
        dest_path.mkdir()

    for example_file in examples_path.glob('*.py'):
        shutil.copy(str(example_file), str(dest_path / example_file.name))
    click.echo('Copied examples to {}'.format(str(dest_path)))
Copy the example workflows to a directory. \b DEST: Path to which the examples should be copied.
def num_features_model(m:nn.Module)->int:
    "Return the number of output features for `model`."
    # Probe the model with progressively larger square inputs until a
    # forward pass succeeds; give up (re-raising the failure) past 2048px.
    probe_size = 64
    while True:
        try:
            return model_sizes(m, size=(probe_size, probe_size))[-1][1]
        except Exception:
            probe_size *= 2
            if probe_size > 2048:
                raise
Return the number of output features for `model`.
def delete_secret(self, path, mount_point=DEFAULT_MOUNT_POINT):
    """Delete the secret at the specified location.

    Supported methods:
        DELETE: /{mount_point}/{path}. Produces: 204 (empty body)

    :param path: Specifies the path of the secret to delete. This is specified as part of the URL.
    :type path: str | unicode
    :param mount_point: The "path" the secret engine was mounted on.
    :type mount_point: str | unicode
    :return: The response of the delete_secret request.
    :rtype: requests.Response
    """
    api_path = '/v1/{0}/{1}'.format(mount_point, path)
    return self._adapter.delete(url=api_path)
Delete the secret at the specified location. Supported methods: DELETE: /{mount_point}/{path}. Produces: 204 (empty body) :param path: Specifies the path of the secret to delete. This is specified as part of the URL. :type path: str | unicode :param mount_point: The "path" the secret engine was mounted on. :type mount_point: str | unicode :return: The response of the delete_secret request. :rtype: requests.Response
def parse_row(self):
    """
    Parses a row, cell-by-cell, storing a mapping of field names to
    cleaned field values into ``self.parsed_row``.
    """
    fields = self.mapping
    # Only as many cells as there are mapped fields are considered.
    for position, cell in enumerate(self.row[:len(fields)]):
        field_name, field_type = fields[str(position)]
        self.parsed_row[field_name] = self.clean_cell(cell, field_type)
Parses a row, cell-by-cell, returning a dict of field names to the cleaned field values.
def checkedItems(self, column=0, parent=None, recurse=True):
    """
    Returns a list of the checked items for this tree widget based on
    the inputed column and parent information.

    :param      column  | <int>
                parent  | <QtGui.QTreeWidget> || None
                recurse | <bool>

    :return     [<QtGui.QTreeWidgetItem>, ..]
    """
    output = []
    if not parent:
        # No parent: walk the widget's top-level items.
        for i in range(self.topLevelItemCount()):
            item = self.topLevelItem(i)
            if item.checkState(column) == QtCore.Qt.Checked:
                output.append(item)
                # NOTE(review): recursion descends only into items that are
                # themselves fully checked -- confirm that checked
                # descendants of partially-checked parents should be skipped.
                if recurse:
                    output += self.checkedItems(column, item, recurse)
    else:
        # Walk the direct children of the given parent item.
        for c in range(parent.childCount()):
            item = parent.child(c)
            if item.checkState(column) == QtCore.Qt.Checked:
                output.append(item)
                if recurse:
                    output += self.checkedItems(column, item, recurse)
    return output
Returns a list of the checked items for this tree widget based on the inputed column and parent information. :param column | <int> parent | <QtGui.QTreeWidget> || None recurse | <bool> :return [<QtGui.QTreeWidgetItem>, ..]
def read_reg_file(f, foff=0x2D):
    """
    Given a file handle for an old-style Agilent *.REG file, this will
    parse that file into a dictionary of key/value pairs (including any
    tables that are in the *.REG file, which will be parsed into lists
    of lists).

    ``foff`` is the byte offset of the record table; record-type codes
    below are the little-endian ints of the two-byte tags shown in the
    hex comments.
    """
    # convenience function for reading in data
    def rd(st):
        return struct.unpack(st, f.read(struct.calcsize(st)))

    # Byte 0x19 is a version marker; only version 'A' files are supported.
    f.seek(0x19)
    if f.read(1) != b'A':
        # raise TypeError("Version of REG file is too new.")
        return {}

    f.seek(foff)
    nrecs = rd('<I')[0]  # TODO: should be '<H'
    # One 14-byte descriptor per record: (?, type, length, ?, id).
    rec_tab = [rd('<HHIII') for n in range(nrecs)]

    # First pass: collect referenced strings/arrays keyed by record id.
    names = {}
    f.seek(foff + 20 * nrecs + 4)
    for r in rec_tab:
        d = f.read(r[2])
        if r[1] == 1539:  # '0306'
            # this is part of the linked list too, but contains a
            # reference to a table
            cd = struct.unpack('<HIII21sI', d)
            names[cd[5]] = cd[4].decode('iso8859').strip('\x00')
            # except:
            #     pass
        elif r[1] == 32769 or r[1] == 32771:  # b'0180' or b'0380'
            names[r[4]] = d[:-1].decode('iso8859')
        elif r[1] == 32774:  # b'0680'
            # this is a string that is referenced elsewhere (in a table)
            names[r[4]] = d[2:-1].decode('iso8859')
        elif r[1] == 32770:  # b'0280'
            # this is just a flattened numeric array
            names[r[4]] = np.frombuffer(d, dtype=np.uint32, offset=4)

    # Second pass: build the output dict, resolving references via `names`.
    data = {}
    f.seek(foff + 20 * nrecs + 4)
    for r in rec_tab:
        d = f.read(r[2])
        if r[1] == 1538:  # '0206'
            # this is part of a linked list
            if len(d) == 43:
                cd = struct.unpack('<HIII21sd', d)
                data[cd[4].decode('iso8859').strip('\x00')] = cd[5]
            else:
                pass
        elif r[1] == 1537:  # b'0106'
            # name of property
            n = d[14:30].split(b'\x00')[0].decode('iso8859')
            # with value from names
            data[n] = names.get(struct.unpack('<I', d[35:39])[0], '')
        elif r[1] == 1793:  # b'0107'
            # this is a table of values
            nrow = struct.unpack('<H', d[4:6])[0]
            ncol = struct.unpack('<H', d[16:18])[0]
            if ncol != 0:
                # 30-byte column descriptors: (name, ?, size, type, ...).
                cols = [struct.unpack('<16sHHHHHI', d[20 + 30 * i:50 + 30 * i])
                        for i in range(ncol)]
                colnames = [c[0].split(b'\x00')[0].decode('iso8859')
                            for c in cols]
                # TODO: type 2 is not a constant size? 31, 17
                # Map column type code -> struct format character.
                rty2sty = {1: 'H', 3: 'I', 4: 'f', 5: 'H', 7: 'H', 8: 'd',
                           11: 'H', 12: 'H', 13: 'I', 14: 'I', 16: 'H'}
                coltype = '<' + ''.join([rty2sty.get(c[3], str(c[2]) + 's')
                                         for c in cols])
                lencol = struct.calcsize(coltype)
                tab = []
                # Rows are stored at the end of the record; read back-to-front.
                for i in reversed(range(2, nrow + 2)):
                    rawrow = struct.unpack(coltype,
                                           d[-i * lencol: (1 - i) * lencol])
                    row = []
                    for j, p in enumerate(rawrow):
                        if cols[j][3] == 3:
                            # Type-3 cells are references into `names`.
                            row.append(names.get(p, str(p)))
                        else:
                            row.append(p)
                    tab.append(row)
                data[names[r[4]]] = [colnames, tab]
        elif r[1] == 1281 or r[1] == 1283:  # b'0105' or b'0305'
            # A trace: scale factors times referenced x/y arrays.
            fm = '<HHBIIhIdII12shIddQQB8sII12shIddQQB8s'
            m = struct.unpack(fm, d)
            nrecs = m[4]  # number of points in table
            # x_units = names.get(m[8], '')
            x_arr = m[14] * names.get(m[9], np.arange(nrecs - 1))
            y_arr = m[25] * names.get(m[20])
            y_units = names.get(m[19], '')
            if y_units == 'bar':
                y_arr *= 0.1  # convert to MPa
            # TODO: what to call this?
            data['Trace'] = Trace(y_arr, x_arr, name='')
        # elif r[1] == 1025:  # b'0104'
        #     # lots of zeros? maybe one or two numbers?
        #     # only found in REG entries that have long 0280 records
        #     fm = '<HQQQIHHHHIIHB'
        #     m = struct.unpack(fm, d)
        #     print(m)
        #     #print(r[1], len(d), binascii.hexlify(d))
        #     pass
        # elif r[1] == 512:  # b'0002'
        #     # either points to two null pointers or two other pointers
        #     # (indicates start of linked list?)
        #     print(r[1], len(d), binascii.hexlify(d))
        # elif r[1] == 769 or r[1] == 772:  # b'0103' or b'0403'
        #     # points to 2nd, 3rd & 4th records (two 0002 records and a 0180)
        #     b = binascii.hexlify
        #     print(b(d[10:14]), b(d[14:18]), b(d[18:22]))
    return data
Given a file handle for an old-style Agilent *.REG file, this will parse that file into a dictonary of key/value pairs (including any tables that are in the *.REG file, which will be parsed into lists of lists).
def load_rules(self, force_reload=False, overwrite=True): """Load rules from policy file or cache.""" # double-checked locking if self.load_once and self._policy_loaded: return with self._load_lock: if self.load_once and self._policy_loaded: return reloaded, data = _cache.read_file( self.policy_file, force_reload=force_reload) self._policy_loaded = True if reloaded or not self.rules: rules = Rules.load_json(data, self.default_rule, self.raise_error) self._set_rules(rules, overwrite=overwrite) LOG.debug('Reload policy file: %s', self.policy_file)
Load rules from policy file or cache.
def parsed_whois(self, query, **kwargs):
    """Pass in a domain name; returns parsed WHOIS results."""
    endpoint = '/v1/{0}/whois/parsed'.format(query)
    return self._results('parsed-whois', endpoint,
                         cls=ParsedWhois, **kwargs)
Pass in a domain name
def rebind_invalidated_ars(portal):
    """Rebind the ARs automatically generated because of the retraction of
    their parent to the new field 'Invalidated'. The field used until now
    'ParentAnalysisRequest' will be used for partitioning
    """
    logger.info("Rebinding retracted/invalidated ARs")

    # Walk through the Analysis Requests that were generated because of an
    # invalidation, get the source AR and rebind the fields
    relationship = "AnalysisRequestChildAnalysisRequest"
    ref_catalog = api.get_tool(REFERENCE_CATALOG)
    retests = ref_catalog(relationship=relationship)
    total = len(retests)
    to_remove = []
    num = 0
    for num, brain in enumerate(retests, start=1):
        relation = brain.getObject()
        if not relation:
            continue
        retest = relation.getTargetObject()
        retest.setInvalidated(relation.getSourceObject())
        # Clear ParentAnalysisRequest: from now on that field stores the
        # Primary-Partitions relationship instead.
        retest.setParentAnalysisRequest(None)
        # Schedule the reference relationship itself for removal.
        to_remove.append((relation.aq_parent, relation.id))
        if num % 100 == 0:
            logger.info("Rebinding invalidated ARs: {0}/{1}"
                        .format(num, total))

    # Remove relationships
    for folder, rel_id in to_remove:
        folder.manage_delObjects([rel_id])

    logger.info("Rebound {} invalidated ARs".format(num))
Rebind the ARs automatically generated because of the retraction of their parent to the new field 'Invalidated'. The field used until now 'ParentAnalysisRequest' will be used for partitioning
def find_duplicate_keys(data):
    """
    Find duplicate keys in a layer of ordered pairs. Intended as the
    object_pairs_hook callable for json.load or loads.

    :param data: ordered pairs
    :return: Dictionary with no duplicate keys
    :raises ValueError if duplicate keys are found
    """
    result = {}
    for pair in data:
        key, value = pair
        if key not in result:
            result[key] = value
        else:
            raise ValueError("Duplicate key: {}".format(key))
    return result
Find duplicate keys in a layer of ordered pairs. Intended as the object_pairs_hook callable for json.load or loads. :param data: ordered pairs :return: Dictionary with no duplicate keys :raises ValueError if duplicate keys are found