def add_config_path(path):
    """Select config parser by file extension and add path into parser."""
    if not os.path.isfile(path):
        warnings.warn("Config file does not exist: {path}".format(path=path))
        return False

    # select parser by file extension
    _base, ext = os.path.splitext(path)
    if ext and ext[1:] in PARSERS:
        parser = ext[1:]
    else:
        parser = PARSER
    parser_class = PARSERS[parser]

    _check_parser(parser_class, parser)
    if parser != PARSER:
        msg = (
            "Config for {added} parser added, but used {used} parser. "
            "Set up right parser via env var: "
            "export LUIGI_CONFIG_PARSER={added}"
        )
        warnings.warn(msg.format(added=parser, used=PARSER))

    # add config path to parser
    parser_class.add_config_path(path)
    return True
def generate_look_up_table():
    """Generate look up table.

    :return: List
    """
    poly = 0xA001
    table = []
    for index in range(256):
        data = index << 1
        crc = 0
        for _ in range(8, 0, -1):
            data >>= 1
            if (data ^ crc) & 0x0001:
                crc = (crc >> 1) ^ poly
            else:
                crc >>= 1
        table.append(crc)
    return table
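The polynomial 0xA001 is the reflected CRC-16 polynomial used by Modbus and similar protocols. A minimal usage sketch; the 0xFFFF seed and the check value follow the Modbus convention, which the source itself does not state:

CRC16_TABLE = generate_look_up_table()

def crc16(data):
    # Table-driven reflected CRC-16: fold in one byte at a time.
    crc = 0xFFFF  # assumed seed (Modbus convention)
    for byte in data:
        crc = (crc >> 8) ^ CRC16_TABLE[(crc ^ byte) & 0xFF]
    return crc

# b'123456789' is the conventional CRC check string; CRC-16/MODBUS gives 0x4B37.
assert crc16(b'123456789') == 0x4B37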
def _get_envs_from_ref_paths(self, refs):
    '''
    Return the names of remote refs (stripped of the remote name) and tags
    which map to the branches and tags.
    '''
    def _check_ref(env_set, rname):
        '''
        Add the appropriate saltenv(s) to the set
        '''
        if rname in self.saltenv_revmap:
            env_set.update(self.saltenv_revmap[rname])
        else:
            if rname == self.base:
                env_set.add('base')
            elif not self.disable_saltenv_mapping:
                env_set.add(rname)

    use_branches = 'branch' in self.ref_types
    use_tags = 'tag' in self.ref_types

    ret = set()
    if salt.utils.stringutils.is_hex(self.base):
        # gitfs_base or per-saltenv 'base' may point to a commit ID, which
        # would not show up in the refs. Make sure we include it.
        ret.add('base')
    for ref in salt.utils.data.decode(refs):
        if ref.startswith('refs/'):
            ref = ref[5:]
        rtype, rname = ref.split('/', 1)
        if rtype == 'remotes' and use_branches:
            parted = rname.partition('/')
            rname = parted[2] if parted[2] else parted[0]
            _check_ref(ret, rname)
        elif rtype == 'tags' and use_tags:
            _check_ref(ret, rname)
    return ret
def make_route_refresh_request(self, peer_ip, *route_families):
    """Request route-refresh for peer with `peer_ip` for given
    `route_families`.

    Will make route-refresh request for a given `route_family` only if such
    capability is supported and if peer is in ESTABLISHED state. Else, such
    requests are ignored. Raises appropriate error in other cases. If
    `peer_ip` is equal to 'all' makes refresh request to all valid peers.
    """
    LOG.debug('Route refresh requested for peer %s and route families %s',
              peer_ip, route_families)
    if not SUPPORTED_GLOBAL_RF.intersection(route_families):
        raise ValueError('Given route family(s) %s is not supported.' %
                         route_families)

    peer_list = []
    # If route-refresh is requested for all peers.
    if peer_ip == 'all':
        peer_list.extend(self.get_peers_in_established())
    else:
        given_peer = self._peers.get(peer_ip)
        if not given_peer:
            raise ValueError('Invalid/unrecognized peer %s' % peer_ip)
        if not given_peer.in_established:
            raise ValueError('Peer currently does not have an established'
                             ' session.')
        peer_list.append(given_peer)

    # Make route refresh request to valid peers.
    for peer in peer_list:
        peer.request_route_refresh(*route_families)
    return True
def _parse_coords(coord_lines):
    """
    Helper method to parse coordinates.
    """
    paras = {}
    var_pattern = re.compile(r"^([A-Za-z]+\S*)[\s=,]+([\d\-\.]+)$")
    for l in coord_lines:
        m = var_pattern.match(l.strip())
        if m:
            paras[m.group(1).strip("=")] = float(m.group(2))

    species = []
    coords = []
    # Stores whether a Zmatrix format is detected. Once a zmatrix format
    # is detected, it is assumed for the remaining of the parsing.
    zmode = False
    for l in coord_lines:
        l = l.strip()
        if not l:
            break
        if (not zmode) and GaussianInput._xyz_patt.match(l):
            m = GaussianInput._xyz_patt.match(l)
            species.append(m.group(1))
            toks = re.split(r"[,\s]+", l.strip())
            if len(toks) > 4:
                coords.append([float(i) for i in toks[2:5]])
            else:
                coords.append([float(i) for i in toks[1:4]])
        elif GaussianInput._zmat_patt.match(l):
            zmode = True
            toks = re.split(r"[,\s]+", l.strip())
            species.append(toks[0])
            toks.pop(0)
            if len(toks) == 0:
                coords.append(np.array([0, 0, 0]))
            else:
                nn = []
                parameters = []
                while len(toks) > 1:
                    ind = toks.pop(0)
                    data = toks.pop(0)
                    try:
                        nn.append(int(ind))
                    except ValueError:
                        nn.append(species.index(ind) + 1)
                    try:
                        val = float(data)
                        parameters.append(val)
                    except ValueError:
                        if data.startswith("-"):
                            parameters.append(-paras[data[1:]])
                        else:
                            parameters.append(paras[data])
                if len(nn) == 1:
                    coords.append(np.array([0, 0, parameters[0]]))
                elif len(nn) == 2:
                    coords1 = coords[nn[0] - 1]
                    coords2 = coords[nn[1] - 1]
                    bl = parameters[0]
                    angle = parameters[1]
                    axis = [0, 1, 0]
                    op = SymmOp.from_origin_axis_angle(coords1, axis,
                                                       angle, False)
                    coord = op.operate(coords2)
                    vec = coord - coords1
                    coord = vec * bl / np.linalg.norm(vec) + coords1
                    coords.append(coord)
                elif len(nn) == 3:
                    coords1 = coords[nn[0] - 1]
                    coords2 = coords[nn[1] - 1]
                    coords3 = coords[nn[2] - 1]
                    bl = parameters[0]
                    angle = parameters[1]
                    dih = parameters[2]
                    v1 = coords3 - coords2
                    v2 = coords1 - coords2
                    axis = np.cross(v1, v2)
                    op = SymmOp.from_origin_axis_angle(
                        coords1, axis, angle, False)
                    coord = op.operate(coords2)
                    v1 = coord - coords1
                    v2 = coords1 - coords2
                    v3 = np.cross(v1, v2)
                    adj = get_angle(v3, axis)
                    axis = coords1 - coords2
                    op = SymmOp.from_origin_axis_angle(
                        coords1, axis, dih - adj, False)
                    coord = op.operate(coord)
                    vec = coord - coords1
                    coord = vec * bl / np.linalg.norm(vec) + coords1
                    coords.append(coord)

    def _parse_species(sp_str):
        """
        The species specification can take many forms. E.g.,
        simple integers representing atomic numbers ("8"),
        actual species string ("C") or a labelled species ("C1").
        Sometimes, the species string is also not properly capitalized,
        e.g, ("c1"). This method should take care of these known formats.
        """
        try:
            return int(sp_str)
        except ValueError:
            sp = re.sub(r"\d", "", sp_str)
            return sp.capitalize()

    species = [_parse_species(sp) for sp in species]

    return Molecule(species, coords)
def get_popular_aliases(self, *args, **kwargs):
    """
    Return the aggregated results of :meth:`Timesheet.get_popular_aliases`.
    """
    aliases_count_total = defaultdict(int)
    aliases_counts = self._timesheets_callback(
        'get_popular_aliases')(*args, **kwargs)

    for aliases_count in aliases_counts:
        for alias, count in aliases_count:
            aliases_count_total[alias] += count

    sorted_aliases_count_total = sorted(
        aliases_count_total.items(), key=lambda item: item[1], reverse=True
    )

    return sorted_aliases_count_total
def process(in_path, out_file, n_jobs, framesync):
    """Computes the features for the selected dataset or file."""
    if os.path.isfile(in_path):
        # Single file mode
        # Get (if they exist) or compute features
        file_struct = msaf.io.FileStruct(in_path)
        file_struct.features_file = out_file
        compute_all_features(file_struct, framesync)
    else:
        # Collection mode
        file_structs = msaf.io.get_dataset_files(in_path)

        # Call in parallel
        return Parallel(n_jobs=n_jobs)(delayed(compute_all_features)(
            file_struct, framesync) for file_struct in file_structs)
def _update_project(self, request, data):
    """Update project info"""
    domain_id = identity.get_domain_id_for_operation(request)
    try:
        project_id = data['project_id']

        # add extra information
        if keystone.VERSIONS.active >= 3:
            EXTRA_INFO = getattr(settings, 'PROJECT_TABLE_EXTRA_INFO', {})
            kwargs = dict((key, data.get(key)) for key in EXTRA_INFO)
        else:
            kwargs = {}

        return api.keystone.tenant_update(
            request,
            project_id,
            name=data['name'],
            description=data['description'],
            enabled=data['enabled'],
            domain=domain_id,
            **kwargs)
    except exceptions.Conflict:
        msg = _('Project name "%s" is already used.') % data['name']
        self.failure_message = msg
        return
    except Exception as e:
        LOG.debug('Project update failed: %s', e)
        exceptions.handle(request, ignore=True)
        return
def readtxt(filepath):
    """Read file as is."""
    with open(filepath, 'rt') as f:
        lines = f.readlines()
    return ''.join(lines)
def run_id(self):
    '''Run name without whitespace: the class name converted from
    CamelCase to snake_case.
    '''
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', self.__class__.__name__)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def dicom_to_nifti(dicom_input, output_file=None):
    """
    This is the main dicom to nifti conversion function for Siemens images
    (the code asserts a Siemens input). It will then determine the type of
    images and do the correct conversion.

    :param output_file: filepath to the output nifti
    :param dicom_input: directory with dicom files for 1 scan
    """
    assert common.is_siemens(dicom_input)

    if _is_4d(dicom_input):
        logger.info('Found sequence type: MOSAIC 4D')
        return _mosaic_4d_to_nifti(dicom_input, output_file)

    grouped_dicoms = _classic_get_grouped_dicoms(dicom_input)
    if _is_classic_4d(grouped_dicoms):
        logger.info('Found sequence type: CLASSIC 4D')
        return _classic_4d_to_nifti(grouped_dicoms, output_file)

    logger.info('Assuming anatomical data')
    return convert_generic.dicom_to_nifti(dicom_input, output_file)
def calculate_mrcas(self, c1: ClassId, c2: ClassId) -> Set[ClassId]:
    """
    Calculate the MRCA for a class pair
    """
    G = self.G
    # reflexive ancestors
    ancs1 = self._ancestors(c1) | {c1}
    ancs2 = self._ancestors(c2) | {c2}
    common_ancestors = ancs1 & ancs2
    redundant = set()
    for a in common_ancestors:
        redundant = redundant | nx.ancestors(G, a)
    return common_ancestors - redundant
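A self-contained sketch of the same idea using plain networkx; it assumes edges point from superclass to subclass, so that nx.ancestors(G, n) yields n's superclasses, and it stands in for the class's own _ancestors helper:

import networkx as nx

def mrcas(G, c1, c2):
    ancs1 = nx.ancestors(G, c1) | {c1}  # reflexive ancestors
    ancs2 = nx.ancestors(G, c2) | {c2}
    common = ancs1 & ancs2
    # drop common ancestors that are themselves ancestors of other common ones
    redundant = set().union(*(nx.ancestors(G, a) for a in common))
    return common - redundant

G = nx.DiGraph([("animal", "mammal"), ("mammal", "dog"), ("mammal", "cat")])
assert mrcas(G, "dog", "cat") == {"mammal"}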
def update_to_v24(self):
    """Convert older tags into an ID3v2.4 tag.

    This updates old ID3v2 frames to ID3v2.4 ones (e.g. TYER to
    TDRC). If you intend to save tags, you must call this function
    at some point; it is called by default when loading the tag.
    """
    self.__update_common()

    # TDAT, TYER, and TIME have been turned into TDRC.
    try:
        date = text_type(self.get("TYER", ""))
        if date.strip(u"\x00"):
            self.pop("TYER")
            dat = text_type(self.get("TDAT", ""))
            if dat.strip("\x00"):
                self.pop("TDAT")
                date = "%s-%s-%s" % (date, dat[2:], dat[:2])
                time = text_type(self.get("TIME", ""))
                if time.strip("\x00"):
                    self.pop("TIME")
                    date += "T%s:%s:00" % (time[:2], time[2:])

        if "TDRC" not in self:
            self.add(TDRC(encoding=0, text=date))
    except UnicodeDecodeError:
        # Old ID3 tags have *lots* of Unicode problems, so if TYER
        # is bad, just chuck the frames.
        pass

    # TORY can be the first part of a TDOR.
    if "TORY" in self:
        f = self.pop("TORY")
        if "TDOR" not in self:
            try:
                self.add(TDOR(encoding=0, text=str(f)))
            except UnicodeDecodeError:
                pass

    # IPLS is now TIPL.
    if "IPLS" in self:
        f = self.pop("IPLS")
        if "TIPL" not in self:
            self.add(TIPL(encoding=f.encoding, people=f.people))

    # These can't be trivially translated to any ID3v2.4 tags, or
    # should have been removed already.
    for key in ["RVAD", "EQUA", "TRDA", "TSIZ", "TDAT", "TIME"]:
        if key in self:
            del(self[key])

    # Recurse into chapters
    for f in self.getall("CHAP"):
        f.sub_frames.update_to_v24()
    for f in self.getall("CTOC"):
        f.sub_frames.update_to_v24()
def add(self, *constraints: Tuple[Bool]) -> None:
    """Adds the constraints to this solver.

    :param constraints: constraints to add
    """
    raw_constraints = [
        c.raw for c in cast(Tuple[Bool], constraints)
    ]  # type: List[z3.BoolRef]
    self.constraints.extend(raw_constraints)
def _cast_inplace(terms, acceptable_dtypes, dtype):
    """Cast an expression inplace.

    Parameters
    ----------
    terms : Op
        The expression that should be cast.
    acceptable_dtypes : list of acceptable numpy.dtype
        Will not cast if term's dtype in this list.

        .. versionadded:: 0.19.0

    dtype : str or numpy.dtype
        The dtype to cast to.
    """
    dt = np.dtype(dtype)
    for term in terms:
        if term.type in acceptable_dtypes:
            continue

        try:
            new_value = term.value.astype(dt)
        except AttributeError:
            new_value = dt.type(term.value)
        term.update(new_value)
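The astype/AttributeError dance separates array-likes from plain Python scalars. A standalone illustration with numpy only (the helper name is made up):

import numpy as np

def cast_value(value, dt=np.dtype('float64')):
    try:
        return value.astype(dt)  # array-likes expose .astype
    except AttributeError:
        return dt.type(value)    # scalars: use the dtype's constructor

print(cast_value(np.array([1, 2, 3])))      # [1. 2. 3.]
print(cast_value(7), type(cast_value(7)))   # 7.0 <class 'numpy.float64'>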
def setup_stream_handlers(conf):
    """Setup logging stream handlers according to the options."""
    class StdoutFilter(logging.Filter):
        def filter(self, record):
            return record.levelno in (logging.DEBUG, logging.INFO)

    log.handlers = []
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setLevel(logging.WARNING)
    stdout_handler.addFilter(StdoutFilter())
    if conf.debug:
        stdout_handler.setLevel(logging.DEBUG)
    elif conf.verbose:
        stdout_handler.setLevel(logging.INFO)
    else:
        stdout_handler.setLevel(logging.WARNING)
    log.addHandler(stdout_handler)

    stderr_handler = logging.StreamHandler(sys.stderr)
    msg_format = "%(levelname)s: %(message)s"
    stderr_handler.setFormatter(logging.Formatter(fmt=msg_format))
    stderr_handler.setLevel(logging.WARNING)
    log.addHandler(stderr_handler)
def create_network(name, router_ext=None, admin_state_up=True,
                   network_type=None, physical_network=None,
                   segmentation_id=None, shared=None, profile=None):
    '''
    Creates a new network

    CLI Example:

    .. code-block:: bash

        salt '*' neutron.create_network network-name
        salt '*' neutron.create_network network-name profile=openstack1

    :param name: Name of network to create
    :param admin_state_up: should the state of the network be up?
            default: True (Optional)
    :param router_ext: True if the external network should be created
            (Optional)
    :param network_type: the type of network the provider is, such as GRE,
            VXLAN, VLAN, FLAT, or LOCAL (Optional)
    :param physical_network: the name of the physical network as neutron
            knows it (Optional)
    :param segmentation_id: the vlan id or GRE id (Optional)
    :param shared: is the network shared or not (Optional)
    :param profile: Profile to build on (Optional)
    :return: Created network information
    '''
    conn = _auth(profile)
    return conn.create_network(name, admin_state_up, router_ext,
                               network_type, physical_network,
                               segmentation_id, shared)
def transform(self, data):
    """
    :param data: DataFrame with column to encode
    :return: encoded Series
    """
    with timer('transform %s' % self.name, logging.DEBUG):
        transformed = super(Token, self).transform(self.tokenize(data))
        return transformed.reshape((len(data), self.sequence_length))
def p_review_comment_1(self, p):
    """review_comment : REVIEW_COMMENT TEXT"""
    try:
        if six.PY2:
            value = p[2].decode(encoding='utf-8')
        else:
            value = p[2]
        self.builder.add_review_comment(self.document, value)
    except CardinalityError:
        self.more_than_one_error('ReviewComment', p.lineno(1))
    except OrderError:
        self.order_error('ReviewComment', 'Reviewer', p.lineno(1))
def sdk_version(self):
    '''sdk version of connected device.'''
    if self.__sdk == 0:
        try:
            self.__sdk = int(
                self.adb.cmd("shell", "getprop", "ro.build.version.sdk")
                .communicate()[0].decode("utf-8").strip()
            )
        except Exception:
            pass
    return self.__sdk
def appendOps(self, ops, append_to=None):
    """Append op(s) to the transaction builder

    :param list ops: One or a list of operations
    """
    if isinstance(ops, list):
        self.ops.extend(ops)
    else:
        self.ops.append(ops)
    parent = self.parent
    if parent:
        parent._set_require_reconstruction()
def create_sample_file(ip, op, num_lines):
    """Make a short version of an RDF file."""
    with open(ip, "rb") as f:
        with open(op, "wb") as fout:
            for _ in range(num_lines):
                fout.write(f.readline())
def child_folder(self, fragment):
    """
    Returns a folder object by combining the fragment to this folder's path
    """
    return Folder(os.path.join(self.path, Folder(fragment).path))
def init_app(self, app):
    """Register the extension with the application.

    Args:
        app (flask.Flask): The application to register with.
    """
    app.url_rule_class = partial(NavigationRule, copilot=self)
    app.context_processor(self.inject_context)
def push_json_file(json_file, url, dry_run=False, batch_size=100,
                   anonymize_fields=[], remove_fields=[], rename_fields=[]):
    """
    Read the json file provided and POST in batches no bigger than the
    batch_size specified to the specified url.
    """
    batch = []
    json_data = json.loads(json_file.read())
    if isinstance(json_data, list):
        for item in json_data:
            # anonymize fields
            for field_name in anonymize_fields:
                if field_name in item:
                    item[field_name] = md5sum(item[field_name])
            # remove fields
            for field_name in remove_fields:
                if field_name in item:
                    del item[field_name]
            # rename fields
            for (field_name, new_field_name) in rename_fields:
                if field_name in item:
                    item[new_field_name] = item[field_name]
                    del item[field_name]
            batch.append(item)
            if len(batch) >= batch_size:
                post(batch, url, dry_run=dry_run)
                batch = []
        if len(batch) > 0:
            post(batch, url, dry_run=dry_run)
    else:
        post(json_data, url, dry_run=dry_run)
def get_phi_subvariables(self, var):
    """
    Get sub-variables that phi variable `var` represents.

    :param SimVariable var: The variable instance.
    :return: A set of sub-variables, or an empty set if `var` is not a phi
             variable.
    :rtype: set
    """
    if not self.is_phi_variable(var):
        return set()
    return self._phi_variables[var]
def _rotate(coordinates, theta, around):
    """Rotate a set of coordinates around an arbitrary vector.

    Parameters
    ----------
    coordinates : np.ndarray, shape=(n,3), dtype=float
        The coordinates being rotated.
    theta : float
        The angle by which to rotate the coordinates, in radians.
    around : np.ndarray, shape=(3,), dtype=float
        The vector about which to rotate the coordinates.
    """
    around = np.asarray(around).reshape(3)
    if np.array_equal(around, np.zeros(3)):
        raise ValueError('Cannot rotate around a zero vector')
    return Rotation(theta, around).apply_to(coordinates)
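The Rotation helper above is internal to the library. An equivalent standalone sketch with SciPy's rotation API, assuming both rotate about an axis through the origin:

import numpy as np
from scipy.spatial.transform import Rotation as R

def rotate(coordinates, theta, around):
    around = np.asarray(around, dtype=float).reshape(3)
    norm = np.linalg.norm(around)
    if norm == 0:
        raise ValueError('Cannot rotate around a zero vector')
    # A rotation vector points along the axis; its length is the angle.
    return R.from_rotvec(theta * around / norm).apply(coordinates)

print(rotate(np.array([[1.0, 0.0, 0.0]]), np.pi / 2, [0, 0, 1]))
# ~[[0. 1. 0.]]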
def GetWindowsEventMessage(self, log_source, message_identifier):
    """Retrieves the message string for a specific Windows Event Log source.

    Args:
        log_source (str): Event Log source, such as "Application Error".
        message_identifier (int): message identifier.

    Returns:
        str: message string or None if not available.
    """
    database_reader = self._GetWinevtRcDatabaseReader()
    if not database_reader:
        return None

    if self._lcid != self.DEFAULT_LCID:
        message_string = database_reader.GetMessage(
            log_source, self.lcid, message_identifier)
        if message_string:
            return message_string

    return database_reader.GetMessage(
        log_source, self.DEFAULT_LCID, message_identifier)
def run_vardict(align_bams, items, ref_file, assoc_files,
                region=None, out_file=None):
    """Run VarDict variant calling."""
    items = shared.add_highdepth_genome_exclusion(items)
    if vcfutils.is_paired_analysis(align_bams, items):
        call_file = _run_vardict_paired(align_bams, items, ref_file,
                                        assoc_files, region, out_file)
    else:
        vcfutils.check_paired_problems(items)
        call_file = _run_vardict_caller(align_bams, items, ref_file,
                                        assoc_files, region, out_file)
    return call_file
def process_request(self, request):
    """
    Checks whether the page is already cached and returns the cached
    version if available.
    """
    celery_task = getattr(request, '_cache_update_cache', False)
    if request.method not in ('GET', 'HEAD'):
        request._cache_update_cache = False
        return None  # Don't bother checking the cache.

    request._cache_update_cache = True

    if self.should_bypass_cache(request):
        return None

    response, expired = self.get_cache(request)
    if response is None:
        return None  # No cache information available, need to rebuild.

    # TODO: this logic should be in the task, not here, but it needs the
    # per_request_middleware
    if celery_task:
        if self.should_regenerate(response):
            return None
    elif expired:
        self.send_task(request, response)

    # don't update right now since we're serving from cache
    request._cache_update_cache = False
    return response
def probable_languages(
        self,
        text: str,
        max_languages: int = 3) -> Tuple[str, ...]:
    """List of most probable programming languages,
    the list is ordered from the most probable to the least probable one.

    :param text: source code.
    :param max_languages: maximum number of listed languages.
    :return: languages list
    """
    scores = self.scores(text)

    # Sorted from the most probable language to the least probable
    sorted_scores = sorted(scores.items(), key=itemgetter(1), reverse=True)
    languages, probabilities = list(zip(*sorted_scores))

    # Find the most distant consecutive languages.
    # A logarithmic scale is used here because the probabilities
    # are most of the time really close to zero
    rescaled_probabilities = [log(proba) for proba in probabilities]
    distances = [
        rescaled_probabilities[pos] - rescaled_probabilities[pos + 1]
        for pos in range(len(rescaled_probabilities) - 1)]

    max_distance_pos = max(enumerate(distances, 1), key=itemgetter(1))[0]
    limit = min(max_distance_pos, max_languages)
    return languages[:limit]
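The cut-off trick in isolation: sort the scores, take logs, and cut the ranking at the largest gap between consecutive log-probabilities. A runnable sketch with made-up scores:

from math import log
from operator import itemgetter

scores = {'Python': 0.62, 'Ruby': 0.30, 'C': 0.0004, 'Go': 0.0001}
ranked = sorted(scores.items(), key=itemgetter(1), reverse=True)
languages, probs = zip(*ranked)

log_probs = [log(p) for p in probs]
gaps = [log_probs[i] - log_probs[i + 1] for i in range(len(log_probs) - 1)]

# enumerate(..., 1) makes the winning gap index double as "how many to keep"
cut = max(enumerate(gaps, 1), key=itemgetter(1))[0]
print(languages[:cut])  # ('Python', 'Ruby'): the largest log-gap is after Ruby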
def get_running_time(self):
    """
    Determines how long this process has been running.

    @rtype:  long
    @return: Process running time in milliseconds.
    """
    if win32.PROCESS_ALL_ACCESS == win32.PROCESS_ALL_ACCESS_VISTA:
        dwAccess = win32.PROCESS_QUERY_LIMITED_INFORMATION
    else:
        dwAccess = win32.PROCESS_QUERY_INFORMATION
    hProcess = self.get_handle(dwAccess)
    (CreationTime, ExitTime, _, _) = win32.GetProcessTimes(hProcess)
    if self.is_alive():
        ExitTime = win32.GetSystemTimeAsFileTime()
    CreationTime = CreationTime.dwLowDateTime + \
        (CreationTime.dwHighDateTime << 32)
    ExitTime = ExitTime.dwLowDateTime + (ExitTime.dwHighDateTime << 32)
    RunningTime = ExitTime - CreationTime
    return RunningTime / 10000  # FILETIME ticks (100 ns) to milliseconds
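The closing division leans on FILETIME units: one tick is 100 nanoseconds, so 10,000 ticks make one millisecond. A quick sanity check of that arithmetic:

TICK_NS = 100                        # one FILETIME tick is 100 ns
TICKS_PER_MS = 1_000_000 // TICK_NS  # 10,000 ticks per millisecond

ticks = 50_000_000                   # a 5-second interval expressed in ticks
assert ticks // TICKS_PER_MS == 5000  # i.e. 5,000 ms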
def score(self, model):
    """
    Computes a score to measure how well the given `BayesianModel` fits
    to the data set. (This method relies on the `local_score`-method that
    is implemented in each subclass.)

    Parameters
    ----------
    model: `BayesianModel` instance
        The Bayesian network that is to be scored. Nodes of the BayesianModel
        need to coincide with column names of data set.

    Returns
    -------
    score: float
        A number indicating the degree of fit between data and model

    Examples
    --------
    >>> import pandas as pd
    >>> import numpy as np
    >>> from pgmpy.estimators import K2Score
    >>> # create random data sample with 3 variables, where B and C are identical:
    >>> data = pd.DataFrame(np.random.randint(0, 5, size=(5000, 2)), columns=list('AB'))
    >>> data['C'] = data['B']
    >>> K2Score(data).score(BayesianModel([['A','B'], ['A','C']]))
    -24242.367348745247
    >>> K2Score(data).score(BayesianModel([['A','B'], ['B','C']]))
    -16273.793897051042
    """
    score = 0
    for node in model.nodes():
        score += self.local_score(node, model.predecessors(node))
    score += self.structure_prior(model)
    return score
def list_aliases(self):
    """List aliases linked to the index"""
    # check alias doesn't exist
    r = self.requests.get(self.index_url + "/_alias",
                          headers=HEADER_JSON, verify=False)
    try:
        r.raise_for_status()
    except requests.exceptions.HTTPError as ex:
        logger.warning("Something went wrong when retrieving aliases on %s.",
                       self.anonymize_url(self.index_url))
        logger.warning(ex)
        return

    aliases = r.json()[self.index]['aliases']
    return aliases
def onchain_exchange(self, withdraw_crypto, withdraw_address, value,
                     unit='satoshi'):
    """
    This method is like `add_output`, but it sends to another
    cryptocurrency via an on-chain exchange service.
    """
    self.onchain_rate = get_onchain_exchange_rates(
        self.crypto, withdraw_crypto, best=True, verbose=self.verbose
    )
    exchange_rate = float(self.onchain_rate['rate'])
    result = self.onchain_rate['service'].get_onchain_exchange_address(
        self.crypto, withdraw_crypto, withdraw_address
    )

    address = result['deposit']

    value_satoshi = self.from_unit_to_satoshi(value, unit)

    if self.verbose:
        print("Adding output of: %s satoshi (%.8f) via onchain exchange, "
              "converting to %s %s" % (
                  value_satoshi, (value_satoshi / 1e8),
                  exchange_rate * value_satoshi / 1e8,
                  withdraw_crypto.upper()
              ))

    self.outs.append({
        'address': address,
        'value': value_satoshi
    })
def _serve_dir(self, abspath, params):
    """Show a directory listing."""
    relpath = os.path.relpath(abspath, self._root)
    breadcrumbs = self._create_breadcrumbs(relpath)
    entries = [{'link_path': os.path.join(relpath, e), 'name': e}
               for e in os.listdir(abspath)]
    args = self._default_template_args('dir.html')
    args.update({'root_parent': os.path.dirname(self._root),
                 'breadcrumbs': breadcrumbs,
                 'entries': entries,
                 'params': params})
    content = self._renderer.render_name('base.html', args).encode("utf-8")
    self._send_content(content, 'text/html')
def get_community_names():
    '''
    Get the current accepted SNMP community names and their permissions.

    If community names are being managed by Group Policy, those values will
    be returned instead like this:

    .. code-block:: bash

        TestCommunity:
            Managed by GPO

    Community names managed normally will denote the permission instead:

    .. code-block:: bash

        TestCommunity:
            Read Only

    Returns:
        dict: A dictionary of community names and permissions.

    CLI Example:

    .. code-block:: bash

        salt '*' win_snmp.get_community_names
    '''
    ret = dict()

    # Look in GPO settings first
    if __utils__['reg.key_exists'](_HKEY, _COMMUNITIES_GPO_KEY):

        _LOG.debug('Loading communities from Group Policy settings')

        current_values = __utils__['reg.list_values'](
            _HKEY, _COMMUNITIES_GPO_KEY, include_default=False)

        # GPO settings are different in that they do not designate permissions
        # They are a numbered list of communities like so:
        #
        # {1: "community 1",
        #  2: "community 2"}
        #
        # Denote that it is being managed by Group Policy.
        #
        # community 1:
        #     Managed by GPO
        # community 2:
        #     Managed by GPO
        if isinstance(current_values, list):
            for current_value in current_values:

                # Ignore error values
                if not isinstance(current_value, dict):
                    continue

                ret[current_value['vdata']] = 'Managed by GPO'

    if not ret:

        _LOG.debug('Loading communities from SNMP settings')

        current_values = __utils__['reg.list_values'](
            _HKEY, _COMMUNITIES_KEY, include_default=False)

        # The communities are stored as the community name with a numeric
        # permission value. Like this (4 = Read Only):
        #
        # {"community 1": 4,
        #  "community 2": 4}
        #
        # Convert the numeric value to the text equivalent, as present in the
        # Windows SNMP service GUI.
        #
        # community 1:
        #     Read Only
        # community 2:
        #     Read Only
        if isinstance(current_values, list):
            for current_value in current_values:

                # Ignore error values
                if not isinstance(current_value, dict):
                    continue

                permissions = six.text_type()
                for permission_name in _PERMISSION_TYPES:
                    if current_value['vdata'] == \
                            _PERMISSION_TYPES[permission_name]:
                        permissions = permission_name
                        break
                ret[current_value['vname']] = permissions

    if not ret:
        _LOG.debug('Unable to find existing communities.')

    return ret
def _urlopen_as_json(self, url, headers=None):
    """Shortcut for returning contents as JSON"""
    req = Request(url, headers=headers)
    return json.loads(urlopen(req).read())
def log_parameters(self):
    """
    Logs information about model parameters.
    """
    arg_params, aux_params = self.module.get_params()
    total_parameters = 0
    fixed_parameters = 0
    learned_parameters = 0
    info = []  # type: List[str]
    for name, array in sorted(arg_params.items()):
        info.append("%s: %s" % (name, array.shape))
        num_parameters = reduce(lambda x, y: x * y, array.shape)
        total_parameters += num_parameters
        if name in self.module._fixed_param_names:
            fixed_parameters += num_parameters
        else:
            learned_parameters += num_parameters
    percent_fixed = 100 * (fixed_parameters / max(1, total_parameters))
    percent_learned = 100 * (learned_parameters / max(1, total_parameters))
    logger.info("Model parameters: %s", ", ".join(info))
    logger.info("Fixed model parameters: %s",
                ", ".join(self.module._fixed_param_names))
    logger.info("Fixing %d parameters (%0.2f%%)",
                fixed_parameters, percent_fixed)
    logger.info("Learning %d parameters (%0.2f%%)",
                learned_parameters, percent_learned)
    logger.info("Total # of parameters: %d", total_parameters)
def open(self, data_source, *args, **kwargs):
    """
    Open filename to get data for data_source.

    :param data_source: Data source for which the file contains data.
    :type data_source: str

    Positional and keyword arguments can contain either the data to use for
    the data source or the full path of the file which contains data for the
    data source.
    """
    if self.sources[data_source]._meta.data_reader.is_file_reader:
        filename = kwargs.get('filename')
        path = kwargs.get('path', '')
        rel_path = kwargs.get('rel_path', '')
        if len(args) > 0:
            filename = args[0]
        if len(args) > 1:
            path = args[1]
        if len(args) > 2:
            rel_path = args[2]
        args = ()
        kwargs = {'filename': os.path.join(rel_path, path, filename)}
        LOGGER.debug('filename: %s', kwargs['filename'])
    # call constructor of data source with filename argument
    self.objects[data_source] = self.sources[data_source](*args, **kwargs)
    # register data and uncertainty in registry
    data_src_obj = self.objects[data_source]
    meta = [getattr(data_src_obj, m) for m in self.reg.meta_names]
    self.reg.register(data_src_obj.data, *meta)
def _minimum_coloring_qubo(x_vars, chi_lb, chi_ub, magnitude=1.):
    """We want to disincentivize unneeded colors. Generates the QUBO
    that does that.
    """
    # if we already know the chromatic number, then we don't need to
    # disincentivize any colors.
    if chi_lb == chi_ub:
        return {}

    # we might need to use some of the colors, so we want to disincentivize
    # them in increasing amounts, linearly.
    scaling = magnitude / (chi_ub - chi_lb)

    # build the QUBO
    Q = {}
    for v in x_vars:
        for f, color in enumerate(range(chi_lb, chi_ub)):
            idx = x_vars[v][color]
            Q[(idx, idx)] = (f + 1) * scaling

    return Q
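A small illustration of the penalty structure; the x_vars layout (node to color to binary-variable index) is inferred from the loop and the numbers are hypothetical:

x_vars = {
    'a': {0: 0, 1: 1, 2: 2, 3: 3},
    'b': {0: 4, 1: 5, 2: 6, 3: 7},
}
# Colors 0-1 sit below chi_lb and stay free; colors 2-3 are penalized linearly.
Q = _minimum_coloring_qubo(x_vars, chi_lb=2, chi_ub=4, magnitude=1.0)
assert Q[(2, 2)] == 0.5 and Q[(3, 3)] == 1.0  # node 'a'
assert Q[(6, 6)] == 0.5 and Q[(7, 7)] == 1.0  # node 'b'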
def generate(cls, strategy, **kwargs):
    """Generate a new instance.

    The instance will be created with the given strategy (one of
    BUILD_STRATEGY, CREATE_STRATEGY, STUB_STRATEGY).

    Args:
        strategy (str): the strategy to use for generating the instance.

    Returns:
        object: the generated instance
    """
    assert strategy in (enums.STUB_STRATEGY, enums.BUILD_STRATEGY,
                        enums.CREATE_STRATEGY)
    action = getattr(cls, strategy)
    return action(**kwargs)
def _make_connection(self, addr, port):
    """make our proxy connection"""
    sender = self._create_connection(addr, port)
    # XXX look out! we're depending on this "sender" implementing
    # certain Twisted APIs, and the state-machine shouldn't depend
    # on that.

    # XXX also, if sender implements producer/consumer stuff, we
    # should register ourselves (and implement it too) -- but this
    # should really be taking place outside the state-machine in
    # "the I/O-doing" stuff
    self._sender = sender
    self._when_done.fire(sender)
def delete_blobs(self, blobs, on_error=None, client=None):
    """Deletes a list of blobs from the current bucket.

    Uses :meth:`delete_blob` to delete each individual blob.

    If :attr:`user_project` is set, bills the API request to that project.

    :type blobs: list
    :param blobs: A list of :class:`~google.cloud.storage.blob.Blob`-s or
                  blob names to delete.

    :type on_error: callable
    :param on_error: (Optional) Takes single argument: ``blob``. Called
                     once for each blob raising
                     :class:`~google.cloud.exceptions.NotFound`;
                     otherwise, the exception is propagated.

    :type client: :class:`~google.cloud.storage.client.Client`
    :param client: (Optional) The client to use. If not passed, falls back
                   to the ``client`` stored on the current bucket.

    :raises: :class:`~google.cloud.exceptions.NotFound` (if
             `on_error` is not passed).
    """
    for blob in blobs:
        try:
            blob_name = blob
            if not isinstance(blob_name, six.string_types):
                blob_name = blob.name
            self.delete_blob(blob_name, client=client)
        except NotFound:
            if on_error is not None:
                on_error(blob)
            else:
                raise
def before(self, idx):
    """Return datetime of newest existing data record whose
    datetime is < idx.

    Might not even be in the same year! If no such record exists,
    return None.
    """
    if not isinstance(idx, datetime):
        raise TypeError("'%s' is not %s" % (idx, datetime))
    day = min(idx.date(), self._hi_limit - DAY)
    while day >= self._lo_limit:
        if day < self._rd_cache.lo or day >= self._rd_cache.hi:
            self._load(self._rd_cache, day)
        self._rd_cache.set_ptr(idx)
        if self._rd_cache.ptr > 0:
            return self._rd_cache.data[self._rd_cache.ptr - 1]['idx']
        day = self._rd_cache.lo - DAY
    return None
def update_status(self, *args, **kwargs):
    """ :reference: https://dev.twitter.com/rest/reference/post/statuses/update
        :allowed_param:'status', 'in_reply_to_status_id',
         'in_reply_to_status_id_str', 'auto_populate_reply_metadata',
         'lat', 'long', 'source', 'place_id', 'display_coordinates',
         'media_ids'
    """
    post_data = {}
    media_ids = kwargs.pop('media_ids', None)
    if media_ids is not None:
        post_data['media_ids'] = list_to_csv(media_ids)

    return bind_api(
        api=self,
        path='/statuses/update.json',
        method='POST',
        payload_type='status',
        allowed_param=['status', 'in_reply_to_status_id',
                       'in_reply_to_status_id_str',
                       'auto_populate_reply_metadata', 'lat', 'long',
                       'source', 'place_id', 'display_coordinates'],
        require_auth=True
    )(post_data=post_data, *args, **kwargs)
def determine_paths(self, package_name=None, create_package_dir=False,
                    dry_run=False):
    """Determine paths automatically and a little intelligently"""
    # Give preference to the environment variable here as it will not
    # dereference sym links
    self.project_dir = Path(os.getenv('PWD') or os.getcwd())

    # Try and work out the project name
    distribution = self.get_distribution()
    if distribution:
        # Get name from setup.py
        self.project_name = distribution.get_name()
    else:
        # ...failing that, use the current directory name
        self.project_name = self.project_dir.name

    # Descend into the 'src' directory to find the package
    # if necessary
    if os.path.isdir(self.project_dir / "src"):
        package_search_dir = self.project_dir / "src"
    else:
        package_search_dir = self.project_dir

    created_package_dir = False
    if not package_name:
        # Let's try and work out the package_name from the project_name
        package_name = self.project_name.replace("-", "_")

        # Now do some fuzzy matching
        def get_matches(name):
            possibles = [n for n in os.listdir(package_search_dir)
                         if os.path.isdir(package_search_dir / n)]
            return difflib.get_close_matches(name, possibles,
                                             n=1, cutoff=0.8)

        close = get_matches(package_name)

        # If no matches, try removing the first part of the package name
        # (e.g. django-guardian becomes guardian)
        if not close and "_" in package_name:
            short_package_name = "_".join(package_name.split("_")[1:])
            close = get_matches(short_package_name)

        if not close:
            if create_package_dir:
                package_dir = package_search_dir / package_name
                # Gets set to True even during a dry run
                created_package_dir = True
                if not dry_run:
                    print("Creating package directory at %s" % package_dir)
                    os.mkdir(package_dir)
                else:
                    print("Would have created package directory at %s"
                          % package_dir)
            else:
                raise CommandError("Could not guess the package name. "
                                   "Specify it using --name.")
        else:
            package_name = close[0]

    self.package_name = package_name
    self.package_dir = package_search_dir / package_name

    if not os.path.exists(self.package_dir) and not created_package_dir:
        raise CommandError("Package directory did not exist at %s. "
                           "Perhaps specify it using --name"
                           % self.package_dir)
def match_list(lst, pattern, group_names=[]):
    """
    Parameters
    ----------
    lst: list of str

    pattern: str
        Regular expression.

    group_names: list of str
        See re.MatchObject group docstring

    Returns
    -------
    list of str
        Filtered list, with the strings that match the pattern
    """
    filtfn = re.compile(pattern).match
    filtlst = filter_list(lst, filtfn)
    if not group_names:
        return [m.string for m in filtlst]
    else:
        return [m.group(*group_names) for m in filtlst]
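A runnable sketch of the same pattern; the filter_list helper is assumed to map the callable over the list and keep the truthy results (the re.Match objects), which is what the comprehension below does:

import re

def filter_list(lst, func):
    return [res for res in (func(item) for item in lst) if res]

def match_list(lst, pattern):
    matches = filter_list(lst, re.compile(pattern).match)
    return [m.string for m in matches]

print(match_list(['run_01', 'run_02', 'readme'], r'run_\d+'))
# ['run_01', 'run_02']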
def unindex_item(self, item):
    """
    Un-index an item from our name_to_item dict.

    :param item: the item to un-index
    :type item: alignak.objects.item.Item
    :return: None
    """
    name_property = getattr(self.__class__, "name_property", None)
    if name_property is None:
        return
    name = getattr(item, name_property, None)
    if name is None:
        return
    self.name_to_item.pop(name, None)
def x_fit(self, test_length):
    """
    Test whether the line has enough space left for the given length.
    """
    return (self.x + test_length) < self.xmax
def guard_multi_verify(analysis):
    """Return whether the transition "multi_verify" can be performed or not.

    The transition multi_verify will only take place if multi-verification
    of results is enabled.
    """
    # Cannot multiverify if there is only one remaining verification
    remaining_verifications = analysis.getNumberOfRemainingVerifications()
    if remaining_verifications <= 1:
        return False

    # Cannot verify if the user submitted and self-verification is not allowed
    if was_submitted_by_current_user(analysis):
        if not analysis.isSelfVerificationEnabled():
            return False

    # Cannot verify if the user verified and multi verification is not allowed
    if was_verified_by_current_user(analysis):
        if not is_multi_verification_allowed(analysis):
            return False

    # Cannot verify if the user was last verifier and consecutive
    # verification by same user is not allowed
    if current_user_was_last_verifier(analysis):
        if not is_consecutive_multi_verification_allowed(analysis):
            return False

    # Cannot verify unless all dependencies are verified or can be verified
    for dependency in analysis.getDependencies():
        if not is_verified_or_verifiable(dependency):
            return False

    return True
def check_image_is_3d(img):
    """Ensures the image loaded is 3D and nothing else."""
    if len(img.shape) < 3:
        raise ValueError('Input volume must be at least 3D!')
    elif len(img.shape) == 3:
        for dim_size in img.shape:
            if dim_size < 1:
                raise ValueError('At least one slice must exist '
                                 'in each dimension')
    elif len(img.shape) == 4:
        if img.shape[3] != 1:
            raise ValueError('Input volume is 4D with more than one volume!')
        else:
            img = np.squeeze(img, axis=3)
    elif len(img.shape) > 4:
        raise ValueError('Invalid shape of image : {}'.format(img.shape))

    return img
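Usage sketch with synthetic numpy volumes (shapes picked arbitrarily):

import numpy as np

vol = check_image_is_3d(np.zeros((64, 64, 32)))
print(vol.shape)    # (64, 64, 32): passed through unchanged

vol4d = check_image_is_3d(np.zeros((64, 64, 32, 1)))
print(vol4d.shape)  # (64, 64, 32): the singleton 4th axis is squeezed away

try:
    check_image_is_3d(np.zeros((64, 64)))
except ValueError as err:
    print(err)      # Input volume must be at least 3D!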
def compile_file(self, filename, encoding="utf-8", bare=False):
    '''Compile a CoffeeScript script file to JavaScript code.

    filename can be a list or tuple of filenames, in which case the
    contents of the files are concatenated with line feeds.

    If bare is True, compile the JavaScript without the top-level
    function safety wrapper (like the coffee command).
    '''
    if isinstance(filename, _BaseString):
        filename = [filename]
    scripts = []
    for f in filename:
        with io.open(f, encoding=encoding) as fp:
            scripts.append(fp.read())
    return self.compile('\n\n'.join(scripts), bare=bare)
def parseDateText(self, dateString, sourceTime=None):
    """
    Parse long-form date strings::

        'May 31st, 2006'
        'Jan 1st'
        'July 2006'

    @type  dateString: string
    @param dateString: text to convert to a datetime
    @type  sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype:  struct_time
    @return: calculated C{struct_time} value of dateString
    """
    if sourceTime is None:
        yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
    else:
        yr, mth, dy, hr, mn, sec, wd, yd, isdst = sourceTime

    currentMth = mth
    currentDy = dy
    accuracy = []

    debug and log.debug('parseDateText currentMth %s currentDy %s',
                        mth, dy)

    s = dateString.lower()
    m = self.ptc.CRE_DATE3.search(s)
    mth = m.group('mthname')
    mth = self.ptc.MonthOffsets[mth]
    accuracy.append('month')

    if m.group('day') is not None:
        dy = int(m.group('day'))
        accuracy.append('day')
    else:
        dy = 1

    if m.group('year') is not None:
        yr = int(m.group('year'))
        accuracy.append('year')

        # birthday epoch constraint
        if yr < self.ptc.BirthdayEpoch:
            yr += 2000
        elif yr < 100:
            yr += 1900
    elif (mth < currentMth) or (mth == currentMth and dy < currentDy):
        # if that day and month have already passed in this year,
        # then increment the year by 1
        yr += self.ptc.YearParseStyle

    with self.context() as ctx:
        if dy > 0 and dy <= self.ptc.daysInMonth(mth, yr):
            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
            ctx.updateAccuracy(*accuracy)
        else:
            # Return current time if date string is invalid
            sourceTime = time.localtime()

    debug and log.debug('parseDateText returned '
                        'mth %d dy %d yr %d sourceTime %s',
                        mth, dy, yr, sourceTime)

    return sourceTime
def connect(self, uri, link_quality_callback, link_error_callback):
    """
    Connect the link driver to a specified URI of the format:
    usb://<device id>

    (The code below only accepts usb:// URIs; the original docstring
    described the radio:// format, apparently copied from the radio driver.)

    The callback for linkQuality can be called at any moment from the
    driver to report back the link quality in percentage. The
    callback from linkError will be called when an error occurs with
    an error message.
    """
    # check if the URI is a USB URI
    if not re.search('^usb://', uri):
        raise WrongUriType('Not a USB URI')

    # Open the USB dongle
    if not re.search('^usb://([0-9]+)$', uri):
        raise WrongUriType('Wrong USB URI format!')

    uri_data = re.search('^usb://([0-9]+)$', uri)

    self.uri = uri

    if self.cfusb is None:
        self.cfusb = CfUsb(devid=int(uri_data.group(1)))
        if self.cfusb.dev:
            self.cfusb.set_crtp_to_usb(True)
        else:
            self.cfusb = None
            raise Exception('Could not open {}'.format(self.uri))
    else:
        raise Exception('Link already open!')

    # Prepare the inter-thread communication queue
    self.in_queue = queue.Queue()
    # Limited size out queue to avoid "ReadBack" effect
    self.out_queue = queue.Queue(50)

    # Launch the comm thread
    self._thread = _UsbReceiveThread(self.cfusb, self.in_queue,
                                     link_quality_callback,
                                     link_error_callback)
    self._thread.start()

    self.link_error_callback = link_error_callback
def register(CommandSubClass):
    """A class decorator for Command classes to register in the default set."""
    name = CommandSubClass.name()
    if name in Command._all_commands:
        raise ValueError("Command already exists: " + name)
    Command._all_commands[name] = CommandSubClass
    return CommandSubClass
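A minimal sketch of the registry pattern in use; the Command base class below is hypothetical scaffolding, not the library's:

class Command:
    _all_commands = {}

    @classmethod
    def name(cls):
        return cls.__name__.lower()

@register
class BuildCommand(Command):
    pass

print(Command._all_commands)  # {'buildcommand': <class ...BuildCommand>}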
def __get_dbms_version(self, make_connection=True):
    """
    Returns the 'DBMS Version' string
    """
    major, minor, _, _ = self.get_server_version(
        make_connection=make_connection)
    return '{}.{}'.format(major, minor)
def merge(self, dataset):
    """Merge the specified dataset on top of the existing data.

    This replaces all values in the existing dataset with the values from
    the given dataset.

    Args:
        dataset (TaskData): A reference to the TaskData object that should
            be merged on top of the existing object.
    """
    def merge_data(source, dest):
        for key, value in source.items():
            if isinstance(value, dict):
                merge_data(value, dest.setdefault(key, {}))
            else:
                dest[key] = value
        return dest

    merge_data(dataset.data, self._data)

    for h in dataset.task_history:
        if h not in self._task_history:
            self._task_history.append(h)
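The inner merge_data helper is a plain recursive dict merge and runs standalone; the config values below are invented:

def merge_data(source, dest):
    for key, value in source.items():
        if isinstance(value, dict):
            merge_data(value, dest.setdefault(key, {}))
        else:
            dest[key] = value
    return dest

base = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
override = {'db': {'host': 'db.example.com'}, 'debug': True}
print(merge_data(override, base))
# {'db': {'host': 'db.example.com', 'port': 5432}, 'debug': True}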
def set_dft_grid(self, radical_points=128, angular_points=302,
                 grid_type="Lebedev"):
    """
    Set the grid for DFT numerical integrations.

    Args:
        radical_points: Radical points. (Integer)
        angular_points: Angular points. (Integer)
        grid_type: The type of the grid. There are two standard grids:
            SG-1 and SG-0. The other two supported grids are "Lebedev" and
            "Gauss-Legendre"
    """
    available_lebedev_angular_points = {6, 18, 26, 38, 50, 74, 86, 110, 146,
                                        170, 194, 230, 266, 302, 350, 434,
                                        590, 770, 974, 1202, 1454, 1730,
                                        2030, 2354, 2702, 3074, 3470, 3890,
                                        4334, 4802, 5294}
    if grid_type.lower() == "sg-0":
        self.params["rem"]["xc_grid"] = 0
    elif grid_type.lower() == "sg-1":
        self.params["rem"]["xc_grid"] = 1
    elif grid_type.lower() == "lebedev":
        if angular_points not in available_lebedev_angular_points:
            raise ValueError(str(angular_points) + " is not a valid "
                             "Lebedev angular points number")
        self.params["rem"]["xc_grid"] = "{rp:06d}{ap:06d}".format(
            rp=radical_points, ap=angular_points)
    elif grid_type.lower() == "gauss-legendre":
        self.params["rem"]["xc_grid"] = "-{rp:06d}{ap:06d}".format(
            rp=radical_points, ap=angular_points)
    else:
        raise ValueError("Grid type " + grid_type + " is not supported "
                         "currently")
def _migration_required(connection):
    """Return True if the ambry models do not match the db tables,
    False otherwise."""
    stored_version = get_stored_version(connection)
    actual_version = SCHEMA_VERSION
    assert isinstance(stored_version, int)
    assert isinstance(actual_version, int)
    assert stored_version <= actual_version, \
        'Db version can not be greater than models version. ' \
        'Update your source code.'
    return stored_version < actual_version
def _discover_cover_image(zf, opf_xmldoc, opf_filepath):
    '''
    Find the cover image path in the OPF file.

    Returns a tuple: (image content in base64, file extension)
    '''
    content = None
    filepath = None
    extension = None

    # Strategies to discover the cover-image path:

    # e.g.: <meta name="cover" content="cover"/>
    tag = find_tag(opf_xmldoc, 'meta', 'name', 'cover')
    if tag and 'content' in tag.attributes.keys():
        item_id = tag.attributes['content'].value
        if item_id:
            # e.g.: <item href="cover.jpg" id="cover" media-type="image/jpeg"/>
            filepath, extension = find_img_tag(opf_xmldoc, 'item', 'id',
                                               item_id)
    if not filepath:
        filepath, extension = find_img_tag(opf_xmldoc, 'item', 'id',
                                           'cover-image')
    if not filepath:
        filepath, extension = find_img_tag(opf_xmldoc, 'item', 'id', 'cover')

    # If we have found the cover image path:
    if filepath:
        # The cover image path is relative to the OPF file
        base_dir = os.path.dirname(opf_filepath)
        # Also, normalize the path (ie opfpath/../cover.jpg -> cover.jpg)
        coverpath = os.path.normpath(os.path.join(base_dir, filepath))
        content = zf.read(coverpath)
        content = base64.b64encode(content)

    return content, extension
def Max(self, k):
    """Computes the CDF of the maximum of k selections from this dist.

    k: int

    returns: new Cdf
    """
    cdf = self.MakeCdf()
    cdf.ps = [p ** k for p in cdf.ps]
    return cdf
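Why raising the CDF to the k-th power works: for k independent draws, P(max <= x) = P(X1 <= x) * ... * P(Xk <= x) = F(x)^k. A quick check against simulation with a fair die:

from random import randint

trials = 200_000
hits = sum(max(randint(1, 6) for _ in range(3)) <= 4 for _ in range(trials))
print(hits / trials, (4 / 6) ** 3)  # both close to 0.296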
def handle_document(self, item_session: ItemSession, filename: str) \
        -> Actions:
    '''Process a successful document response.

    Returns:
        A value from :class:`.hook.Actions`.
    '''
    self._waiter.reset()
    action = self.handle_response(item_session)

    if action == Actions.NORMAL:
        self._statistics.increment(item_session.response.body.size())
        item_session.set_status(Status.done, filename=filename)

    return action
def bezier_real_minmax(p):
    """returns the minimum and maximum for any real cubic bezier"""
    local_extremizers = [0, 1]
    # defined up front (with a non-shadowing loop variable) so the
    # fallback branch below can also use the real coefficients
    a = [z.real for z in p]
    if len(p) == 4:  # cubic case
        denom = a[0] - 3*a[1] + 3*a[2] - a[3]
        if denom != 0:
            delta = a[1]**2 - \
                (a[0] + a[1])*a[2] + a[2]**2 + (a[0] - a[1])*a[3]
            if delta >= 0:  # otherwise no local extrema
                sqdelta = sqrt(delta)
                tau = a[0] - 2*a[1] + a[2]
                r1 = (tau + sqdelta)/denom
                r2 = (tau - sqdelta)/denom
                if 0 < r1 < 1:
                    local_extremizers.append(r1)
                if 0 < r2 < 1:
                    local_extremizers.append(r2)
            local_extrema = [bezier_point(a, t) for t in local_extremizers]
            return min(local_extrema), max(local_extrema)

    # find reverse standard coefficients of the derivative
    dcoeffs = bezier2polynomial(a, return_poly1d=True).deriv().coeffs

    # find real roots, r, such that 0 <= r <= 1
    local_extremizers += polyroots01(dcoeffs)
    local_extrema = [bezier_point(a, t) for t in local_extremizers]
    return min(local_extrema), max(local_extrema)
def experience(
        self, agent_indices, observ, action, reward, unused_done,
        unused_nextob):
    """Process the transition tuple of the current step.

    When training, add the current transition tuple to the memory and
    update the streaming statistics for observations and rewards. A summary
    string is returned if requested at this step.

    Args:
        agent_indices: Tensor containing current batch indices.
        observ: Batch tensor of observations.
        action: Batch tensor of actions.
        reward: Batch tensor of rewards.
        unused_done: Batch tensor of done flags.
        unused_nextob: Batch tensor of successor observations.

    Returns:
        Summary tensor.
    """
    with tf.name_scope('experience/'):
        return tf.cond(
            self._is_training,
            # pylint: disable=g-long-lambda
            lambda: self._define_experience(
                agent_indices, observ, action, reward),
            str)
def add_comment(node, text, location='above'):
    """Add a comment to the given node.

    If the `SourceWithCommentGenerator` class is used these comments will be
    output as part of the source code.

    Note that a node can only contain one comment. Subsequent calls to
    `add_comment` will override the existing comments.

    Args:
        node: The AST node whose containing statement will be commented.
        text: A comment string.
        location: Where the comment should appear. Valid values are 'above',
            'below' and 'right'

    Returns:
        The node with the comment stored as an annotation.
    """
    anno.setanno(node, 'comment', dict(location=location, text=text),
                 safe=False)
    return node
Add a comment to the given node. If the `SourceWithCommentGenerator` class is used these comments will be output as part of the source code. Note that a node can only contain one comment. Subsequent calls to `add_comment` will override the existing comments. Args: node: The AST node whose containing statement will be commented. text: A comment string. location: Where the comment should appear. Valid values are 'above', 'below' and 'right' Returns: The node with the comment stored as an annotation.
def _get_all_objs(
        self, server_instance, regexes=None, include_only_marked=False, tags=None, use_guest_hostname=False
    ):
        """
        Explore vCenter infrastructure to discover hosts, virtual machines, etc. and compute their associated tags.

        Start at the vCenter `rootFolder`, so as to collect every object.

        Example topology:
        ```
        rootFolder
            - datacenter1
                - compute_resource1 == cluster
                    - host1
                    - host2
                    - host3
                - compute_resource2
                    - host5
                        - vm1
                        - vm2
        ```
        If it's a node we want to query metrics for, it will be enqueued at the
        instance level and will be processed by a subsequent job.
        """
        start = time.time()
        if tags is None:
            tags = []
        obj_list = defaultdict(list)

        # Collect objects and their attributes
        all_objects = self._collect_mors_and_attributes(server_instance)

        # Add rootFolder since it is not explored by the propertyCollector
        rootFolder = server_instance.content.rootFolder
        all_objects[rootFolder] = {"name": rootFolder.name, "parent": None}

        for obj, properties in all_objects.items():
            instance_tags = []
            if not self._is_excluded(obj, properties, regexes, include_only_marked) and any(
                isinstance(obj, vimtype) for vimtype in RESOURCE_TYPE_METRICS
            ):
                if use_guest_hostname:
                    hostname = properties.get("guest.hostName", properties.get("name", "unknown"))
                else:
                    hostname = properties.get("name", "unknown")
                if properties.get("parent"):
                    instance_tags += self._get_parent_tags(obj, all_objects)

                if isinstance(obj, vim.VirtualMachine):
                    vsphere_type = 'vsphere_type:vm'
                    vimtype = vim.VirtualMachine
                    mor_type = "vm"
                    power_state = properties.get("runtime.powerState")
                    if power_state != vim.VirtualMachinePowerState.poweredOn:
                        self.log.debug("Skipping VM in state {}".format(ensure_unicode(power_state)))
                        continue
                    host_mor = properties.get("runtime.host")
                    host = "unknown"
                    if host_mor:
                        host = ensure_unicode(all_objects.get(host_mor, {}).get("name", "unknown"))
                    instance_tags.append('vsphere_host:{}'.format(ensure_unicode(host)))
                elif isinstance(obj, vim.HostSystem):
                    vsphere_type = 'vsphere_type:host'
                    vimtype = vim.HostSystem
                    mor_type = "host"
                elif isinstance(obj, vim.Datastore):
                    vsphere_type = 'vsphere_type:datastore'
                    instance_tags.append(
                        'vsphere_datastore:{}'.format(ensure_unicode(properties.get("name", "unknown")))
                    )
                    hostname = None
                    vimtype = vim.Datastore
                    mor_type = "datastore"
                elif isinstance(obj, vim.Datacenter):
                    vsphere_type = 'vsphere_type:datacenter'
                    instance_tags.append(
                        "vsphere_datacenter:{}".format(ensure_unicode(properties.get("name", "unknown")))
                    )
                    hostname = None
                    vimtype = vim.Datacenter
                    mor_type = "datacenter"
                elif isinstance(obj, vim.ClusterComputeResource):
                    vsphere_type = 'vsphere_type:cluster'
                    instance_tags.append("vsphere_cluster:{}".format(ensure_unicode(properties.get("name", "unknown"))))
                    hostname = None
                    vimtype = vim.ClusterComputeResource
                    mor_type = "cluster"
                else:
                    vsphere_type = None

                if vsphere_type:
                    instance_tags.append(vsphere_type)
                obj_list[vimtype].append(
                    {"mor_type": mor_type, "mor": obj, "hostname": hostname, "tags": tags + instance_tags}
                )

        self.log.debug("All objects with attributes cached in {} seconds.".format(time.time() - start))
        return obj_list
Explore vCenter infrastructure to discover hosts, virtual machines, etc. and compute their associated tags. Start at the vCenter `rootFolder`, so as to collect every object. Example topology: ``` rootFolder - datacenter1 - compute_resource1 == cluster - host1 - host2 - host3 - compute_resource2 - host5 - vm1 - vm2 ``` If it's a node we want to query metrics for, it will be enqueued at the instance level and will be processed by a subsequent job.
def get_version(self, dependency): """Return the installed version parsing the output of 'pip show'.""" logger.debug("getting installed version for %s", dependency) stdout = helpers.logged_exec([self.pip_exe, "show", str(dependency)]) version = [line for line in stdout if line.startswith('Version:')] if len(version) == 1: version = version[0].strip().split()[1] logger.debug("Installed version of %s is: %s", dependency, version) return version else: logger.error('Fades is having problems getting the installed version. ' 'Run with -v or check the logs for details') return ''
Return the installed version parsing the output of 'pip show'.
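The parsing step can be exercised in isolation with canned `pip show` output, a minimal sketch without the real subprocess call:

stdout = [
    'Name: requests',
    'Version: 2.31.0',
    'Summary: Python HTTP for Humans.',
]
version = [line for line in stdout if line.startswith('Version:')]
print(version[0].strip().split()[1])   # -> 2.31.0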
def p_expression_sla(self, p): 'expression : expression LSHIFTA expression' p[0] = Sll(p[1], p[3], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
expression : expression LSHIFTA expression
def set(self, dic, val=None, force=False):
        """set can assign versatile options from
        `CMAOptions.versatile_options()` with a new value, use `init()`
        for the others.

        Arguments
        ---------
            `dic`
                either a dictionary or a key. In the latter
                case, `val` must be provided
            `val`
                value for `key`, approximate match is sufficient
            `force`
                force setting of non-versatile options, use with caution

        This method will be most probably used with the ``opts`` attribute of
        a `CMAEvolutionStrategy` instance.

        """
        if val is not None:  # dic is a key in this case
            dic = {dic: val}  # compose a dictionary
        for key_original, val in list(dict(dic).items()):
            key = self.corrected_key(key_original)
            # honor `force` as documented: it overrides the versatility check
            if force or not self._lock_setting or \
                    key in CMAOptions.versatile_options():
                self[key] = val
            else:
                _print_warning('key ' + str(key_original) +
                               ' ignored (not recognized as versatile)',
                               'set', 'CMAOptions')
        return self
set can assign versatile options from `CMAOptions.versatile_options()` with a new value, use `init()` for the others. Arguments --------- `dic` either a dictionary or a key. In the latter case, `val` must be provided `val` value for `key`, approximate match is sufficient `force` force setting of non-versatile options, use with caution This method will be most probably used with the ``opts`` attribute of a `CMAEvolutionStrategy` instance.
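A hedged usage sketch with the `cma` package; the option names below are common ones, but check `CMAOptions.versatile_options()` in your installed version before relying on them.

import cma

opts = cma.CMAOptions()
opts.set('verb_disp', 10)                     # single key/value form
opts.set({'tolfun': 1e-11, 'maxiter': 500})   # dictionary form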
def write(self, data, sections=None):
        """ Write the data to the output socket. """
        if self.error[0]:
            self.status = self.error[0]
            data = b(self.error[1])

        if not self.headers_sent:
            self.send_headers(data, sections)

        if self.request_method != 'HEAD':
            try:
                if self.chunked:
                    self.conn.sendall(b('%x\r\n%s\r\n' % (len(data), data)))
                else:
                    self.conn.sendall(data)
            except socket.timeout:
                self.closeConnection = True
            except socket.error:
                # Some clients close the connection before all data is sent,
                # which surfaces here as a socket error.
                self.closeConnection = True
Write the data to the output socket.
def workflow_close(object_id, input_params={}, always_retry=True, **kwargs): """ Invokes the /workflow-xxxx/close API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Data-Object-Lifecycle#API-method%3A-%2Fclass-xxxx%2Fclose """ return DXHTTPRequest('/%s/close' % object_id, input_params, always_retry=always_retry, **kwargs)
Invokes the /workflow-xxxx/close API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Data-Object-Lifecycle#API-method%3A-%2Fclass-xxxx%2Fclose
def save_pid(name):
    """
    When debugging and profiling, it is very annoying to poke through the
    process list to discover the currently running Ansible and MuxProcess IDs,
    especially when trying to catch an issue during early startup. So here, if
    a magic environment variable is set, stash them in hidden files in the
    CWD::

        alias muxpid="cat .ansible-mux.pid"
        alias anspid="cat .ansible-controller.pid"

        gdb -p $(muxpid)
        perf top -p $(anspid)
    """
    if os.environ.get('MITOGEN_SAVE_PIDS'):
        with open('.ansible-%s.pid' % (name,), 'w') as fp:
            fp.write(str(os.getpid()))
When debugging and profiling, it is very annoying to poke through the process list to discover the currently running Ansible and MuxProcess IDs, especially when trying to catch an issue during early startup. So here, if a magic environment variable is set, stash them in hidden files in the CWD:: alias muxpid="cat .ansible-mux.pid" alias anspid="cat .ansible-controller.pid" gdb -p $(muxpid) perf top -p $(anspid)
def expand_factor_conditions(s, env): """If env matches the expanded factor then return value else return ''. Example ------- >>> s = 'py{33,34}: docformatter' >>> expand_factor_conditions(s, Env(name="py34", ...)) "docformatter" >>> expand_factor_conditions(s, Env(name="py26", ...)) "" """ try: factor, value = re.split(r'\s*\:\s*', s) except ValueError: return s if matches_factor_conditions(factor, env): return value else: return ''
If env matches the expanded factor then return value else return ''. Example ------- >>> s = 'py{33,34}: docformatter' >>> expand_factor_conditions(s, Env(name="py34", ...)) "docformatter" >>> expand_factor_conditions(s, Env(name="py26", ...)) ""
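A self-contained look at the split that drives this function: exactly one colon separates the factor expression from the value.

import re

factor, value = re.split(r'\s*\:\s*', 'py{33,34}: docformatter')
print(factor, '->', value)   # py{33,34} -> docformatter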
def get_platforms_set(): '''Returns set of all possible platforms''' # arch and mageia are not in Py2 _supported_dists, so we add them manually # Ubuntu adds itself to the list on Ubuntu platforms = set([x.lower() for x in platform._supported_dists]) platforms |= set(['darwin', 'arch', 'mageia', 'ubuntu']) return platforms
Returns set of all possible platforms
def filter(self, *args, **kwargs): """ Apply filters to the existing nodes in the set. :param kwargs: filter parameters Filters mimic Django's syntax with the double '__' to separate field and operators. e.g `.filter(salary__gt=20000)` results in `salary > 20000`. The following operators are available: * 'lt': less than * 'gt': greater than * 'lte': less than or equal to * 'gte': greater than or equal to * 'ne': not equal to * 'in': matches one of list (or tuple) * 'isnull': is null * 'regex': matches supplied regex (neo4j regex format) * 'exact': exactly match string (just '=') * 'iexact': case insensitive match string * 'contains': contains string * 'icontains': case insensitive contains * 'startswith': string starts with * 'istartswith': case insensitive string starts with * 'endswith': string ends with * 'iendswith': case insensitive string ends with :return: self """ if args or kwargs: self.q_filters = Q(self.q_filters & Q(*args, **kwargs)) return self
Apply filters to the existing nodes in the set. :param kwargs: filter parameters Filters mimic Django's syntax with the double '__' to separate field and operators. e.g `.filter(salary__gt=20000)` results in `salary > 20000`. The following operators are available: * 'lt': less than * 'gt': greater than * 'lte': less than or equal to * 'gte': greater than or equal to * 'ne': not equal to * 'in': matches one of list (or tuple) * 'isnull': is null * 'regex': matches supplied regex (neo4j regex format) * 'exact': exactly match string (just '=') * 'iexact': case insensitive match string * 'contains': contains string * 'icontains': case insensitive contains * 'startswith': string starts with * 'istartswith': case insensitive string starts with * 'endswith': string ends with * 'iendswith': case insensitive string ends with :return: self
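A hedged usage sketch in neomodel style; the `Person` model and its properties are assumptions for illustration.

from neomodel import StructuredNode, StringProperty, IntegerProperty

class Person(StructuredNode):
    name = StringProperty()
    salary = IntegerProperty()

# builds a lazy NodeSet: salary > 20000 AND name starts with 'a' (case-insensitive)
well_paid = Person.nodes.filter(salary__gt=20000, name__istartswith='a')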
def decode(self, msgbuf):
        '''decode a buffer as a MAVLink message'''
        # decode the header
        if msgbuf[0] != PROTOCOL_MARKER_V1:
            headerlen = 10
            try:
                magic, mlen, incompat_flags, compat_flags, seq, srcSystem, srcComponent, msgIdlow, msgIdhigh = struct.unpack('<cBBBBBBHB', msgbuf[:headerlen])
            except struct.error as emsg:
                raise MAVError('Unable to unpack MAVLink header: %s' % emsg)
            msgId = msgIdlow | (msgIdhigh << 16)
            mapkey = msgId
        else:
            headerlen = 6
            try:
                magic, mlen, seq, srcSystem, srcComponent, msgId = struct.unpack('<cBBBBB', msgbuf[:headerlen])
                incompat_flags = 0
                compat_flags = 0
            except struct.error as emsg:
                raise MAVError('Unable to unpack MAVLink header: %s' % emsg)
            mapkey = msgId
        if (incompat_flags & MAVLINK_IFLAG_SIGNED) != 0:
            signature_len = MAVLINK_SIGNATURE_BLOCK_LEN
        else:
            signature_len = 0

        if ord(magic) != PROTOCOL_MARKER_V1 and ord(magic) != PROTOCOL_MARKER_V2:
            raise MAVError("invalid MAVLink prefix '%s'" % magic)
        if mlen != len(msgbuf) - (headerlen + 2 + signature_len):
            raise MAVError('invalid MAVLink message length. Got %u expected %u, msgId=%u headerlen=%u' % (len(msgbuf) - (headerlen + 2 + signature_len), mlen, msgId, headerlen))

        if mapkey not in mavlink_map:
            raise MAVError('unknown MAVLink message ID %s' % str(mapkey))

        # decode the payload
        type = mavlink_map[mapkey]
        fmt = type.format
        order_map = type.orders
        len_map = type.lengths
        crc_extra = type.crc_extra

        # decode the checksum
        try:
            crc, = struct.unpack('<H', msgbuf[-(2 + signature_len):][:2])
        except struct.error as emsg:
            raise MAVError('Unable to unpack MAVLink CRC: %s' % emsg)
        crcbuf = msgbuf[1:-(2 + signature_len)]
        # the CRC covers the message plus the per-type CRC extra byte
        crcbuf.append(crc_extra)
        crc2 = x25crc(crcbuf)
        if crc != crc2.crc:
            raise MAVError('invalid MAVLink CRC in msgID %u 0x%04x should be 0x%04x' % (msgId, crc, crc2.crc))

        sig_ok = False
        if self.signing.secret_key is not None:
            accept_signature = False
            if signature_len == MAVLINK_SIGNATURE_BLOCK_LEN:
                sig_ok = self.check_signature(msgbuf, srcSystem, srcComponent)
                accept_signature = sig_ok
                if sig_ok:
                    self.signing.goodsig_count += 1
                else:
                    self.signing.badsig_count += 1
                if not accept_signature and self.signing.allow_unsigned_callback is not None:
                    accept_signature = self.signing.allow_unsigned_callback(self, msgId)
                    if accept_signature:
                        self.signing.unsigned_count += 1
                    else:
                        self.signing.reject_count += 1
            elif self.signing.allow_unsigned_callback is not None:
                accept_signature = self.signing.allow_unsigned_callback(self, msgId)
                if accept_signature:
                    self.signing.unsigned_count += 1
                else:
                    self.signing.reject_count += 1
            if not accept_signature:
                raise MAVError('Invalid signature')

        csize = struct.calcsize(fmt)
        mbuf = msgbuf[headerlen:-(2 + signature_len)]
        if len(mbuf) < csize:
            # zero pad to give right size
            mbuf.extend([0] * (csize - len(mbuf)))
        if len(mbuf) < csize:
            raise MAVError('Bad message of type %s length %u needs %s' % (
                type, len(mbuf), csize))
        mbuf = mbuf[:csize]
        try:
            t = struct.unpack(fmt, mbuf)
        except struct.error as emsg:
            raise MAVError('Unable to unpack MAVLink payload type=%s fmt=%s payloadLength=%u: %s' % (
                type, fmt, len(mbuf), emsg))

        tlist = list(t)
        # handle sorted fields
        t = tlist[:]
        if sum(len_map) == len(len_map):
            # message has no arrays in it
            for i in range(0, len(tlist)):
                tlist[i] = t[order_map[i]]
        else:
            # message has some arrays
            tlist = []
            for i in range(0, len(order_map)):
                order = order_map[i]
                L = len_map[order]
                tip = sum(len_map[:order])
                field = t[tip]
                if L == 1 or isinstance(field, str):
                    tlist.append(field)
                else:
                    tlist.append(t[tip:(tip + L)])

        # terminate any strings
        for i in range(0, len(tlist)):
            if isinstance(tlist[i], str):
                tlist[i] = str(MAVString(tlist[i]))
        t = tuple(tlist)
        # construct the message object
        try:
            m = type(*t)
        except Exception as emsg:
            raise MAVError('Unable to instantiate MAVLink message of type %s : %s' % (type, emsg))
        m._signed = sig_ok
        if m._signed:
            m._link_id = msgbuf[-13]
        m._msgbuf = msgbuf
        m._payload = msgbuf[6:-(2 + signature_len)]
        m._crc = crc
        m._header = MAVLink_header(msgId, incompat_flags, compat_flags, mlen, seq, srcSystem, srcComponent)
        return m
decode a buffer as a MAVLink message
def get_queryset(self, value, row, *args, **kwargs):
        """
        Returns a queryset of all objects for this Model.

        Overwrite this method if you want to limit the pool of objects from
        which the related object is retrieved.

        :param value: The field's value in the datasource.
        :param row: The datasource's current row.

        As an example; if you'd like to have ForeignKeyWidget look up a Person
        by both their first **and** last name columns, you could subclass the
        widget like so::

            class FullNameForeignKeyWidget(ForeignKeyWidget):
                def get_queryset(self, value, row):
                    return self.model.objects.filter(
                        first_name__iexact=row["first_name"],
                        last_name__iexact=row["last_name"]
                    )
        """
        return self.model.objects.all()
Returns a queryset of all objects for this Model. Overwrite this method if you want to limit the pool of objects from which the related object is retrieved. :param value: The field's value in the datasource. :param row: The datasource's current row. As an example; if you'd like to have ForeignKeyWidget look up a Person by both their first **and** last name columns, you could subclass the widget like so:: class FullNameForeignKeyWidget(ForeignKeyWidget): def get_queryset(self, value, row): return self.model.objects.filter( first_name__iexact=row["first_name"], last_name__iexact=row["last_name"] )
def _init_client(self, from_archive=False): """Init client""" return BugzillaClient(self.url, user=self.user, password=self.password, max_bugs_csv=self.max_bugs_csv, archive=self.archive, from_archive=from_archive)
Init client
def _try_resolve_sam_resource_refs(self, input, supported_resource_refs): """ Try to resolve SAM resource references on the given template. If the given object looks like one of the supported intrinsics, it calls the appropriate resolution on it. If not, this method returns the original input unmodified. :param dict input: Dictionary that may represent an intrinsic function :param SupportedResourceReferences supported_resource_refs: Object containing information about available resource references and the values they resolve to. :return: Modified input dictionary with references resolved """ if not self._is_intrinsic_dict(input): return input function_type = list(input.keys())[0] return self.supported_intrinsics[function_type].resolve_resource_refs(input, supported_resource_refs)
Try to resolve SAM resource references on the given template. If the given object looks like one of the supported intrinsics, it calls the appropriate resolution on it. If not, this method returns the original input unmodified. :param dict input: Dictionary that may represent an intrinsic function :param SupportedResourceReferences supported_resource_refs: Object containing information about available resource references and the values they resolve to. :return: Modified input dictionary with references resolved
def _postprocess_for_cut(fac, bins, retbins, x_is_series,
                         series_index, name, dtype):
    """
    Handles post-processing for the cut method: reattaches the index
    information when the originally passed data was a Series.
    """
    if x_is_series:
        fac = Series(fac, index=series_index, name=name)

    if not retbins:
        return fac

    bins = _convert_bin_to_datelike_type(bins, dtype)

    return fac, bins
Handles post-processing for the cut method: reattaches the index information when the originally passed data was a Series.
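The behaviour this helper implements is observable from the public `pandas.cut`: cutting a Series preserves its index and name in the result. A self-contained check:

import pandas as pd

s = pd.Series([1, 7, 5, 4], index=list('abcd'), name='score')
binned = pd.cut(s, bins=3)
print(binned.index.tolist())   # ['a', 'b', 'c', 'd'] -- index kept
print(binned.name)             # 'score' -- name kept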
def endpoint_catalog(catalog=None):  # noqa: E501
    """Retrieve the endpoint catalog  # noqa: E501

    :param catalog: The data needed to get a catalog
    :type catalog: dict | bytes

    :rtype: Response
    """
    if connexion.request.is_json:
        catalog = UserAuth.from_dict(connexion.request.get_json())  # noqa: E501
    if not hasAccess():
        return redirectUnauthorized()
    driver = LoadedDrivers.getDefaultDriver()
    auth = catalog if catalog else None
    return Response(status=200, body=driver.getCatalog(auth))
Retrieve the endpoint catalog  # noqa: E501

:param catalog: The data needed to get a catalog
:type catalog: dict | bytes

:rtype: Response
def compute_threat_list_diff( self, threat_type, constraints, version_token=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Gets the most recent threat list diffs. Example: >>> from google.cloud import webrisk_v1beta1 >>> from google.cloud.webrisk_v1beta1 import enums >>> >>> client = webrisk_v1beta1.WebRiskServiceV1Beta1Client() >>> >>> # TODO: Initialize `threat_type`: >>> threat_type = enums.ThreatType.THREAT_TYPE_UNSPECIFIED >>> >>> # TODO: Initialize `constraints`: >>> constraints = {} >>> >>> response = client.compute_threat_list_diff(threat_type, constraints) Args: threat_type (~google.cloud.webrisk_v1beta1.types.ThreatType): Required. The ThreatList to update. constraints (Union[dict, ~google.cloud.webrisk_v1beta1.types.Constraints]): The constraints associated with this request. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.webrisk_v1beta1.types.Constraints` version_token (bytes): The current version token of the client for the requested list (the client version that was received from the last successful diff). retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.webrisk_v1beta1.types.ComputeThreatListDiffResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "compute_threat_list_diff" not in self._inner_api_calls: self._inner_api_calls[ "compute_threat_list_diff" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.compute_threat_list_diff, default_retry=self._method_configs["ComputeThreatListDiff"].retry, default_timeout=self._method_configs["ComputeThreatListDiff"].timeout, client_info=self._client_info, ) request = webrisk_pb2.ComputeThreatListDiffRequest( threat_type=threat_type, constraints=constraints, version_token=version_token, ) return self._inner_api_calls["compute_threat_list_diff"]( request, retry=retry, timeout=timeout, metadata=metadata )
Gets the most recent threat list diffs. Example: >>> from google.cloud import webrisk_v1beta1 >>> from google.cloud.webrisk_v1beta1 import enums >>> >>> client = webrisk_v1beta1.WebRiskServiceV1Beta1Client() >>> >>> # TODO: Initialize `threat_type`: >>> threat_type = enums.ThreatType.THREAT_TYPE_UNSPECIFIED >>> >>> # TODO: Initialize `constraints`: >>> constraints = {} >>> >>> response = client.compute_threat_list_diff(threat_type, constraints) Args: threat_type (~google.cloud.webrisk_v1beta1.types.ThreatType): Required. The ThreatList to update. constraints (Union[dict, ~google.cloud.webrisk_v1beta1.types.Constraints]): The constraints associated with this request. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.webrisk_v1beta1.types.Constraints` version_token (bytes): The current version token of the client for the requested list (the client version that was received from the last successful diff). retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.webrisk_v1beta1.types.ComputeThreatListDiffResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def CrearLiquidacion(self, tipo_cbte, pto_vta, nro_cbte, fecha, periodo,
                         iibb_adquirente=None, domicilio_sede=None,
                         inscripcion_registro_publico=None,
                         datos_adicionales=None,
                         alicuota_iva=None,
                         **kwargs):
        "Internally initialize the data of a settlement (liquidación) to authorize it"
        # build the dictionary with the general fields of the settlement:
        liq = {'tipoComprobante': tipo_cbte,
               'puntoVenta': pto_vta,
               'nroComprobante': nro_cbte,
               'fechaComprobante': fecha,
               'periodo': periodo,
               'iibbAdquirente': iibb_adquirente,
               'domicilioSede': domicilio_sede,
               'inscripcionRegistroPublico': inscripcion_registro_publico,
               'datosAdicionales': datos_adicionales,
               'alicuotaIVA': alicuota_iva,
               }
        liq["condicionVenta"] = []
        self.solicitud = dict(liquidacion=liq,
                              bonificacionPenalizacion=[],
                              otroImpuesto=[],
                              remito=[],
                              )
        return True
Internally initialize the data of a settlement (liquidación) to authorize it
def bishop88_i_from_v(voltage, photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth, method='newton'): """ Find current given any voltage. Parameters ---------- voltage : numeric voltage (V) in volts [V] photocurrent : numeric photogenerated current (Iph or IL) in amperes [A] saturation_current : numeric diode dark or saturation current (Io or Isat) in amperes [A] resistance_series : numeric series resistance (Rs) in ohms resistance_shunt : numeric shunt resistance (Rsh) in ohms nNsVth : numeric product of diode ideality factor (n), number of series cells (Ns), and thermal voltage (Vth = k_b * T / q_e) in volts [V] method : str one of two optional search methods: either ``'brentq'``, a reliable and bounded method or ``'newton'`` which is the default. Returns ------- current : numeric current (I) at the specified voltage (V) in amperes [A] """ # collect args args = (photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth) def fv(x, v, *a): # calculate voltage residual given diode voltage "x" return bishop88(x, *a)[1] - v if method.lower() == 'brentq': # first bound the search using voc voc_est = estimate_voc(photocurrent, saturation_current, nNsVth) # brentq only works with scalar inputs, so we need a set up function # and np.vectorize to repeatedly call the optimizer with the right # arguments for possible array input def vd_from_brent(voc, v, iph, isat, rs, rsh, gamma): return brentq(fv, 0.0, voc, args=(v, iph, isat, rs, rsh, gamma)) vd_from_brent_vectorized = np.vectorize(vd_from_brent) vd = vd_from_brent_vectorized(voc_est, voltage, *args) elif method.lower() == 'newton': # make sure all args are numpy arrays if max size > 1 # if voltage is an array, then make a copy to use for initial guess, v0 args, v0 = _prepare_newton_inputs((voltage,), args, voltage) vd = newton(func=lambda x, *a: fv(x, voltage, *a), x0=v0, fprime=lambda x, *a: bishop88(x, *a, gradients=True)[4], args=args) else: raise NotImplementedError("Method '%s' isn't implemented" % method) return bishop88(vd, *args)[0]
Find current given any voltage. Parameters ---------- voltage : numeric voltage (V) in volts [V] photocurrent : numeric photogenerated current (Iph or IL) in amperes [A] saturation_current : numeric diode dark or saturation current (Io or Isat) in amperes [A] resistance_series : numeric series resistance (Rs) in ohms resistance_shunt : numeric shunt resistance (Rsh) in ohms nNsVth : numeric product of diode ideality factor (n), number of series cells (Ns), and thermal voltage (Vth = k_b * T / q_e) in volts [V] method : str one of two optional search methods: either ``'brentq'``, a reliable and bounded method or ``'newton'`` which is the default. Returns ------- current : numeric current (I) at the specified voltage (V) in amperes [A]
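A hedged usage sketch; the import path matches recent pvlib releases, and the single-diode parameter values are illustrative rather than taken from a real module datasheet.

from pvlib.singlediode import bishop88_i_from_v

current = bishop88_i_from_v(
    voltage=40.0,             # volts
    photocurrent=6.0,         # amperes
    saturation_current=1e-9,  # amperes
    resistance_series=0.5,    # ohms
    resistance_shunt=500.0,   # ohms
    nNsVth=2.0,               # volts, roughly n * Ns * Vth for a 72-cell module
)
print(current)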
def _unpack_result(klass, result): '''Convert a D-BUS return variant into an appropriate return value''' result = result.unpack() # to be compatible with standard Python behaviour, unbox # single-element tuples and return None for empty result tuples if len(result) == 1: result = result[0] elif len(result) == 0: result = None return result
Convert a D-BUS return variant into an appropriate return value
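The unboxing rules are easy to see with plain tuples standing in for unpacked GLib variants, a minimal sketch:

def unbox(result):
    if len(result) == 1:
        return result[0]
    if len(result) == 0:
        return None
    return result

print(unbox(('hostname',)))   # 'hostname'  -- single element unboxed
print(unbox(()))              # None        -- empty tuple becomes None
print(unbox((1, 2)))          # (1, 2)      -- multi-element left alone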
def find_all(self, *args, **kwargs): """ Like :meth:`find`, but selects all matches (not just the first one). Returns a :class:`Collection`. If no elements match, this returns a Collection with no items. """ op = operator.methodcaller('find_all', *args, **kwargs) return self._wrap_multi(op)
Like :meth:`find`, but selects all matches (not just the first one). Returns a :class:`Collection`. If no elements match, this returns a Collection with no items.
def get_lock_requests(self): """Take the current context, and the current patch locks, and determine the effective requests that will be added to the main request. Returns: A dict of (PatchLock, [Requirement]) tuples. Each requirement will be a weak package reference. If there is no current context, an empty dict will be returned. """ d = defaultdict(list) if self._context: for variant in self._context.resolved_packages: name = variant.name version = variant.version lock = self.patch_locks.get(name) if lock is None: lock = self.default_patch_lock request = get_lock_request(name, version, lock) if request is not None: d[lock].append(request) return d
Take the current context, and the current patch locks, and determine the effective requests that will be added to the main request. Returns: A dict of (PatchLock, [Requirement]) tuples. Each requirement will be a weak package reference. If there is no current context, an empty dict will be returned.
def calls(self, call): """Redefine a call. The fake method will execute your function. I.E.:: >>> f = Fake().provides('hello').calls(lambda: 'Why, hello there') >>> f.hello() 'Why, hello there' """ exp = self._get_current_call() exp.call_replacement = call return self
Redefine a call. The fake method will execute your function. I.E.:: >>> f = Fake().provides('hello').calls(lambda: 'Why, hello there') >>> f.hello() 'Why, hello there'
def subscriptions_list(**kwargs): ''' .. versionadded:: 2019.2.0 List all subscriptions for a tenant. CLI Example: .. code-block:: bash salt-call azurearm_resource.subscriptions_list ''' result = {} subconn = __utils__['azurearm.get_client']('subscription', **kwargs) try: subs = __utils__['azurearm.paged_object_to_list'](subconn.subscriptions.list()) for sub in subs: result[sub['subscription_id']] = sub except CloudError as exc: __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs) result = {'error': str(exc)} return result
.. versionadded:: 2019.2.0 List all subscriptions for a tenant. CLI Example: .. code-block:: bash salt-call azurearm_resource.subscriptions_list
def set_group(self, group): """Set group size of DMatrix (used for ranking). Parameters ---------- group : array like Group size of each group """ _check_call(_LIB.XGDMatrixSetGroup(self.handle, c_array(ctypes.c_uint, group), c_bst_ulong(len(group))))
Set group size of DMatrix (used for ranking). Parameters ---------- group : array like Group size of each group
def _group(self, rdd):
        """Group together the values with the same key."""
        # list.append returns None, so reducing with it breaks after the first
        # merge; wrap each value in a list and concatenate instead.
        return (rdd.map(lambda kv: (kv[0], [kv[1]]))
                   .reduceByKey(lambda x, y: x + y))
Group together the values with the same key.
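The intended semantics can be sketched with plain Python, using a dict in place of `reduceByKey`:

pairs = [('a', 1), ('b', 2), ('a', 3)]
grouped = {}
for k, v in pairs:
    grouped.setdefault(k, []).append(v)
print(grouped)   # {'a': [1, 3], 'b': [2]}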
def _advapi32_generate_pair(algorithm, bit_size=None):
    """
    Generates a public/private key pair using CryptoAPI

    :param algorithm:
        The key algorithm - "rsa" or "dsa"

    :param bit_size:
        An integer - used for "rsa" and "dsa". For "rsa" the value may be
        1024, 2048, 3072 or 4096. For "dsa" the value may be 1024.

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A 2-element tuple of (PublicKey, PrivateKey). The contents of each key
        may be saved by calling .asn1.dump().
    """

    if algorithm == 'rsa':
        provider = Advapi32Const.MS_ENH_RSA_AES_PROV
        algorithm_id = Advapi32Const.CALG_RSA_SIGN
        struct_type = 'RSABLOBHEADER'
    else:
        provider = Advapi32Const.MS_ENH_DSS_DH_PROV
        algorithm_id = Advapi32Const.CALG_DSS_SIGN
        struct_type = 'DSSBLOBHEADER'

    context_handle = None
    key_handle = None

    try:
        context_handle = open_context_handle(provider, verify_only=False)

        key_handle_pointer = new(advapi32, 'HCRYPTKEY *')
        flags = (bit_size << 16) | Advapi32Const.CRYPT_EXPORTABLE
        res = advapi32.CryptGenKey(context_handle, algorithm_id, flags, key_handle_pointer)
        handle_error(res)

        key_handle = unwrap(key_handle_pointer)

        out_len = new(advapi32, 'DWORD *')
        res = advapi32.CryptExportKey(
            key_handle,
            null(),
            Advapi32Const.PRIVATEKEYBLOB,
            0,
            null(),
            out_len
        )
        handle_error(res)

        buffer_length = deref(out_len)
        buffer_ = buffer_from_bytes(buffer_length)

        res = advapi32.CryptExportKey(
            key_handle,
            null(),
            Advapi32Const.PRIVATEKEYBLOB,
            0,
            buffer_,
            out_len
        )
        handle_error(res)

        blob_struct_pointer = struct_from_buffer(advapi32, struct_type, buffer_)
        blob_struct = unwrap(blob_struct_pointer)
        struct_size = sizeof(advapi32, blob_struct)

        private_blob = bytes_from_buffer(buffer_, buffer_length)[struct_size:]

        if algorithm == 'rsa':
            public_info, private_info = _advapi32_interpret_rsa_key_blob(bit_size, blob_struct, private_blob)

        else:
            # The public key for a DSA key is not available from the private
            # key blob, so we have to separately export the public key
            public_out_len = new(advapi32, 'DWORD *')
            res = advapi32.CryptExportKey(
                key_handle,
                null(),
                Advapi32Const.PUBLICKEYBLOB,
                0,
                null(),
                public_out_len
            )
            handle_error(res)

            public_buffer_length = deref(public_out_len)
            public_buffer = buffer_from_bytes(public_buffer_length)

            res = advapi32.CryptExportKey(
                key_handle,
                null(),
                Advapi32Const.PUBLICKEYBLOB,
                0,
                public_buffer,
                public_out_len
            )
            handle_error(res)

            public_blob = bytes_from_buffer(public_buffer, public_buffer_length)[struct_size:]

            public_info, private_info = _advapi32_interpret_dsa_key_blob(bit_size, public_blob, private_blob)

        return (load_public_key(public_info), load_private_key(private_info))

    finally:
        if context_handle:
            close_context_handle(context_handle)
        if key_handle:
            advapi32.CryptDestroyKey(key_handle)
Generates a public/private key pair using CryptoAPI :param algorithm: The key algorithm - "rsa" or "dsa" :param bit_size: An integer - used for "rsa" and "dsa". For "rsa" the value may be 1024, 2048, 3072 or 4096. For "dsa" the value may be 1024. :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A 2-element tuple of (PublicKey, PrivateKey). The contents of each key may be saved by calling .asn1.dump().
def normalize_partial_name(decl): """ Cached variant of normalize Args: decl (declaration.declaration_t): the declaration Returns: str: normalized name """ if decl.cache.normalized_partial_name is None: decl.cache.normalized_partial_name = normalize(decl.partial_name) return decl.cache.normalized_partial_name
Cached variant of normalize Args: decl (declaration.declaration_t): the declaration Returns: str: normalized name
def f_translate_key(self, key): """Translates integer indices into the appropriate names""" if isinstance(key, int): if key == 0: key = self.v_name else: key = self.v_name + '_%d' % key return key
Translates integer indices into the appropriate names
def add_step(step_name, func): """ Add a step function to Orca. The function's argument names and keyword argument values will be matched to registered variables when the function needs to be evaluated by Orca. The argument name "iter_var" may be used to have the current iteration variable injected. Parameters ---------- step_name : str func : callable """ if isinstance(func, Callable): logger.debug('registering step {!r}'.format(step_name)) _STEPS[step_name] = _StepFuncWrapper(step_name, func) else: raise TypeError('func must be a callable')
Add a step function to Orca. The function's argument names and keyword argument values will be matched to registered variables when the function needs to be evaluated by Orca. The argument name "iter_var" may be used to have the current iteration variable injected. Parameters ---------- step_name : str func : callable
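A hedged usage sketch with the public orca API that wraps this helper; the step body is illustrative.

import orca

def grow_population(iter_var):
    # `iter_var` is injected by orca with the current iteration value
    print('running step for year', iter_var)

orca.add_step('grow_population', grow_population)
orca.run(['grow_population'], iter_vars=[2024, 2025])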
def zoom(params, factor): """ Applies a zoom on the current parameters. Computes the top-left plane-space coordinates from the Mandelbrot-space coordinates. :param params: Current application parameters. :param factor: Zoom factor by which the zoom ratio is divided (bigger factor, more zoom) """ params.zoom /= factor n_x = params.mb_cx / params.zoom n_y = params.mb_cy / params.zoom params.plane_x0 = int((n_x + 1.0) * params.plane_w / (2.0 * params.plane_ratio)) - params.plane_w // 2 params.plane_y0 = int((n_y + 1.0) * params.plane_h / 2.0) - params.plane_h // 2
Applies a zoom on the current parameters. Computes the top-left plane-space coordinates from the Mandelbrot-space coordinates. :param params: Current application parameters. :param factor: Zoom factor by which the zoom ratio is divided (bigger factor, more zoom)
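A self-contained numeric walk-through; the parameter object below is a stand-in with illustrative values, not the application's real params class.

class Params:  # stand-in for the application's parameter object
    zoom = 1.0
    mb_cx, mb_cy = -0.5, 0.0
    plane_w, plane_h = 800, 600
    plane_ratio = 800 / 600
    plane_x0 = plane_y0 = 0

params = Params()
zoom(params, 2.0)                          # ratio halves -> 2x magnification
print(params.zoom)                         # 0.5
print(params.plane_x0, params.plane_y0)    # -400 0, the recentred corner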
def read_until_done(self, command, timeout=None): """Yield messages read until we receive a 'DONE' command. Read messages of the given command until we receive a 'DONE' command. If a command different than the requested one is received, an AdbProtocolError is raised. Args: command: The command to expect, like 'DENT' or 'DATA'. timeout: The timeouts.PolledTimeout to use for this operation. Yields: Messages read, of type self.RECV_MSG_TYPE, see read_message(). Raises: AdbProtocolError: If an unexpected command is read. AdbRemoteError: If a 'FAIL' message is read. """ message = self.read_message(timeout) while message.command != 'DONE': message.assert_command_is(command) yield message message = self.read_message(timeout)
Yield messages read until we receive a 'DONE' command. Read messages of the given command until we receive a 'DONE' command. If a command different than the requested one is received, an AdbProtocolError is raised. Args: command: The command to expect, like 'DENT' or 'DATA'. timeout: The timeouts.PolledTimeout to use for this operation. Yields: Messages read, of type self.RECV_MSG_TYPE, see read_message(). Raises: AdbProtocolError: If an unexpected command is read. AdbRemoteError: If a 'FAIL' message is read.
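A self-contained sketch with a stub transport, showing how the generator terminates on the 'DONE' command; the message shape here is an assumption for illustration.

from collections import namedtuple

Msg = namedtuple('Msg', 'command data')

class StubStream:
    def __init__(self, msgs):
        self._msgs = iter(msgs)

    def read_message(self, timeout=None):
        return next(self._msgs)

    def read_until_done(self, command, timeout=None):
        # simplified version of the method above, without the error types
        message = self.read_message(timeout)
        while message.command != 'DONE':
            assert message.command == command
            yield message
            message = self.read_message(timeout)

stream = StubStream([Msg('DENT', 'a.txt'), Msg('DENT', 'b.txt'), Msg('DONE', '')])
print([m.data for m in stream.read_until_done('DENT')])   # ['a.txt', 'b.txt']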
def clean_up(self):
        """
        de-select grid cols, refresh grid
        """
        if self.selected_col:
            col_label_value = self.grid.GetColLabelValue(self.selected_col)
            col_label_value = col_label_value.strip('\nEDIT ALL')
            self.grid.SetColLabelValue(self.selected_col, col_label_value)
            for row in range(self.grid.GetNumberRows()):
                self.grid.SetCellBackgroundColour(row, self.selected_col, 'white')
        self.grid.ForceRefresh()
de-select grid cols, refresh grid
def lint(fix_imports): """ Run flake8. """ from glob import glob from subprocess import call # FIXME: should support passing these in an option skip = [ 'ansible', 'db', 'flask_sessions', 'node_modules', 'requirements', ] root_files = glob('*.py') root_dirs = [name for name in next(os.walk('.'))[1] if not name.startswith('.')] files_and_dirs = [x for x in root_files + root_dirs if x not in skip] def execute_tool(desc, *args): command = list(args) + files_and_dirs click.echo(f"{desc}: {' '.join(command)}") ret = call(command) if ret != 0: exit(ret) if fix_imports: execute_tool('Fixing import order', 'isort', '-rc') execute_tool('Checking code style', 'flake8')
Run flake8.