Columns: code (string, lengths 75 to 104k); docstring (string, lengths 1 to 46.9k)
def sf01(arr):
    """ swap and then flatten axes 0 and 1 """
    s = arr.shape
    return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
swap and then flatten axes 0 and 1
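A minimal usage sketch of sf01 (the shape is illustrative, e.g. a (timesteps, envs, features) rollout buffer):

import numpy as np

arr = np.zeros((4, 8, 3))   # e.g. 4 timesteps x 8 envs x 3 features
sf01(arr).shape             # (32, 3): axes 0 and 1 are swapped, then merged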
def propose_unif(self):
    """Propose a new live point by sampling *uniformly* within the ellipsoid."""
    while True:
        # Sample a point from the ellipsoid.
        u = self.ell.sample(rstate=self.rstate)

        # Check if `u` is within the unit cube.
        if unitcheck(u, self.nonperiodic):
            break  # if it is, we're done!

    return u, self.ell.axes
Propose a new live point by sampling *uniformly* within the ellipsoid.
def fit(self, X, y, sample_weight=None, eval_set=None, eval_metric=None,
        early_stopping_rounds=None, verbose=True):
    # pylint: disable = attribute-defined-outside-init,arguments-differ
    """
    Fit gradient boosting classifier

    Parameters
    ----------
    X : array_like
        Feature matrix
    y : array_like
        Labels
    sample_weight : array_like
        Weight for each instance
    eval_set : list, optional
        A list of (X, y) pairs to use as a validation set for early-stopping
    eval_metric : str, callable, optional
        If a str, should be a built-in evaluation metric to use. See
        doc/parameter.md. If callable, a custom evaluation metric. The call
        signature is func(y_predicted, y_true) where y_true will be a
        DMatrix object such that you may need to call the get_label method.
        It must return a str, value pair where the str is a name for the
        evaluation and value is the value of the evaluation function. This
        objective is always minimized.
    early_stopping_rounds : int, optional
        Activates early stopping. Validation error needs to decrease at
        least every <early_stopping_rounds> round(s) to continue training.
        Requires at least one item in evals. If there's more than one, will
        use the last. Returns the model from the last iteration (not the
        best one). If early stopping occurs, the model will have two
        additional fields: bst.best_score and bst.best_iteration.
    verbose : bool
        If `verbose` and an evaluation set is used, writes the evaluation
        metric measured on the validation set to stderr.
    """
    evals_result = {}
    self.classes_ = list(np.unique(y))
    self.n_classes_ = len(self.classes_)
    if self.n_classes_ > 2:
        # Switch to using a multiclass objective in the underlying XGB instance
        self.objective = "multi:softprob"
        xgb_options = self.get_xgb_params()
        xgb_options['num_class'] = self.n_classes_
    else:
        xgb_options = self.get_xgb_params()

    feval = eval_metric if callable(eval_metric) else None
    if eval_metric is not None:
        if callable(eval_metric):
            eval_metric = None
        else:
            xgb_options.update({"eval_metric": eval_metric})

    if eval_set is not None:
        # TODO: use sample_weight if given?
        evals = list(DMatrix(x[0], label=x[1]) for x in eval_set)
        nevals = len(evals)
        eval_names = ["validation_{}".format(i) for i in range(nevals)]
        evals = list(zip(evals, eval_names))
    else:
        evals = ()

    self._le = LabelEncoder().fit(y)
    training_labels = self._le.transform(y)

    if sample_weight is not None:
        train_dmatrix = DMatrix(X, label=training_labels,
                                weight=sample_weight, missing=self.missing)
    else:
        train_dmatrix = DMatrix(X, label=training_labels,
                                missing=self.missing)

    self._Booster = train(xgb_options, train_dmatrix, self.n_estimators,
                          evals=evals,
                          early_stopping_rounds=early_stopping_rounds,
                          evals_result=evals_result, feval=feval,
                          verbose_eval=verbose)

    if evals_result:
        for val in evals_result.items():
            evals_result_key = list(val[1].keys())[0]
            evals_result[val[0]][evals_result_key] = val[1][evals_result_key]
        self.evals_result_ = evals_result

    if early_stopping_rounds is not None:
        self.best_score = self._Booster.best_score
        self.best_iteration = self._Booster.best_iteration

    return self
Fit gradient boosting classifier Parameters ---------- X : array_like Feature matrix y : array_like Labels sample_weight : array_like Weight for each instance eval_set : list, optional A list of (X, y) pairs to use as a validation set for early-stopping eval_metric : str, callable, optional If a str, should be a built-in evaluation metric to use. See doc/parameter.md. If callable, a custom evaluation metric. The call signature is func(y_predicted, y_true) where y_true will be a DMatrix object such that you may need to call the get_label method. It must return a str, value pair where the str is a name for the evaluation and value is the value of the evaluation function. This objective is always minimized. early_stopping_rounds : int, optional Activates early stopping. Validation error needs to decrease at least every <early_stopping_rounds> round(s) to continue training. Requires at least one item in evals. If there's more than one, will use the last. Returns the model from the last iteration (not the best one). If early stopping occurs, the model will have two additional fields: bst.best_score and bst.best_iteration. verbose : bool If `verbose` and an evaluation set is used, writes the evaluation metric measured on the validation set to stderr.
def read_sql(
    sql,
    con,
    index_col=None,
    coerce_float=True,
    params=None,
    parse_dates=None,
    columns=None,
    chunksize=None,
):
    """
    Read SQL query or database table into a DataFrame.

    Args:
        sql: string or SQLAlchemy Selectable (select or text object) SQL
            query to be executed or a table name.
        con: SQLAlchemy connectable (engine/connection) or database string
            URI or DBAPI2 connection (fallback mode)
        index_col: Column(s) to set as index (MultiIndex).
        coerce_float: Attempts to convert values of non-string, non-numeric
            objects (like decimal.Decimal) to floating point, useful for SQL
            result sets.
        params: List of parameters to pass to execute method. The syntax
            used to pass parameters is database driver dependent. Check your
            database driver documentation for which of the five syntax
            styles, described in PEP 249's paramstyle, is supported.
        parse_dates:
            - List of column names to parse as dates.
            - Dict of ``{column_name: format string}`` where format string
              is strftime compatible in case of parsing string times, or is
              one of (D, s, ns, ms, us) in case of parsing integer
              timestamps.
            - Dict of ``{column_name: arg dict}``, where the arg dict
              corresponds to the keyword arguments of
              :func:`pandas.to_datetime`. Especially useful with databases
              without native Datetime support, such as SQLite.
        columns: List of column names to select from SQL table (only used
            when reading a table).
        chunksize: If specified, return an iterator where `chunksize` is the
            number of rows to include in each chunk.

    Returns:
        Modin DataFrame
    """
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    return DataFrame(query_compiler=BaseFactory.read_sql(**kwargs))
Read SQL query or database table into a DataFrame. Args: sql: string or SQLAlchemy Selectable (select or text object) SQL query to be executed or a table name. con: SQLAlchemy connectable (engine/connection) or database string URI or DBAPI2 connection (fallback mode) index_col: Column(s) to set as index (MultiIndex). coerce_float: Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. params: List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. parse_dates: - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime`. Especially useful with databases without native Datetime support, such as SQLite. columns: List of column names to select from SQL table (only used when reading a table). chunksize: If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. Returns: Modin DataFrame
def bgr2rgb(self):
    """Converts the image data from BGR to RGB using OpenCV's cvtColor."""
    new_data = cv2.cvtColor(self.raw_data, cv2.COLOR_BGR2RGB)
    return ColorImage(new_data, frame=self.frame, encoding='rgb8')
Converts the image data from BGR to RGB using OpenCV's cvtColor.
def customData(self, key, default=None):
    """
    Return the custom data that is stored on this node for the
    given key, returning the default parameter if none was found.

    :param key        <str>
    :param default    <variant>

    :return <variant>
    """
    return self._customData.get(nativestring(key), default)
Return the custom data that is stored on this node for the \ given key, returning the default parameter if none was found. :param key <str> :param default <variant> :return <variant>
def read_wait_cell(self):
    """Read the value of the cell holding the 'wait' value.

    Returns the int value of whatever it has, or None if the cell doesn't
    exist.
    """
    table_state = self.bt_table.read_row(
        TABLE_STATE,
        filter_=bigtable_row_filters.ColumnRangeFilter(
            METADATA, WAIT_CELL, WAIT_CELL))
    if table_state is None:
        utils.dbg('No waiting for new games needed; '
                  'wait_for_game_number column not in table_state')
        return None
    value = table_state.cell_value(METADATA, WAIT_CELL)
    if not value:
        utils.dbg('No waiting for new games needed; '
                  'no value in wait_for_game_number cell '
                  'in table_state')
        return None
    return cbt_intvalue(value)
Read the value of the cell holding the 'wait' value. Returns the int value of whatever it has, or None if the cell doesn't exist.
def set_header(self, msg):
    """Set the second header line text."""
    self.s.move(1, 0)
    self.overwrite_line(msg, attr=curses.A_NORMAL)
Set the second header line text.
def create(context, name):
    """create(context, name)

    Create a tag.

    >>> dcictl tag-create [OPTIONS]

    :param string name: Name of the tag [required]
    """
    result = tag.create(context, name=name)
    utils.format_output(result, context.format)
create(context, name) Create a tag. >>> dcictl tag-create [OPTIONS] :param string name: Name of the tag [required]
def dist_calc_matrix(surf, cortex, labels,
                     exceptions=['Unknown', 'Medial_wall'], verbose=True):
    """
    Calculate exact geodesic distance along cortical surface from a set of
    source nodes.

    "labels" specifies the freesurfer label file to use. All values will be
    used other than those specified in "exceptions" (default: 'Unknown' and
    'Medial_wall').

    returns:
      dist_mat: symmetrical nxn matrix of minimum distance between pairs of labels
      rois: label names in order of n
    """
    cortex_vertices, cortex_triangles = surf_keep_cortex(surf, cortex)

    # remove exceptions from label list:
    label_list = sd.load.get_freesurfer_label(labels, verbose=False)
    rs = np.where([a not in exceptions for a in label_list])[0]
    rois = [label_list[r] for r in rs]
    if verbose:
        print("# of regions: " + str(len(rois)))

    # calculate distance from each region to all nodes:
    dist_roi = []
    for roi in rois:
        source_nodes = sd.load.load_freesurfer_label(labels, roi)
        translated_source_nodes = translate_src(source_nodes, cortex)
        dist_roi.append(gdist.compute_gdist(
            cortex_vertices, cortex_triangles,
            source_indices=translated_source_nodes))
        if verbose:
            print(roi)
    dist_roi = np.array(dist_roi)

    # Calculate min distance per region:
    dist_mat = []
    for roi in rois:
        source_nodes = sd.load.load_freesurfer_label(labels, roi)
        translated_source_nodes = translate_src(source_nodes, cortex)
        dist_mat.append(np.min(dist_roi[:, translated_source_nodes], axis=1))
    dist_mat = np.array(dist_mat)

    return dist_mat, rois
Calculate exact geodesic distance along cortical surface from a set of source nodes. "labels" specifies the freesurfer label file to use. All values will be used other than those specified in "exceptions" (default: 'Unknown' and 'Medial_wall'). returns: dist_mat: symmetrical nxn matrix of minimum distance between pairs of labels rois: label names in order of n
def raw_value(self):
    """
    Property to return the variable defined in ``django.conf.settings``.

    Returns:
        object: the variable defined in ``django.conf.settings``.

    Raises:
        AttributeError: if the variable is missing.
        KeyError: if the item is missing from nested setting.
    """
    if self.parent_setting is not None:
        return self.parent_setting.raw_value[self.full_name]
    else:
        return getattr(settings, self.full_name)
Property to return the variable defined in ``django.conf.settings``. Returns: object: the variable defined in ``django.conf.settings``. Raises: AttributeError: if the variable is missing. KeyError: if the item is missing from nested setting.
def _build_row(self, row, parent, align, border):
    """Given a row of text, build table cells."""
    tr = etree.SubElement(parent, 'tr')
    tag = 'td'
    if parent.tag == 'thead':
        tag = 'th'
    cells = self._split_row(row, border)
    # We use align here rather than cells to ensure every row
    # contains the same number of columns.
    for i, a in enumerate(align):
        c = etree.SubElement(tr, tag)
        try:
            c.text = cells[i].strip()
        except IndexError:
            c.text = ""
        if a:
            c.set('align', a)
Given a row of text, build table cells.
def _convert_md_type(self, type_to_convert: str):
    """Metadata types are not consistent in Isogeo API.

    A vector dataset is defined as vector-dataset in query filter but as
    vectorDataset in resource (metadata) details.

    see: https://github.com/isogeo/isogeo-api-py-minsdk/issues/29
    """
    if type_to_convert in FILTER_TYPES:
        return FILTER_TYPES.get(type_to_convert)
    elif type_to_convert in FILTER_TYPES.values():
        return [k for k, v in FILTER_TYPES.items() if v == type_to_convert][0]
    else:
        raise ValueError(
            "Incorrect metadata type to convert: {}".format(type_to_convert)
        )
Metadata types are not consistent in Isogeo API. A vector dataset is defined as vector-dataset in query filter but as vectorDataset in resource (metadata) details. see: https://github.com/isogeo/isogeo-api-py-minsdk/issues/29
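The two-way lookup above can be exercised in isolation with a hypothetical two-entry mapping (the real FILTER_TYPES dict in the SDK is larger; this sample is an assumption for illustration):

FILTER_TYPES = {"vector-dataset": "vectorDataset", "raster-dataset": "rasterDataset"}

FILTER_TYPES.get("vector-dataset")                                # 'vectorDataset'
[k for k, v in FILTER_TYPES.items() if v == "vectorDataset"][0]   # 'vector-dataset'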
def create(cls, interface_id, address=None, network_value=None, nodeid=1, **kw):
    """
    :param int interface_id: interface id
    :param str address: address of this interface
    :param str network_value: network of this interface in cidr x.x.x.x/24
    :param int nodeid: if a cluster, identifies which node this is for
    :rtype: dict
    """
    data = {'address': address,
            'auth_request': False,
            'auth_request_source': False,
            'primary_heartbeat': False,
            'backup_heartbeat': False,
            'backup_mgt': False,
            'dynamic': False,
            'network_value': network_value,
            'nicid': str(interface_id),
            'nodeid': nodeid,
            'outgoing': False,
            'primary_mgt': False}

    for k, v in kw.items():
        data.update({k: v})

    if 'dynamic' in kw and kw['dynamic'] is not None:
        for key in ('address', 'network_value'):
            data.pop(key, None)
        if data['primary_mgt']:
            # Have to set auth_request to a different interface for DHCP
            data['auth_request'] = False
        if data.get('dynamic_index', None) is None:
            data['dynamic_index'] = 1
    elif data.get('automatic_default_route') is None:
        data.update(automatic_default_route=True)

    return cls(data)
:param int interface_id: interface id :param str address: address of this interface :param str network_value: network of this interface in cidr x.x.x.x/24 :param int nodeid: if a cluster, identifies which node this is for :rtype: dict
def get_scanner_param_mandatory(self, param):
    """Return whether a scanner parameter is mandatory."""
    assert isinstance(param, str)
    entry = self.scanner_params.get(param)
    if not entry:
        return False
    return entry.get('mandatory')
Returns whether a scanner parameter is mandatory.
def search_subscriptions(self, **kwargs):
    """Search for all subscriptions by parameters."""
    params = [(key, kwargs[key]) for key in sorted(kwargs.keys())]
    url = "/notification/v1/subscription?{}".format(
        urlencode(params, doseq=True))

    response = NWS_DAO().getURL(url, self._read_headers)
    if response.status != 200:
        raise DataFailureException(url, response.status, response.data)

    data = json.loads(response.data)
    subscriptions = []
    for datum in data.get("Subscriptions", []):
        subscriptions.append(self._subscription_from_json(datum))
    return subscriptions
Search for all subscriptions by parameters
def check(text):
    """Suggest the preferred forms."""
    err = "pinker.latin"
    msg = "Use English. '{}' is the preferred form."

    preferences = [
        ["other things being equal",          ["ceteris paribus"]],
        ["among other things",                ["inter alia"]],
        ["in and of itself",                  ["simpliciter"]],
        ["having made the necessary changes", ["mutatis mutandis"]],
    ]

    return preferred_forms_check(text, preferences, err, msg)
Suggest the preferred forms.
def BSR_Get_Row(A, i):
    """Return row i in BSR matrix A. Only nonzero entries are returned.

    Parameters
    ----------
    A : bsr_matrix
        Input matrix
    i : int
        Row number

    Returns
    -------
    z : array
        Actual nonzero values for row i
    colindx : array
        Array of column indices for the nonzeros of row i

    Examples
    --------
    >>> from numpy import array
    >>> from scipy.sparse import bsr_matrix
    >>> from pyamg.util.BSR_utils import BSR_Get_Row
    >>> indptr = array([0,2,3,6])
    >>> indices = array([0,2,2,0,1,2])
    >>> data = array([1,2,3,4,5,6]).repeat(4).reshape(6,2,2)
    >>> B = bsr_matrix( (data,indices,indptr), shape=(6,6) )
    >>> Brow = BSR_Get_Row(B,2)
    >>> print Brow[1]
    [4 5]
    """
    blocksize = A.blocksize[0]
    BlockIndx = int(i / blocksize)
    rowstart = A.indptr[BlockIndx]
    rowend = A.indptr[BlockIndx + 1]
    localRowIndx = i % blocksize

    # Get z
    indys = A.data[rowstart:rowend, localRowIndx, :].nonzero()
    z = A.data[rowstart:rowend, localRowIndx, :][indys[0], indys[1]]

    colindx = np.zeros((1, len(z)), dtype=np.int32)
    counter = 0
    for j in range(rowstart, rowend):
        coloffset = blocksize * A.indices[j]
        indys = A.data[j, localRowIndx, :].nonzero()[0]
        increment = indys.shape[0]
        colindx[0, counter:(counter + increment)] = coloffset + indys
        counter += increment

    return np.mat(z).T, colindx[0, :]
Return row i in BSR matrix A. Only nonzero entries are returned Parameters ---------- A : bsr_matrix Input matrix i : int Row number Returns ------- z : array Actual nonzero values for row i colindx Array of column indices for the nonzeros of row i Examples -------- >>> from numpy import array >>> from scipy.sparse import bsr_matrix >>> from pyamg.util.BSR_utils import BSR_Get_Row >>> indptr = array([0,2,3,6]) >>> indices = array([0,2,2,0,1,2]) >>> data = array([1,2,3,4,5,6]).repeat(4).reshape(6,2,2) >>> B = bsr_matrix( (data,indices,indptr), shape=(6,6) ) >>> Brow = BSR_Get_Row(B,2) >>> print Brow[1] [4 5]
def get_t_factor(t1, t2):
    """Time difference between two datetimes, expressed as decimal year."""
    t_factor = None
    if t1 is not None and t2 is not None and t1 != t2:
        dt = t2 - t1
        year = timedelta(days=365.25)
        t_factor = abs(dt.total_seconds() / year.total_seconds())
    return t_factor
Time difference between two datetimes, expressed as decimal year
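A worked example of the decimal-year arithmetic above (the dates are chosen arbitrarily):

from datetime import datetime, timedelta

t1 = datetime(2020, 1, 1)
t2 = datetime(2021, 7, 2)                               # 548 days later (2020 is a leap year)
year = timedelta(days=365.25)
abs((t2 - t1).total_seconds() / year.total_seconds())   # ~1.5003 decimal years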
def CheckCondition(condition, check_object):
    """Check if a condition matches an object.

    Args:
      condition: A string condition e.g. "os == 'Windows'"
      check_object: Object to validate, e.g. an rdf_client.KnowledgeBase()

    Returns:
      True or False depending on whether the condition matches.

    Raises:
      ConditionError: If condition is bad.
    """
    try:
        of = objectfilter.Parser(condition).Parse()
        compiled_filter = of.Compile(objectfilter.BaseFilterImplementation)
        return compiled_filter.Matches(check_object)
    except objectfilter.Error as e:
        raise ConditionError(e)
Check if a condition matches an object. Args: condition: A string condition e.g. "os == 'Windows'" check_object: Object to validate, e.g. an rdf_client.KnowledgeBase() Returns: True or False depending on whether the condition matches. Raises: ConditionError: If condition is bad.
def print_all(self, out=sys.stdout):
    """
    Prints all of the thread profiler results to a given file.
    (stdout by default)
    """
    THREAD_FUNC_NAME_LEN = 25
    THREAD_NAME_LEN = 13
    THREAD_ID_LEN = 15
    THREAD_SCHED_CNT_LEN = 10

    out.write(CRLF)
    out.write("name tid ttot scnt")
    out.write(CRLF)
    for stat in self:
        out.write(StatString(stat.name).ltrim(THREAD_NAME_LEN))
        out.write(" " * COLUMN_GAP)
        out.write(StatString(stat.id).rtrim(THREAD_ID_LEN))
        out.write(" " * COLUMN_GAP)
        out.write(StatString(_fft(stat.ttot)).rtrim(TIME_COLUMN_LEN))
        out.write(" " * COLUMN_GAP)
        out.write(StatString(stat.sched_count).rtrim(THREAD_SCHED_CNT_LEN))
        out.write(CRLF)
Prints all of the thread profiler results to a given file. (stdout by default)
def decimal_field_data(field, **kwargs):
    """
    Return random value for DecimalField

    >>> result = any_form_field(forms.DecimalField(max_value=100, min_value=11, max_digits=4, decimal_places = 2))
    >>> type(result)
    <type 'str'>
    >>> from decimal import Decimal
    >>> Decimal(result) >= 11, Decimal(result) <= Decimal('99.99')
    (True, True)
    """
    min_value = 0
    max_value = 10
    from django.core.validators import MinValueValidator, MaxValueValidator
    for elem in field.validators:
        if isinstance(elem, MinValueValidator):
            min_value = elem.limit_value
        if isinstance(elem, MaxValueValidator):
            max_value = elem.limit_value
    if field.max_digits and field.decimal_places:
        from decimal import Decimal
        max_value = min(max_value,
                        Decimal('%s.%s' % ('9' * (field.max_digits - field.decimal_places),
                                           '9' * field.decimal_places)))
    min_value = kwargs.get('min_value') or min_value
    max_value = kwargs.get('max_value') or max_value

    return str(xunit.any_decimal(min_value=min_value,
                                 max_value=max_value,
                                 decimal_places=field.decimal_places or 2))
Return random value for DecimalField >>> result = any_form_field(forms.DecimalField(max_value=100, min_value=11, max_digits=4, decimal_places = 2)) >>> type(result) <type 'str'> >>> from decimal import Decimal >>> Decimal(result) >= 11, Decimal(result) <= Decimal('99.99') (True, True)
def raw_sign(message, secret):
    """Sign a message."""
    digest = hmac.new(secret, message, hashlib.sha256).digest()
    return base64.b64encode(digest)
Sign a message.
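Usage sketch; under Python 3 both arguments must be bytes, since hmac.new rejects str keys and messages:

raw_sign(b"payload", b"secret-key")
# -> 44 base64 bytes encoding the 32-byte HMAC-SHA256 digest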
def get_history_tags(self, exp, rep=0):
    """Returns all available tags (logging keys) of the given experiment
    repetition.

    Note: Technically, each repetition could have different tags, therefore
    the rep number can be passed in as a parameter, even though usually all
    repetitions have the same tags. The default repetition is 0 and in most
    cases can be omitted.
    """
    history = self.get_history(exp, rep, 'all')
    return history.keys()
returns all available tags (logging keys) of the given experiment repetition. Note: Technically, each repetition could have different tags, therefore the rep number can be passed in as a parameter, even though usually all repetitions have the same tags. The default repetition is 0 and in most cases can be omitted.
def gen_zonal_stats(
        vectors, raster,
        layer=0,
        band=1,
        nodata=None,
        affine=None,
        stats=None,
        all_touched=False,
        categorical=False,
        category_map=None,
        add_stats=None,
        zone_func=None,
        raster_out=False,
        prefix=None,
        geojson_out=False, **kwargs):
    """Zonal statistics of raster values aggregated to vector geometries.

    Parameters
    ----------
    vectors: path to a vector source or geo-like python objects
    raster: ndarray or path to a GDAL raster source
        If ndarray is passed, the ``affine`` kwarg is required.
    layer: int or string, optional
        If `vectors` is a path to a fiona source, specify the vector layer
        to use either by name or number. defaults to 0
    band: int, optional
        If `raster` is a GDAL source, the band number to use
        (counting from 1). defaults to 1.
    nodata: float, optional
        If `raster` is a GDAL source, this value overrides any NODATA value
        specified in the file's metadata. If `None`, the file's metadata's
        NODATA value (if any) will be used. defaults to `None`.
    affine: Affine instance
        required only for ndarrays, otherwise it is read from src
    stats: list of str, or space-delimited str, optional
        Which statistics to calculate for each zone. All possible choices
        are listed in ``utils.VALID_STATS``. defaults to ``DEFAULT_STATS``,
        a subset of these.
    all_touched: bool, optional
        Whether to include every raster cell touched by a geometry, or only
        those having a center point within the polygon. defaults to `False`
    categorical: bool, optional
    category_map: dict
        A dictionary mapping raster values to human-readable categorical
        names. Only applies when categorical is True
    add_stats: dict
        with names and functions of additional stats to compute, optional
    zone_func: callable
        function to apply to zone ndarray prior to computing stats
    raster_out: boolean
        Include the masked numpy array for each feature?, optional
        Each feature dictionary will have the following additional keys:
        mini_raster_array: The clipped and masked numpy array
        mini_raster_affine: Affine transformation
        mini_raster_nodata: NoData Value
    prefix: string
        add a prefix to the keys (default: None)
    geojson_out: boolean
        Return list of GeoJSON-like features (default: False)
        Original feature geometry and properties will be retained with zonal
        stats appended as additional properties. Use with `prefix` to ensure
        unique and meaningful property names.

    Returns
    -------
    generator of dicts (if geojson_out is False)
        Each item corresponds to a single vector feature and contains keys
        for each of the specified stats.
    generator of geojson features (if geojson_out is True)
        GeoJSON-like Feature as python dict
    """
    stats, run_count = check_stats(stats, categorical)

    # Handle 1.0 deprecations
    transform = kwargs.get('transform')
    if transform:
        warnings.warn("GDAL-style transforms will disappear in 1.0. "
                      "Use affine=Affine.from_gdal(*transform) instead",
                      DeprecationWarning)
        if not affine:
            affine = Affine.from_gdal(*transform)

    cp = kwargs.get('copy_properties')
    if cp:
        warnings.warn("Use `geojson_out` to preserve feature properties",
                      DeprecationWarning)

    band_num = kwargs.get('band_num')
    if band_num:
        warnings.warn("Use `band` to specify band number", DeprecationWarning)
        band = band_num

    with Raster(raster, affine, nodata, band) as rast:
        features_iter = read_features(vectors, layer)
        for _, feat in enumerate(features_iter):
            geom = shape(feat['geometry'])

            if 'Point' in geom.type:
                geom = boxify_points(geom, rast)

            geom_bounds = tuple(geom.bounds)
            fsrc = rast.read(bounds=geom_bounds)

            # rasterized geometry
            rv_array = rasterize_geom(geom, like=fsrc, all_touched=all_touched)

            # nodata mask
            isnodata = (fsrc.array == fsrc.nodata)

            # add nan mask (if necessary)
            has_nan = (np.issubdtype(fsrc.array.dtype, np.floating)
                       and np.isnan(fsrc.array.min()))
            if has_nan:
                isnodata = (isnodata | np.isnan(fsrc.array))

            # Mask the source data array
            # mask everything that is not a valid value or not within our geom
            masked = np.ma.MaskedArray(fsrc.array,
                                       mask=(isnodata | ~rv_array))

            # If we're on 64 bit platform and the array is an integer type
            # make sure we cast to 64 bit to avoid overflow.
            # workaround for https://github.com/numpy/numpy/issues/8433
            if sysinfo.platform_bits == 64 and \
                    masked.dtype != np.int64 and \
                    issubclass(masked.dtype.type, np.integer):
                masked = masked.astype(np.int64)

            # execute zone_func on masked zone ndarray
            if zone_func is not None:
                if not callable(zone_func):
                    raise TypeError(('zone_func must be a callable '
                                     'which accepts a '
                                     'single `zone_array` arg.'))
                zone_func(masked)

            if masked.compressed().size == 0:
                # nothing here, fill with None and move on
                feature_stats = dict([(stat, None) for stat in stats])
                if 'count' in stats:  # special case, zero makes sense here
                    feature_stats['count'] = 0
            else:
                if run_count:
                    keys, counts = np.unique(masked.compressed(),
                                             return_counts=True)
                    pixel_count = dict(zip([np.asscalar(k) for k in keys],
                                           [np.asscalar(c) for c in counts]))

                if categorical:
                    feature_stats = dict(pixel_count)
                    if category_map:
                        feature_stats = remap_categories(category_map,
                                                         feature_stats)
                else:
                    feature_stats = {}

                if 'min' in stats:
                    feature_stats['min'] = float(masked.min())
                if 'max' in stats:
                    feature_stats['max'] = float(masked.max())
                if 'mean' in stats:
                    feature_stats['mean'] = float(masked.mean())
                if 'count' in stats:
                    feature_stats['count'] = int(masked.count())
                # optional
                if 'sum' in stats:
                    feature_stats['sum'] = float(masked.sum())
                if 'std' in stats:
                    feature_stats['std'] = float(masked.std())
                if 'median' in stats:
                    feature_stats['median'] = float(np.median(masked.compressed()))
                if 'majority' in stats:
                    feature_stats['majority'] = float(key_assoc_val(pixel_count, max))
                if 'minority' in stats:
                    feature_stats['minority'] = float(key_assoc_val(pixel_count, min))
                if 'unique' in stats:
                    feature_stats['unique'] = len(list(pixel_count.keys()))
                if 'range' in stats:
                    try:
                        rmin = feature_stats['min']
                    except KeyError:
                        rmin = float(masked.min())
                    try:
                        rmax = feature_stats['max']
                    except KeyError:
                        rmax = float(masked.max())
                    feature_stats['range'] = rmax - rmin

                for pctile in [s for s in stats if s.startswith('percentile_')]:
                    q = get_percentile(pctile)
                    pctarr = masked.compressed()
                    feature_stats[pctile] = np.percentile(pctarr, q)

            if 'nodata' in stats or 'nan' in stats:
                featmasked = np.ma.MaskedArray(fsrc.array, mask=(~rv_array))

                if 'nodata' in stats:
                    feature_stats['nodata'] = float((featmasked == fsrc.nodata).sum())
                if 'nan' in stats:
                    feature_stats['nan'] = float(np.isnan(featmasked).sum()) if has_nan else 0

            if add_stats is not None:
                for stat_name, stat_func in add_stats.items():
                    feature_stats[stat_name] = stat_func(masked)

            if raster_out:
                feature_stats['mini_raster_array'] = masked
                feature_stats['mini_raster_affine'] = fsrc.affine
                feature_stats['mini_raster_nodata'] = fsrc.nodata

            if prefix is not None:
                prefixed_feature_stats = {}
                for key, val in feature_stats.items():
                    newkey = "{}{}".format(prefix, key)
                    prefixed_feature_stats[newkey] = val
                feature_stats = prefixed_feature_stats

            if geojson_out:
                for key, val in feature_stats.items():
                    if 'properties' not in feat:
                        feat['properties'] = {}
                    feat['properties'][key] = val
                yield feat
            else:
                yield feature_stats
Zonal statistics of raster values aggregated to vector geometries. Parameters ---------- vectors: path to an vector source or geo-like python objects raster: ndarray or path to a GDAL raster source If ndarray is passed, the ``affine`` kwarg is required. layer: int or string, optional If `vectors` is a path to an fiona source, specify the vector layer to use either by name or number. defaults to 0 band: int, optional If `raster` is a GDAL source, the band number to use (counting from 1). defaults to 1. nodata: float, optional If `raster` is a GDAL source, this value overrides any NODATA value specified in the file's metadata. If `None`, the file's metadata's NODATA value (if any) will be used. defaults to `None`. affine: Affine instance required only for ndarrays, otherwise it is read from src stats: list of str, or space-delimited str, optional Which statistics to calculate for each zone. All possible choices are listed in ``utils.VALID_STATS``. defaults to ``DEFAULT_STATS``, a subset of these. all_touched: bool, optional Whether to include every raster cell touched by a geometry, or only those having a center point within the polygon. defaults to `False` categorical: bool, optional category_map: dict A dictionary mapping raster values to human-readable categorical names. Only applies when categorical is True add_stats: dict with names and functions of additional stats to compute, optional zone_func: callable function to apply to zone ndarray prior to computing stats raster_out: boolean Include the masked numpy array for each feature?, optional Each feature dictionary will have the following additional keys: mini_raster_array: The clipped and masked numpy array mini_raster_affine: Affine transformation mini_raster_nodata: NoData Value prefix: string add a prefix to the keys (default: None) geojson_out: boolean Return list of GeoJSON-like features (default: False) Original feature geometry and properties will be retained with zonal stats appended as additional properties. Use with `prefix` to ensure unique and meaningful property names. Returns ------- generator of dicts (if geojson_out is False) Each item corresponds to a single vector feature and contains keys for each of the specified stats. generator of geojson features (if geojson_out is True) GeoJSON-like Feature as python dict
def batch(
    self,
    owner,
    action=None,
    attribute_write_type=None,
    halt_on_error=False,
    playbook_triggers_enabled=None,
):
    """Return instance of Batch"""
    from .tcex_ti_batch import TcExBatch

    return TcExBatch(
        self, owner, action, attribute_write_type, halt_on_error,
        playbook_triggers_enabled
    )
Return instance of Batch
def list_(env=None, user=None):
    """
    List the installed packages on an environment

    Returns
    -------
    Dictionary:
        {package: {version: 1.0.0, build: 1} ... }
    """
    cmd = _create_conda_cmd('list', args=['--json'], env=env, user=user)
    ret = _execcmd(cmd, user=user)

    if ret['retcode'] == 0:
        pkg_list = json.loads(ret['stdout'])
        packages = {}
        for pkg in pkg_list:
            pkg_info = pkg.split('-')
            name, version, build = ('-'.join(pkg_info[:-2]),
                                    pkg_info[-2], pkg_info[-1])
            packages[name] = {'version': version, 'build': build}
        return packages
    else:
        return ret
List the installed packages on an environment Returns ------- Dictionary: {package: {version: 1.0.0, build: 1 } ... }
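The name/version/build split above assumes conda package specs of the form '<name>-<version>-<build>', where the name itself may contain dashes; a standalone sketch with an illustrative spec:

spec = "zope.interface-4.6.0-py27_0"
pkg_info = spec.split('-')
name, version, build = '-'.join(pkg_info[:-2]), pkg_info[-2], pkg_info[-1]
# ('zope.interface', '4.6.0', 'py27_0')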
def construct_all(templates, **unbound_var_values):
    """Constructs all the given templates in a single pass without redundancy.

    This is useful when the templates have a common substructure and you want
    the smallest possible graph.

    Args:
      templates: A sequence of templates.
      **unbound_var_values: The unbound_var values to replace.

    Returns:
      A list of results corresponding to templates.

    Raises:
      TypeError: If any value in templates is unsupported.
      ValueError: If the unbound_var values specified are not complete or
        contain unknown values.
    """
    def _merge_dicts(src, dst):
        for k, v in six.iteritems(src):
            if dst.get(k, v) != v:
                raise ValueError('Conflicting values bound for %s: %s and %s' %
                                 (k, v, dst[k]))
            else:
                dst[k] = v

    # pylint: disable=protected-access
    all_unbound_vars = {}
    context = {}
    for x in templates:
        if isinstance(x, _DeferredLayer):
            _merge_unbound_var_dicts(x.unbound_vars, all_unbound_vars)
            _merge_dicts(x._partial_context, context)
        else:
            raise TypeError('Unexpected type: %s' % type(x))
    _merge_dicts(
        _assign_values_to_unbound_vars(all_unbound_vars, unbound_var_values),
        context)
    # We need to create a result of known size to avoid client pylint errors.
    result = list(templates)
    for i, x in enumerate(result):
        if isinstance(x, _DeferredLayer):
            result[i] = x._construct(context)
    return result
Constructs all the given templates in a single pass without redundancy. This is useful when the templates have a common substructure and you want the smallest possible graph. Args: templates: A sequence of templates. **unbound_var_values: The unbound_var values to replace. Returns: A list of results corresponding to templates. Raises: TypeError: If any value in templates is unsupported. ValueError: If the unbound_var values specified are not complete or contain unknown values.
def async_func(self, function):
    """Decorator to let a normal function return a NewFuture."""
    @wraps(function)
    def wrapped(*args, **kwargs):
        return self.submit(function, *args, **kwargs)

    return wrapped
Decorator to let a normal function return a NewFuture.
def within_bounding_box(self, limits):
    """
    Selects the earthquakes within a bounding box.

    :parameter limits:
        A list or a numpy array with four elements in the following order:
        - min x (longitude)
        - min y (latitude)
        - max x (longitude)
        - max y (latitude)
    :returns:
        Returns a :class:`hmtk.seismicity.catalogue.Catalogue` instance
    """
    is_valid = np.logical_and(
        self.catalogue.data['longitude'] >= limits[0],
        np.logical_and(
            self.catalogue.data['longitude'] <= limits[2],
            np.logical_and(
                self.catalogue.data['latitude'] >= limits[1],
                self.catalogue.data['latitude'] <= limits[3])))
    return self.select_catalogue(is_valid)
Selects the earthquakes within a bounding box. :parameter limits: A list or a numpy array with four elements in the following order: - min x (longitude) - min y (latitude) - max x (longitude) - max y (latitude) :returns: Returns a :class:`hmtk.seismicity.catalogue.Catalogue` instance
def read_rle_bit_packed_hybrid(file_obj, width, length=None):
    """Read values from `file_obj` using the RLE/bit-packed hybrid encoding.

    If length is not specified, then a 32-bit int is read first to grab the
    length of the encoded data.
    """
    debug_logging = logger.isEnabledFor(logging.DEBUG)
    io_obj = file_obj
    if length is None:
        length = read_plain_int32(file_obj, 1)[0]
        raw_bytes = file_obj.read(length)
        if raw_bytes == b'':
            return None
        io_obj = io.BytesIO(raw_bytes)

    res = []
    while io_obj.tell() < length:
        header = read_unsigned_var_int(io_obj)
        if header & 1 == 0:
            res += read_rle(io_obj, header, width, debug_logging)
        else:
            res += read_bitpacked(io_obj, header, width, debug_logging)
    return res
Read values from `file_obj` using the RLE/bit-packed hybrid encoding. If length is not specified, then a 32-bit int is read first to grab the length of the encoded data.
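The low bit of each varint header selects the branch: an even header encodes an RLE run of header >> 1 repeated values, an odd header encodes header >> 1 groups of 8 bit-packed values, matching the Parquet hybrid encoding these helpers implement. A minimal sketch of the header decode:

header = 6                     # as returned by read_unsigned_var_int(io_obj)
if header & 1 == 0:
    run_length = header >> 1   # RLE: the next value repeats 3 times
else:
    n_groups = header >> 1     # bit-packed: n_groups * 8 values follow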
def tyn_calus_scaling(target, DABo, To, mu_o, viscosity='pore.viscosity',
                      temperature='pore.temperature'):
    r"""
    Uses the Tyn-Calus model to adjust a diffusion coefficient for liquids
    from reference conditions to conditions of interest

    Parameters
    ----------
    target : OpenPNM Object
        The object for which these values are being calculated. This
        controls the length of the calculated array, and also provides
        access to other necessary thermofluid properties.

    DABo : float, array_like
        Diffusion coefficient at reference conditions

    mu_o, To : float, array_like
        Viscosity & temperature at reference conditions, respectively

    viscosity : string
        The dictionary key containing the viscosity values

    temperature : string
        The dictionary key containing the temperature values in Kelvin (K)
    """
    Ti = target[temperature]
    mu_i = target[viscosity]
    value = DABo * (Ti / To) * (mu_o / mu_i)
    return value
r""" Uses the Tyn-Calus model to adjust a diffusion coefficient for liquids from reference conditions to conditions of interest Parameters ---------- target : OpenPNM Object The object for which these values are being calculated. This controls the length of the calculated array, and also provides access to other necessary thermofluid properties. DABo : float, array_like Diffusion coefficient at reference conditions mu_o, To : float, array_like Viscosity & temperature at reference conditions, respectively viscosity : string The dictionary key containing the viscosity values temperature : string The dictionary key containing the temperature values in Kelvin (K)
def recache(i):
    """
    Input:  {}

    Output: {
              return  - return code =  0, if successful
                                    >  0, if error
              (error) - error text if return > 0
            }
    """
    o = i.get('out', '')

    # Listing all repos
    r = ck.access({'action': 'list',
                   'module_uoa': ck.cfg['repo_name']})
    if r['return'] > 0:
        return r
    l = r['lst']

    cru = {}
    cri = {}

    # Processing repos
    # We need 2 passes (if some repos such as remote ones are referenced
    # inside new repos)
    for ps in [0, 1]:
        for q in l:
            if ps == 0 or (ps == 1 and q.get('processed', '') != 'yes'):
                ruoa = q['repo_uoa']
                muoa = q['module_uoa']
                duoa = q['data_uoa']
                duid = q['data_uid']

                # First try to load from cache to check that not remote ...
                remote = False
                rx = ck.load_repo_info_from_cache({'repo_uoa': duoa})
                if rx['return'] == 0:
                    rd = rx.get('dict', {})
                    if rd.get('remote', '') == 'yes':
                        remote = True

                if not remote:
                    if duid == ck.cfg['repo_uid_default'] or duid == ck.cfg['repo_uid_local']:
                        if o == 'con':
                            ck.out('Skipping repo ' + duoa + ' ...')
                    else:
                        if o == 'con':
                            ck.out('Processing repo ' + duoa + ' ...')

                        # Repo dictionary (may be changing in .ckr.json)
                        dt = {}

                        # Find real repo and get .ckr.json
                        rx = ck.access({'action': 'where',
                                        'module_uoa': muoa,
                                        'data_uoa': duoa})
                        if rx['return'] == 0:
                            pckr = os.path.join(rx['path'], ck.cfg['repo_file'])
                            if os.path.isfile(pckr):
                                rx = ck.load_json_file({'json_file': pckr})
                                if rx['return'] > 0:
                                    return rx
                                dt = rx['dict']['dict']

                        # Load extra info repo (do not use repo, since may
                        # not exist in cache)
                        rx = ck.access({'action': 'load',
                                        'module_uoa': muoa,
                                        'data_uoa': duoa})
                        if rx['return'] > 0:
                            if ps == 0:
                                continue
                            else:
                                return rx
                        if len(dt) == 0:
                            dt = rx['dict']
                        else:
                            if rx['dict'].get('path', '') != '':
                                dt['path'] = rx['dict']['path']

                        dname = rx['data_name']
                        dalias = rx['data_alias']
                        dp = rx['path']

                        if duoa != duid:
                            cru[duoa] = duid

                        dd = {'dict': dt}
                        dd['data_uid'] = duid
                        dd['data_uoa'] = duoa
                        dd['data_alias'] = dalias
                        dd['data_name'] = dname
                        dd['path_to_repo_desc'] = dp

                        cri[duid] = dd

                q['processed'] = 'yes'

    # Recording
    ck.cache_repo_uoa = cru
    ck.cache_repo_info = cri

    rx = ck.save_repo_cache({})
    if rx['return'] > 0:
        return rx

    rx = ck.reload_repo_cache({'force': 'yes'})
    if rx['return'] > 0:
        return rx

    if o == 'con':
        ck.out('')
        ck.out('Repositories were successfully recached!')

    return {'return': 0}
Input: {} Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 }
def get_default_config_help(self):
    """
    Returns the help text for the configuration options for this handler
    """
    config = super(TSDBHandler, self).get_default_config_help()
    config.update({
        'host': '',
        'port': '',
        'timeout': '',
        'tags': '',
        'prefix': '',
        'batch': '',
        'compression': '',
        'user': '',
        'password': '',
        'cleanMetrics': True,
        'skipAggregates': True,
    })
    return config
Returns the help text for the configuration options for this handler
def insertBulkBlock(self, blockDump):
    """
    API to insert a bulk block

    :param blockDump: Output of the block dump command, example can be found in
        https://svnweb.cern.ch/trac/CMSDMWM/browser/DBS/trunk/Client/tests/dbsclient_t/unittests/blockdump.dict
    :type blockDump: dict
    """
    # We first check if the first lumi section has event_count or not
    frst = True
    if (blockDump['files'][0]['file_lumi_list'][0]).get('event_count') == None:
        frst = False
    # When frst == True, we look for event_count == None in the data; if we
    # did not find None (redFlag = False), everything is good. Otherwise, we
    # have to remove all event_count entries in lumis and raise an exception.
    # When frst == False, we look for event_count != None in the data; if we
    # did not find a non-None value (redFlag = False), everything is good.
    # Otherwise, we have to remove all event_count entries in lumis and raise
    # an exception.
    redFlag = False
    if frst == True:
        eventCT = (fl.get('event_count') == None for f in blockDump['files']
                   for fl in f['file_lumi_list'])
    else:
        eventCT = (fl.get('event_count') != None for f in blockDump['files']
                   for fl in f['file_lumi_list'])
    redFlag = any(eventCT)

    if redFlag:
        for f in blockDump['files']:
            for fl in f['file_lumi_list']:
                if 'event_count' in fl:
                    del fl['event_count']

    result = self.__callServer("bulkblocks", data=blockDump,
                               callmethod='POST')

    if redFlag:
        raise dbsClientException(
            "Mixed event_count per lumi in the block: %s"
            % blockDump['block']['block_name'],
            "The block was inserted into DBS, but you need to check if the "
            "data is valid.")
    else:
        return result
API to insert a bulk block :param blockDump: Output of the block dump command, example can be found in https://svnweb.cern.ch/trac/CMSDMWM/browser/DBS/trunk/Client/tests/dbsclient_t/unittests/blockdump.dict :type blockDump: dict
def _set_loam_show_debug_information(self, v, load=False):
    """
    Setter method for loam_show_debug_information, mapped from YANG variable
    /loam_show_debug_state/loam_show_debug_information (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_loam_show_debug_information is considered as a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_loam_show_debug_information() directly.

    YANG Description: LINK-OAM debug Information
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=loam_show_debug_information.loam_show_debug_information,
            is_container='container',
            presence=False,
            yang_name="loam-show-debug-information",
            rest_name="loam-show-debug-information",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {
                u'callpoint': u'dot1ag-loam-show-debug-info',
                u'cli-suppress-show-path': None}},
            namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational',
            defining_module='brocade-dot1ag-operational',
            yang_type='container',
            is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """loam_show_debug_information must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=loam_show_debug_information.loam_show_debug_information, is_container='container', presence=False, yang_name="loam-show-debug-information", rest_name="loam-show-debug-information", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'dot1ag-loam-show-debug-info', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='container', is_config=False)""",
        })

    self.__loam_show_debug_information = t
    if hasattr(self, '_set'):
        self._set()
Setter method for loam_show_debug_information, mapped from YANG variable /loam_show_debug_state/loam_show_debug_information (container) If this variable is read-only (config: false) in the source YANG file, then _set_loam_show_debug_information is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_loam_show_debug_information() directly. YANG Description: LINK-OAM debug Information
def get_kernel_spec(self, kernel_name):
    """Returns a :class:`KernelSpec` instance for the given kernel_name.

    Raises :exc:`NoSuchKernel` if the given kernel name is not found.
    """
    try:
        return super(EnvironmentKernelSpecManager,
                     self).get_kernel_spec(kernel_name)
    except (NoSuchKernel, FileNotFoundError):
        venv_kernel_name = kernel_name.lower()
        specs = self.get_all_kernel_specs_for_envs()
        if venv_kernel_name in specs:
            return specs[venv_kernel_name]
        else:
            raise NoSuchKernel(kernel_name)
Returns a :class:`KernelSpec` instance for the given kernel_name. Raises :exc:`NoSuchKernel` if the given kernel name is not found.
def touch(self, filepath):
    """Touches the specified file so that its modified time changes."""
    if self.is_ssh(filepath):
        self._check_ssh()
        remotepath = self._get_remote(filepath)
        stdin, stdout, stderr = self.ssh.exec_command(
            "touch {}".format(remotepath))
        stdin.close()
    else:
        os.system("touch {}".format(filepath))
Touches the specified file so that its modified time changes.
def _warn_if_not_at_expected_pos(self, expected_pos, end_of, start_of):
    """Helper function to warn about unknown bytes found in the file"""
    diff = expected_pos - self.stream.tell()
    if diff != 0:
        logger.warning(
            "There are {} bytes between {} and {}".format(diff, end_of, start_of)
        )
Helper function to warn about unknown bytes found in the file
def get_cso_dataframe(self):
    """
    get a dataframe of composite observation sensitivity, as returned by
    PEST in the seo file.

    Note that this formulation deviates slightly from the PEST documentation
    in that the values are divided by (npar-1) rather than by (npar).

    The equation is cso_j = ((Q^1/2*J*J^T*Q^1/2)^1/2)_jj/(NPAR-1)

    Returns:
        cso : pandas.DataFrame
    """
    assert self.jco is not None
    assert self.pst is not None
    weights = self.pst.observation_data.loc[
        self.jco.to_dataframe().index, "weight"].copy().values
    cso = np.diag(
        np.sqrt((self.qhalfx.x.dot(self.qhalfx.x.T)))) / (float(self.pst.npar - 1))
    cso_df = pd.DataFrame.from_dict(
        {'obnme': self.jco.to_dataframe().index, 'cso': cso})
    cso_df.index = cso_df['obnme']
    cso_df.drop('obnme', axis=1, inplace=True)
    return cso_df
get a dataframe of composite observation sensitivity, as returned by PEST in the seo file. Note that this formulation deviates slightly from the PEST documentation in that the values are divided by (npar-1) rather than by (npar). The equation is cso_j = ((Q^1/2*J*J^T*Q^1/2)^1/2)_jj/(NPAR-1) Returns: cso : pandas.DataFrame
def daemons(self):
    """List of daemons for this module."""
    for attr in dir(self):
        field = getattr(self, attr)
        if isinstance(field, Daemon):
            yield field
List of daemons for this module
def _create_packet(self, request):
    """Create a formatted packet from a request.

    :type request: str
    :param request: Formatted zabbix request

    :rtype: str
    :return: Data packet for zabbix
    """
    data_len = struct.pack('<Q', len(request))
    packet = b'ZBXD\x01' + data_len + request

    def ord23(x):
        if not isinstance(x, int):
            return ord(x)
        else:
            return x

    logger.debug('Packet [str]: %s', packet)
    logger.debug('Packet [hex]: %s',
                 ':'.join(hex(ord23(x))[2:] for x in packet))
    return packet
Create a formatted packet from a request. :type request: str :param request: Formatted zabbix request :rtype: str :return: Data packet for zabbix
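The wire format built above is a 5-byte protocol header, an 8-byte little-endian length, then the payload; a standalone sketch with an illustrative body:

import struct

request = b'{"request": "sender data", "data": []}'
packet = b'ZBXD\x01' + struct.pack('<Q', len(request)) + request
# b'ZBXD' + protocol flag 0x01 | uint64 payload length (little-endian) | payload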
def validate(self, value):
    """
    Accepts: str, unicode
    Returns: list of tuples in the format (ip, port)
    """
    val = super(SlavesValue, self).validate(value)
    slaves = val.replace(" ", "")
    slaves = filter(None, slaves.split(','))
    slaves = [x.split(":") for x in slaves]
    res = list()
    for x in slaves:
        self._validate_ip(x[0])
        if len(x) == 1:
            res.append((x[0], 53))
        else:
            res.append((x[0], int(x[1])))
    return res
Accepts: str, unicode Returns: list of tuples in the format (ip, port)
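The parsing core of validate, extracted as a standalone sketch (the _validate_ip check is omitted and the input string is illustrative):

raw = "10.0.0.1:5353, 10.0.0.2"
parts = [x.split(":") for x in filter(None, raw.replace(" ", "").split(','))]
[(x[0], int(x[1]) if len(x) > 1 else 53) for x in parts]
# [('10.0.0.1', 5353), ('10.0.0.2', 53)]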
def stmt_lambda_proc(self, inputstring, **kwargs):
    """Add statement lambda definitions."""
    regexes = []
    for i in range(len(self.stmt_lambdas)):
        name = self.stmt_lambda_name(i)
        regex = compile_regex(r"\b%s\b" % (name,))
        regexes.append(regex)

    out = []
    for line in inputstring.splitlines():
        for i, regex in enumerate(regexes):
            if regex.search(line):
                indent, line = split_leading_indent(line)
                out.append(indent + self.stmt_lambdas[i])
        out.append(line)

    return "\n".join(out)
Add statement lambda definitions.
def play_song(self, song, tempo=120, delay=0.05):
    """Plays a song provided as a list of tuples containing the note name
    and its value using music conventional notation instead of numerical
    values for frequency and duration.

    It supports symbolic notes (e.g. ``A4``, ``D#3``, ``Gb5``) and durations
    (e.g. ``q``, ``h``). For an exhaustive list of accepted note symbols and
    values, have a look at the ``_NOTE_FREQUENCIES`` and ``_NOTE_VALUES``
    private dictionaries in the source code.

    The value can be suffixed by modifiers:

    - a *divider* introduced by a ``/`` to obtain triplets for instance
      (e.g. ``q/3`` for a triplet of eighth notes)
    - a *multiplier* introduced by ``*`` (e.g. ``*1.5`` is a dotted note).

    Shortcuts exist for common modifiers:

    - ``3`` produces a triplet member note. For instance ``e3`` gives a
      triplet of eighth notes, i.e. 3 eighth notes in the duration of a
      single quarter. You must ensure that 3 triplet notes are defined in
      sequence to match the count, otherwise the result will not be the
      expected one.
    - ``.`` produces a dotted note, i.e. whose duration is one and a half
      the base one. Double dots are not currently supported.

    Example::

        >>> # A long time ago in a galaxy far,
        >>> # far away...
        >>> Sound.play_song((
        >>>     ('D4', 'e3'),      # intro anacrouse
        >>>     ('D4', 'e3'),
        >>>     ('D4', 'e3'),
        >>>     ('G4', 'h'),       # meas 1
        >>>     ('D5', 'h'),
        >>>     ('C5', 'e3'),      # meas 2
        >>>     ('B4', 'e3'),
        >>>     ('A4', 'e3'),
        >>>     ('G5', 'h'),
        >>>     ('D5', 'q'),
        >>>     ('C5', 'e3'),      # meas 3
        >>>     ('B4', 'e3'),
        >>>     ('A4', 'e3'),
        >>>     ('G5', 'h'),
        >>>     ('D5', 'q'),
        >>>     ('C5', 'e3'),      # meas 4
        >>>     ('B4', 'e3'),
        >>>     ('C5', 'e3'),
        >>>     ('A4', 'h.'),
        >>> ))

    .. important::

        Only 4/4 signature songs are supported with respect to note durations.

    :param iterable[tuple(string, string)] song: the song
    :param int tempo: the song tempo, given in quarters per minute
    :param float delay: delay between notes (in seconds)

    :return: the spawn subprocess from ``subprocess.Popen``
    :raises ValueError: if invalid note in song or invalid play parameters
    """
    if tempo <= 0:
        raise ValueError('invalid tempo (%s)' % tempo)
    if delay < 0:
        raise ValueError('invalid delay (%s)' % delay)

    delay_ms = int(delay * 1000)
    meas_duration_ms = 60000 / tempo * 4  # we only support 4/4 bars, hence "* 4"

    def beep_args(note, value):
        """Builds the arguments string for producing a beep matching
        the requested note and value.

        Args:
            note (str): the note name and octave
            value (str): the note value expression

        Returns:
            str: the arguments to be passed to the beep command
        """
        freq = self._NOTE_FREQUENCIES.get(note.upper(),
                                          self._NOTE_FREQUENCIES[note])
        if '/' in value:
            base, factor = value.split('/')
            duration_ms = meas_duration_ms * self._NOTE_VALUES[base] / float(factor)
        elif '*' in value:
            base, factor = value.split('*')
            duration_ms = meas_duration_ms * self._NOTE_VALUES[base] * float(factor)
        elif value.endswith('.'):
            base = value[:-1]
            duration_ms = meas_duration_ms * self._NOTE_VALUES[base] * 1.5
        elif value.endswith('3'):
            base = value[:-1]
            duration_ms = meas_duration_ms * self._NOTE_VALUES[base] * 2 / 3
        else:
            duration_ms = meas_duration_ms * self._NOTE_VALUES[value]

        return '-f %d -l %d -D %d' % (freq, duration_ms, delay_ms)

    try:
        return self.beep(' -n '.join(
            [beep_args(note, value) for (note, value) in song]
        ))
    except KeyError as e:
        raise ValueError('invalid note (%s)' % e)
Plays a song provided as a list of tuples containing the note name and its value using music conventional notation instead of numerical values for frequency and duration. It supports symbolic notes (e.g. ``A4``, ``D#3``, ``Gb5``) and durations (e.g. ``q``, ``h``). For an exhaustive list of accepted note symbols and values, have a look at the ``_NOTE_FREQUENCIES`` and ``_NOTE_VALUES`` private dictionaries in the source code. The value can be suffixed by modifiers: - a *divider* introduced by a ``/`` to obtain triplets for instance (e.g. ``q/3`` for a triplet of eighth notes) - a *multiplier* introduced by ``*`` (e.g. ``*1.5`` is a dotted note). Shortcuts exist for common modifiers: - ``3`` produces a triplet member note. For instance ``e3`` gives a triplet of eighth notes, i.e. 3 eighth notes in the duration of a single quarter. You must ensure that 3 triplet notes are defined in sequence to match the count, otherwise the result will not be the expected one. - ``.`` produces a dotted note, i.e. whose duration is one and a half the base one. Double dots are not currently supported. Example:: >>> # A long time ago in a galaxy far, >>> # far away... >>> Sound.play_song(( >>> ('D4', 'e3'), # intro anacrouse >>> ('D4', 'e3'), >>> ('D4', 'e3'), >>> ('G4', 'h'), # meas 1 >>> ('D5', 'h'), >>> ('C5', 'e3'), # meas 2 >>> ('B4', 'e3'), >>> ('A4', 'e3'), >>> ('G5', 'h'), >>> ('D5', 'q'), >>> ('C5', 'e3'), # meas 3 >>> ('B4', 'e3'), >>> ('A4', 'e3'), >>> ('G5', 'h'), >>> ('D5', 'q'), >>> ('C5', 'e3'), # meas 4 >>> ('B4', 'e3'), >>> ('C5', 'e3'), >>> ('A4', 'h.'), >>> )) .. important:: Only 4/4 signature songs are supported with respect to note durations. :param iterable[tuple(string, string)] song: the song :param int tempo: the song tempo, given in quarters per minute :param float delay: delay between notes (in seconds) :return: the spawn subprocess from ``subprocess.Popen`` :raises ValueError: if invalid note in song or invalid play parameters
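Assuming _NOTE_VALUES maps 'q' to 1/4 and 'e' to 1/8 (an assumption consistent with the duration arithmetic in play_song), the timings at tempo=120 work out as:

tempo = 120
meas_duration_ms = 60000 / tempo * 4   # 2000 ms per 4/4 measure
meas_duration_ms * (1 / 4)             # 'q'  -> 500 ms
meas_duration_ms * (1 / 8) * 2 / 3     # 'e3' -> ~166.7 ms (triplet eighth)
meas_duration_ms * (1 / 4) * 1.5       # 'q.' -> 750 ms (dotted quarter)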
async def logs(
    self,
    service_id: str,
    *,
    details: bool = False,
    follow: bool = False,
    stdout: bool = False,
    stderr: bool = False,
    since: int = 0,
    timestamps: bool = False,
    is_tty: bool = False,
    tail: str = "all"
) -> Union[str, AsyncIterator[str]]:
    """
    Retrieve logs of the given service

    Args:
        details: show service context and extra details provided to logs
        follow: return the logs as a stream.
        stdout: return logs from stdout
        stderr: return logs from stderr
        since: return logs since this time, as a UNIX timestamp
        timestamps: add timestamps to every log line
        is_tty: the service has a pseudo-TTY allocated
        tail: only return this number of log lines from the end of the logs,
            specify as an integer or `all` to output all log lines.
    """
    if stdout is False and stderr is False:
        raise TypeError("Need one of stdout or stderr")

    params = {
        "details": details,
        "follow": follow,
        "stdout": stdout,
        "stderr": stderr,
        "since": since,
        "timestamps": timestamps,
        "tail": tail,
    }
    response = await self.docker._query(
        "services/{service_id}/logs".format(service_id=service_id),
        method="GET",
        params=params,
    )
    return await multiplexed_result(response, follow, is_tty=is_tty)
Retrieve logs of the given service Args: details: show service context and extra details provided to logs follow: return the logs as a stream. stdout: return logs from stdout stderr: return logs from stderr since: return logs since this time, as a UNIX timestamp timestamps: add timestamps to every log line is_tty: the service has a pseudo-TTY allocated tail: only return this number of log lines from the end of the logs, specify as an integer or `all` to output all log lines.
def min_geodetic_distance(a, b):
    """
    Compute the minimum distance between the first mesh and each point of
    the second mesh when both are defined on the earth surface.

    :param a: a pair of (lons, lats) or an array of cartesian coordinates
    :param b: a pair of (lons, lats) or an array of cartesian coordinates
    """
    if isinstance(a, tuple):
        a = spherical_to_cartesian(a[0].flatten(), a[1].flatten())
    if isinstance(b, tuple):
        b = spherical_to_cartesian(b[0].flatten(), b[1].flatten())
    return cdist(a, b).min(axis=0)
Compute the minimum distance between first mesh and each point of the second mesh when both are defined on the earth surface. :param a: a pair of (lons, lats) or an array of cartesian coordinates :param b: a pair of (lons, lats) or an array of cartesian coordinates
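When both inputs are already cartesian arrays the function reduces to a single scipy cdist call; a small sketch with illustrative points:

import numpy as np
from scipy.spatial.distance import cdist

a = np.array([[0., 0., 0.], [1., 0., 0.]])
b = np.array([[0., 1., 0.], [3., 0., 0.]])
cdist(a, b).min(axis=0)   # array([1., 2.]): distance to the nearest point of `a` for each point of `b`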
def webify(v: Any, preserve_newlines: bool = True) -> str:
    """
    Converts a value into an HTML-safe ``str`` (formerly, in Python 2:
    ``unicode``).

    Converts value ``v`` to a string; escapes it to be safe in HTML format
    (escaping ampersands, replacing newlines with ``<br>``, etc.).

    Returns ``""`` for blank input.
    """
    nl = "<br>" if preserve_newlines else " "
    if v is None:
        return ""
    if not isinstance(v, str):
        v = str(v)
    # noinspection PyDeprecation
    return cgi.escape(v).replace("\n", nl).replace("\\n", nl)
Converts a value into an HTML-safe ``str`` (formerly, in Python 2: ``unicode``). Converts value ``v`` to a string; escapes it to be safe in HTML format (escaping ampersands, replacing newlines with ``<br>``, etc.). Returns ``""`` for blank input.
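Illustrative behaviour (hedged: exact output depends on the escape helper in use):

>>> webify('fish & chips\nsecond line')
'fish &amp; chips<br>second line'
>>> webify(None)
''
>>> webify(3.5)
'3.5'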
def calmarnorm(sharpe, T, tau=1.0):
    '''
    Multiplier for normalizing the Calmar ratio to period tau
    '''
    return calmar(sharpe, tau) / calmar(sharpe, T)
Multiplier for normalizing the Calmar ratio to period tau
def set_exit_handler(self):
        """Set the signal handler to manage_signal (defined in this class)

        Only set handlers for signal.SIGINT, signal.SIGTERM, signal.SIGHUP and
        signal.SIGQUIT

        :return: None
        """
        signal.signal(signal.SIGINT, self.manage_signal)
        signal.signal(signal.SIGTERM, self.manage_signal)
        signal.signal(signal.SIGHUP, self.manage_signal)
        signal.signal(signal.SIGQUIT, self.manage_signal)
Set the signal handler to manage_signal (defined in this class) Only set handlers for signal.SIGINT, signal.SIGTERM, signal.SIGHUP, signal.SIGQUIT :return: None
def format_name(format, name, target_type, prop_set):
    """ Given a target, as given to a custom tag rule, returns a string formatted
        according to the passed format. Format is a list of properties that are
        represented in the result. For each element of format the corresponding target
        information is obtained and added to the result string. For all but the
        literal, the format value is taken as the string to prepend to the output to
        join the item to the rest of the result. If not given, "-" is used as a
        joiner. The format options can be: <base>[joiner] :: The basename of the
        target name. <toolset>[joiner] :: The abbreviated toolset tag being used to
        build the target. <threading>[joiner] :: Indication of a multi-threaded build.
        <runtime>[joiner] :: Collective tag of the build runtime.
        <version:/version-feature | X.Y[.Z]/>[joiner] :: Short version tag taken from
        the given "version-feature" in the build properties. Or if not present, the
        literal value as the version number. <property:/property-name/>[joiner] ::
        Direct lookup of the given property-name value in the build properties.
        /property-name/ is a regular expression. e.g. <property:toolset-.*:flavor>
        will match every toolset. /otherwise/ :: The literal value of the format
        argument. For example this format: boost_ <base> <toolset> <threading>
        <runtime> <version:boost-version> Might return:
        boost_thread-vc80-mt-gd-1_33.dll, or boost_regex-vc80-gd-1_33.dll The
        returned name also has the target type specific prefix and suffix which puts
        it in a ready form to use as the value from a custom tag rule.
    """
    if __debug__:
        from ..build.property_set import PropertySet
        assert is_iterable_typed(format, basestring)
        assert isinstance(name, basestring)
        assert isinstance(target_type, basestring)
        assert isinstance(prop_set, PropertySet)
    if type.is_derived(target_type, 'LIB'):
        result = ''
        for f in format:
            grist = get_grist(f)
            if grist == '<base>':
                result += os.path.basename(name)
            elif grist == '<toolset>':
                result += join_tag(get_value(f),
                    toolset_tag(name, target_type, prop_set))
            elif grist == '<threading>':
                result += join_tag(get_value(f),
                    threading_tag(name, target_type, prop_set))
            elif grist == '<runtime>':
                result += join_tag(get_value(f),
                    runtime_tag(name, target_type, prop_set))
            elif grist.startswith('<version:'):
                key = grist[len('<version:'):-1]
                version = prop_set.get('<' + key + '>')
                # prop_set.get() returns a list; fall back to the literal key
                # as the version number when the feature is not present
                if version:
                    version = version[0]
                else:
                    version = key
                version = __re_version.match(version)
                result += join_tag(get_value(f),
                    version.group(1) + '_' + version.group(2))
            elif grist.startswith('<property:'):
                key = grist[len('<property:'):-1]
                property_re = re.compile('<(' + key + ')>')
                p0 = None
                for prop in prop_set.raw():
                    match = property_re.match(prop)
                    if match:
                        p0 = match.group(1)
                        break
                if p0:
                    p = prop_set.get('<' + p0 + '>')
                    if p:
                        assert len(p) == 1
                        result += join_tag(ungrist(f), p[0])
            else:
                result += f

        result = b2.build.virtual_target.add_prefix_and_suffix(
            result, target_type, prop_set)
        return result
Given a target, as given to a custom tag rule, returns a string formatted according to the passed format. Format is a list of properties that are represented in the result. For each element of format the corresponding target information is obtained and added to the result string. For all but the literal, the format value is taken as the string to prepend to the output to join the item to the rest of the result. If not given, "-" is used as a joiner. The format options can be: <base>[joiner] :: The basename of the target name. <toolset>[joiner] :: The abbreviated toolset tag being used to build the target. <threading>[joiner] :: Indication of a multi-threaded build. <runtime>[joiner] :: Collective tag of the build runtime. <version:/version-feature | X.Y[.Z]/>[joiner] :: Short version tag taken from the given "version-feature" in the build properties. Or if not present, the literal value as the version number. <property:/property-name/>[joiner] :: Direct lookup of the given property-name value in the build properties. /property-name/ is a regular expression. e.g. <property:toolset-.*:flavor> will match every toolset. /otherwise/ :: The literal value of the format argument. For example this format: boost_ <base> <toolset> <threading> <runtime> <version:boost-version> Might return: boost_thread-vc80-mt-gd-1_33.dll, or boost_regex-vc80-gd-1_33.dll The returned name also has the target type specific prefix and suffix which puts it in a ready form to use as the value from a custom tag rule.
def loadStructuredPoints(filename): """Load a ``vtkStructuredPoints`` object from file and return an ``Actor(vtkActor)`` object. .. hint:: |readStructuredPoints| |readStructuredPoints.py|_ """ reader = vtk.vtkStructuredPointsReader() reader.SetFileName(filename) reader.Update() gf = vtk.vtkImageDataGeometryFilter() gf.SetInputConnection(reader.GetOutputPort()) gf.Update() return Actor(gf.GetOutput())
Load a ``vtkStructuredPoints`` object from file and return an ``Actor(vtkActor)`` object. .. hint:: |readStructuredPoints| |readStructuredPoints.py|_
def addSibling(self, elem):
        """Add a new element @elem to the list of siblings of @cur
           merging adjacent TEXT nodes (@elem may be freed) If the new
           element was already inserted in a document it is first
           unlinked from its existing context. """
        if elem is None:
            elem__o = None
        else:
            elem__o = elem._o
        ret = libxml2mod.xmlAddSibling(self._o, elem__o)
        if ret is None:
            raise treeError('xmlAddSibling() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
Add a new element @elem to the list of siblings of @cur merging adjacent TEXT nodes (@elem may be freed) If the new element was already inserted in a document it is first unlinked from its existing context.
def _get_compute_func(self, nmr_samples, thinning, return_output): """Get the MCMC algorithm as a computable function. Args: nmr_samples (int): the number of samples we will draw thinning (int): the thinning factor we want to use return_output (boolean): if the kernel should return output Returns: mot.lib.cl_function.CLFunction: the compute function """ cl_func = ''' void compute(global uint* rng_state, global mot_float_type* current_chain_position, global mot_float_type* current_log_likelihood, global mot_float_type* current_log_prior, ulong iteration_offset, ulong nmr_iterations, ''' + ('''global mot_float_type* samples, global mot_float_type* log_likelihoods, global mot_float_type* log_priors,''' if return_output else '') + ''' void* method_data, void* data){ bool is_first_work_item = get_local_id(0) == 0; rand123_data rand123_rng_data = rand123_initialize_data((uint[]){ rng_state[0], rng_state[1], rng_state[2], rng_state[3], rng_state[4], rng_state[5], 0, 0}); void* rng_data = (void*)&rand123_rng_data; for(ulong i = 0; i < nmr_iterations; i++){ ''' if return_output: cl_func += ''' if(is_first_work_item){ if(i % ''' + str(thinning) + ''' == 0){ log_likelihoods[i / ''' + str(thinning) + '''] = *current_log_likelihood; log_priors[i / ''' + str(thinning) + '''] = *current_log_prior; for(uint j = 0; j < ''' + str(self._nmr_params) + '''; j++){ samples[(ulong)(i / ''' + str(thinning) + ''') // remove the interval + j * ''' + str(nmr_samples) + ''' // parameter index ] = current_chain_position[j]; } } } ''' cl_func += ''' _advanceSampler(method_data, data, i + iteration_offset, rng_data, current_chain_position, current_log_likelihood, current_log_prior); } if(is_first_work_item){ uint state[8]; rand123_data_to_array(rand123_rng_data, state); for(uint i = 0; i < 6; i++){ rng_state[i] = state[i]; } } } ''' return SimpleCLFunction.from_string( cl_func, dependencies=[Rand123(), self._get_log_prior_cl_func(), self._get_log_likelihood_cl_func(), SimpleCLCodeObject(self._get_state_update_cl_func(nmr_samples, thinning, return_output))])
Get the MCMC algorithm as a computable function. Args: nmr_samples (int): the number of samples we will draw thinning (int): the thinning factor we want to use return_output (boolean): if the kernel should return output Returns: mot.lib.cl_function.CLFunction: the compute function
def buildlist(self, enabled): """Run dialog buildlist """ choice = [] for item in self.data: choice.append((item, False)) for item in enabled: choice.append((item, True)) items = [(tag, tag, sta) for (tag, sta) in choice] code, self.tags = self.d.buildlist( text=self.text, items=items, visit_items=True, item_help=False, title=self.title) if code == "ok": self.unicode_to_string() return self.ununicode if code in ["cancel", "esc"]: self.exit()
Run dialog buildlist
def nsamples_to_hourminsec(x, pos): '''Convert axes labels to experiment duration in hours/minutes/seconds Notes ----- Matplotlib FuncFormatter function https://matplotlib.org/examples/pylab_examples/custom_ticker1.html ''' h, m, s = hourminsec(x/16.0) return '{:.0f}h {:2.0f}′ {:2.1f}″'.format(h, m, s)
Convert axes labels to experiment duration in hours/minutes/seconds Notes ----- Matplotlib FuncFormatter function https://matplotlib.org/examples/pylab_examples/custom_ticker1.html
def create_ospf_profile():
    """
    An OSPF Profile contains administrative distance and redistribution
    settings. An OSPF Profile is applied at the engine level.

    When creating an OSPF Profile, you must reference an OSPFDomainSetting.

    An OSPFDomainSetting holds the settings of the area border router (ABR)
    type, throttle timer settings, and the max metric router link-state
    advertisement (LSA) settings.
    """
    OSPFDomainSetting.create(name='custom', abr_type='cisco')

    ospf_domain = OSPFDomainSetting('custom')  # obtain resource

    ospf_profile = OSPFProfile.create(name='myospfprofile',
                                      domain_settings_ref=ospf_domain.href)
    print(ospf_profile)
An OSPF Profile contains administrative distance and redistribution settings. An OSPF Profile is applied at the engine level. When creating an OSPF Profile, you must reference an OSPFDomainSetting. An OSPFDomainSetting holds the settings of the area border router (ABR) type, throttle timer settings, and the max metric router link-state advertisement (LSA) settings.
def to_wire(self, name, file, compress=None, origin=None,
                override_rdclass=None, want_shuffle=True):
        """Convert the rdataset to wire format.

        @param name: The owner name of the RRset that will be emitted
        @type name: dns.name.Name object
        @param file: The file to which the wire format data will be appended
        @type file: file
        @param compress: The compression table to use; the default is None.
        @type compress: dict
        @param origin: The origin to be appended to any relative names when
        they are emitted.  The default is None.
        @returns: the number of records emitted
        @rtype: int
        """

        if override_rdclass is not None:
            rdclass = override_rdclass
            want_shuffle = False
        else:
            rdclass = self.rdclass
        file.seek(0, 2)
        if len(self) == 0:
            name.to_wire(file, compress, origin)
            stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0)
            file.write(stuff)
            return 1
        else:
            if want_shuffle:
                l = list(self)
                random.shuffle(l)
            else:
                l = self
            for rd in l:
                name.to_wire(file, compress, origin)
                stuff = struct.pack("!HHIH", self.rdtype, rdclass,
                                    self.ttl, 0)
                file.write(stuff)
                start = file.tell()
                rd.to_wire(file, compress, origin)
                end = file.tell()
                assert end - start < 65536
                file.seek(start - 2)
                stuff = struct.pack("!H", end - start)
                file.write(stuff)
                file.seek(0, 2)
            return len(self)
Convert the rdataset to wire format. @param name: The owner name of the RRset that will be emitted @type name: dns.name.Name object @param file: The file to which the wire format data will be appended @type file: file @param compress: The compression table to use; the default is None. @type compress: dict @param origin: The origin to be appended to any relative names when they are emitted. The default is None. @returns: the number of records emitted @rtype: int
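A hedged usage sketch built on dnspython's public helpers (module paths assumed from the same library):

import io

import dns.name
import dns.rdataset

rds = dns.rdataset.from_text('IN', 'A', 300, '192.0.2.1', '192.0.2.2')
buf = io.BytesIO()
owner = dns.name.from_text('example.com.')
count = rds.to_wire(owner, buf)   # number of records emitted
wire_bytes = buf.getvalue()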
def add_parameter(self, field_name, param_name, param_value): """ Add a parameter to a field into script_fields The ScriptFields object will be returned, so calls to this can be chained. """ try: self.fields[field_name]['params'][param_name] = param_value except Exception as ex: raise ScriptFieldsError("Error adding parameter %s with value %s :%s" % (param_name, param_value, ex)) return self
Add a parameter to a field into script_fields The ScriptFields object will be returned, so calls to this can be chained.
def sendtoaddress(self, recv_addr, amount, comment=""):
        """Send amount to address, with optional comment. Returns txid.
        sendtoaddress(ADDRESS, AMOUNT, COMMENT)"""
        return self.req("sendtoaddress", [recv_addr, amount, comment])
Send amount to address, with optional comment. Returns txid. sendtoaddress(ADDRESS, AMOUNT, COMMENT)
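Hypothetical call through a client exposing this wrapper (the client class and the address are placeholders):

client = WalletRPC()   # hypothetical JSON-RPC wallet client with a req() method
txid = client.sendtoaddress('mkHS9ne12qx9pS9VojpwU5xtRd4T7X7ZUt', 0.5, 'invoice 42')
print(txid)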
def summary(self): """Return a formatted string giving a quick summary of the results.""" res = ("nlive: {:d}\n" "niter: {:d}\n" "ncall: {:d}\n" "eff(%): {:6.3f}\n" "logz: {:6.3f} +/- {:6.3f}" .format(self.nlive, self.niter, sum(self.ncall), self.eff, self.logz[-1], self.logzerr[-1])) print('Summary\n=======\n'+res)
Return a formatted string giving a quick summary of the results.
def get_distribution_path(venv, distribution): ''' Return the path to a distribution installed inside a virtualenv .. versionadded:: 2016.3.0 venv Path to the virtualenv. distribution Name of the distribution. Note, all non-alphanumeric characters will be converted to dashes. CLI Example: .. code-block:: bash salt '*' virtualenv.get_distribution_path /path/to/my/venv my_distribution ''' _verify_safe_py_code(distribution) bin_path = _verify_virtualenv(venv) ret = __salt__['cmd.exec_code_all']( bin_path, 'import pkg_resources; ' "print(pkg_resources.get_distribution('{0}').location)".format( distribution ) ) if ret['retcode'] != 0: raise CommandExecutionError('{stdout}\n{stderr}'.format(**ret)) return ret['stdout']
Return the path to a distribution installed inside a virtualenv .. versionadded:: 2016.3.0 venv Path to the virtualenv. distribution Name of the distribution. Note, all non-alphanumeric characters will be converted to dashes. CLI Example: .. code-block:: bash salt '*' virtualenv.get_distribution_path /path/to/my/venv my_distribution
def _choice_getter(self): """ Return a function object suitable for the "get" side of the property descriptor. """ def get_group_member_element(obj): return obj.first_child_found_in(*self._member_nsptagnames) get_group_member_element.__doc__ = ( 'Return the child element belonging to this element group, or ' '|None| if no member child is present.' ) return get_group_member_element
Return a function object suitable for the "get" side of the property descriptor.
def UpdateAcqEraEndDate(self, acquisition_era_name="", end_date=0):
        """
        Input dictionary has to have the following keys: acquisition_era_name, end_date.
        """
        if acquisition_era_name == "" or end_date == 0:
            dbsExceptionHandler('dbsException-invalid-input',
                                "acquisition_era_name and end_date are required")

        conn = self.dbi.connection()
        tran = conn.begin()
        try:
            self.acqud.execute(conn, acquisition_era_name, end_date, tran)
            if tran:
                tran.commit()
            tran = None
        finally:
            if tran:
                tran.rollback()
            if conn:
                conn.close()
Input dictionary has to have the following keys: acquisition_era_name, end_date.
def get_repo_parent(path): """ Returns parent repo or input path if none found. :return: grit.Local or path """ # path is a repository if is_repo(path): return Local(path) # path is inside a repository elif not os.path.isdir(path): _rel = '' while path and path != '/': if is_repo(path): return Local(path) else: _rel = os.path.join(os.path.basename(path), _rel) path = os.path.dirname(path) return path
Returns parent repo or input path if none found. :return: grit.Local or path
def send_invoice_email(self, invoice_id, email_dict):
        """
        Sends an invoice by email
        If you want to send your email to more than one person, do:
        'recipients': {'to': ['bykof@me.com', 'mbykovski@seibert-media.net']}

        :param invoice_id: the invoice id
        :param email_dict: the email dict
        :return dict
        """
        return self._create_post_request(
            resource=INVOICES,
            billomat_id=invoice_id,
            send_data=email_dict,
            command=EMAIL,
        )
Sends an invoice by email If you want to send your email to more than one person, do: 'recipients': {'to': ['bykof@me.com', 'mbykovski@seibert-media.net']} :param invoice_id: the invoice id :param email_dict: the email dict :return dict
def _make_connect(module, args, kwargs): """ Returns a function capable of making connections with a particular driver given the supplied credentials. """ # pylint: disable-msg=W0142 return functools.partial(module.connect, *args, **kwargs)
Returns a function capable of making connections with a particular driver given the supplied credentials.
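The same deferred-connection pattern, illustrated self-contained with the stdlib sqlite3 driver:

import functools
import sqlite3

# bind driver and credentials now, open connections later (and repeatedly)
connect = functools.partial(sqlite3.connect, ':memory:')

conn = connect()
conn.execute('CREATE TABLE t (x INTEGER)')
conn.close()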
def _print_stats(self, env, stats): """ Prints statistic information using io stream. `env` ``Environment`` object. `stats` Tuple of task stats for each date. """ def _format_time(mins): """ Generates formatted time string. """ mins = int(mins) if mins < MINS_IN_HOUR: time_str = '0:{0:02}'.format(mins) else: hours = mins // MINS_IN_HOUR mins %= MINS_IN_HOUR if mins > 0: time_str = '{0}:{1:02}'.format(hours, mins) else: time_str = '{0}'.format(hours) return time_str if not stats: env.io.write('No stats found.') return for date, tasks in stats: env.io.write('') total_mins = float(sum(v[1] for v in tasks)) env.io.write('[ {0} ]'.format(date.strftime('%Y-%m-%d'))) env.io.write('') for name, mins in tasks: # format time time_str = _format_time(mins) # generate stat line line = ' {0:>5}'.format(time_str) line += ' ({0:2.0f}%) - '.format(mins * 100.0 / total_mins) if len(name) > 55: name = name[:55] + '...' line += name env.io.write(line) # generate total line env.io.write('_' * len(line)) time_str = _format_time(total_mins) env.io.write(' {0:>5} (total)'.format(time_str)) env.io.write('')
Prints statistic information using io stream. `env` ``Environment`` object. `stats` Tuple of task stats for each date.
def register(self, prefix, viewset, base_name=None):
        """Add any registered route into a global API directory.

        If the prefix includes a path separator,
        store the URL in the directory under the first path segment.
        Otherwise, store it as-is.

        For example, if there are two registered prefixes,
        'v1/users' and 'groups', `directory` will look like:

        {
            'v1': {
                'users': {
                    '_url': 'users-list',
                    '_viewset': <class 'UserViewSet'>
                },
            }
            'groups': {
                '_url': 'groups-list',
                '_viewset': <class 'GroupViewSet'>
            }
        }
        """
        if base_name is None:
            base_name = prefix

        super(DynamicRouter, self).register(prefix, viewset, base_name)

        prefix_parts = prefix.split('/')
        if len(prefix_parts) > 1:
            prefix = prefix_parts[0]
            endpoint = '/'.join(prefix_parts[1:])
        else:
            endpoint = prefix
            prefix = None

        if prefix and prefix not in directory:
            current = directory[prefix] = {}
        else:
            current = directory.get(prefix, directory)

        list_name = self.routes[0].name
        url_name = list_name.format(basename=base_name)
        if endpoint not in current:
            current[endpoint] = {}
        current[endpoint]['_url'] = url_name
        current[endpoint]['_viewset'] = viewset
Add any registered route into a global API directory. If the prefix includes a path separator, store the URL in the directory under the first path segment. Otherwise, store it as-is. For example, if there are two registered prefixes, 'v1/users' and 'groups', `directory` will look like: { 'v1': { 'users': { '_url': 'users-list', '_viewset': <class 'UserViewSet'> }, } 'groups': { '_url': 'groups-list', '_viewset': <class 'GroupViewSet'> } }
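Hypothetical registrations that would produce the directory shown above (the viewset classes are assumed to be defined elsewhere):

router = DynamicRouter()
router.register('v1/users', UserViewSet)   # nested under the 'v1' segment
router.register('groups', GroupViewSet)    # stored at the top level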
def furthest_from_root(self):
        '''Return the ``Node`` that is furthest from the root and the corresponding distance. Edges with no length will be considered to have a length of 0

        Returns:
            ``tuple``: First value is the furthest ``Node`` from the root, and second value is the corresponding distance
        '''
        best = (self.root, 0)
        d = dict()
        for node in self.traverse_preorder():
            if node.edge_length is None:
                d[node] = 0
            else:
                d[node] = node.edge_length
            if not node.is_root():
                d[node] += d[node.parent]
            if d[node] > best[1]:
                best = (node, d[node])
        return best
Return the ``Node`` that is furthest from the root and the corresponding distance. Edges with no length will be considered to have a length of 0 Returns: ``tuple``: First value is the furthest ``Node`` from the root, and second value is the corresponding distance
def uint64(self, val): """append a frame containing a uint64""" try: self.msg += [pack("!Q", val)] except struct.error: raise ValueError("Expected uint64") return self
append a frame containing a uint64
def add_lat_lon(self, lat, lon, precision=1e7): """Add lat, lon to gps (lat, lon in float).""" self._ef["GPS"][piexif.GPSIFD.GPSLatitudeRef] = "N" if lat > 0 else "S" self._ef["GPS"][piexif.GPSIFD.GPSLongitudeRef] = "E" if lon > 0 else "W" self._ef["GPS"][piexif.GPSIFD.GPSLongitude] = decimal_to_dms( abs(lon), int(precision)) self._ef["GPS"][piexif.GPSIFD.GPSLatitude] = decimal_to_dms( abs(lat), int(precision))
Add lat, lon to gps (lat, lon in float).
def _list_objects(self, client_kwargs, path, max_request_entries):
        """
        Lists objects.

        Args:
            client_kwargs (dict): Client arguments.
            path (str): Path relative to current locator.
            max_request_entries (int): If specified, maximum entries returned
                by request.

        Returns:
            generator of tuple: object name str, object header dict
        """
        client_kwargs = client_kwargs.copy()
        if max_request_entries:
            client_kwargs['MaxKeys'] = max_request_entries

        while True:
            with _handle_client_error():
                response = self.client.list_objects_v2(
                    Prefix=path, **client_kwargs)

            try:
                for obj in response['Contents']:
                    yield obj.pop('Key'), obj
            except KeyError:
                raise _ObjectNotFoundError('Not found: %s' % path)

            # Handles results on more than one page
            try:
                client_kwargs['ContinuationToken'] = response[
                    'NextContinuationToken']
            except KeyError:
                # End of results
                break
Lists objects. Args: client_kwargs (dict): Client arguments. path (str): Path relative to current locator. max_request_entries (int): If specified, maximum entries returned by request. Returns: generator of tuple: object name str, object header dict
def acquire_read(self):
        """
        Acquire a read lock. Several threads can hold this type of lock.
        It is exclusive with write locks.
        """
        self.monitor.acquire()
        while self.rwlock < 0 or self.writers_waiting:
            self.readers_ok.wait()
        self.rwlock += 1
        self.monitor.release()
Acquire a read lock. Several threads can hold this type of lock. It is exclusive with write locks.
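A hedged usage pattern for this readers-writer lock (assuming the class also provides the matching release and acquire_write methods such locks normally pair with):

lock.acquire_read()
try:
    snapshot = dict(shared_state)   # many readers may run here concurrently
finally:
    lock.release()                  # assumed counterpart to acquire_read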
def _set_disable_res(self, v, load=False): """ Setter method for disable_res, mapped from YANG variable /ha/process_restart/disable_res (container) If this variable is read-only (config: false) in the source YANG file, then _set_disable_res is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_disable_res() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=disable_res.disable_res, is_container='container', presence=False, yang_name="disable-res", rest_name="disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Disable process restart for fault recovery', u'cli-compact-syntax': None, u'alt-name': u'disable'}}, namespace='urn:brocade.com:mgmt:brocade-ha', defining_module='brocade-ha', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """disable_res must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=disable_res.disable_res, is_container='container', presence=False, yang_name="disable-res", rest_name="disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Disable process restart for fault recovery', u'cli-compact-syntax': None, u'alt-name': u'disable'}}, namespace='urn:brocade.com:mgmt:brocade-ha', defining_module='brocade-ha', yang_type='container', is_config=True)""", }) self.__disable_res = t if hasattr(self, '_set'): self._set()
Setter method for disable_res, mapped from YANG variable /ha/process_restart/disable_res (container) If this variable is read-only (config: false) in the source YANG file, then _set_disable_res is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_disable_res() directly.
def connections_from_graph(env, G, edge_data=False): """Create connections for agents in the given environment from the given NetworkX graph structure. :param env: Environment where the agents live. The environment should be derived from :class:`~creamas.core.environment.Environment`, :class:`~creamas.mp.MultiEnvironment` or :class:`~creamas.ds.DistributedEnvironment`. :param G: NetworkX graph structure, either :class:`networkx.graph.Graph` or :class:`networkx.digraph.DiGraph`. The graph needs to have the same number of nodes as the environment has agents (excluding the managers). :param bool edge_data: If ``True``, edge data from the given graph is copied to the agents' :attr:`connections`. .. note:: By design, manager agents are excluded from the connections and should not be counted towards environment's agent count. The created connections are stored in each agent's :attr:`~creamas.core.agent.CreativeAgent.connections` and the possible edge data is stored as key-value pairs in the connection dictionary. The agents are sorted by their environments' hosts and ports before each agent is mapped to a node in **G**. This should cause some network generation methods in NetworkX, e.g. :func:`~networkx.generators.random_graphs.connected_watts_strogatz_graph`, to create more connections between agents in the same environment and/or node when using :class:`~creamas.mp.MultiEnvironment` or :class:`~creamas.ds.DistributedEnvironment`. """ if not issubclass(G.__class__, (Graph, DiGraph)): raise TypeError("Graph structure must be derived from Networkx's " "Graph or DiGraph.") if not hasattr(env, 'get_agents'): raise TypeError("Parameter 'env' must have get_agents.") addrs = env.get_agents(addr=True) if len(addrs) != len(G): raise ValueError("The number of graph nodes and agents in the " "environment (excluding the manager agent) must " "match. Now got {} nodes and {} agents." .format(len(G), len(addrs))) # Sort agent addresses to the order they were added to the environment. addrs = sort_addrs(addrs) _addrs2nodes(addrs, G) conn_map = _edges2conns(G, edge_data) env.create_connections(conn_map)
Create connections for agents in the given environment from the given NetworkX graph structure. :param env: Environment where the agents live. The environment should be derived from :class:`~creamas.core.environment.Environment`, :class:`~creamas.mp.MultiEnvironment` or :class:`~creamas.ds.DistributedEnvironment`. :param G: NetworkX graph structure, either :class:`networkx.graph.Graph` or :class:`networkx.digraph.DiGraph`. The graph needs to have the same number of nodes as the environment has agents (excluding the managers). :param bool edge_data: If ``True``, edge data from the given graph is copied to the agents' :attr:`connections`. .. note:: By design, manager agents are excluded from the connections and should not be counted towards environment's agent count. The created connections are stored in each agent's :attr:`~creamas.core.agent.CreativeAgent.connections` and the possible edge data is stored as key-value pairs in the connection dictionary. The agents are sorted by their environments' hosts and ports before each agent is mapped to a node in **G**. This should cause some network generation methods in NetworkX, e.g. :func:`~networkx.generators.random_graphs.connected_watts_strogatz_graph`, to create more connections between agents in the same environment and/or node when using :class:`~creamas.mp.MultiEnvironment` or :class:`~creamas.ds.DistributedEnvironment`.
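A minimal sketch, assuming a creamas environment ``env`` whose agents are already spawned:

import networkx as nx

n_agents = len(env.get_agents(addr=True))
G = nx.connected_watts_strogatz_graph(n=n_agents, k=4, p=0.1)
connections_from_graph(env, G, edge_data=False)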
def SubtractFromBalance(self, assetId, fixed8_val):
        """
        Subtract an amount from the specified balance.

        Args:
            assetId (UInt256):
            fixed8_val (Fixed8): amount to subtract.
        """
        found = False
        for key, balance in self.Balances.items():
            if key == assetId:
                self.Balances[assetId] = self.Balances[assetId] - fixed8_val
                found = True
        if not found:
            self.Balances[assetId] = fixed8_val * Fixed8(-1)
Subtract an amount from the specified balance. Args: assetId (UInt256): fixed8_val (Fixed8): amount to subtract.
def load(self, graphic): """ Loads information for this item from the xml data. :param graphic | <XWalkthroughItem> """ for prop in graphic.properties(): key = prop.name() value = prop.value() if key == 'caption': value = projex.wikitext.render(value.strip()) self.setProperty(key, value) for attr, attr_value in prop.attributes().items(): self.setProperty('{0}_{1}'.format(key, attr), attr_value) self.prepare()
Loads information for this item from the xml data. :param graphic | <XWalkthroughItem>
def continue_oauth(self, oauth_callback_data=None): """ Continuation of OAuth procedure. Method must be explicitly called in order to complete OAuth. This allows external entities, e.g. websites, to provide tokens through callback URLs directly. :param oauth_callback_data: The callback URL received to a Web app :type oauth_callback_data: bytes :return: """ self.response_qs = oauth_callback_data if not self.response_qs: webbrowser.open(self.redirect) self.response_qs = input("Callback URL: ") # input the url from redirect after authorization response_qs = self.response_qs.split(b'?')[-1] # Step 3: Complete -- obtain authorized key/secret for "resource owner" access_token = self.handshaker.complete(self.request_token, response_qs) # input the access token to return a csrf (edit) token auth1 = OAuth1(self.consumer_token.key, client_secret=self.consumer_token.secret, resource_owner_key=access_token.key, resource_owner_secret=access_token.secret) self.s.auth = auth1 self.generate_edit_credentials()
Continuation of OAuth procedure. Method must be explicitly called in order to complete OAuth. This allows external entities, e.g. websites, to provide tokens through callback URLs directly. :param oauth_callback_data: The callback URL received to a Web app :type oauth_callback_data: bytes :return:
def search_archives(query):
    """
    Return list of :class:`.DBArchive` which match all properties that are
    set (``not None``) using AND operator to all of them.

    Example:
        result = storage_handler.search_archives(
            DBArchive(isbn="azgabash")
        )

    Args:
        query (obj): :class:`.DBArchive` with `some` of the properties set.

    Returns:
        list: List of matching :class:`.DBArchive` or ``[]`` if no match \
              was found.

    Raises:
        InvalidType: When the `query` is not instance of :class:`.DBArchive`.
    """
    _assert_obj_type(query, name="query", obj_type=DBArchive)

    return _get_handler().search_objects(query)
Return list of :class:`.DBArchive` which match all properties that are set (``not None``) using AND operator to all of them. Example: result = storage_handler.search_archives( DBArchive(isbn="azgabash") ) Args: query (obj): :class:`.DBArchive` with `some` of the properties set. Returns: list: List of matching :class:`.DBArchive` or ``[]`` if no match \ was found. Raises: InvalidType: When the `query` is not instance of :class:`.DBArchive`.
def write_to_socket(self, frame_data): """Write data to the socket. :param str frame_data: :return: """ self._wr_lock.acquire() try: total_bytes_written = 0 bytes_to_send = len(frame_data) while total_bytes_written < bytes_to_send: try: if not self.socket: raise socket.error('connection/socket error') bytes_written = ( self.socket.send(frame_data[total_bytes_written:]) ) if bytes_written == 0: raise socket.error('connection/socket error') total_bytes_written += bytes_written except socket.timeout: pass except socket.error as why: if why.args[0] in (EWOULDBLOCK, EAGAIN): continue self._exceptions.append(AMQPConnectionError(why)) return finally: self._wr_lock.release()
Write data to the socket. :param str frame_data: :return:
def add_listener_policy(self, json_data):
        """Attaches listener policies to an ELB

        Args:
            json_data (json): return data from ELB upsert
        """
        env = boto3.session.Session(profile_name=self.env, region_name=self.region)
        elbclient = env.client('elb')

        # create stickiness policy if set in configs
        stickiness = {}
        elb_settings = self.properties['elb']
        if elb_settings.get('ports'):
            ports = elb_settings['ports']
            for listener in ports:
                if listener.get("stickiness"):
                    stickiness = self.add_stickiness()
                    LOG.info('Stickiness Found: %s', stickiness)
                    break

        # Attach policies to created ELB
        for job in json.loads(json_data)['job']:
            for listener in job['listeners']:
                policies = []
                ext_port = listener['externalPort']
                if listener['listenerPolicies']:
                    policies.extend(listener['listenerPolicies'])
                if stickiness.get(ext_port):
                    policies.append(stickiness.get(ext_port))
                if policies:
                    LOG.info('Adding listener policies: %s', policies)
                    elbclient.set_load_balancer_policies_of_listener(
                        LoadBalancerName=self.app, LoadBalancerPort=ext_port, PolicyNames=policies)
Attaches listener policies to an ELB Args: json_data (json): return data from ELB upsert
def _to_dict_fixed_width_arrays(self, var_len_str=True): """A dict of arrays that stores data and annotation. It is sufficient for reconstructing the object. """ self.strings_to_categoricals() obs_rec, uns_obs = df_to_records_fixed_width(self._obs, var_len_str) var_rec, uns_var = df_to_records_fixed_width(self._var, var_len_str) layers = self.layers.as_dict() d = { 'X': self._X, 'obs': obs_rec, 'var': var_rec, 'obsm': self._obsm, 'varm': self._varm, 'layers': layers, # add the categories to the unstructured annotation 'uns': {**self._uns, **uns_obs, **uns_var}} if self.raw is not None: self.strings_to_categoricals(self.raw._var) var_rec, uns_var = df_to_records_fixed_width(self.raw._var, var_len_str) d['raw.X'] = self.raw.X d['raw.var'] = var_rec d['raw.varm'] = self.raw.varm d['raw.cat'] = uns_var return d
A dict of arrays that stores data and annotation. It is sufficient for reconstructing the object.
def more(value1, value2):
        """
        Checks if the first value is greater than the second one.
        The operation can be performed over numbers or strings.

        :param value1: the first value to compare

        :param value2: the second value to compare

        :return: true if the first value is greater than second and false otherwise.
        """
        number1 = FloatConverter.to_nullable_float(value1)
        number2 = FloatConverter.to_nullable_float(value2)

        if number1 is None or number2 is None:
            return False

        return number1 > number2
Checks if the first value is greater than the second one. The operation can be performed over numbers or strings. :param value1: the first value to compare :param value2: the second value to compare :return: true if the first value is greater than second and false otherwise.
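Hedged examples, assuming FloatConverter parses numeric strings and yields None for anything unparseable:

>>> more('10', 5)
True
>>> more(3, 3)
False
>>> more('abc', 5)   # unparseable -> None -> False
False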
def popular(self, **kwargs): """ Get the list of popular movies on The Movie Database. This list refreshes every day. Args: page: (optional) Minimum value of 1. Expected value is an integer. language: (optional) ISO 639-1 code. Returns: A dict representation of the JSON returned from the API. """ path = self._get_path('popular') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Get the list of popular movies on The Movie Database. This list refreshes every day. Args: page: (optional) Minimum value of 1. Expected value is an integer. language: (optional) ISO 639-1 code. Returns: A dict representation of the JSON returned from the API.
def trunk_update(request, trunk_id, old_trunk, new_trunk): """Handle update to a trunk in (at most) three neutron calls. The JavaScript side should know only about the old and new state of a trunk. However it should not know anything about how the old and new are meant to be diffed and sent to neutron. We handle that here. This code was adapted from Heat, see: https://review.opendev.org/442496 Call #1) Update all changed properties but 'sub_ports'. PUT /v2.0/trunks/TRUNK_ID openstack network trunk set Call #2) Delete subports not needed anymore. PUT /v2.0/trunks/TRUNK_ID/remove_subports openstack network trunk unset --subport Call #3) Create new subports. PUT /v2.0/trunks/TRUNK_ID/add_subports openstack network trunk set --subport A single neutron port cannot be two subports at the same time (ie. have two segmentation (type, ID)s on the same trunk or to belong to two trunks). Therefore we have to delete old subports before creating new ones to avoid conflicts. """ LOG.debug("trunk_update(): trunk_id=%s", trunk_id) # NOTE(bence romsics): We want to do set operations on the subports, # however we receive subports represented as dicts. In Python # mutable objects like dicts are not hashable so they cannot be # inserted into sets. So we convert subport dicts to (immutable) # frozensets in order to do the set operations. def dict2frozenset(d): """Convert a dict to a frozenset. Create an immutable equivalent of a dict, so it's hashable therefore can be used as an element of a set or a key of another dictionary. """ return frozenset(d.items()) # cf. neutron_lib/api/definitions/trunk.py updatable_props = ('admin_state_up', 'description', 'name') prop_diff = { k: new_trunk[k] for k in updatable_props if old_trunk[k] != new_trunk[k]} subports_old = {dict2frozenset(d): d for d in old_trunk.get('sub_ports', [])} subports_new = {dict2frozenset(d): d for d in new_trunk.get('sub_ports', [])} old_set = set(subports_old.keys()) new_set = set(subports_new.keys()) delete = old_set - new_set create = new_set - old_set dicts_delete = [subports_old[fs] for fs in delete] dicts_create = [subports_new[fs] for fs in create] trunk = old_trunk if prop_diff: LOG.debug('trunk_update(): update properties of trunk %s: %s', trunk_id, prop_diff) body = _prepare_body_update_trunk(prop_diff) trunk = neutronclient(request).update_trunk( trunk_id, body=body).get('trunk') if dicts_delete: LOG.debug('trunk_update(): delete subports of trunk %s: %s', trunk_id, dicts_delete) body = _prepare_body_remove_subports(dicts_delete) trunk = neutronclient(request).trunk_remove_subports( trunk_id, body=body) if dicts_create: LOG.debug('trunk_update(): create subports of trunk %s: %s', trunk_id, dicts_create) body = _prepare_body_add_subports(dicts_create) trunk = neutronclient(request).trunk_add_subports( trunk_id, body=body) return Trunk(trunk)
Handle update to a trunk in (at most) three neutron calls. The JavaScript side should know only about the old and new state of a trunk. However it should not know anything about how the old and new are meant to be diffed and sent to neutron. We handle that here. This code was adapted from Heat, see: https://review.opendev.org/442496 Call #1) Update all changed properties but 'sub_ports'. PUT /v2.0/trunks/TRUNK_ID openstack network trunk set Call #2) Delete subports not needed anymore. PUT /v2.0/trunks/TRUNK_ID/remove_subports openstack network trunk unset --subport Call #3) Create new subports. PUT /v2.0/trunks/TRUNK_ID/add_subports openstack network trunk set --subport A single neutron port cannot be two subports at the same time (ie. have two segmentation (type, ID)s on the same trunk or to belong to two trunks). Therefore we have to delete old subports before creating new ones to avoid conflicts.
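A hypothetical old/new pair showing which of the three calls fire: the rename triggers call #1, and the swapped subport triggers calls #2 and #3 (all IDs are placeholders):

old = {'name': 'trunk-a', 'admin_state_up': True, 'description': '',
       'sub_ports': [{'port_id': 'port-1', 'segmentation_type': 'vlan',
                      'segmentation_id': 100}]}
new = dict(old, name='trunk-b',
           sub_ports=[{'port_id': 'port-2', 'segmentation_type': 'vlan',
                       'segmentation_id': 200}])
trunk = trunk_update(request, 'TRUNK_ID', old, new)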
def usearch_cluster_error_correction( fasta_filepath, output_filepath=None, output_uc_filepath=None, percent_id_err=0.97, sizein=True, sizeout=True, w=64, slots=16769023, maxrejects=64, log_name="usearch_cluster_err_corrected.log", usersort=False, HALT_EXEC=False, save_intermediate_files=False, remove_usearch_logs=False, working_dir=None): """ Cluster for err. correction at percent_id_err, output consensus fasta fasta_filepath = input fasta file, generally a dereplicated fasta output_filepath = output error corrected fasta filepath percent_id_err = minimum identity percent. sizein = not defined in usearch helpstring sizeout = not defined in usearch helpstring w = Word length for U-sorting slots = Size of compressed index table. Should be prime, e.g. 40000003. Should also specify --w, typical is --w 16 or --w 32. maxrejects = Max rejected targets, 0=ignore, default 32. log_name = string specifying output log name usersort = Enable if input fasta not sorted by length purposefully, lest usearch will raise an error. HALT_EXEC: Used for debugging app controller save_intermediate_files: Preserve all intermediate files created. """ if not output_filepath: _, output_filepath = mkstemp(prefix='usearch_cluster_err_corrected', suffix='.fasta') log_filepath = join(working_dir, log_name) params = {'--sizein': sizein, '--sizeout': sizeout, '--id': percent_id_err, '--w': w, '--slots': slots, '--maxrejects': maxrejects} app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC) if usersort: app.Parameters['--usersort'].on() data = {'--cluster': fasta_filepath, '--consout': output_filepath } if not remove_usearch_logs: data['--log'] = log_filepath if output_uc_filepath: data['--uc'] = output_uc_filepath app_result = app(data) return app_result, output_filepath
Cluster for err. correction at percent_id_err, output consensus fasta fasta_filepath = input fasta file, generally a dereplicated fasta output_filepath = output error corrected fasta filepath percent_id_err = minimum identity percent. sizein = not defined in usearch helpstring sizeout = not defined in usearch helpstring w = Word length for U-sorting slots = Size of compressed index table. Should be prime, e.g. 40000003. Should also specify --w, typical is --w 16 or --w 32. maxrejects = Max rejected targets, 0=ignore, default 32. log_name = string specifying output log name usersort = Enable if input fasta not sorted by length purposefully, lest usearch will raise an error. HALT_EXEC: Used for debugging app controller save_intermediate_files: Preserve all intermediate files created.
async def _auth_handler_post_get_auth(self): ''' If the user supplied auth does rely on a response (is a PostResponseAuth object) then we call the auth's __call__ returning a dict to update the request's headers with, as long as there is an appropriate 401'd response object to calculate auth details from. ''' # pylint: disable=not-callable if isinstance(self.auth, PostResponseAuth): if self.history_objects: authable_resp = self.history_objects[-1] if authable_resp.status_code == 401: if not self.auth.auth_attempted: self.auth.auth_attempted = True return await self.auth(authable_resp, self) return {}
If the user supplied auth does rely on a response (is a PostResponseAuth object) then we call the auth's __call__ returning a dict to update the request's headers with, as long as there is an appropriate 401'd response object to calculate auth details from.
def get_subsamples(data, samples, force):
    """ Apply state, ncluster, and force filters to select samples to be run. """

    subsamples = []
    for sample in samples:
        if not force:
            if sample.stats.state >= 5:
                print("    Skipping Sample {}; Already has consens reads. "
                      "Use force arg to overwrite.".format(sample.name))
            elif not sample.stats.clusters_hidepth:
                print("    Skipping Sample {}; No clusters found."
                      .format(sample.name))
            elif sample.stats.state < 4:
                print("    Skipping Sample {}; not yet finished step4."
                      .format(sample.name))
            else:
                subsamples.append(sample)
        else:
            if not sample.stats.clusters_hidepth:
                print("    Skipping Sample {}; No clusters found in {}."
                      .format(sample.name, sample.files.clusters))
            elif sample.stats.state < 4:
                print("    Skipping Sample {}; not yet finished step4."
                      .format(sample.name))
            else:
                subsamples.append(sample)

    if len(subsamples) == 0:
        raise IPyradWarningExit("    No samples to cluster, exiting.")

    ## fall back to default rates if step 4 estimates are missing
    if "hetero_est" not in data.stats:
        print("  No estimates of heterozygosity and error rate. "
              "Using default values")
        for sample in subsamples:
            sample.stats.hetero_est = 0.001
            sample.stats.error_est = 0.0001

    if data._headers:
        print(u"  Mean error  [{:.5f} sd={:.5f}]\n"
              u"  Mean hetero [{:.5f} sd={:.5f}]"
              .format(data.stats.error_est.mean(), data.stats.error_est.std(),
                      data.stats.hetero_est.mean(), data.stats.hetero_est.std()))

    return subsamples
Apply state, ncluster, and force filters to select samples to be run.
def tvBrowserHazard_selection_changed(self): """Update layer description label.""" (is_compatible, desc) = self.get_layer_description_from_browser( 'hazard') self.lblDescribeBrowserHazLayer.setText(desc) self.lblDescribeBrowserHazLayer.setEnabled(is_compatible) self.parent.pbnNext.setEnabled(is_compatible)
Update layer description label.
def appendAnchor(self, name=None, position=None, color=None, anchor=None):
        """
        Append an anchor to this glyph.

            >>> anchor = glyph.appendAnchor("top", (10, 20))

        This will return a :class:`BaseAnchor` object representing the new
        anchor in the glyph. ``name`` indicates the name to be assigned to
        the anchor. It must be a :ref:`type-string` or ``None``.
        ``position`` indicates the x and y location to be applied to the
        anchor. It must be a :ref:`type-coordinate` value. ``color``
        indicates the color to be applied to the anchor. It must be a
        :ref:`type-color` or ``None``.

            >>> anchor = glyph.appendAnchor("top", (10, 20), color=(1, 0, 0, 1))

        ``anchor`` may be a :class:`BaseAnchor` object from which attribute
        values will be copied. If ``name``, ``position`` or ``color`` are
        specified as arguments, those values will be used instead of the
        values in the given anchor object.
        """
        identifier = None
        if anchor is not None:
            anchor = normalizers.normalizeAnchor(anchor)
            if name is None:
                name = anchor.name
            if position is None:
                position = anchor.position
            if color is None:
                color = anchor.color
            if anchor.identifier is not None:
                existing = set([a.identifier for a in self.anchors if a.identifier is not None])
                if anchor.identifier not in existing:
                    identifier = anchor.identifier
        name = normalizers.normalizeAnchorName(name)
        position = normalizers.normalizeCoordinateTuple(position)
        if color is not None:
            color = normalizers.normalizeColor(color)
        identifier = normalizers.normalizeIdentifier(identifier)
        return self._appendAnchor(name, position=position, color=color, identifier=identifier)
Append an anchor to this glyph. >>> anchor = glyph.appendAnchor("top", (10, 20)) This will return a :class:`BaseAnchor` object representing the new anchor in the glyph. ``name`` indicates the name to be assigned to the anchor. It must be a :ref:`type-string` or ``None``. ``position`` indicates the x and y location to be applied to the anchor. It must be a :ref:`type-coordinate` value. ``color`` indicates the color to be applied to the anchor. It must be a :ref:`type-color` or ``None``. >>> anchor = glyph.appendAnchor("top", (10, 20), color=(1, 0, 0, 1)) ``anchor`` may be a :class:`BaseAnchor` object from which attribute values will be copied. If ``name``, ``position`` or ``color`` are specified as arguments, those values will be used instead of the values in the given anchor object.
def get_definition_properties(self, project, definition_id, filter=None): """GetDefinitionProperties. [Preview API] Gets properties for a definition. :param str project: Project ID or project name :param int definition_id: The ID of the definition. :param [str] filter: A comma-delimited list of properties. If specified, filters to these specific properties. :rtype: :class:`<object> <azure.devops.v5_0.build.models.object>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if definition_id is not None: route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int') query_parameters = {} if filter is not None: filter = ",".join(filter) query_parameters['filter'] = self._serialize.query('filter', filter, 'str') response = self._send(http_method='GET', location_id='d9826ad7-2a68-46a9-a6e9-677698777895', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('object', response)
GetDefinitionProperties. [Preview API] Gets properties for a definition. :param str project: Project ID or project name :param int definition_id: The ID of the definition. :param [str] filter: A comma-delimited list of properties. If specified, filters to these specific properties. :rtype: :class:`<object> <azure.devops.v5_0.build.models.object>`
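Hypothetical call through an Azure DevOps build client instance (the property names in the filter are placeholders):

props = build_client.get_definition_properties(
    'MyProject', 42, filter=['fabrikam.sonar', 'fabrikam.coverage'])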
def OpenServerEndpoint(self, path, verify_cb=lambda x: True, data=None, params=None, headers=None, method="GET", timeout=None): """Search through all the base URLs to connect to one that works. This is a thin wrapper around requests.request() so most parameters are documented there. Args: path: The URL path to access in this endpoint. verify_cb: A callback which should return True if the response is reasonable. This is used to detect if we are able to talk to the correct endpoint. If not we try a different endpoint/proxy combination. data: Parameters to send in POST bodies (See Requests documentation). params: Parameters to send in GET URLs (See Requests documentation). headers: Additional headers (See Requests documentation) method: The HTTP method to use. If not set we select one automatically. timeout: See Requests documentation. Returns: an HTTPObject() instance with the correct error code set. """ tries = 0 last_error = HTTPObject(code=404) while tries < len(self.base_urls): base_url_index = self.last_base_url_index % len(self.base_urls) active_base_url = self.base_urls[base_url_index] result = self.OpenURL( self._ConcatenateURL(active_base_url, path), data=data, params=params, headers=headers, method=method, timeout=timeout, verify_cb=verify_cb, ) if not result.Success(): tries += 1 self.last_base_url_index += 1 last_error = result continue # The URL worked - we record that. self.active_base_url = active_base_url return result # No connection is possible at all. logging.info( "Could not connect to GRR servers %s, directly or through " "these proxies: %s.", self.base_urls, self.proxies) return last_error
Search through all the base URLs to connect to one that works. This is a thin wrapper around requests.request() so most parameters are documented there. Args: path: The URL path to access in this endpoint. verify_cb: A callback which should return True if the response is reasonable. This is used to detect if we are able to talk to the correct endpoint. If not we try a different endpoint/proxy combination. data: Parameters to send in POST bodies (See Requests documentation). params: Parameters to send in GET URLs (See Requests documentation). headers: Additional headers (See Requests documentation) method: The HTTP method to use. If not set we select one automatically. timeout: See Requests documentation. Returns: an HTTPObject() instance with the correct error code set.
def x_plus(self, dx=None):
        """ Mutable x addition. Defaults to set delta value. """
        if dx is None:
            self.x += self.dx
        else:
            self.x += dx
Mutable x addition. Defaults to set delta value.
def getManager(self, force=False): """Extract the YadisServiceManager for this object's URL and suffix from the session. @param force: True if the manager should be returned regardless of whether it's a manager for self.url. @return: The current YadisServiceManager, if it's for this URL, or else None """ manager = self.session.get(self.getSessionKey()) if (manager is not None and (manager.forURL(self.url) or force)): return manager else: return None
Extract the YadisServiceManager for this object's URL and suffix from the session. @param force: True if the manager should be returned regardless of whether it's a manager for self.url. @return: The current YadisServiceManager, if it's for this URL, or else None
def remove_father(self, father):
        """
        Remove the father node. Do nothing if the node is not a father.

        Args:
            father: the father node to remove
        """
        self._fathers = [x for x in self._fathers if x.node_id != father.node_id]
Remove the father node. Do nothing if the node is not a father. Args: father: the father node to remove
def virtual_network_present(name, address_prefixes, resource_group, dns_servers=None,
                            tags=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a virtual network exists.

    :param name: Name of the virtual network.

    :param resource_group: The resource group assigned to the virtual network.

    :param address_prefixes: A list of CIDR blocks which can be used by subnets within
        the virtual network.

    :param dns_servers: A list of DNS server addresses.

    :param tags: A dictionary of strings can be passed as tag metadata to the virtual
        network object.

    :param connection_auth: A dict with subscription and authentication parameters to
        be used in connecting to the Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure virtual network exists:
            azurearm_network.virtual_network_present:
                - name: vnet1
                - resource_group: group1
                - address_prefixes:
                    - '10.0.0.0/8'
                    - '192.168.0.0/16'
                - dns_servers:
                    - '8.8.8.8'
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    vnet = __salt__['azurearm_network.virtual_network_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in vnet:
        tag_changes = __utils__['dictdiffer.deep_diff'](vnet.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        dns_changes = set(dns_servers or []).symmetric_difference(
            set(vnet.get('dhcp_options', {}).get('dns_servers', [])))
        if dns_changes:
            ret['changes']['dns_servers'] = {
                'old': vnet.get('dhcp_options', {}).get('dns_servers', []),
                'new': dns_servers,
            }

        addr_changes = set(address_prefixes or []).symmetric_difference(
            set(vnet.get('address_space', {}).get('address_prefixes', [])))
        if addr_changes:
            ret['changes']['address_space'] = {
                'address_prefixes': {
                    'old': vnet.get('address_space', {}).get('address_prefixes', []),
                    'new': address_prefixes,
                }
            }

        if kwargs.get('enable_ddos_protection', False) != vnet.get('enable_ddos_protection'):
            ret['changes']['enable_ddos_protection'] = {
                'old': vnet.get('enable_ddos_protection'),
                'new': kwargs.get('enable_ddos_protection')
            }

        if kwargs.get('enable_vm_protection', False) != vnet.get('enable_vm_protection'):
            ret['changes']['enable_vm_protection'] = {
                'old': vnet.get('enable_vm_protection'),
                'new': kwargs.get('enable_vm_protection')
            }

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Virtual network {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Virtual network {0} would be updated.'.format(name)
            return ret

    else:
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'resource_group': resource_group,
                'address_space': {'address_prefixes': address_prefixes},
                'dhcp_options': {'dns_servers': dns_servers},
                'enable_ddos_protection': kwargs.get('enable_ddos_protection', False),
                'enable_vm_protection': kwargs.get('enable_vm_protection', False),
                'tags': tags,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Virtual network {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    vnet_kwargs = kwargs.copy()
    vnet_kwargs.update(connection_auth)

    vnet = __salt__['azurearm_network.virtual_network_create_or_update'](
        name=name,
        resource_group=resource_group,
        address_prefixes=address_prefixes,
        dns_servers=dns_servers,
        tags=tags,
        **vnet_kwargs
    )

    if 'error' not in vnet:
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create virtual network {0}! ({1})'.format(name, vnet.get('error'))
    return ret
.. versionadded:: 2019.2.0 Ensure a virtual network exists. :param name: Name of the virtual network. :param resource_group: The resource group assigned to the virtual network. :param address_prefixes: A list of CIDR blocks which can be used by subnets within the virtual network. :param dns_servers: A list of DNS server addresses. :param tags: A dictionary of strings can be passed as tag metadata to the virtual network object. :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the Azure Resource Manager API. Example usage: .. code-block:: yaml Ensure virtual network exists: azurearm_network.virtual_network_present: - name: vnet1 - resource_group: group1 - address_prefixes: - '10.0.0.0/8' - '192.168.0.0/16' - dns_servers: - '8.8.8.8' - tags: contact_name: Elmer Fudd Gantry - connection_auth: {{ profile }} - require: - azurearm_resource: Ensure resource group exists
def setup_suspend(self):
        """Set up the debugger to "suspend" execution
        """
        self.frame_calling = None
        self.frame_stop = None
        self.frame_return = None
        self.frame_suspend = True
        self.pending_stop = True
        self.enable_tracing()
        return
Set up the debugger to "suspend" execution
def prune_cached(values): """Remove the items that have already been cached.""" import os config_path = os.path.expanduser('~/.config/blockade') file_path = os.path.join(config_path, 'cache.txt') if not os.path.isfile(file_path): return values cached = [x.strip() for x in open(file_path, 'r').readlines()] output = list() for item in values: hashed = hash_values(item) if hashed in cached: continue output.append(item) return output
Remove the items that have already been cached.
def _unary_op(name, doc="unary operator"): """ Create a method for given unary operator """ def _(self): jc = getattr(self._jc, name)() return Column(jc) _.__doc__ = doc return _
Create a method for given unary operator
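A hedged sketch of how such a factory is typically attached to a Column-style wrapper (mirroring the PySpark pattern; the wrapped JVM column object is assumed):

class Column(object):
    def __init__(self, jc):
        self._jc = jc

    # each generated method simply forwards to the wrapped JVM column
    asc = _unary_op('asc', 'Returns a sort expression in ascending order.')
    desc = _unary_op('desc', 'Returns a sort expression in descending order.')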