code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def index(self, strictindex):
    """Return a chunk in a sequence referenced by index."""
    ruamel_key = self.ruamelindex(strictindex)
    located = self._pointer.index(ruamel_key)
    return self._select(located)
Return a chunk in a sequence referenced by index.
def put_key(key_name, value, description, meta, modify, add, lock, key_type, stash, passphrase, backend):
    """Insert a key to the stash

    `KEY_NAME` is the name of the key to insert

    `VALUE` is a key=value argument which can be provided multiple times.
    it is the encrypted value of your key
    """
    target_stash = _get_stash(backend, stash, passphrase)
    try:
        click.echo('Stashing {0} key...'.format(key_type))
        target_stash.put(
            name=key_name,
            value=_build_dict_from_key_value(value),
            metadata=_build_dict_from_key_value(meta),
            description=description,
            modify=modify,
            add=add,
            lock=lock,
            key_type=key_type)
        click.echo('Key stashed successfully')
    except GhostError as ex:
        sys.exit(ex)
Insert a key to the stash `KEY_NAME` is the name of the key to insert `VALUE` is a key=value argument which can be provided multiple times. it is the encrypted value of your key
def _construct_w(self, inputs):
    """Construct the convolution weight matrix.

    Figures out the shape of the weight matrix, initialize it, and return it.

    Args:
      inputs: A Tensor of shape `data_format` and of type `tf.float16`,
        `tf.bfloat16` or `tf.float32`.

    Returns:
      w: A weight matrix of the same type as `inputs` and of shape
        [kernel_shape, 1, 1].
    """
    # Trailing (1, 1) adds singleton input/output channel dims to the
    # spatial kernel shape.
    weight_shape = self._kernel_shape + (1, 1)

    # Only install a default initializer when the caller did not supply one;
    # the default is built from the spatial dims of the kernel only.
    if "w" not in self._initializers:
        self._initializers["w"] = create_weight_initializer(weight_shape[:2],
                                                            dtype=inputs.dtype)

    # get_variable reuses or creates the variable under the current scope;
    # partitioner/regularizer are optional and may be absent from the dicts.
    w = tf.get_variable("w",
                        shape=weight_shape,
                        dtype=inputs.dtype,
                        initializer=self._initializers["w"],
                        partitioner=self._partitioners.get("w", None),
                        regularizer=self._regularizers.get("w", None))

    return w
Construct the convolution weight matrix. Figures out the shape of the weight matrix, initialize it, and return it. Args: inputs: A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. Returns: w: A weight matrix of the same type as `inputs` and of shape [kernel_shape, 1, 1].
def getSubstituteType(self, elt, ps):
    '''If xsi:type does not match the instance type attr, check to see if
    it is a derived type substitution.  DONT Return the element's type.

    Parameters:
        elt -- the DOM element being parsed
        ps -- the ParsedSoap object.

    Raises:
        EvaluateException -- no class is registered for the declared or the
            substituted xsi:type.
        TypeError -- the substitute class does not derive from the declared
            type's class.
    '''
    pyclass = SchemaInstanceType.getTypeDefinition(*self.type)
    if pyclass is None:
        raise EvaluateException(
            'No Type registed for xsi:type=(%s, %s)' % (
                self.type[0], self.type[1]),
            ps.Backtrace(elt))

    # Resolve the element's xsi:type QName to a (namespace, localname) pair
    # and look up the registered substitute class.
    typeName = _find_type(elt)
    prefix, typeName = SplitQName(typeName)
    uri = ps.GetElementNSdict(elt).get(prefix)
    subclass = SchemaInstanceType.getTypeDefinition(uri, typeName)
    if subclass is None:
        raise EvaluateException(
            'No registered xsi:type=(%s, %s), substitute for xsi:type=(%s, %s)' % (
                uri, typeName, self.type[0], self.type[1]),
            ps.Backtrace(elt))

    # BUG FIX: the original tested `not issubclass(subclass, pyclass)` twice
    # and needlessly instantiated `subclass(None)` just to use its truthiness,
    # which could mask the error (or raise) for some type codes.  A substitute
    # is invalid exactly when it does not derive from the declared class.
    if not issubclass(subclass, pyclass):
        raise TypeError(
            'Substitute Type (%s, %s) is not derived from %s' % (
                self.type[0], self.type[1], pyclass),
            ps.Backtrace(elt))

    return subclass((self.nspname, self.pname))
if xsi:type does not match the instance type attr, check to see if it is a derived type substitution. DONT Return the element's type. Parameters: elt -- the DOM element being parsed ps -- the ParsedSoap object.
def save_figure_tofile(fig, fmt, fname): """Save fig to fname in the format specified by fmt.""" root, ext = osp.splitext(fname) if ext == '.png' and fmt == 'image/svg+xml': qimg = svg_to_image(fig) qimg.save(fname) else: if fmt == 'image/svg+xml' and is_unicode(fig): fig = fig.encode('utf-8') with open(fname, 'wb') as f: f.write(fig)
Save fig to fname in the format specified by fmt.
def add_categories(self, new_categories, inplace=False):
    """
    Add new categories.

    `new_categories` will be included at the last/highest place in the
    categories and will be unused directly after this call.

    Parameters
    ----------
    new_categories : category or list-like of category
        The new categories to be included.
    inplace : bool, default False
        Whether or not to add the categories inplace or return a copy of
        this categorical with added categories.

    Returns
    -------
    cat : Categorical with new categories added or None if inplace.

    Raises
    ------
    ValueError
        If the new categories include old categories or do not validate as
        categories

    See Also
    --------
    rename_categories
    reorder_categories
    remove_categories
    remove_unused_categories
    set_categories
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    if not is_list_like(new_categories):
        new_categories = [new_categories]

    # Adding an existing category is an error, not a no-op.
    overlap = set(new_categories) & set(self.dtype.categories)
    if len(overlap) != 0:
        raise ValueError(
            ("new categories must not include old categories: "
             "{already_included!s}").format(already_included=overlap))

    combined = list(self.dtype.categories) + list(new_categories)
    new_dtype = CategoricalDtype(combined, self.ordered)

    target = self if inplace else self.copy()
    target._dtype = new_dtype
    target._codes = coerce_indexer_dtype(target._codes, new_dtype.categories)
    if not inplace:
        return target
Add new categories. `new_categories` will be included at the last/highest place in the categories and will be unused directly after this call. Parameters ---------- new_categories : category or list-like of category The new categories to be included. inplace : bool, default False Whether or not to add the categories inplace or return a copy of this categorical with added categories. Returns ------- cat : Categorical with new categories added or None if inplace. Raises ------ ValueError If the new categories include old categories or do not validate as categories See Also -------- rename_categories reorder_categories remove_categories remove_unused_categories set_categories
def plot_spectrum(self, convention='power', unit='per_l', base=10.,
                  lmax=None, xscale='lin', yscale='log', grid=True,
                  legend=None, axes_labelsize=None, tick_labelsize=None,
                  show=True, ax=None, fname=None, **kwargs):
    """
    Plot the spectrum as a function of spherical harmonic degree.

    Usage
    -----
    x.plot_spectrum([convention, unit, base, lmax, xscale, yscale, grid,
                     axes_labelsize, tick_labelsize, legend, show, ax,
                     fname, **kwargs])

    Parameters
    ----------
    convention : str, optional, default = 'power'
        The type of spectrum to plot: 'power' for power spectrum,
        'energy' for energy spectrum, and 'l2norm' for the l2 norm
        spectrum.
    unit : str, optional, default = 'per_l'
        If 'per_l', plot the total contribution to the spectrum for each
        spherical harmonic degree l. If 'per_lm', plot the average
        contribution to the spectrum for each coefficient at spherical
        harmonic degree l. If 'per_dlogl', plot the spectrum per log
        interval dlog_a(l).
    base : float, optional, default = 10.
        The logarithm base when calculating the 'per_dlogl' spectrum, and
        the base to use for logarithmic axes.
    lmax : int, optional, default = self.lmax
        The maximum spherical harmonic degree to plot.
    xscale : str, optional, default = 'lin'
        Scale of the x axis: 'lin' for linear or 'log' for logarithmic.
    yscale : str, optional, default = 'log'
        Scale of the y axis: 'lin' for linear or 'log' for logarithmic.
    grid : bool, optional, default = True
        If True, plot grid lines.
    legend : str, optional, default = None
        Text to use for the legend.
    axes_labelsize : int, optional, default = None
        The font size for the x and y axes labels.
    tick_labelsize : int, optional, default = None
        The font size for the x and y tick labels.
    show : bool, optional, default = True
        If True, plot to the screen.
    ax : matplotlib axes object, optional, default = None
        A single matplotlib axes object where the plot will appear.
    fname : str, optional, default = None
        If present, and if axes is not specified, save the image to the
        specified file.
    **kwargs : keyword arguments, optional
        Keyword arguments for pyplot.plot().

    Description
    -----------
    This method plots either the power spectrum, energy spectrum, or
    l2-norm spectrum.  The output spectrum can be expressed using one of
    three units: 'per_l' (total contribution from all angular orders at
    degree l), 'per_lm' ('per_l' divided by (2l+1)), or 'per_dlogl'
    (contribution over an infinitesimal logarithmic degree band, equal to
    spectrum(l, 'per_l')*l*log(a) for base a).
    """
    if lmax is None:
        lmax = self.lmax

    spectrum = self.spectrum(convention=convention, unit=unit, base=base,
                             lmax=lmax)
    ls = _np.arange(lmax + 1)

    if ax is None:
        fig, axes = _plt.subplots(1, 1)
    else:
        axes = ax

    if axes_labelsize is None:
        axes_labelsize = _mpl.rcParams['axes.labelsize']
    if tick_labelsize is None:
        tick_labelsize = _mpl.rcParams['xtick.labelsize']

    axes.set_xlabel('Spherical harmonic degree', fontsize=axes_labelsize)

    # BUG FIX: the original compared convention against 'Energy'
    # (capitalized) although the documented value is 'energy', so the energy
    # convention silently fell through to the 'Power' labels.
    if convention == 'energy':
        quantity = 'Energy'
    elif convention == 'l2norm':
        quantity = 'l2 norm'
    else:
        quantity = 'Power'

    axes.set_ylabel(quantity, fontsize=axes_labelsize)
    if legend is None:
        if (unit == 'per_l'):
            legend = quantity + ' per degree'
        elif (unit == 'per_lm'):
            legend = quantity + ' per coefficient'
        elif (unit == 'per_dlogl'):
            legend = quantity + ' per log bandwidth'

    if xscale == 'log':
        axes.set_xscale('log', basex=base)
    if yscale == 'log':
        axes.set_yscale('log', basey=base)

    # Degree 0 cannot be shown on a logarithmic x axis.
    if xscale == 'log':
        axes.plot(ls[1:lmax+1], spectrum[1:lmax+1], label=legend, **kwargs)
    else:
        axes.plot(ls[:lmax+1], spectrum[:lmax+1], label=legend, **kwargs)

    axes.set(xlim=(ls[0], ls[lmax]))
    axes.grid(grid, which='major')
    axes.minorticks_on()
    axes.tick_params(labelsize=tick_labelsize)
    axes.legend()

    if ax is None:
        fig.tight_layout(pad=0.5)
        if show:
            fig.show()
        if fname is not None:
            fig.savefig(fname)
        return fig, axes
Plot the spectrum as a function of spherical harmonic degree. Usage ----- x.plot_spectrum([convention, unit, base, lmax, xscale, yscale, grid, axes_labelsize, tick_labelsize, legend, show, ax, fname, **kwargs]) Parameters ---------- convention : str, optional, default = 'power' The type of spectrum to plot: 'power' for power spectrum, 'energy' for energy spectrum, and 'l2norm' for the l2 norm spectrum. unit : str, optional, default = 'per_l' If 'per_l', plot the total contribution to the spectrum for each spherical harmonic degree l. If 'per_lm', plot the average contribution to the spectrum for each coefficient at spherical harmonic degree l. If 'per_dlogl', plot the spectrum per log interval dlog_a(l). base : float, optional, default = 10. The logarithm base when calculating the 'per_dlogl' spectrum, and the base to use for logarithmic axes. lmax : int, optional, default = self.lmax The maximum spherical harmonic degree to plot. xscale : str, optional, default = 'lin' Scale of the x axis: 'lin' for linear or 'log' for logarithmic. yscale : str, optional, default = 'log' Scale of the y axis: 'lin' for linear or 'log' for logarithmic. grid : bool, optional, default = True If True, plot grid lines. legend : str, optional, default = None Text to use for the legend. axes_labelsize : int, optional, default = None The font size for the x and y axes labels. tick_labelsize : int, optional, default = None The font size for the x and y tick labels. show : bool, optional, default = True If True, plot to the screen. ax : matplotlib axes object, optional, default = None A single matplotlib axes object where the plot will appear. fname : str, optional, default = None If present, and if axes is not specified, save the image to the specified file. **kwargs : keyword arguments, optional Keyword arguments for pyplot.plot(). Description ----------- This method plots either the power spectrum, energy spectrum, or l2-norm spectrum. 
Total power is defined as the integral of the function squared over all space, divided by the area the function spans. If the mean of the function is zero, this is equivalent to the variance of the function. The total energy is the integral of the function squared over all space and is 4pi times the total power. For normalized coefficients ('4pi', 'ortho', or 'schmidt'), the l2-norm is the sum of the magnitude of the coefficients squared. The output spectrum can be expressed using one of three units. 'per_l' returns the contribution to the total spectrum from all angular orders at degree l. 'per_lm' returns the average contribution to the total spectrum from a single coefficient at degree l, which is equal to the 'per_l' spectrum divided by (2l+1). 'per_dlogl' returns the contribution to the total spectrum from all angular orders over an infinitesimal logarithmic degree band. The contribution in the band dlog_a(l) is spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base, and where spectrum(l, 'per_dlogl') is equal to spectrum(l, 'per_l')*l*log(a).
def bandstats(filenames=None, num_sample_points=3, temperature=None,
              degeneracy_tol=1e-4, parabolic=True):
    """Calculate the effective masses of the bands of a semiconductor.

    Args:
        filenames (:obj:`str` or :obj:`list`, optional): Path to vasprun.xml
            or vasprun.xml.gz file. If no filenames are provided, the code
            will search for vasprun.xml or vasprun.xml.gz files in folders
            named 'split-0*'. Failing that, the code will look for a vasprun
            in the current directory. If a :obj:`list` of vasprun files is
            provided, these will be combined into a single band structure.
        num_sample_points (:obj:`int`, optional): Number of k-points to
            sample when fitting the effective masses.
        temperature (:obj:`int`, optional): Find band edges within kB * T of
            the valence band maximum and conduction band minimum. Not
            currently implemented.
        degeneracy_tol (:obj:`float`, optional): Tolerance for determining
            the degeneracy of the valence band maximum and conduction band
            minimum.  NOTE(review): not referenced in this function body —
            presumably consumed by a helper in a fuller version; confirm.
        parabolic (:obj:`bool`, optional): Use a parabolic fit of the band
            edges. If ``False`` then nonparabolic fitting will be attempted.
            Defaults to ``True``.

    Returns:
        dict: The hole and electron effective masses. Formatted as a
        :obj:`dict` with keys: ``'hole_data'`` and ``'electron_data'``. The
        data is a :obj:`list` of :obj:`dict` with the keys:

            'effective_mass' (:obj:`float`)
                The effective mass in units of electron rest mass,
                :math:`m_0`.
            'energies' (:obj:`numpy.ndarray`)
                Band eigenvalues in eV.
            'distances' (:obj:`numpy.ndarray`)
                Distances of the k-points in reciprocal space.
            'band_id' (:obj:`int`)
                The index of the band,
            'spin' (:obj:`~pymatgen.electronic_structure.core.Spin`)
                The spin channel
            'start_kpoint' (:obj:`int`)
                The index of the k-point at which the band extrema occurs
            'end_kpoint' (:obj:`int`)
    """
    # Normalise the filenames argument: auto-discover when absent, wrap a
    # single path into a list.
    if not filenames:
        filenames = find_vasprun_files()
    elif isinstance(filenames, str):
        filenames = [filenames]

    # Parse each vasprun and merge the band structures into one object.
    bandstructures = []
    for vr_file in filenames:
        vr = BSVasprun(vr_file, parse_projected_eigen=False)
        bs = vr.get_band_structure(line_mode=True)
        bandstructures.append(bs)
    bs = get_reconstructed_band_structure(bandstructures)

    # Effective masses only make sense for systems with a band gap.
    if bs.is_metal():
        logging.error('ERROR: System is metallic!')
        sys.exit()

    _log_band_gap_information(bs)

    vbm_data = bs.get_vbm()
    cbm_data = bs.get_cbm()

    logging.info('\nValence band maximum:')
    _log_band_edge_information(bs, vbm_data)

    logging.info('\nConduction band minimum:')
    _log_band_edge_information(bs, cbm_data)

    if parabolic:
        logging.info('\nUsing parabolic fitting of the band edges')
    else:
        logging.info('\nUsing nonparabolic fitting of the band edges')

    if temperature:
        logging.error('ERROR: This feature is not yet supported!')
    else:
        # Work out where the hole and electron band edges are.
        # Fortunately, pymatgen does this for us. Points at which to calculate
        # the effective mass are identified as a tuple of:
        # (spin, band_index, kpoint_index)
        hole_extrema = []
        for spin, bands in vbm_data['band_index'].items():
            hole_extrema.extend([(spin, band, kpoint) for band in bands
                                 for kpoint in vbm_data['kpoint_index']])

        elec_extrema = []
        for spin, bands in cbm_data['band_index'].items():
            elec_extrema.extend([(spin, band, kpoint) for band in bands
                                 for kpoint in cbm_data['kpoint_index']])

        # extract the data we need for fitting from the band structure
        hole_data = []
        for extrema in hole_extrema:
            hole_data.extend(get_fitting_data(
                bs, *extrema, num_sample_points=num_sample_points))

        elec_data = []
        for extrema in elec_extrema:
            elec_data.extend(get_fitting_data(
                bs, *extrema, num_sample_points=num_sample_points))

    # calculate the effective masses and log the information
    logging.info('\nHole effective masses:')
    for data in hole_data:
        eff_mass = fit_effective_mass(data['distances'], data['energies'],
                                      parabolic=parabolic)
        data['effective_mass'] = eff_mass
        _log_effective_mass_data(data, bs.is_spin_polarized, mass_type='m_h')

    logging.info('\nElectron effective masses:')
    for data in elec_data:
        eff_mass = fit_effective_mass(data['distances'], data['energies'],
                                      parabolic=parabolic)
        data['effective_mass'] = eff_mass
        # Electron masses use the helper's default mass label.
        _log_effective_mass_data(data, bs.is_spin_polarized)

    return {'hole_data': hole_data, 'electron_data': elec_data}
Calculate the effective masses of the bands of a semiconductor. Args: filenames (:obj:`str` or :obj:`list`, optional): Path to vasprun.xml or vasprun.xml.gz file. If no filenames are provided, the code will search for vasprun.xml or vasprun.xml.gz files in folders named 'split-0*'. Failing that, the code will look for a vasprun in the current directory. If a :obj:`list` of vasprun files is provided, these will be combined into a single band structure. num_sample_points (:obj:`int`, optional): Number of k-points to sample when fitting the effective masses. temperature (:obj:`int`, optional): Find band edges within kB * T of the valence band maximum and conduction band minimum. Not currently implemented. degeneracy_tol (:obj:`float`, optional): Tolerance for determining the degeneracy of the valence band maximum and conduction band minimum. parabolic (:obj:`bool`, optional): Use a parabolic fit of the band edges. If ``False`` then nonparabolic fitting will be attempted. Defaults to ``True``. Returns: dict: The hole and electron effective masses. Formatted as a :obj:`dict` with keys: ``'hole_data'`` and ``'electron_data'``. The data is a :obj:`list` of :obj:`dict` with the keys: 'effective_mass' (:obj:`float`) The effective mass in units of electron rest mass, :math:`m_0`. 'energies' (:obj:`numpy.ndarray`) Band eigenvalues in eV. 'distances' (:obj:`numpy.ndarray`) Distances of the k-points in reciprocal space. 'band_id' (:obj:`int`) The index of the band, 'spin' (:obj:`~pymatgen.electronic_structure.core.Spin`) The spin channel 'start_kpoint' (:obj:`int`) The index of the k-point at which the band extrema occurs 'end_kpoint' (:obj:`int`)
def _set_guard(self, v, load=False):
    """
    Setter method for guard, mapped from YANG variable /interface/port_channel/spanning_tree/guard (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_guard is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_guard() directly.
    """
    # NOTE(review): `load` is accepted for generated-setter signature
    # compatibility but is not used in this body.
    if hasattr(v, "_utype"):
        # Unwrap values that carry their own underlying-type converter.
        v = v._utype(v)
    try:
        # Coerce the value into the YANG 'guard' container wrapper; any
        # incompatibility surfaces as TypeError/ValueError below.
        t = YANGDynClass(v,base=guard.guard, is_container='container', presence=False, yang_name="guard", rest_name="guard", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u"Change an interface's spanning tree guard mode", u'display-when': u'((/protocol/spanning-tree/stp) or(/protocol/spanning-tree/rstp))', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured payload describing the expected type.
        raise ValueError({
            'error-string': """guard must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=guard.guard, is_container='container', presence=False, yang_name="guard", rest_name="guard", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u"Change an interface's spanning tree guard mode", u'display-when': u'((/protocol/spanning-tree/stp) or(/protocol/spanning-tree/rstp))', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)""",
        })

    self.__guard = t
    # Notify the parent object of the change, if supported.
    if hasattr(self, '_set'):
        self._set()
Setter method for guard, mapped from YANG variable /interface/port_channel/spanning_tree/guard (container) If this variable is read-only (config: false) in the source YANG file, then _set_guard is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_guard() directly.
def _make_unique_slug(slug: str, language: str, is_unique: Callable[[str], bool]) -> str: """Guarentees that the specified slug is unique by appending a number until it is unique. Arguments: slug: The slug to make unique. is_unique: Function that can be called to verify whether the generate slug is unique. Returns: A guarenteed unique slug. """ index = 1 unique_slug = slug while not is_unique(unique_slug, language): unique_slug = '%s-%d' % (slug, index) index += 1 return unique_slug
Guarantees that the specified slug is unique by appending a number until it is unique. Arguments: slug: The slug to make unique. is_unique: Function that can be called to verify whether the generated slug is unique. Returns: A guaranteed unique slug.
def accuracy(self):
    r"""Return accuracy.

    Accuracy is defined as :math:`\frac{tp + tn}{population}`

    Cf. https://en.wikipedia.org/wiki/Accuracy

    Returns
    -------
    float
        The accuracy of the confusion table

    Example
    -------
    >>> ct = ConfusionTable(120, 60, 20, 30)
    >>> ct.accuracy()
    0.782608695652174
    """
    total = self.population()
    # An empty table has no defined accuracy.
    if total == 0:
        return float('NaN')
    correct = self._tp + self._tn
    return correct / total
r"""Return accuracy. Accuracy is defined as :math:`\frac{tp + tn}{population}` Cf. https://en.wikipedia.org/wiki/Accuracy Returns ------- float The accuracy of the confusion table Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.accuracy() 0.782608695652174
def optimize(self, loss, num_async_replicas=1, use_tpu=False):
    """Return a training op minimizing loss."""
    schedule_lr = learning_rate.learning_rate_schedule(self.hparams)
    if num_async_replicas > 1:
        # Scale down the rate when several replicas update asynchronously.
        log_info("Dividing learning rate by num_async_replicas: %d",
                 num_async_replicas)
        schedule_lr /= math.sqrt(float(num_async_replicas))
    return optimize.optimize(loss, schedule_lr, self.hparams, use_tpu=use_tpu)
Return a training op minimizing loss.
def _validate_j2k_colorspace(self, cparams, colorspace):
    """Cannot specify a colorspace with J2K."""
    if colorspace is None:
        return
    if cparams.codec_fmt == opj2.CODEC_J2K:
        raise IOError(
            'Do not specify a colorspace when writing a raw codestream.')
Cannot specify a colorspace with J2K.
def __parse_affiliations_json(self, affiliations, uuid):
    """Parse identity's affiliations from a json dict"""
    # NOTE(review): `uuid` is not used in this body — presumably kept for
    # signature symmetry with sibling parsers; confirm against callers.
    enrollments = []

    for affiliation in affiliations.values():
        name = self.__encode(affiliation['name'])

        try:
            # 'active'/'inactive' hold the enrollment start/end dates.
            start_date = str_to_datetime(affiliation['active'])
            end_date = str_to_datetime(affiliation['inactive'])
        except InvalidDateError as e:
            raise InvalidFormatError(cause=str(e))

        # Ignore affiliation when it carries no date information at all.
        if not start_date and not end_date:
            continue
        # Open-ended periods default to the global min/max bounds.
        if not start_date:
            start_date = MIN_PERIOD_DATE
        if not end_date:
            end_date = MAX_PERIOD_DATE

        org = self._organizations.get(name, None)

        # Set enrollment period according to organization data:
        # clamp the period to the organization's own active window.
        if org:
            start_date = org.active if start_date < org.active else start_date
            end_date = org.inactive if end_date > org.inactive else end_date
        if not org:
            # Unknown organization: create one spanning the full period.
            org = Organization(name=name)
            org.active = MIN_PERIOD_DATE
            org.inactive = MAX_PERIOD_DATE

        enrollment = Enrollment(start=start_date, end=end_date,
                                organization=org)
        enrollments.append(enrollment)

    return enrollments
Parse identity's affiliations from a json dict
def get(self):
    """Gets the object out of the plasma store.

    Returns:
        The object from the plasma store.
    """
    if self.call_queue:
        # Pending lazy operations: flush them via an identity apply, then
        # fetch the materialized result.
        return self.apply(lambda x: x).get()
    try:
        return ray.get(self.oid)
    except RayTaskError as e:
        handle_ray_task_error(e)
Gets the object out of the plasma store. Returns: The object from the plasma store.
def snooze(self, from_email, duration):
    """Snooze this incident for `duration` seconds."""
    # A None value is not a string type, so one isinstance check covers both
    # the missing and the wrong-type cases.
    if not isinstance(from_email, six.string_types):
        raise MissingFromEmail(from_email)

    snooze_endpoint = '/'.join((self.endpoint, self.id, 'snooze'))
    return self.__class__.create(
        endpoint=snooze_endpoint,
        api_key=self.api_key,
        add_headers={'from': from_email, },
        data_key='duration',
        data=duration,
    )
Snooze this incident for `duration` seconds.
def options(self, *args, **kwargs):
    """XHR cross-domain OPTIONS handler"""
    self.enable_cache()
    self.handle_session_cookie()
    self.preflight()

    if not self.verify_origin():
        # Set forbidden
        self.set_status(403)
        self.finish()
        return

    permitted = getattr(self, 'access_methods', 'OPTIONS, POST')
    self.set_header('Access-Control-Allow-Methods', permitted)
    self.set_header('Allow', permitted)
    self.set_status(204)
    self.finish()
XHR cross-domain OPTIONS handler
def get_global_gradient_norm(self) -> float:
    """Returns global gradient norm."""
    # average norm across executors:
    per_executor = [global_norm([grad for grad in exe.grad_arrays
                                 if grad is not None])
                    for exe in self.executors]
    averaged = sum(per_executor) / float(len(per_executor))
    # Undo the optimizer's gradient rescaling so the norm is comparable.
    return averaged * self.optimizer.rescale_grad
Returns global gradient norm.
def extract_mime(self, mime, def_mime='unk'):
    """
    Utility function to extract mimetype only from
    a full content type, removing charset settings
    """
    # Default first, so a missing/empty content type still yields a value.
    self['mime'] = def_mime
    if mime:
        # MIME_RE splits off ';charset=...' and similar parameters;
        # keep only the bare type/subtype part.
        self['mime'] = self.MIME_RE.split(mime, 1)[0]
        # Preserve the original full content type alongside.
        self['_content_type'] = mime
Utility function to extract mimetype only from a full content type, removing charset settings
def control_group(self, control_group_id, ctrl, shift, alt):
    """Act on a control group, selecting, setting, etc."""
    mod = sc_ui.ActionControlGroup
    # Map the (ctrl, shift, alt) modifier combination to a group action;
    # combinations not listed here are unknown and produce no action.
    dispatch = {
        (False, False, False): mod.Recall,
        (True, False, False): mod.Set,
        (False, True, False): mod.Append,
        (False, False, True): mod.SetAndSteal,
        (False, True, True): mod.AppendAndSteal,
    }
    key = (bool(ctrl), bool(shift), bool(alt))
    if key not in dispatch:
        return  # unknown

    action = sc_pb.Action()
    select = action.action_ui.control_group
    select.action = dispatch[key]
    select.control_group_index = control_group_id
    return action
Act on a control group, selecting, setting, etc.
def inv(self):
    """The inverse rotation"""
    # A rotation matrix is orthogonal, so its transpose is its inverse.
    inverse = Rotation(self.r.transpose())
    # The inverse of the inverse is this rotation; cache it.
    inverse._cache_inv = self
    return inverse
The inverse rotation
def _deliverAnswer(self, answer):
    """
    Attempt to deliver an answer to a message sent to this store, via my
    store's parent's L{IMessageRouter} powerup.

    @param answer: an L{AlreadyAnswered} that contains an answer to a
        message sent to this store.
    """
    router = self.siteRouter
    # Only start a delivery when none is already in flight for this answer.
    if answer.deliveryDeferred is None:
        d = answer.deliveryDeferred = router.routeAnswer(
            answer.originalSender, answer.originalTarget,
            answer.value, answer.messageID)

        def destroyAnswer(result):
            # Successful delivery: the stored answer is no longer needed.
            answer.deleteFromStore()

        def transportErrorCheck(f):
            # Transport failure: clear the in-flight marker so delivery can
            # be retried later; re-raise anything unexpected.
            answer.deliveryDeferred = None
            f.trap(MessageTransportError)

        d.addCallbacks(destroyAnswer, transportErrorCheck)
        d.addErrback(log.err)
Attempt to deliver an answer to a message sent to this store, via my store's parent's L{IMessageRouter} powerup. @param answer: an L{AlreadyAnswered} that contains an answer to a message sent to this store.
def serialize(transform, **kwargs):
    '''Serialize a transformation object or pipeline.

    Parameters
    ----------
    transform : BaseTransform or Pipeline
        The transformation object to be serialized

    kwargs
        Additional keyword arguments to `jsonpickle.encode()`

    Returns
    -------
    json_str : str
        A JSON encoding of the transformation

    See Also
    --------
    deserialize

    Examples
    --------
    >>> D = muda.deformers.TimeStretch(rate=1.5)
    >>> muda.serialize(D)
    '{"params": {"rate": 1.5},
      "__class__": {"py/type": "muda.deformers.time.TimeStretch"}}'
    '''
    # get_params captures both the class identity and its parameters.
    payload = transform.get_params()
    return jsonpickle.encode(payload, **kwargs)
Serialize a transformation object or pipeline. Parameters ---------- transform : BaseTransform or Pipeline The transformation object to be serialized kwargs Additional keyword arguments to `jsonpickle.encode()` Returns ------- json_str : str A JSON encoding of the transformation See Also -------- deserialize Examples -------- >>> D = muda.deformers.TimeStretch(rate=1.5) >>> muda.serialize(D) '{"params": {"rate": 1.5}, "__class__": {"py/type": "muda.deformers.time.TimeStretch"}}'
async def start(request):
    """
    Begins the session manager for factory calibration, if a session is not
    already in progress, or if the "force" key is specified in the request. To
    force, use the following body:

    {
      "force": true
    }

    :return: The current session ID token or an error message
    """
    global session

    try:
        body = await request.json()
    except json.decoder.JSONDecodeError:
        # Body will be null for requests without parameters (normal operation)
        log.debug("No body in {}".format(request))
        body = {}

    if not session or body.get('force'):
        hardware = hw_from_req(request)
        if body.get('force') and session:
            # Tear down the existing session before replacing it.
            await release(data={})

        session = SessionManager(hardware)
        res = init_pipette()
        if res:
            # 201 Created: new session established.
            status = 201
            data = {'token': session.id, 'pipette': res}
        else:
            # No usable pipette: abandon the session entirely.
            session = None
            status = 403
            data = {'message': 'Error, pipette not recognized'}
    else:
        # 409 Conflict: a session already exists and force was not given.
        data = {'message': 'Error, session in progress. Use "force" key in'
                           ' request body to override'}
        status = 409

    return web.json_response(data, status=status)
Begins the session manager for factory calibration, if a session is not already in progress, or if the "force" key is specified in the request. To force, use the following body: { "force": true } :return: The current session ID token or an error message
def set_plot_tcrhoc(self,linestyle=['-'],burn_limit=0.997,marker=['o'],markevery=500,end_model=[-1],deg_line=True):
    '''
    Plot the central temperature versus central density track
    (log T_c vs. log rho_c) for each run.

    end_model - array, control how far in models
    a run is plotted, if -1 till end
    symbs_1 - set symbols of runs

    deg_line - if True, overplot the electron-degeneracy boundary line.
    '''
    # NOTE(review): the list default arguments are shared across calls --
    # confirm callers never mutate them.
    if len(linestyle)==0:
        linestyle=200*['-']
    for i in range(len(self.runs_H5_surf)):
        m1p65_last=se(self.runs_H5_out[i])
        t1_model=-1
        if end_model[i] != -1:
            t1_model=end_model[i]
        model=m1p65_last.se.cycles
        # Sample every 5th cycle to keep the extracted data manageable.
        model_list=[]
        for k in range(0,len(model),5):
            model_list.append(model[k])
        rho1=m1p65_last.get(model_list,'rho') #[:(t1_model-t0_model)]
        T1=m1p65_last.get(model_list,'temperature')#[:(t1_model-t0_model)]
        T_unit=m1p65_last.get('temperature_unit')
        mass=m1p65_last.get(model_list,'mass')
        mini=m1p65_last.get('mini')
        zini=m1p65_last.get('zini')
        #info=m1p65_last.burnstage_upgrade()
        #burn_info=[]
        #burn_cycle=[]
        #for k in range(len(info[0])):
        #    if 'start' in info[3][k]:
        #        burn_info.append( info[3][k])
        #        burn_cycle.append(info[0][k])
        #print burn_info
        #print burn_cycle
        '''
        #H
        #Maybe use get_elemental_abunds(), too slow if individually taken
        h=m1p65_last.get(model_list,'H-1')
        h_ini=mini*h[0][-1]
        h_limit=h_ini*burn_limit
        print h_limit
        #He
        he=m1p65_last.get(model_list,'He-4')
        he+=m1p65_last.get(model_list,'He-3')
        he_ini=mini*he[0][-1]
        he_limit=he_ini*burn_limit
        print he_limit
        if mini>5:
            c=m1p65_last.get(model_list,'C-12')
            c_ini=mini*c[0][-1]
            c_limit=c_ini*burn_limit
        if mini>10:
            ne=m1p65_last.get(model_list,'Ne-20')
            ne_ini= mini*ne[0][-1]
            ne_limit=ne_ini*burn_limit
            o=m1p65_last.get(model_list,'O-16')
            o_ini=mini*o[0][-1]
            o_limit=o_ini*burn_limit
            si=m1p65_last.get(model_list,'Si-28')
            si_ini=mini*si[0][-1]
            si_limit=si_ini*burn_limit
        print '#############################3'
        print h_ini,he_ini
        rho=[]
        T=[]
        he_depl_idx=-1
        h_depl_idx=-1
        c_depl_idx=-1
        ne_depl_idx=-1
        o_depl_idx=-1
        si_depl_idx=-1
        '''
        # Extract the central (innermost zone, index 0) values per cycle.
        rho=[]
        T=[]
        for k in range(len(model_list)):
            rho_center=rho1[k][0]
            T_center=T1[k][0]
            rho.append(rho_center)
            T.append(T_center)
            '''
            mass11=np.array([0]+list(mass[k]))
            delta_mass=mass11[1:]-mass11[:-1]
            #for the low-mass + massive AGB
            if (sum(np.array(he[k])*np.array(delta_mass))<he_limit) and (he_depl_idx==-1):
                he_depl_idx=k
            if (sum(np.array(h[k])*np.array(delta_mass))<h_limit) and (h_depl_idx==-1):
                h_depl_idx=k
            #for the SAGB + massive
            if mini>5:
                if (sum(np.array(c[k])*np.array(delta_mass))<c_limit) and (c_depl_idx==-1):
                    c_depl_idx=k
            #for the massive stars
            if mini>10:
                if (sum(np.array(ne[k])*np.array(delta_mass))<ne_limit) and (ne_depl_idx==-1):
                    ne_depl_idx=k
                if (sum(np.array(o[k])*np.array(delta_mass))<o_limit) and (o_depl_idx==-1):
                    o_depl_idx=k
                if (sum(np.array(si[k])*np.array(delta_mass))<si_limit) and (si_depl_idx==-1):
                    si_depl_idx=k
            '''
        T=np.log10(np.array(T)*T_unit)
        #T_degeneracy=np.log10(np.array(rho)**(2./3.))
        rho=np.log10(np.array(rho))
        figure(1)
        #plot(logTeff,logL,marker=symbs[i],label=self.run_label[i],linestyle=linestyle[i],markevery=markevery)
        #pl.plot(rho,T,marker=symbs[i],label=self.run_label[i],linestyle=linestyle[i],markevery=markevery)
        label=str(mini)+'$M_{\odot}$, Z='+str(zini)
        plt.plot(rho,T,label=label,linestyle=linestyle[i],marker=marker[i],markevery=markevery)
        '''
        #plt.plot(rho,T_degeneracy,color='b',linestyle='-',label='$P_e = P_{e,deg}$')
        print burn_info
        print burn_cycle
        burn_cycle=np.array(burn_cycle)/100.
        print 'changed cycles:', burn_cycle
        print len(rho)
        #For different burning stages:
        if 'H_start' in burn_info:
            plt.plot(rho[burn_cycle[0]],T[burn_cycle[0]],marker='o',color='b')
        if 'He_start' in burn_info:
            plt.plot(rho[burn_cycle[1]],T[burn_cycle[1]],marker='o',color='r')
        if 'C_start' in burn_info:
            plt.plot(rho[burn_cycle[2]],T[burn_cycle[2]],marker='o',color='g')
        if 'Ne_start' in burn_info:
            plt.plot(rho[burn_cycle[3]],T[burn_cycle[3]],marker='D',color='b')
        if 'O_start' in burn_info:
            plt.plot(rho[burn_cycle[4]],T[burn_cycle[4]],marker='D',color='r')
        if 'Si_start' in burn_info:
            plt.plot(rho[burn_cycle[5]],T[burn_cycle[5]],marker='D',color='g')
        '''
        #ax = plt.gca()
        #ax.invert_xaxis()
        plt.rcParams.update({'font.size': 16})
        plt.rc('xtick', labelsize=16)
        plt.rc('ytick', labelsize=16)
        legend(loc=4)
        plt.xlabel('log $\\rho_{\\rm c}$',fontsize=18)
        plt.ylabel('log $T_{\\rm c}$',fontsize=18)
        '''
        #plt.gca().invert_xaxis()
        figure(0)
        #pl.plot(rho,T,marker=symbs[i],label=self.run_label[i],linestyle=linestyle[i],markevery=markevery)
        plt.plot(rho,T,label=self.extra_label[i],linestyle=linestyle[i])
        plt.plot(rho,T_degeneracy,color='b',linestyle='-',label='$P_e = P_{e,deg}$')
        #For different burning stages:
        if not h_depl_idx ==-1:
            plt.plot(rho[h_depl_idx],T[h_depl_idx],marker='o',color='b')
        if not he_depl_idx ==-1:
            plt.plot(rho[he_depl_idx],T[he_depl_idx],marker='o',color='r')
        if not c_depl_idx ==-1:
            plt.plot(rho[c_depl_idx],T[c_depl_idx],marker='o',color='g')
        if not ne_depl_idx ==-1:
            plt.plot(rho[ne_depl_idx],T[ne_depl_idx],marker='D',color='b')
        if not o_depl_idx ==-1:
            plt.plot(rho[o_depl_idx],T[o_depl_idx],marker='D',color='r')
        if not si_depl_idx ==-1:
            plt.plot(rho[si_depl_idx],T[si_depl_idx],marker='D',color='g')
        #ax = plt.gca()
        #ax.invert_xaxis()
        plt.rcParams.update({'font.size': 16})
        plt.rc('xtick', labelsize=16)
        plt.rc('ytick', labelsize=16)
        legend(loc=4)
        plt.xlabel('log $\\rho_{\\rm c}$',fontsize=18)
        plt.ylabel('log $T_{\\rm c}$',fontsize=18)
        #plt.gca().invert_xaxis()
        '''
        # `i` is the for-loop variable; this increment has no effect under
        # range() iteration (leftover from an earlier loop style).
        i+=1
    if deg_line==True:
        # Overplot the electron-degeneracy boundary (T proportional to
        # rho^(2/3) in the log-log plane).
        rho=np.arange(0,9,0.01)
        T_degeneracy=2./3. *rho +np.log10(1.207e5 * 1.8/(2.**(5./3.)))
        #T_degeneracy=np.log10( (10**np.array(rho))**(2./3.))
        plt.plot(rho,T_degeneracy,color='b',linestyle='-',label='$P_e = P_{e,deg}$')
    plt.legend(loc=2)
Plots HRDs

end_model - array, control how far in models
a run is plotted, if -1 till end
symbs_1 - set symbols of runs
def parse_duration_with_start(start, duration):
    """
    Attempt to parse an ISO8601 formatted duration based on a start datetime.

    Accepts a ``duration`` and a start ``datetime``.  ``duration`` must be
    an ISO8601 formatted string.

    Returns a tuple of ``start`` and a ``datetime.timedelta`` object.

    :raises: whatever ``_parse_duration_string`` raises for malformed input.
    """
    import calendar

    elements = _parse_duration_string(_clean(duration))
    year_delta, month_delta = _year_month_delta_from_elements(elements)

    # Normalize the month so it stays within 1..12.  Without this, e.g.
    # October + P4M would call start.replace(month=14) and raise ValueError.
    total_months = (start.month - 1) + month_delta
    year = start.year + year_delta + total_months // 12
    month = total_months % 12 + 1

    # Clamp the day to the last valid day of the target month, so that
    # Jan 31 + P1M lands on Feb 28/29 instead of raising ValueError.
    day = min(start.day, calendar.monthrange(year, month)[1])

    end = start.replace(year=year, month=month, day=day)

    # Years and months have been applied above; the remaining elements are
    # fixed-length units that timedelta can represent directly.
    del elements['years']
    del elements['months']

    end += _timedelta_from_elements(elements)

    return start, end - start
Attempt to parse an ISO8601 formatted duration based on a start datetime.

Accepts a ``duration`` and a start ``datetime``.  ``duration`` must be
an ISO8601 formatted string.

Returns a ``datetime.timedelta`` object.
def collapse_witnesses(self): """Groups together witnesses for the same n-gram and work that has the same count, and outputs a single row for each group. This output replaces the siglum field with a sigla field that provides a comma separated list of the witness sigla. Due to this, it is not necessarily possible to run other Results methods on results that have had their witnesses collapsed. """ # In order to allow for additional columns to be present in # the input data (such as label count), copy the siglum # information into a new final column, then put the sigla # information into the siglum field and finally rename it. # # This means that in merge_sigla below, the column names are # reversed from what would be expected. if self._matches.empty: self._matches.rename(columns={constants.SIGLUM_FIELDNAME: constants.SIGLA_FIELDNAME}, inplace=True) return self._matches.loc[:, constants.SIGLA_FIELDNAME] = \ self._matches[constants.SIGLUM_FIELDNAME] # This code makes the not unwarranted assumption that the same # n-gram means the same size and that the same work means the # same label. grouped = self._matches.groupby( [constants.WORK_FIELDNAME, constants.NGRAM_FIELDNAME, constants.COUNT_FIELDNAME], sort=False) def merge_sigla(df): # Take the first result row; only the siglum should differ # between them, and there may only be one row. merged = df[0:1] sigla = list(df[constants.SIGLA_FIELDNAME]) sigla.sort() merged[constants.SIGLUM_FIELDNAME] = ', '.join(sigla) return merged self._matches = grouped.apply(merge_sigla) del self._matches[constants.SIGLA_FIELDNAME] self._matches.rename(columns={constants.SIGLUM_FIELDNAME: constants.SIGLA_FIELDNAME}, inplace=True)
Groups together witnesses for the same n-gram and work that has the same count, and outputs a single row for each group. This output replaces the siglum field with a sigla field that provides a comma separated list of the witness sigla. Due to this, it is not necessarily possible to run other Results methods on results that have had their witnesses collapsed.
def fetch_suvi_l1b(self, product, correct=True, median_kernel=5):
    """
    Given a product keyword, downloads the SUVI l1b image into the current directory.
    NOTE: the suvi_l1b_url must be properly set for the Fetcher object

    :param product: the keyword for the product, e.g. suvi-l1b-fe094
    :param correct: remove nans and negatives
    :param median_kernel: kernel size for median filtering of the image;
        a falsy value skips the filtering step
    :return: tuple of product name, fits header, and data object
        the header and data object will be None if the request failed
    """
    # SUVI data only exists after 2018-05-23, except for a special window
    # around the September 2017 events.
    if self.date < datetime(2018, 5, 23) and not (self.date >= datetime(2017, 9, 6) \
            and self.date <= datetime(2017, 9, 10, 23, 59)):
        print("SUVI data is only available after 2018-5-23")
        return product, None, None

    # Build the directory listing URL for the product on the requested day.
    url = self.suvi_base_url + product + "/{}/{:02d}/{:02d}".format(self.date.year,
                                                                    self.date.month,
                                                                    self.date.day)
    if self.verbose:
        print("Requesting from {}".format(url))
    try:
        req = urllib.request.Request(url)
        with urllib.request.urlopen(req) as response:
            page = response.read()
    except (URLError, HTTPError):
        msg = "The SUVI URL you requested, {}, appears to be unavailable. Check it through a web browser."
        raise RuntimeError(msg.format(url))

    # Scrape the HTML index page for FITS file links.
    soup = BeautifulSoup(page, 'html.parser')
    links = [link['href'] for link in soup.find_all('a', href=True)]
    links = [link for link in links if "SUVI" in link]
    meta = [self.parse_filename_meta(fn) for fn in links if ".fits" in fn]
    # Keep the ten files whose observation time is closest to self.date.
    links = sorted(meta, key=lambda m: np.abs((m[2] - self.date).total_seconds()))[:10]
    links = [fn for fn, _, _, _, _ in links]
    i = 0

    def download_and_check(i):
        # Download candidate i and accept it only if it is a long-exposure
        # frame (exptime > 0.5); short exposures are skipped.
        try:
            urllib.request.urlretrieve(url + "/" + links[i], "{}.fits".format(product))
        except (URLError, HTTPError):
            msg = "THE SUVI file you requested, {}, appears to be unvailable. Check if the website is correct."
            raise RuntimeError(msg.format(url + "/" + links[i]))
        with fits.open("{}.fits".format(product)) as hdu:
            head = hdu[0].header
        return head['exptime'] > 0.5

    # NOTE(review): if all ten candidates are short exposures this walks
    # off the end of `links` and raises IndexError -- confirm acceptable.
    while not download_and_check(i):
        i += 1

    with fits.open("{}.fits".format(product)) as hdu:
        head = hdu[0].header
        data = hdu[0].data
    # The file was only needed to populate head/data; remove it.
    os.remove("{}.fits".format(product))
    if correct:
        # Zero out NaNs and negative counts before further processing.
        data[np.isnan(data)] = 0
        data[data < 0] = 0
    if median_kernel:
        data = medfilt(data, median_kernel)
    data, head = self.align_solar_fov(head, data, 2.5, 2.0, rotate=True, scale=False)
    if self.verbose:
        print(product, " is using ", head['date-obs'])
    return product, head, data
Given a product keyword, downloads the SUVI l1b image into the current directory. NOTE: the suvi_l1b_url must be properly set for the Fetcher object :param product: the keyword for the product, e.g. suvi-l1b-fe094 :param correct: remove nans and negatives :return: tuple of product name, fits header, and data object the header and data object will be None if the request failed
def stream(self, muted=values.unset, hold=values.unset, coaching=values.unset,
           limit=None, page_size=None):
    """
    Streams ParticipantInstance records from the API as a generator stream.
    Records are loaded lazily and as efficiently as possible until the limit
    is reached, so this operation is memory efficient.

    :param bool muted: Whether to return only participants that are muted
    :param bool hold: Whether to return only participants that are on hold
    :param bool coaching: Whether to return only participants who are coaching
                          another call
    :param int limit: Upper limit for the number of records to return. stream()
                      guarantees to never return more than limit.  Default is no limit
    :param int page_size: Number of records to fetch per request, when not set will use
                          the default value of 50 records.  If no page_size is defined
                          but a limit is defined, stream() will attempt to read the
                          limit with the most efficient page size, i.e. min(limit, 1000)

    :returns: Generator that will yield up to limit results
    :rtype: list[twilio.rest.api.v2010.account.conference.participant.ParticipantInstance]
    """
    # Resolve effective page size / record limits from the caller's hints.
    limits = self._version.read_limits(limit, page_size)

    first_page = self.page(
        muted=muted,
        hold=hold,
        coaching=coaching,
        page_size=limits['page_size'],
    )

    return self._version.stream(first_page, limits['limit'], limits['page_limit'])
Streams ParticipantInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param bool muted: Whether to return only participants that are muted :param bool hold: Whether to return only participants that are on hold :param bool coaching: Whether to return only participants who are coaching another call :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.api.v2010.account.conference.participant.ParticipantInstance]
def retrieve(self, id) :
    """
    Retrieve a single source

    Returns a single source available to the user by the provided id.
    If a source with the supplied unique identifier does not exist it
    returns an error.

    :calls: ``get /sources/{id}``
    :param int id: Unique identifier of a Source.
    :return: Dictionary that supports attribute-style access and
        represents a Source resource.
    :rtype: dict
    """
    endpoint = "/sources/{id}".format(id=id)
    # The HTTP client returns (status, headers, payload); only the payload
    # is of interest here.
    _, _, source = self.http_client.get(endpoint)
    return source
Retrieve a single source

Returns a single source available to the user by the provided id
If a source with the supplied unique identifier does not exist it returns an error

:calls: ``get /sources/{id}``
:param int id: Unique identifier of a Source.
:return: Dictionary that supports attribute-style access and represents a Source resource.
:rtype: dict
def load_mo(self, state, page_idx):
    """
    Load the memory object covering ``page_idx`` from this page's storage.

    :param state:    unused here; kept for interface compatibility.
    :param page_idx: the index into the page
    :returns: the stored object whose key is the greatest key <= page_idx,
              or None when no such key exists
    """
    # Walk keys downward from page_idx; the first hit is the covering object.
    candidates = self._storage.irange(maximum=page_idx, reverse=True)
    try:
        nearest_key = next(candidates)
    except StopIteration:
        return None
    return self._storage[nearest_key]
Loads a memory object from memory. :param page_idx: the index into the page :returns: a tuple of the object
def _run_xmlsec(self, com_list, extra_args):
    """
    Common code to invoke xmlsec and parse the output.

    Note: ``com_list`` is extended in place (the ``--output`` option and
    ``extra_args`` are appended), so the caller's list is mutated.

    :param com_list: Key-value parameter list for xmlsec
    :param extra_args: Positional parameters to be appended after all
        key-value parameters
    :raises XmlsecError: when xmlsec exits with a non-zero return code
    :result: tuple of (stdout, stderr, contents of the --output temp file)
    """
    with NamedTemporaryFile(suffix='.xml', delete=self._xmlsec_delete_tmpfiles) as ntf:
        # Direct xmlsec's result into the temporary file so it can be
        # read back after the process exits.
        com_list.extend(['--output', ntf.name])
        com_list += extra_args

        logger.debug('xmlsec command: %s', ' '.join(com_list))

        pof = Popen(com_list, stderr=PIPE, stdout=PIPE)
        p_out, p_err = pof.communicate()
        p_out = p_out.decode()
        p_err = p_err.decode()

        if pof.returncode != 0:
            # Bundle everything xmlsec reported into the error message.
            errmsg = "returncode={code}\nerror={err}\noutput={out}".format(
                code=pof.returncode, err=p_err, out=p_out
            )
            logger.error(errmsg)
            raise XmlsecError(errmsg)

        # Rewind before reading: xmlsec wrote to the file after creation.
        ntf.seek(0)
        return p_out, p_err, ntf.read()
Common code to invoke xmlsec and parse the output. :param com_list: Key-value parameter list for xmlsec :param extra_args: Positional parameters to be appended after all key-value parameters :result: Whatever xmlsec wrote to an --output temporary file
def showinfo(self):
    '''
    Display an overview of the account state: cash balance, holdings,
    and average purchase price.  (Python 2 print statements.)
    '''
    print 'money:',self.money
    print 'store:',self.store
    print 'avgprice:',self.avgprice
總覽顯示
def reading(self):
    """Print the greyed 'Reading package lists...' progress message
    (without a trailing newline) and flush stdout immediately."""
    message = "{0}Reading package lists...{1} ".format(
        self.meta.color["GREY"], self.meta.color["ENDC"])
    sys.stdout.write(message)
    sys.stdout.flush()
Message reading
def remove_kv_store(self, key):
    """Remove a key-value store entry.

    :param key: string
    :return: the response body text from the service
    """
    payload = to_json({'operation': 'DELETE', 'key': key})
    response = self.post(self.make_url("/useragent-kv"),
                         data=payload,
                         headers=self.default_headers)
    return response.text
Remove a key-value store entry. :param key: string
def validateInterfaceName(n):
    """
    Verifies that the supplied name is a valid DBus Interface name. Throws
    an L{error.MarshallingError} if the format is invalid

    @type n: C{string}
    @param n: A DBus interface name
    """
    # Each rule is a lazily-evaluated predicate paired with the reason it
    # reports.  Evaluation order matters (e.g. the n[0] checks are only
    # reached after the '.' check guarantees a non-empty name) and matches
    # the original sequence of checks.
    rules = (
        (lambda: '.' not in n, 'At least two components required'),
        (lambda: '..' in n, '".." not allowed in interface names'),
        (lambda: len(n) > 255, 'Name exceeds maximum length of 255'),
        (lambda: n[0] == '.', 'Names may not begin with a "."'),
        (lambda: n[0].isdigit(), 'Names may not begin with a digit'),
        (lambda: if_re.search(n),
         'Names contains a character outside the set [A-Za-z0-9_.]'),
        (lambda: dot_digit_re.search(n),
         'No components of an interface name may begin with a digit'),
    )
    try:
        for broken, reason in rules:
            if broken():
                raise Exception(reason)
    except Exception as e:
        raise MarshallingError('Invalid interface name "%s": %s' % (n, str(e)))
Verifies that the supplied name is a valid DBus Interface name. Throws an L{error.MarshallingError} if the format is invalid @type n: C{string} @param n: A DBus interface name
def dollars_to_math(source):
    r"""
    Replace dollar signs with backticks.

    More precisely: replace a plain dollar sign (``$``) pair around text with
    ``:math:`...```, and an escaped dollar sign (``\$``) with a literal dollar
    sign (``$``).  A dollar sign preceded or followed by a backtick is left
    alone, because of strings like ``"``$HOME``"``.  Dollar signs enclosed in
    curly braces are also left untouched, to avoid nested math environments
    such as::

      $f(n) = 0 \text{ if $n$ is prime}$

    which becomes ```f(n) = 0 \text{ if $n$ is prime}```.

    ``source`` is a list of strings; it is modified in place.
    """
    text = "\n".join(source)
    if "$" not in text:
        return
    # "$...$" spans inside a pair of curly braces are probably part of a
    # nested math environment: stash each one under a temporary token and
    # substitute the original text back at the end.
    global _data
    _data = {}

    def _stash(match):
        global _data
        token = "___XXX_REPL_%d___" % len(_data)
        _data[token] = match.group(0)
        return token

    text = re.sub(r"({[^{}$]*\$[^{}$]*\$[^{}]*})", _stash, text)
    # Matches $...$ (not escaped, not preceded by another dollar sign).
    text = re.compile(r"(?<!\$)(?<!\\)\$([^\$]+?)\$").sub(r":math:`\1`", text)
    # Unescape \$ into a literal dollar sign.
    text = re.compile(r"\\\$").sub(r"$", text)
    # Restore the stashed curly-brace spans.
    for token, original in _data.items():
        text = text.replace(token, original)
    # Save the result back into "source" as a single element.
    source[:] = [text]
r""" Replace dollar signs with backticks. More precisely, do a regular expression search. Replace a plain dollar sign ($) by a backtick (`). Replace an escaped dollar sign (\$) by a dollar sign ($). Don't change a dollar sign preceded or followed by a backtick (`$ or $`), because of strings like "``$HOME``". Don't make any changes on lines starting with spaces, because those are indented and hence part of a block of code or examples. This also doesn't replaces dollar signs enclosed in curly braces, to avoid nested math environments, such as :: $f(n) = 0 \text{ if $n$ is prime}$ Thus the above line would get changed to `f(n) = 0 \text{ if $n$ is prime}`
def geoplot(df, filter=None, n=0, p=0, sort=None, x=None, y=None, figsize=(25, 10), inline=False,
            by=None, cmap='YlGn', **kwargs):
    """
    Generates a geographical data nullity heatmap, which shows the distribution of missing data across geographic
    regions. The precise output depends on the inputs provided. If no geographical context is provided, a quadtree
    is computed and nullities are rendered as abstract geographic squares. If geographical context is provided in the
    form of a column of geographies (region, borough. ZIP code, etc.) in the `DataFrame`, convex hulls are computed
    for each of the point groups and the heatmap is generated within them.

    :param df: The DataFrame whose completeness is being geoplotted.
    :param filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None (default).
    :param sort: The sort to apply to the heatmap. Should be one of "ascending", "descending", or None.
    :param n: The cap on the number of columns to include in the filtered DataFrame.
    :param p: The cap on the percentage fill of the columns in the filtered DataFrame.
    :param figsize: The size of the figure to display. This is a `matplotlib` parameter which defaults to `(25, 10)`.
    :param x: The variable in the dataset containing the x-coordinates of the dataset.
    :param y: The variable in the dataset containing the y-coordinates of the dataset.
    :param by: If specified, plot in convex hull mode, using the given column to cluster points in the same area. If
    not specified, plot in quadtree mode.
    :param cmap: The colormap to display the data with. Defaults to `YlGn`.
    :param inline: Whether or not the figure is inline. If it's not then instead of getting plotted, this method will
    return its figure.
    :param kwargs: Additional keyword arguments are passed to the underlying `geoplot` function.
    :raises ValueError: if `x` and `y` are not both specified.
    :return: If `inline` is False, the underlying `matplotlib.figure` object. Else, nothing.
    """
    # Imported lazily so the optional geo stack is only required when this
    # plot type is actually used.
    import geoplot as gplt
    import geopandas as gpd
    from shapely.geometry import Point

    df = nullity_filter(df, filter=filter, n=n, p=p)
    df = nullity_sort(df, sort=sort)

    # Per-row completeness: fraction of non-null cells in each record.
    nullity = df.notnull().sum(axis='columns') / df.shape[1]

    if x and y:
        # Each record becomes a point located at its (x, y) coordinates.
        gdf = gpd.GeoDataFrame(nullity, columns=['nullity'],
                               geometry=df.apply(lambda srs: Point(srs[x], srs[y]), axis='columns'))
    else:
        raise ValueError("The 'x' and 'y' parameters must be specified.")

    if by:
        if df[by].isnull().any():
            warnings.warn('The "{0}" column included null values. The offending records were dropped'.format(by))
            df = df.dropna(subset=[by])
            gdf = gdf.loc[df.index]

        # Clusters with fewer than three points cannot form a polygon
        # (convex hull), so such groups are dropped with a warning.
        vc = df[by].value_counts()
        if (vc < 3).any():
            warnings.warn('Grouping by "{0}" included clusters with fewer than three points, which cannot be made '
                          'polygonal. The offending records were dropped.'.format(by))
            where = df[by].isin((df[by].value_counts() > 2).where(lambda b: b).dropna().index.values)
            gdf = gdf.loc[where]
        gdf[by] = df[by]

    gplt.aggplot(gdf, figsize=figsize, hue='nullity', agg=np.average, cmap=cmap, by=by, edgecolor='None', **kwargs)
    ax = plt.gca()

    if inline:
        plt.show()
    else:
        return ax
Generates a geographical data nullity heatmap, which shows the distribution of missing data across geographic regions. The precise output depends on the inputs provided. If no geographical context is provided, a quadtree is computed and nullities are rendered as abstract geographic squares. If geographical context is provided in the form of a column of geographies (region, borough. ZIP code, etc.) in the `DataFrame`, convex hulls are computed for each of the point groups and the heatmap is generated within them. :param df: The DataFrame whose completeness is being geoplotted. :param filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None (default). :param sort: The sort to apply to the heatmap. Should be one of "ascending", "descending", or None. :param n: The cap on the number of columns to include in the filtered DataFrame. :param p: The cap on the percentage fill of the columns in the filtered DataFrame. :param figsize: The size of the figure to display. This is a `matplotlib` parameter which defaults to `(25, 10)`. :param x: The variable in the dataset containing the x-coordinates of the dataset. :param y: The variable in the dataset containing the y-coordinates of the dataset. :param by: If specified, plot in convex hull mode, using the given column to cluster points in the same area. If not specified, plot in quadtree mode. :param cmap: The colormap to display the data with. Defaults to `YlGn`. :param inline: Whether or not the figure is inline. If it's not then instead of getting plotted, this method will return its figure. :param kwargs: Additional keyword arguments are passed to the underlying `geoplot` function. :return: If `inline` is False, the underlying `matplotlib.figure` object. Else, nothing.
def x_vs_y(collection_x, collection_y, title_x=None, title_y=None, width=43, filter_none=False):
    """
    Print a histogram with bins for x to the left and bins of y to the right.

    :param collection_x: left-hand data collection (merged with
        ``collection_y`` via ``merge_x_y``)
    :param collection_y: right-hand data collection
    :param title_x: optional header for the x column (used only when
        ``title_y`` is also given)
    :param title_y: optional header for the y column
    :param width: scaling hint for the number of histogram bins
    :param filter_none: forwarded to ``merge_x_y``
    :return: the column-formatted rows produced by ``to_smart_columns``
    """
    data = merge_x_y(collection_x, collection_y, filter_none)
    max_value = get_max_x_y(data)
    # NOTE(review): this scales bin counts with max_value/width rather than
    # capping bar length at `width` -- confirm the intended scaling.  A
    # max_value of 0 would also divide by zero below.
    bins_total = int(float(max_value) / width) + 1

    # Only emit headers when both titles are provided.
    if title_x is not None and title_y is not None:
        headers = [title_x, title_y]
    else:
        headers = None

    result = []
    # Sort keys so rows appear in a deterministic order.
    for item in sorted(data):
        bins_x = int((float(data[item]['x']) / float(max_value)) * bins_total) + 1
        bins_y = int((float(data[item]['y']) / float(max_value)) * bins_total) + 1
        result.append([item, '*' * bins_x, '*' * bins_y])

    return to_smart_columns(result, headers=headers)
Print a histogram with bins for x to the left and bins of y to the right
def get_addressbooks(self, order=None, append_remaining=True):
    """returns list of all defined :class:`AddressBook` objects"""
    abooks = []
    # First take the address books of the explicitly ordered accounts.
    for account in (order or []):
        if account and account.abook:
            abooks.append(account.abook)
    # Then append address books of any remaining accounts, deduplicated.
    if append_remaining:
        for account in self._accounts:
            book = account.abook
            if book and book not in abooks:
                abooks.append(book)
    return abooks
returns list of all defined :class:`AddressBook` objects
def consult_response_hook(self, item_session: ItemSession) -> Actions:
    '''Return scripting action when a response ends.

    Falls back to ``Actions.NORMAL`` when no hook is connected.
    '''
    try:
        action = self.hook_dispatcher.call(
            PluginFunctions.handle_response, item_session
        )
    except HookDisconnected:
        action = Actions.NORMAL
    return action
Return scripting action when a response ends.
def get_telnet_template(auth, url, template_name=None):
    """
    Takes no input, or template_name as input to issue RESTful call to HP IMC

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class

    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass

    :param template_name: str value of template name

    :return list object containing one or more dictionaries where each dictionary represents one
    telnet template; 404 (int) when template_name is given but not found; an error
    string on a requests exception

    :rtype list
    """
    f_url = url + "/imcrs/plat/res/telnet?start=0&size=10000&desc=false&total=false"
    response = requests.get(f_url, auth=auth, headers=HEADERS)
    try:
        if response.status_code == 200:
            telnet_templates = (json.loads(response.text))
            template = None
            # The API returns a bare dict when there is exactly one
            # template; normalize to a list so callers always get a list.
            if type(telnet_templates['telnetParamTemplate']) is dict:
                telnet_templates['telnetParamTemplate'] = \
                    [telnet_templates['telnetParamTemplate']]
            if template_name is None:
                return telnet_templates['telnetParamTemplate']
            for telnet_template in telnet_templates['telnetParamTemplate']:
                if telnet_template['name'] == template_name:
                    template = [telnet_template]
            if template is None:
                # Named template not found.
                return 404
            return template
        # NOTE(review): non-200 responses fall through and return None --
        # confirm callers handle that.
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " get_telnet_templates: An Error has occured"
Takes no input, or template_name as input to issue RESTUL call to HP IMC :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :param template_name: str value of template name :return list object containing one or more dictionaries where each dictionary represents one telnet template :rtype list
def get_assessment_basic_authoring_session_for_bank(self, bank_id, proxy):
    """Gets the ``OsidSession`` associated with the assessment authoring
    service for the given bank.

    arg:    bank_id (osid.id.Id): the ``Id`` of a bank
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.assessment.AssessmentBasicAuthoringSession) - an
            ``AssessmentBasicAuthoringSession``
    raise:  NotFound - ``bank_id`` not found
    raise:  NullArgument - ``bank_id`` or ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_assessment_basic_authoring()``
            or ``supports_visibe_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_assessment_basic_authoring()`` and
    ``supports_visibe_federation()`` is ``true``.*

    """
    # Guard: this service must advertise basic-authoring support.
    if not self.supports_assessment_basic_authoring():
        raise errors.Unimplemented()
    # TODO: also verify that the catalog Id exists and raise
    # errors.NotFound otherwise.
    # pylint: disable=no-member
    return sessions.AssessmentBasicAuthoringSession(bank_id, proxy, self._runtime)
Gets the ``OsidSession`` associated with the assessment authoring service for the given bank. arg: bank_id (osid.id.Id): the ``Id`` of a bank arg: proxy (osid.proxy.Proxy): a proxy return: (osid.assessment.AssessmentBasicAuthoringSession) - an ``AssessmentBasicAuthoringSession`` raise: NotFound - ``bank_id`` not found raise: NullArgument - ``bank_id`` or ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_basic_authoring()`` or ``supports_visibe_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_basic_authoring()`` and ``supports_visibe_federation()`` is ``true``.*
def get_clan_war(self, tag: crtag, **params: keys):
    """Get information about a clan's current clan war

    Parameters
    ----------
    *tag: str
        A valid clan tag. Minimum length: 3
        Valid characters: 0289PYLQGRJCUV
    \*\*keys: Optional[list] = None
        Filter which keys should be included in the
        response
    \*\*exclude: Optional[list] = None
        Filter which keys should be excluded from the
        response
    \*\*timeout: Optional[int] = None
        Custom timeout that overwrites Client.timeout
    """
    endpoint = '{}/{}/war'.format(self.api.CLAN, tag)
    return self._get_model(endpoint, **params)
Get information about a clan's current clan war

Parameters
----------
*tag: str
    A valid clan tag. Minimum length: 3
    Valid characters: 0289PYLQGRJCUV
\*\*keys: Optional[list] = None
    Filter which keys should be included in the
    response
\*\*exclude: Optional[list] = None
    Filter which keys should be excluded from the
    response
\*\*timeout: Optional[int] = None
    Custom timeout that overwrites Client.timeout
def _datatarget_defaults(args, default_args):
    """Set data installation targets, handling defaults.

    Sets variation, rnaseq, smallrna as default targets if we're not
    isolated to a single method.

    Provides back compatibility for toolplus specifications.
    """
    targets = default_args.get("datatarget", [])
    # back-compatible toolplus specifications
    for plus in default_args.get("toolplus", []):
        mapped = None
        if plus == "data":
            mapped = "gemini"
        elif plus in ["cadd", "dbnsfp", "dbscsnv", "kraken", "gnomad"]:
            mapped = plus
        if mapped and mapped not in targets:
            targets.append(mapped)
    combined = getattr(args, "datatarget")
    for target in targets:
        if target not in combined:
            combined.append(target)
    # Fall back to the standard targets unless one was already requested.
    std_targets = ["variation", "rnaseq", "smallrna"]
    if not any(target in combined for target in std_targets):
        combined = combined + std_targets
    setattr(args, "datatarget", combined)
    return args
Set data installation targets, handling defaults. Sets variation, rnaseq, smallrna as default targets if we're not isolated to a single method. Provides back compatibility for toolplus specifications.
def address(self):
    """
    Should be called to get client direct address

    @method address
    @returns {String} client direct address, or "" when this IBAN is
        not direct
    """
    if not self.isDirect():
        return ""
    # Skip the country/checksum prefix, decode the base-36 payload and
    # render it as a padded, checksummed hex address.
    base36 = self._iban[4:]
    asInt = int(base36, 36)
    return to_checksum_address(pad_left_hex(baseN(asInt, 16), 20))
Should be called to get client direct address @method address @returns {String} client direct address
def _raise_on_error(data: Union[str, dict]) -> None:
    """Raise the appropriate exception on error."""
    # A bare string is itself the error payload.
    if isinstance(data, str):
        raise_error(data)
    else:
        # Structured responses signal failure via a non-success status.
        failed = 'status' in data and data['status'] != 'success'
        if failed:
            raise_error(data['data']['message'])
Raise the appropriate exception on error.
def search(self, query):
    """
    Search the Skype Directory for a user.

    Args:
        query (str): name to search for

    Returns:
        SkypeUser list: collection of possible results
    """
    # Query the directory endpoint; results arrive as a JSON list of hits.
    resp = self.skype.conn("GET", SkypeConnection.API_DIRECTORY,
                           auth=SkypeConnection.Auth.SkypeToken,
                           params={"searchstring": query, "requestId": "0"})
    hits = resp.json().get("results", [])
    # Wrap each hit's profile payload in a SkypeUser.
    users = []
    for hit in hits:
        users.append(SkypeUser.fromRaw(self.skype, hit.get("nodeProfileData", {})))
    return users
Search the Skype Directory for a user. Args: query (str): name to search for Returns: SkypeUser list: collection of possible results
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
    """Perform a cumulative product across the DataFrame.

    Args:
        axis (int): The axis to take product on.
        skipna (bool): True to skip NA values, false otherwise.

    Returns:
        The cumulative product of the DataFrame.
    """
    # Default to the index axis (0) when none is given.
    resolved_axis = 0 if axis is None else self._get_axis_number(axis)
    # Cumulative product only makes sense on numeric dtypes.
    self._validate_dtypes(numeric_only=True)
    compiler = self._query_compiler.cumprod(
        axis=resolved_axis, skipna=skipna, **kwargs
    )
    return self.__constructor__(query_compiler=compiler)
Perform a cumulative product across the DataFrame. Args: axis (int): The axis to take product on. skipna (bool): True to skip NA values, false otherwise. Returns: The cumulative product of the DataFrame.
def get_job_logs(id):
    """Get the crawl logs from the job."""
    crawler_job = models.CrawlerJob.query.filter_by(id=id).one_or_none()
    # Bail out with a warning if the id doesn't map to a crawl job.
    if crawler_job is None:
        message = "CrawlJob %s was not found, maybe it's not a crawl job?" % id
        click.secho(message, fg='yellow')
        sys.exit(1)
    # A job without logs probably hasn't run yet.
    if crawler_job.logs is None:
        message = (
            "CrawlJob %s has no log, it might be that it has not run "
            "yet, you can try again later." % id
        )
        click.secho(message, fg='yellow')
        sys.exit(1)
    _show_file(file_path=crawler_job.logs, header_name='Log')
Get the crawl logs from the job.
def majority_vote_byte_scan(relfilepath, fileslist, outpath, blocksize=65535, default_char_null=False):
    '''Takes a list of files in string format representing the same data, and disambiguate by majority vote: for position in string, if the character is not the same accross all entries, we keep the major one. If none, it will be replaced by a null byte (because we can't know if any of the entries are correct about this character).
    relfilepath is the filename or the relative file path relative to the parent directory (ie, this is the relative path so that we can compare the files from several directories).'''
    # NOTE(review): this function uses Python 2-only APIs (xrange,
    # dict.iterkeys().next()) — it will not run unmodified on Python 3.
    # The idea of replication combined with ECC was a bit inspired by this paper: Friedman, Roy, Yoav Kantor, and Amir Kantor. "Combining Erasure-Code and Replication Redundancy Schemes for Increased Storage and Repair Efficiency in P2P Storage Systems.", 2013, Technion, Computer Science Department, Technical Report CS-2013-03
    # But it is a very well known concept in redundancy engineering, usually called triple-modular redundancy (which is here extended to n-modular since we can supply any number of files we want, not just three).

    # Preference in case of ambiguity is always given to the file of the first folder.
    fileshandles = []
    for filepath in fileslist:
        if filepath:
            # Already a file handle? Just store it in the fileshandles list
            if hasattr(filepath, 'read'):
                fileshandles.append(filepath)
            # Else it's a string filepath, open the file
            else:
                fileshandles.append(open(filepath, 'rb'))
    # Create and open output (merged) file, except if we were already given a file handle
    if hasattr(outpath, 'write'):
        outfile = outpath
    else:
        outpathfull = os.path.join(outpath, relfilepath)
        pardir = os.path.dirname(outpathfull)
        if not os.path.exists(pardir):
            os.makedirs(pardir)
        outfile = open(outpathfull, 'wb')
    # Cannot vote if there's not at least 3 files!
    # In this case, just copy the file from the first folder, verbatim
    if len(fileshandles) < 3:
        # If there's at least one input file, then copy it verbatim to the output folder
        if fileshandles:
            # NOTE(review): `outpathfull` is only bound in the string-path
            # branch above — if `outpath` is a file handle, this line raises
            # NameError. Confirm whether handle-mode callers ever reach here.
            create_dir_if_not_exist(os.path.dirname(outpathfull))
            buf = 1
            while (buf):
                buf = fileshandles[0].read()
                outfile.write(buf)
            outfile.flush()
        return (1, "Error with file %s: only %i copies available, cannot vote (need at least 3)! Copied the first file from the first folder, verbatim." % (relfilepath, len(fileshandles)))
    errors = []
    entries = [1]*len(fileshandles)  # init with 0 to start the while loop
    while (entries.count('') < len(fileshandles)):
        final_entry = []
        # Read a block from all input files into memory
        for i in xrange(len(fileshandles)):
            entries[i] = fileshandles[i].read(blocksize)
        # End of file for all files, we exit
        if entries.count('') == len(fileshandles):
            break
        # Else if there's only one file, just copy the file's content over
        elif len(entries) == 1:
            final_entry = entries[0]
        # Else, do the majority vote
        else:
            # Walk along each column (imagine the strings being rows in a matrix, then we pick one column at each iteration = all characters at position i of each string), so that we can compare these characters easily
            for i in xrange(max(len(entry) for entry in entries)):
                hist = {}  # kind of histogram, we just memorize how many times a character is presented at the position i in each string TODO: use collections.Counter instead of dict()?
                # Extract the character at position i of each string and compute the histogram at the same time (number of time this character appear among all strings at this position i)
                for entry in entries:
                    # Check if we are not beyond the current entry's length
                    if i < len(entry):  # TODO: check this line, this should allow the vote to continue even if some files are shorter than others
                        # Extract the character and use it to contribute to the histogram
                        # TODO: add warning message when one file is not of the same size as the others
                        key = str(ord(entry[i]))  # convert to the ascii value to avoid any funky problem with encoding in dict keys
                        hist[key] = hist.get(key, 0) + 1  # increment histogram for this value. If it does not exists, use 0. (essentially equivalent to hist[key] += 1 but with exception management if key did not already exists)
                # If there's only one character (it's the same accross all strings at position i), then it's an exact match, we just save the character and we can skip to the next iteration
                if len(hist) == 1:
                    final_entry.append(chr(int(hist.iterkeys().next())))
                    continue
                # Else, the character is different among different entries, we will pick the major one (mode)
                elif len(hist) > 1:
                    # Sort the dict by value (and reverse because we want the most frequent first)
                    skeys = sorted(hist, key=hist.get, reverse=True)
                    # Ambiguity! If each entries present a different character (thus the major has only an occurrence of 1), then it's too ambiguous and we just set a null byte to signal that
                    if hist[skeys[0]] == 1:
                        if default_char_null:
                            if default_char_null is True:
                                final_entry.append("\x00")
                            else:
                                final_entry.append(default_char_null)
                        else:
                            # Use the entry of the first file that is still open
                            first_char = ''
                            for entry in entries:
                                # Found the first file that has a character at this position: store it and break loop
                                if i < len(entry):
                                    first_char = entry[i]
                                    break
                            # Use this character in spite of ambiguity
                            final_entry.append(first_char)
                        errors.append(outfile.tell() + i)  # Print an error indicating the characters that failed
                    # Else if there is a tie (at least two characters appear with the same frequency), then we just pick one of them
                    elif hist[skeys[0]] == hist[skeys[1]]:
                        final_entry.append(chr(int(skeys[0])))  # TODO: find a way to account for both characters. Maybe return two different strings that will both have to be tested? (eg: maybe one has a tampered hash, both will be tested and if one correction pass the hash then it's ok we found the correct one)
                    # Else we have a clear major character that appear in more entries than any other character, then we keep this one
                    else:
                        final_entry.append(chr(int(skeys[0])))  # alternative one-liner: max(hist.iteritems(), key=operator.itemgetter(1))[0]
                    continue
        # Concatenate to a string (this is faster than using a string from the start and concatenating at each iteration because Python strings are immutable so Python has to copy over the whole string, it's in O(n^2)
        final_entry = ''.join(final_entry)
        # Commit to output file
        outfile.write(final_entry)
        outfile.flush()
    # Errors signaling
    if errors:
        error_msg = "Unrecoverable corruptions (because of ambiguity) in file %s on characters: %s." % (relfilepath, [hex(int(x)) for x in errors])
        # NOTE(review): returning here skips the handle-closing code below,
        # so input/output handles stay open on the error path — confirm intent.
        return (1, error_msg)  # return an error
    # Close all input files
    for fh in fileshandles:
        fh.close()
    # Close output file
    if outfile != outpath:  # close only if we were not given a file handle in the first place
        outfile.flush()
        outfile.close()
    return (0, None)
Takes a list of files in string format representing the same data, and disambiguates by majority vote: for each position in the string, if the character is not the same across all entries, we keep the major one. If none, it will be replaced by a null byte (because we can't know if any of the entries are correct about this character).
relfilepath is the filename or the relative file path relative to the parent directory (ie, this is the relative path so that we can compare the files from several directories).
def get_custom_layouts_active_label(self):
    """
    Provides the default **Custom_Layouts_active_label** widget.

    :return: Layout active label.
    :rtype: Active_QLabel
    """
    # Build the label with its default / hover / active pixmaps.
    layout_active_label = Active_QLabel(self,
                                        QPixmap(umbra.ui.common.get_resource_path(UiConstants.custom_layouts_icon)),
                                        QPixmap(umbra.ui.common.get_resource_path(UiConstants.custom_layouts_hover_icon)),
                                        QPixmap(umbra.ui.common.get_resource_path(UiConstants.custom_layouts_active_icon)))
    layout_active_label.setObjectName("Custom_Layouts_active_label")

    self.__custom_layouts_menu = QMenu("Layouts", layout_active_label)

    # First pass: register every user layout and expose a "restore" action
    # bound to the layout's own shortcut.
    for layout in self.__user_layouts:
        self.__container.layouts_manager.register_layout(layout.identity, layout)
        self.__custom_layouts_menu.addAction(self.__container.actions_manager.register_action(
            "Actions|Umbra|ToolBar|Layouts|Restore layout {0}".format(layout.name),
            shortcut=layout.shortcut,
            slot=functools.partial(self.__container.layouts_manager.restore_layout, layout.identity)))
    self.__custom_layouts_menu.addSeparator()

    # Second pass: "store" actions use Ctrl + the layout's shortcut.
    for layout in self.__user_layouts:
        self.__custom_layouts_menu.addAction(self.__container.actions_manager.register_action(
            "Actions|Umbra|ToolBar|Layouts|Store layout {0}".format(layout.name),
            shortcut=Qt.CTRL + layout.shortcut,
            slot=functools.partial(self.__container.layouts_manager.store_layout, layout.identity)))
    self.__custom_layouts_menu.addSeparator()

    # Fullscreen toggle shares the same menu (Ctrl+Shift+F).
    self.__custom_layouts_menu.addAction(self.__container.actions_manager.register_action(
        "Actions|Umbra|ToolBar|Layouts|Toggle FullScreen",
        shortcut=Qt.ControlModifier + Qt.SHIFT + Qt.Key_F,
        slot=self.__container.toggle_full_screen))

    layout_active_label.set_menu(self.__custom_layouts_menu)
    return layout_active_label
Provides the default **Custom_Layouts_active_label** widget. :return: Layout active label. :rtype: Active_QLabel
def get_rpms(self):
    """
    Build a list of installed RPMs in the format required for the
    metadata.

    Returns:
        list: parsed records produced by ``parse_rpm_output``.

    Raises:
        RuntimeError: if the ``rpm`` query command exits non-zero.
    """
    tags = [
        'NAME',
        'VERSION',
        'RELEASE',
        'ARCH',
        'EPOCH',
        'SIGMD5',
        'SIGPGP:pgpsig',
        'SIGGPG:pgpsig',
    ]

    cmd = "/bin/rpm " + rpm_qf_args(tags)
    # Fix: `stderr` was only assigned on the py2 branch, so the debug log
    # below raised NameError on py3 when the command failed. Default it here.
    stderr = None
    try:
        # py3: getstatusoutput combines stdout and stderr into `output`
        (status, output) = subprocess.getstatusoutput(cmd)
    except AttributeError:
        # py2: getstatusoutput lives in the `commands` module, so fall back
        # to Popen; stderr is deliberately discarded to /dev/null.
        with open('/dev/null', 'r+') as devnull:
            p = subprocess.Popen(cmd,
                                 shell=True,
                                 stdin=devnull,
                                 stdout=subprocess.PIPE,
                                 stderr=devnull)
            (stdout, stderr) = p.communicate()
            status = p.wait()
            output = stdout.decode()

    if status != 0:
        # On py3 the combined output is the best diagnostic we have.
        self.log.debug("%s: stderr output: %s", cmd, stderr if stderr is not None else output)
        raise RuntimeError("%s: exit code %s" % (cmd, status))

    return parse_rpm_output(output.splitlines(), tags)
Build a list of installed RPMs in the format required for the metadata.
def focus(self):
    """ Unsets the focused element """
    # Capture the outgoing focus in the message before clearing it, so
    # listeners can see what lost focus.
    msg = FocusSignalMsg(None, self._focus)
    self._focus = None
    self.focus_signal.emit(msg)
Unsets the focused element
def resample(y, orig_sr, target_sr, res_type='kaiser_best', fix=True, scale=False, **kwargs): """Resample a time series from orig_sr to target_sr Parameters ---------- y : np.ndarray [shape=(n,) or shape=(2, n)] audio time series. Can be mono or stereo. orig_sr : number > 0 [scalar] original sampling rate of `y` target_sr : number > 0 [scalar] target sampling rate res_type : str resample type (see note) .. note:: By default, this uses `resampy`'s high-quality mode ('kaiser_best'). To use a faster method, set `res_type='kaiser_fast'`. To use `scipy.signal.resample`, set `res_type='fft'` or `res_type='scipy'`. To use `scipy.signal.resample_poly`, set `res_type='polyphase'`. .. note:: When using `res_type='polyphase'`, only integer sampling rates are supported. fix : bool adjust the length of the resampled signal to be of size exactly `ceil(target_sr * len(y) / orig_sr)` scale : bool Scale the resampled signal so that `y` and `y_hat` have approximately equal total energy. kwargs : additional keyword arguments If `fix==True`, additional keyword arguments to pass to `librosa.util.fix_length`. Returns ------- y_hat : np.ndarray [shape=(n * target_sr / orig_sr,)] `y` resampled from `orig_sr` to `target_sr` Raises ------ ParameterError If `res_type='polyphase'` and `orig_sr` or `target_sr` are not both integer-valued. See Also -------- librosa.util.fix_length scipy.signal.resample resampy.resample Notes ----- This function caches at level 20. 
Examples -------- Downsample from 22 KHz to 8 KHz >>> y, sr = librosa.load(librosa.util.example_audio_file(), sr=22050) >>> y_8k = librosa.resample(y, sr, 8000) >>> y.shape, y_8k.shape ((1355168,), (491671,)) """ # First, validate the audio buffer util.valid_audio(y, mono=False) if orig_sr == target_sr: return y ratio = float(target_sr) / orig_sr n_samples = int(np.ceil(y.shape[-1] * ratio)) if res_type in ('scipy', 'fft'): y_hat = scipy.signal.resample(y, n_samples, axis=-1) elif res_type == 'polyphase': if int(orig_sr) != orig_sr or int(target_sr) != target_sr: raise ParameterError('polyphase resampling is only supported for integer-valued sampling rates.') # For polyphase resampling, we need up- and down-sampling ratios # We can get those from the greatest common divisor of the rates # as long as the rates are integrable orig_sr = int(orig_sr) target_sr = int(target_sr) gcd = np.gcd(orig_sr, target_sr) y_hat = scipy.signal.resample_poly(y, target_sr // gcd, orig_sr // gcd, axis=-1) else: y_hat = resampy.resample(y, orig_sr, target_sr, filter=res_type, axis=-1) if fix: y_hat = util.fix_length(y_hat, n_samples, **kwargs) if scale: y_hat /= np.sqrt(ratio) return np.ascontiguousarray(y_hat, dtype=y.dtype)
Resample a time series from orig_sr to target_sr Parameters ---------- y : np.ndarray [shape=(n,) or shape=(2, n)] audio time series. Can be mono or stereo. orig_sr : number > 0 [scalar] original sampling rate of `y` target_sr : number > 0 [scalar] target sampling rate res_type : str resample type (see note) .. note:: By default, this uses `resampy`'s high-quality mode ('kaiser_best'). To use a faster method, set `res_type='kaiser_fast'`. To use `scipy.signal.resample`, set `res_type='fft'` or `res_type='scipy'`. To use `scipy.signal.resample_poly`, set `res_type='polyphase'`. .. note:: When using `res_type='polyphase'`, only integer sampling rates are supported. fix : bool adjust the length of the resampled signal to be of size exactly `ceil(target_sr * len(y) / orig_sr)` scale : bool Scale the resampled signal so that `y` and `y_hat` have approximately equal total energy. kwargs : additional keyword arguments If `fix==True`, additional keyword arguments to pass to `librosa.util.fix_length`. Returns ------- y_hat : np.ndarray [shape=(n * target_sr / orig_sr,)] `y` resampled from `orig_sr` to `target_sr` Raises ------ ParameterError If `res_type='polyphase'` and `orig_sr` or `target_sr` are not both integer-valued. See Also -------- librosa.util.fix_length scipy.signal.resample resampy.resample Notes ----- This function caches at level 20. Examples -------- Downsample from 22 KHz to 8 KHz >>> y, sr = librosa.load(librosa.util.example_audio_file(), sr=22050) >>> y_8k = librosa.resample(y, sr, 8000) >>> y.shape, y_8k.shape ((1355168,), (491671,))
def keep_max_priority_effects(effects):
    """
    Given a list of effects, only keep the ones with the maximum priority
    effect type.

    Parameters
    ----------
    effects : list of MutationEffect subclasses

    Returns list of same length or shorter
    """
    # Fix: materialize the priorities in a list. Under Python 3, `map`
    # returns a one-shot iterator, so the `max()` call would exhaust it and
    # the `zip` below would see nothing, always returning [].
    priority_values = [effect_priority(effect) for effect in effects]
    max_priority = max(priority_values)
    return [e for (e, p) in zip(effects, priority_values) if p == max_priority]
Given a list of effects, only keep the ones with the maximum priority effect type. Parameters ---------- effects : list of MutationEffect subclasses Returns list of same length or shorter
def cache_subdirectory(
        reference_name=None,
        annotation_name=None,
        annotation_version=None):
    """
    Which cache subdirectory to use for a given annotation database over a
    particular reference. All arguments can be omitted to just get the base
    subdirectory for all pyensembl cached datasets.
    """
    # Omitted components collapse to empty strings so the base directory
    # is returned when nothing is specified.
    if reference_name is None:
        reference_name = ""
    if annotation_name is None:
        annotation_name = ""
    if annotation_version is None:
        annotation_version = ""
    annotation_dir = "{}{}".format(annotation_name, annotation_version)
    reference_dir = join(CACHE_BASE_SUBDIR, reference_name)
    return join(reference_dir, annotation_dir)
Which cache subdirectory to use for a given annotation database over a particular reference. All arguments can be omitted to just get the base subdirectory for all pyensembl cached datasets.
def get_export_files(cursor, id, version, types, exports_dirs, read_file=True):
    """Retrieve files associated with document.

    For each requested export ``type`` (one tuple per type, in order):
    look for the current-format file in each reachable exports dir; if not
    found, fall back to the legacy-named file and hard-link it under the
    new name; if neither exists, record a 'missing' entry.

    :param cursor: database cursor used to look up content metadata.
    :param id: document id.
    :param version: document version.
    :param types: export type names; must all exist in the configured
        ``_type_info`` settings or :class:`ExportError` is raised.
    :param exports_dirs: candidate directories to search.
    :param read_file: when True, also read and return file contents.
    :returns: list of (filename, mimetype, size, modtime, state, contents)
        tuples, where state is 'good' or 'missing'.
    """
    request = get_current_request()
    type_info = dict(request.registry.settings['_type_info'])
    metadata = get_content_metadata(id, version, cursor)
    legacy_id = metadata['legacy_id']
    legacy_version = metadata['legacy_version']
    # Skip export dirs that cannot be stat'ed (unmounted, missing, ...).
    reachable_dirs = [dir for dir in exports_dirs if safe_stat(dir)]

    # 1 result per type, in the same order as the given types
    results = []

    for type in list(types):
        if type not in type_info:
            raise ExportError("invalid type '{}' requested.".format(type))

        file_extension = type_info[type]['file_extension']

        # skip module PDFs
        if metadata['mediaType'] == MODULE_MIMETYPE and \
                file_extension == 'pdf':
            continue

        mimetype = type_info[type]['mimetype']
        filename = '{}@{}.{}'.format(id, version, file_extension)
        legacy_filenames = [
            '{}-{}.{}'.format(legacy_id, legacy_version, ext)
            for ext in LEGACY_EXTENSION_MAP[file_extension]
        ]
        slugify_title_filename = u'{}-{}.{}'.format(slugify(metadata['title']),
                                                    version, file_extension)

        # First, try the current-format filename in each reachable dir.
        for dir in reachable_dirs:
            filepath = os.path.join(dir, filename)
            try:
                if read_file:
                    with open(filepath, 'r') as file:
                        stats = os.fstat(file.fileno())
                        contents = file.read()
                else:
                    stats = os.stat(filepath)
                    contents = None
                modtime = fromtimestamp(int(stats.st_mtime))
                results.append((slugify_title_filename, mimetype,
                                stats.st_size, modtime, 'good', contents))
                break
            except EnvironmentError:
                # Not present in this dir — keep looking.
                pass
        else:
            # Let's see if the legacy file's there and make the new link
            legacy_file_found = False
            for dir in reachable_dirs:
                filepath = os.path.join(dir, filename)
                legacy_filepaths = [os.path.join(dir, fn)
                                    for fn in legacy_filenames]
                for legacy_filepath in legacy_filepaths:
                    try:
                        if read_file:
                            with open(legacy_filepath, 'r') as file:
                                stats = os.fstat(file.fileno())
                                contents = file.read()
                        else:
                            stats = os.stat(legacy_filepath)
                            contents = None
                        modtime = fromtimestamp(stats.st_mtime)
                        # Hard-link the legacy file under the new name so
                        # future lookups hit the fast path above.
                        os.link(legacy_filepath, filepath)
                        results.append((slugify_title_filename, mimetype,
                                        stats.st_size, modtime, 'good',
                                        contents))
                        legacy_file_found = True
                        break
                    except EnvironmentError:
                        pass
                if legacy_file_found:
                    break
            else:
                filenames = [filename] + legacy_filenames
                log_formatted_filenames = '\n'.join([' - {}'.format(x)
                                                     for x in filenames])
                logger.error("Could not find a file for '{}' at version '{}' "
                             "with any of the following file names:\n{}"
                             .format(id, version, log_formatted_filenames))
                # No file, return "missing" state
                results.append((slugify_title_filename, mimetype, 0, None,
                                'missing', None))
    return results
Retrieve files associated with document.
def load(self, content): """Parse yaml content.""" # Try parsing the YAML with global tags try: config = yaml.load(content, Loader=self._loader(self._global_tags)) except yaml.YAMLError: raise InvalidConfigError(_("Config is not valid yaml.")) # Try extracting just the tool portion try: config = config[self.tool] except (TypeError, KeyError): return None # If no scopes, just apply global default if not isinstance(config, dict): config = self._apply_default(config, self._global_default) else: # Figure out what scopes exist scoped_keys = set(key for key in self._scopes) # For every scope for key in config: # If scope has custom tags, apply if key in scoped_keys: # local tags, and local default tags, default = self._scopes[key] # Inherit global default if no local default if not default: default = self._global_default config[key] = self._apply_default(config[key], default) self._apply_scope(config[key], tags) # Otherwise just apply global default else: config[key] = self._apply_default(config[key], self._global_default) self._validate(config) return config
Parse yaml content.
def apply_mask(self, mask_img):
    """Set the mask, then return the masked data.

    Parameters
    ----------
    mask_img: nifti-like image, NeuroImage or str
        3D mask array: True where a voxel should be used.
        Can either be:
        - a file path to a Nifti image
        - any object with get_data() and get_affine() methods, e.g.,
          nibabel.Nifti1Image.
        If niimg is a string, consider it as a path to Nifti image and
        call nibabel.load on it. If it is an object, check if get_data()
        and get_affine() methods are present, raise TypeError otherwise.

    Returns
    -------
    The masked data deepcopied
    """
    # Install the mask first, then hand back a safe (deep) copy of the
    # masked, smoothed data.
    self.set_mask(mask_img)
    return self.get_data(masked=True, smoothed=True, safe_copy=True)
First set_mask and the get_masked_data. Parameters ---------- mask_img: nifti-like image, NeuroImage or str 3D mask array: True where a voxel should be used. Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. Returns ------- The masked data deepcopied
def strip(self, chars=None):
    """ Like str.strip, except it returns the Colr instance. """
    # Only suppress the closing escape code when the caller explicitly
    # included it in the characters to strip.
    remove_closing = chars and (closing_code in chars)
    stripped = self._str_strip('strip', chars)
    return self.__class__(stripped, no_closing=remove_closing)
Like str.strip, except it returns the Colr instance.
def SMS_me(self, body):
    """
    Quickly send an SMS to yourself.

    Calls :py:meth:`SMS`.

    * *stored credential name: PERSONAL_PHONE_NUMBER*

    :param string body: The content of the SMS message.
    """
    logging.debug('Texting myself')
    # NOTE(review): the docstring says this calls SMS, but the body calls
    # ``self.text`` — presumably an alias; confirm against the class API.
    return self.text(self._credentials['PERSONAL_PHONE_NUMBER'], body)
Quickly send an SMS to yourself. Calls :py:meth:`SMS`. * *stored credential name: PERSONAL_PHONE_NUMBER* :param string body: The content of the SMS message.
def _add_timedeltalike_scalar(self, other):
    """
    Add a delta of a timedeltalike

    return the i8 result view
    """
    if isna(other):
        # i.e np.timedelta64("NaT"), not recognized by delta_to_nanoseconds
        # Adding NaT propagates NaT to every element.
        new_values = np.empty(len(self), dtype='i8')
        new_values[:] = iNaT
        return new_values

    # Convert the scalar delta to integer nanoseconds and add it to the
    # underlying i8 values; masked (NaT) slots are excluded from the
    # overflow-checked addition and re-masked afterwards.
    inc = delta_to_nanoseconds(other)
    new_values = checked_add_with_arr(self.asi8, inc,
                                      arr_mask=self._isnan).view('i8')
    new_values = self._maybe_mask_results(new_values)
    return new_values.view('i8')
Add a delta of a timedeltalike return the i8 result view
def set_U(self, U):
    """Set background zonal flow"""
    # Insert a leading axis so the background flow broadcasts over the
    # model's layer dimension.
    self.Ubg = np.expand_dims(np.asarray(U), axis=0)
Set background zonal flow
def moving_average(data, xcol, ycol, width):
    """Compute the moving average of YCOL'th column of each sample point
    in DATA.

    For each element I in DATA, extract up to WIDTH*2+1 elements (I itself
    plus up to WIDTH elements on each side), average their YCOL'th column,
    and emit a two-element sample of (XCOL'th value, mean).

    Example: moving_average([[10,20], [20,30], [30,50], [40,70], [50,5]],
    0, 1, 1) yields
    [(10, 25.0), (20, 33.33...), (30, 50.0), (40, 41.66...), (50, 37.5)].

    Raises IndexError (with the offending inputs in the message) when a
    column index is out of range.
    """
    out = []
    try:
        for idx, sample in enumerate(data):
            # Clamp the window to the start of the list; slicing clamps
            # the end automatically.
            lo = max(0, idx - width)
            window = [row[ycol] for row in data[lo:idx + width + 1]]
            out.append((sample[xcol], float(sum(window)) / len(window)))
    except IndexError:
        raise IndexError("bad data: %s,xcol=%d,ycol=%d,width=%d" %
                         (data, xcol, ycol, width))
    return out
Compute the moving average of YCOL'th column of each sample point in DATA. In particular, for each element I in DATA, this function extracts up to WIDTH*2+1 elements, consisting of I itself, WIDTH elements before I, and WIDTH elements after I. It then computes the mean of the YCOL'th column of these elements, and it composes a two-element sample consisting of XCOL'th element and the mean. >>> data = [[10,20], [20,30], [30,50], [40,70], [50,5]] ... chart_data.moving_average(data, 0, 1, 1) [(10, 25.0), (20, 33.333333333333336), (30, 50.0), (40, 41.666666666666664), (50, 37.5)] The above value actually represents: [(10, (20+30)/2), (20, (20+30+50)/3), (30, (30+50+70)/3), (40, (50+70+5)/3), (50, (70+5)/2)]
def scale(self, by):
    """Scale the points in the Pattern.

    Parameters
    ----------
    by : float or np.ndarray, shape=(3,)
        The factor to scale by. If a scalar, scale all directions
        isotropically. If np.ndarray, scale each direction independently.
    """
    # In-place multiply keeps the same underlying array object; wrapping
    # `by` in a length-1 leading axis lets scalars and per-axis factors
    # broadcast uniformly.
    factor = np.asarray([by])
    self.points *= factor
    self._adjust_ports()
Scale the points in the Pattern. Parameters ---------- by : float or np.ndarray, shape=(3,) The factor to scale by. If a scalar, scale all directions isotropically. If np.ndarray, scale each direction independently.
def is_token_valid(self, token):
    """
    Checks validity of a given token

    :param token: Access or refresh token
    :return: True if the token is known, unexpired, not black listed and
        matches the token kind expected for the session's OAuth state.
    """
    try:
        _tinfo = self.handler.info(token)
    except KeyError:
        # Unknown token.
        return False

    if is_expired(int(_tinfo['exp'])) or _tinfo['black_listed']:
        return False

    # Dependent on what state the session is in.
    session_info = self[_tinfo['sid']]
    if session_info["oauth_state"] == "authz":
        # While authorizing, only authorization-code tokens are valid.
        if _tinfo['handler'] != self.handler['code']:
            return False
    elif session_info["oauth_state"] == "token":
        # Once a token was issued, only access tokens are valid.
        if _tinfo['handler'] != self.handler['access_token']:
            return False

    return True
Checks validity of a given token :param token: Access or refresh token
def _reset_server_state(self) -> None: """ Clear stored information about the server. """ self.last_helo_response = None self._last_ehlo_response = None self.esmtp_extensions = {} self.supports_esmtp = False self.server_auth_methods = []
Clear stored information about the server.
def _check_timers(self):
    """
    Check for expired timers. If there are any timers that
    expired, place them in the event queue.

    Side effects: may pop the head of ``_timer_queue``, enqueue an event,
    and update ``_next_timeout`` (seconds until the next expiry, or None
    when no timers are pending).
    """
    # NOTE(review): only the head of the queue is inspected, which assumes
    # _timer_queue is kept sorted by 'timeout_abs' — confirm at insert site.
    if self._timer_queue:
        timer = self._timer_queue[0]
        if timer['timeout_abs'] < _current_time_millis():
            # the timer is expired, remove first element in queue
            self._timer_queue.pop(0)
            # put into the event queue
            self._logger.debug('Timer {} expired for stm {}, adding it to event queue.'.format(timer['id'], timer['stm'].id))
            self._add_event(timer['id'], [], {}, timer['stm'], front=True)
            # not necessary to set next timeout,
            # complete check timers will be called again
        else:
            # Head not yet expired: compute the wait until it fires
            # (milliseconds -> seconds), clamped at zero.
            self._next_timeout = (
                timer['timeout_abs'] - _current_time_millis()) / 1000
            if self._next_timeout < 0:
                self._next_timeout = 0
    else:
        self._next_timeout = None
Check for expired timers. If there are any timers that expired, place them in the event queue.
def group_by(fn: Callable[[T], TR]):
    """Return a function that buckets an iterable into a dict keyed by ``fn``.

    >>> from Redy.Collections import Flow, Traversal
    >>> x = [1, '1', 1.0]
    >>> Flow(x)[Traversal.group_by(type)]
    """

    def inner(seq: ActualIterable[T]) -> Dict[TR, List[T]]:
        buckets = defaultdict(list)
        for item in seq:
            buckets[fn(item)].append(item)
        return buckets

    return inner
>>> from Redy.Collections import Flow, Traversal >>> x = [1, '1', 1.0] >>> Flow(x)[Traversal.group_by(type)]
def prepare_files(self, finder):
    """
    Prepare process. Create temp directories, download and/or unpack files.
    """
    # Make sure the wheelhouse exists before any download starts.
    if self.wheel_download_dir:
        ensure_dir(self.wheel_download_dir)

    prepare = functools.partial(self._prepare_file, finder)
    self._walk_req_to_install(prepare)
Prepare process. Create temp directories, download and/or unpack files.
def cli_auth(context):
    """
    Authenticates and then outputs the resulting information.

    See :py:mod:`swiftly.cli.auth` for context usage information.

    See :py:class:`CLIAuth` for more information.
    """
    # (label, client attribute, optional formatter) — order defines output.
    fields = [
        ('Auth Cache', 'auth_cache_path', None),
        ('Auth URL', 'auth_url', None),
        ('Auth User', 'auth_user', None),
        ('Auth Key', 'auth_key', None),
        ('Auth Tenant', 'auth_tenant', None),
        ('Auth Methods', 'auth_methods', None),
        ('Direct Storage Path', 'storage_path', None),
        ('Direct CDN Path', 'cdn_path', None),
        ('Local Path', 'local_path', None),
        ('Regions', 'regions', lambda v: ' '.join(v)),
        ('Default Region', 'default_region', None),
        ('Selected Region', 'region', None),
        ('SNet', 'snet', None),
        ('Storage URL', 'storage_url', None),
        ('CDN URL', 'cdn_url', None),
        ('Auth Token', 'auth_token', None),
    ]
    with context.io_manager.with_stdout() as fp:
        with context.client_manager.with_client() as client:
            client.auth()
            info = []
            for label, attr, formatter in fields:
                value = getattr(client, attr, None)
                if value:
                    info.append((label, formatter(value) if formatter else value))
            if not info:
                info.append((
                    'No auth information available',
                    'Maybe no credentials were provided?'))
            # Left-align labels to one column past the widest label.
            line_fmt = '%%-%ds %%s\n' % (max(len(t) for t, v in info) + 1)
            for t, v in info:
                fp.write(line_fmt % (t + ':', v))
            fp.flush()
Authenticates and then outputs the resulting information. See :py:mod:`swiftly.cli.auth` for context usage information. See :py:class:`CLIAuth` for more information.
def extract_lzma(archive, compression, cmd, verbosity, interactive, outdir):
    """Extract an LZMA archive with the lzma Python module."""
    # `interactive` is accepted for interface parity with the other
    # extractor backends but is not used here; 'alone' selects the raw
    # (.lzma "LZMA_Alone") container format.
    return _extract(archive, compression, cmd, 'alone', verbosity, outdir)
Extract an LZMA archive with the lzma Python module.
def modifie_options(self, field_option, value):
    """Set options in modifications. All options will be stored
    since it should be grouped in the DB."""
    # Copy the full current options mapping so the stored modification
    # carries every option, not just the changed one.
    merged = dict(self["options"] or {})
    merged[field_option] = value
    self.modifications["options"] = merged
Set options in modifications. All options will be stored since it should be grouped in the DB.
def add_tunnel_interface(self, interface_id, address, network_value,
                         zone_ref=None, comment=None):
    """
    Creates a tunnel interface for a virtual engine.

    :param str,int interface_id: the tunnel id for the interface, used
        as nicid also
    :param str address: ip address of interface
    :param str network_value: network cidr for interface; format: 1.1.1.0/24
    :param str zone_ref: zone reference for interface can be name, href
        or Zone
    :raises EngineCommandFailed: failure during creation
    :return: None
    """
    node = {'address': address, 'network_value': network_value}
    spec = {
        'interface_id': interface_id,
        'interfaces': [{'nodes': [node]}],
        'zone_ref': zone_ref,
        'comment': comment,
    }
    self._engine.add_interface(TunnelInterface(**spec))
Creates a tunnel interface for a virtual engine. :param str,int interface_id: the tunnel id for the interface, used as nicid also :param str address: ip address of interface :param str network_value: network cidr for interface; format: 1.1.1.0/24 :param str zone_ref: zone reference for interface can be name, href or Zone :raises EngineCommandFailed: failure during creation :return: None
def get_sstable_data_files(self, ks, table):
    """
    Read sstable data files by using sstableutil, so we ignore temporary files

    :param ks: keyspace name
    :param table: table name
    :return: sorted list of '-Data.db' file paths for the table
    """
    p = self.get_sstable_data_files_process(ks=ks, table=table)
    out, _, _ = handle_external_tool_process(
        p, ["sstableutil", '--type', 'final', ks, table])
    # sstableutil also lists index/summary/etc. components; keep only
    # the data files.  (Comprehension replaces filter+lambda.)
    return sorted(line for line in out.splitlines()
                  if line.endswith('-Data.db'))
Read sstable data files by using sstableutil, so we ignore temporary files
def _run_hooks(config, hooks, args, environ):
    """Actually run the hooks.

    Returns a non-zero retval if any hook failed.  May stop early on
    ``fail_fast`` and print the resulting diff on failure.
    """
    skips = _get_skips(environ)
    # Column width so every hook's pass/fail status lines up.
    cols = _compute_cols(hooks, args.verbose)
    filenames = _all_filenames(args)
    # Only the top-level exclude pattern applies here (include is '').
    filenames = filter_by_include_exclude(filenames, '', config['exclude'])
    classifier = Classifier(filenames)
    retval = 0
    for hook in hooks:
        # OR the statuses together: any failing hook makes retval non-zero.
        retval |= _run_single_hook(classifier, hook, args, skips, cols)
        if retval and config['fail_fast']:
            break
    if retval and args.show_diff_on_failure and git.has_diff():
        if args.all_files:
            output.write_line(
                'pre-commit hook(s) made changes.\n'
                'If you are seeing this message in CI, '
                'reproduce locally with: `pre-commit run --all-files`.\n'
                'To run `pre-commit` as part of git workflow, use '
                '`pre-commit install`.',
            )
        output.write_line('All changes made by hooks:')
        # --no-ext-diff: bypass any configured external diff driver.
        subprocess.call(('git', '--no-pager', 'diff', '--no-ext-diff'))
    return retval
Actually run the hooks.
def prevnext(resource):
    """ Parse a resource to get the prev and next urn

    :param resource: XML Resource
    :type resource: etree._Element
    :return: Tuple representing previous and next urn
    :rtype: (str, str)
    """
    # (False, False) means no ti:prevnext element was present at all;
    # None (set below) means the element exists but has no prev/next URN.
    _prev, _next = False, False
    resource = xmlparser(resource)
    prevnext = resource.xpath("//ti:prevnext", namespaces=XPATH_NAMESPACES)

    if len(prevnext) > 0:
        _next, _prev = None, None
        prevnext = prevnext[0]
        _next_xpath = prevnext.xpath("ti:next/ti:urn/text()",
                                     namespaces=XPATH_NAMESPACES,
                                     smart_strings=False)
        _prev_xpath = prevnext.xpath("ti:prev/ti:urn/text()",
                                     namespaces=XPATH_NAMESPACES,
                                     smart_strings=False)

        if len(_next_xpath):
            # Keep only the passage reference after the last colon.
            _next = _next_xpath[0].split(":")[-1]

        if len(_prev_xpath):
            _prev = _prev_xpath[0].split(":")[-1]

    return _prev, _next
Parse a resource to get the prev and next urn :param resource: XML Resource :type resource: etree._Element :return: Tuple representing previous and next urn :rtype: (str, str)
def display_latex(*objs, **kwargs):
    """Display the LaTeX representation of an object.

    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or if raw=True raw latex data to
        display.
    raw : bool
        Are the data objects raw data or Python objects that need to be
        formatted before display? [default: False]
    """
    if kwargs.pop('raw', False):
        for obj in objs:
            publish_latex(obj)
    else:
        display(*objs, include=['text/plain', 'text/latex'])
Display the LaTeX representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw latex data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False]
def nearby(word):
    '''
    Nearby word

    Returns ``(words, scores)`` for the neighbours of *word*; both lists
    are empty for out-of-vocabulary words.  Results are memoized in
    ``_cache_nearby``.
    '''
    w = any2unicode(word)
    # read from cache
    if w in _cache_nearby:
        return _cache_nearby[w]
    words, scores = [], []
    try:
        for neighbour, score in _vectors.neighbours(w):
            words.append(neighbour)
            scores.append(score)
    except KeyError:
        # Fix: was a bare `except:` which also swallowed SystemExit,
        # KeyboardInterrupt and genuine bugs; the original comment said
        # only KeyError (OOV) was meant to be ignored.
        pass
    # put into cache
    _cache_nearby[w] = (words, scores)
    return words, scores
Nearby word
def get_void_volume_surfarea(structure, rad_dict=None, chan_rad=0.3,
                             probe_rad=0.1):
    """
    Computes the volume and surface area of isolated void using Zeo++.
    Useful to compute the volume and surface area of vacant site.

    Args:
        structure: pymatgen Structure containing vacancy
        rad_dict(optional): Dictionary with short name of elements and their
            radii.
        chan_rad(optional): Minimum channel Radius.
        probe_rad(optional): Probe radius for Monte Carlo sampling.

    Returns:
        (volume, surface_area): floats for the void; -1.0 signals that
        Zeo++ found zero pockets or more than one pocket.

    Raises:
        ValueError: if the Zeo++ output contains no pocket line at all.
    """

    def _parse_pockets(output):
        # Extract the pocket measurement from a Zeo++ output stream.
        # Returns -1.0 for zero or multiple pockets, None for a stream
        # with no Number_of_pockets line (malformed output).
        result = None
        for line in output.split("\n"):
            if "Number_of_pockets" in line:
                fields = line.split()
                count = float(fields[1])
                if count > 1 or count == 0:
                    return -1.0
                result = float(fields[3])
        return result

    with ScratchDir('.'):
        name = "temp_zeo"
        zeo_inp_filename = name + ".cssr"
        ZeoCssr(structure).write_file(zeo_inp_filename)
        rad_file = None
        if rad_dict:
            rad_file = name + ".rad"
            with open(rad_file, 'w') as fp:
                for el in rad_dict.keys():
                    # Fix: entries were written with no separator, fusing
                    # all element/radius pairs into one run of text.
                    fp.write("{0} {1}\n".format(el, rad_dict[el]))

        atmnet = AtomNetwork.read_from_CSSR(zeo_inp_filename, True, rad_file)
        # Fix: chan_rad was accepted but ignored (0.3 was hard-coded).
        vol_str = volume(atmnet, chan_rad, probe_rad, 10000)
        sa_str = surface_area(atmnet, chan_rad, probe_rad, 10000)
        vol = _parse_pockets(vol_str)
        sa = _parse_pockets(sa_str)
        # Fix: `if not vol` also rejected a legitimate 0.0 measurement;
        # only a missing pocket line is an error now.
        if vol is None or sa is None:
            raise ValueError("Error in zeo++ output stream")
        return vol, sa
Computes the volume and surface area of an isolated void using Zeo++. Useful to compute the volume and surface area of a vacant site. Args: structure: pymatgen Structure containing vacancy rad_dict(optional): Dictionary with short name of elements and their radii. chan_rad(optional): Minimum channel radius. probe_rad(optional): Probe radius for Monte Carlo sampling. Returns: (volume, surface_area): tuple of floating point numbers representing the volume and surface area of the void
def create_object(container, portal_type, **data):
    """Creates an object slug

    :param container: folderish content object the new object is created in
    :param portal_type: portal type name of the content to create
    :param data: field values applied to the object after creation
    :returns: The new created content object
    :rtype: object
    """
    if "id" in data:
        # always omit the id as senaite LIMS generates a proper one
        id = data.pop("id")
        logger.warn("Passed in ID '{}' omitted! Senaite LIMS "
                    "generates a proper ID for you" .format(id))

    try:
        # Special case for ARs
        # => return immediately w/o update
        if portal_type == "AnalysisRequest":
            obj = create_analysisrequest(container, **data)
            # Omit values which are already set through the helper
            data = u.omit(data, "SampleType", "Analyses")
            # Set the container as the client, as the AR lives in it
            data["Client"] = container
        # Standard content creation
        else:
            # we want just a minimum viable object and set the data later
            obj = api.create(container, portal_type)
            # obj = api.create(container, portal_type, **data)
    except Unauthorized:
        fail(401, "You are not allowed to create this content")

    # Update the object with the given data, but omit the id
    try:
        update_object_with_data(obj, data)
    except APIError:
        # Failure in creation process, delete the invalid object
        container.manage_delObjects(obj.id)
        # reraise the error
        raise

    return obj
Creates an object slug :returns: The new created content object :rtype: object
def _validate_item(self, item):
    """Validate char/word count

    Raises ValidationError when *item* is not a string or when its
    word/character count falls outside the field definition's
    itemMinLength/itemMaxLength bounds.
    """
    if not isinstance(item, six.string_types):
        raise ValidationError(
            self._record,
            "Text list field items must be strings, not '{}'".format(
                item.__class__)
        )

    definition = self._field.field_definition
    length_type = definition.get('itemLengthType')
    max_length = definition.get('itemMaxLength')
    min_length = definition.get('itemMinLength')
    if length_type is None:
        return

    # The four original branches only differed in what is counted and
    # the unit word in the message, so measure once and reuse.
    if length_type == 'words':
        measured = len(item.split(' '))
        unit = 'words'
    else:
        measured = len(item)
        unit = 'characters'

    if max_length is not None and measured > max_length:
        raise ValidationError(
            self._record,
            "Field '{}' items cannot contain more than {} {}".format(
                self._field.name, max_length, unit
            )
        )
    if min_length is not None and measured < min_length:
        raise ValidationError(
            self._record,
            "Field '{}' items must contain at least {} {}".format(
                self._field.name, min_length, unit
            )
        )
Validate char/word count
def _count_inversions(a, b): '''Count the number of inversions in two numpy arrays: # points i, j where a[i] >= b[j] Parameters ---------- a, b : np.ndarray, shape=(n,) (m,) The arrays to be compared. This implementation is optimized for arrays with many repeated values. Returns ------- inversions : int The number of detected inversions ''' a, a_counts = np.unique(a, return_counts=True) b, b_counts = np.unique(b, return_counts=True) inversions = 0 i = 0 j = 0 while i < len(a) and j < len(b): if a[i] < b[j]: i += 1 elif a[i] >= b[j]: inversions += np.sum(a_counts[i:]) * b_counts[j] j += 1 return inversions
Count the number of inversions in two numpy arrays: # points i, j where a[i] >= b[j] Parameters ---------- a, b : np.ndarray, shape=(n,) (m,) The arrays to be compared. This implementation is optimized for arrays with many repeated values. Returns ------- inversions : int The number of detected inversions
def reset(self, path, pretend=False):
    """
    Rolls all of the currently applied migrations back.

    :param path: The path
    :type path: str

    :param pretend: Whether we execute the migrations as dry-run
    :type pretend: bool

    :rtype: count
    """
    self._notes = []

    # Roll back newest-first.
    ran = sorted(self._repository.get_ran(), reverse=True)
    if not ran:
        self._note("<info>Nothing to rollback.</info>")
        return 0

    for migration in ran:
        self._run_down(path, {"migration": migration}, pretend)
    return len(ran)
Rolls all of the currently applied migrations back. :param path: The path :type path: str :param pretend: Whether we execute the migrations as dry-run :type pretend: bool :rtype: count
def forwards(self, orm):
    "Write your forwards methods here."
    # Note: Don't use "from appname.models import ModelName".
    # Use orm.ModelName to refer to models in this application,
    # and orm['appname.ModelName'] for models in other applications.
    #
    # For each model: rows at the newest `stan_na` become active, all
    # others inactive.  The four per-model blocks were identical apart
    # from the model name, so they are collapsed into one loop.
    for model_name in ("JednostkaAdministracyjna", "Miejscowosc",
                       "RodzajMiejsowosci", "Ulica"):
        print("Updating: {}".format(model_name))
        model = getattr(orm, model_name)
        latest = model.objects.all().aggregate(
            Max('stan_na'))['stan_na__max']
        model.objects.filter(stan_na__exact=latest).update(aktywny=True)
        model.objects.exclude(stan_na__exact=latest).update(aktywny=False)
Write your forwards methods here.
def _format_lines(self, tokensource):
    """
    Just format the tokens, without any wrapping tags.
    Yield individual lines.

    Yields ``(1, html_line)`` tuples.  Token values may contain embedded
    newlines, so one token can contribute to several output lines and a
    line can accumulate pieces from several tokens.
    """
    nocls = self.noclasses
    lsep = self.lineseparator
    # for <span style=""> lookup only
    getcls = self.ttype2class.get
    c2s = self.class2style
    escape_table = _escape_html_table
    tagsfile = self.tagsfile

    lspan = ''  # <span> tag currently open for the accumulating line
    line = []   # HTML fragments of the line being accumulated
    for ttype, value in tokensource:
        if nocls:
            # Inline styles: walk up the token-type hierarchy until a
            # style is found.
            cclass = getcls(ttype)
            while cclass is None:
                ttype = ttype.parent
                cclass = getcls(ttype)
            cspan = cclass and '<span style="%s">' % c2s[cclass][0] or ''
        else:
            cls = self._get_css_classes(ttype)
            cspan = cls and '<span class="%s">' % cls or ''

        # HTML-escape and split on embedded newlines.
        parts = value.translate(escape_table).split('\n')

        if tagsfile and ttype in Token.Name:
            # Hyperlink name tokens to their ctags definition location.
            filename, linenumber = self._lookup_ctag(value)
            if linenumber:
                base, filename = os.path.split(filename)
                if base:
                    base += '/'
                filename, extension = os.path.splitext(filename)
                url = self.tagurlformat % {'path': base, 'fname': filename,
                                           'fext': extension}
                parts[0] = "<a href=\"%s#%s-%d\">%s" % \
                    (url, self.lineanchors, linenumber, parts[0])
                parts[-1] = parts[-1] + "</a>"

        # for all but the last line
        for part in parts[:-1]:
            if line:
                if lspan != cspan:
                    line.extend(((lspan and '</span>'), cspan, part,
                                 (cspan and '</span>'), lsep))
                else:  # both are the same
                    line.extend((part, (lspan and '</span>'), lsep))
                yield 1, ''.join(line)
                line = []
            elif part:
                yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep))
            else:
                # Empty part: the token began with a newline.
                yield 1, lsep

        # for the last line
        if line and parts[-1]:
            if lspan != cspan:
                line.extend(((lspan and '</span>'), cspan, parts[-1]))
                lspan = cspan
            else:
                line.append(parts[-1])
        elif parts[-1]:
            line = [cspan, parts[-1]]
            lspan = cspan
        # else we neither have to open a new span nor set lspan

    # Flush any final partial line.
    if line:
        line.extend(((lspan and '</span>'), lsep))
        yield 1, ''.join(line)
Just format the tokens, without any wrapping tags. Yield individual lines.
def iter_notifications(self, all=False, participating=False, since=None,
                       number=-1, etag=None):
    """Iterates over the notifications for this repository.

    :param bool all: (optional), show all notifications, including ones
        marked as read
    :param bool participating: (optional), show only the notifications
        the user is participating in directly
    :param since: (optional), filters out any notifications updated
        before the given time. This can be a `datetime` or an `ISO8601`
        formatted date string, e.g., 2012-05-20T23:10:27Z
    :type since: datetime or string
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`Thread <github3.notifications.Thread>`
    """
    url = self._build_url('notifications', base_url=self._api)
    params = {
        'all': all,
        'participating': participating,
        'since': timestamp_parameter(since)
    }
    # Drop falsy values instead of sending empty query parameters.
    params = {k: v for k, v in params.items() if v}
    return self._iter(int(number), url, Thread, params, etag)
Iterates over the notifications for this repository. :param bool all: (optional), show all notifications, including ones marked as read :param bool participating: (optional), show only the notifications the user is participating in directly :param since: (optional), filters out any notifications updated before the given time. This can be a `datetime` or an `ISO8601` formatted date string, e.g., 2012-05-20T23:10:27Z :type since: datetime or string :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`Thread <github3.notifications.Thread>`
def print_exception(etype, value, tb, limit=None, file=None):
    """Print exception up to 'limit' stack trace entries from 'tb' to 'file'.

    This differs from print_tb() in the following ways: (1) if
    traceback is not None, it prints a header "Traceback (most recent
    call last):"; (2) it prints the exception type and value after the
    stack trace; (3) if type is SyntaxError and value has the
    appropriate format, it prints the line where the syntax error
    occurred with a caret on the next line indicating the approximate
    position of the error.
    """
    if file is None:
        # TODO: Use sys.stderr when that's implemented.
        # NOTE(review): /dev/stderr is POSIX-only and the file object is
        # never closed — presumably this targets a restricted runtime
        # where sys.stderr is unavailable; confirm.
        file = open('/dev/stderr', 'w')
        #file = sys.stderr
    if tb:
        _print(file, 'Traceback (most recent call last):')
        print_tb(tb, limit, file)
    # Emit the final "ExceptionType: message" line(s).
    lines = format_exception_only(etype, value)
    for line in lines:
        _print(file, line, '')
Print exception up to 'limit' stack trace entries from 'tb' to 'file'. This differs from print_tb() in the following ways: (1) if traceback is not None, it prints a header "Traceback (most recent call last):"; (2) it prints the exception type and value after the stack trace; (3) if type is SyntaxError and value has the appropriate format, it prints the line where the syntax error occurred with a caret on the next line indicating the approximate position of the error.
def set_custom(sld, tld, nameservers):
    '''
    Sets domain to use custom DNS servers.

    returns True if the custom nameservers were set successfully

    sld
        SLD of the domain name

    tld
        TLD of the domain name

    nameservers
        array of strings  List of nameservers to be associated with this
        domain

    CLI Example:

    .. code-block:: bash

        salt 'my-minion' namecheap_domains_dns.set_custom sld tld nameserver
    '''
    opts = salt.utils.namecheap.get_opts('namecheap.domains.dns.setCustom')
    opts.update({
        'SLD': sld,
        'TLD': tld,
        'Nameservers': ','.join(nameservers),
    })

    response_xml = salt.utils.namecheap.post_request(opts)
    if response_xml is None:
        return False

    result = response_xml.getElementsByTagName('DomainDNSSetCustomResult')[0]
    return salt.utils.namecheap.string_to_value(result.getAttribute('Update'))
Sets domain to use custom DNS servers. returns True if the custom nameservers were set successfully sld SLD of the domain name tld TLD of the domain name nameservers array of strings List of nameservers to be associated with this domain CLI Example: .. code-block:: bash salt 'my-minion' namecheap_domains_dns.set_custom sld tld nameserver
def get_scm_status(config, read_modules=False, repo_url=None,
                   mvn_repo_local=None, additional_params=None):
    """
    Gets the artifact status (MavenArtifact instance) from SCM defined by
    config. Only the top-level artifact is read by default, although it can
    be requested to read the whole available module structure.

    :param config: artifact config (ArtifactConfig instance)
    :param read_modules: if True all modules are read, otherwise only
        top-level artifact
    :param repo_url: the URL of the repository to use
    :param mvn_repo_local: local repository path
    :param additional_params: additional params to add on command-line when
        running maven
    """
    global scm_status_cache

    # Two cache keys: the bare artifact key holds a full (with-modules)
    # read; the "|False" key holds a top-level-only read.
    full_key = config.artifact
    partial_key = "%s|False" % config.artifact

    if full_key in scm_status_cache:
        return scm_status_cache[full_key]
    if not read_modules and partial_key in scm_status_cache:
        return scm_status_cache[partial_key]

    result = _get_scm_status(config, read_modules, repo_url, mvn_repo_local,
                             additional_params)
    if read_modules:
        # A full read supersedes any cached top-level-only read.
        scm_status_cache[full_key] = result
        scm_status_cache.pop(partial_key, None)
    else:
        scm_status_cache[partial_key] = result
    return result
Gets the artifact status (MavenArtifact instance) from SCM defined by config. Only the top-level artifact is read by default, although it can be requested to read the whole available module structure. :param config: artifact config (ArtifactConfig instance) :param read_modules: if True all modules are read, otherwise only top-level artifact :param repo_url: the URL of the repository to use :param mvn_repo_local: local repository path :param additional_params: additional params to add on command-line when running maven
def searchZone(self, zone, q=None, has_geo=False, callback=None, errback=None):
    """
    Search a zone for a given search query (e.g., for geological data, etc)

    :param zone: NOT a string like loadZone - an already loaded
        ns1.zones.Zone, like one returned from loadZone
    :param q: search query
    :param has_geo: limit search to records with geo data
    :return: result of the delegated zone.search call
    """
    # NOTE(review): this import's result is unused here; it documents the
    # expected zone type and is kept in case its import-time side effects
    # matter — confirm before removing.
    import ns1.zones
    return zone.search(q, has_geo, callback=callback, errback=errback)
Search a zone for a given search query (e.g., for geological data, etc) :param zone: NOT a string like loadZone - an already loaded ns1.zones.Zone, like one returned from loadZone :return:
def lbrt(self):
    """Return the bounds as a (left, bottom, right, top) tuple."""
    return (self._left, self._bottom, self._right, self._top)
Return (left,bottom,right,top) as a tuple.
def annulus(script, radius=None, radius1=None, radius2=None, diameter=None,
            diameter1=None, diameter2=None, cir_segments=32, color=None):
    """Create a 2D (surface) circle or annulus

    radius1=1 # Outer radius of the circle
    radius2=0 # Inner radius of the circle (if non-zero it creates an annulus)
    color="" # specify a color name to apply vertex colors to the newly
             # created mesh

    OpenSCAD: parameters: diameter overrides radius, radius1 & radius2
    override radius
    """
    # Resolve precedence: explicit radius1/radius2 (or diameter1/diameter2)
    # win over radius, and diameter wins over radius.
    if radius is not None and diameter is None:
        if radius1 is None and diameter1 is None:
            radius1 = radius
        if radius2 is None and diameter2 is None:
            radius2 = 0
    if diameter is not None:
        if radius1 is None and diameter1 is None:
            radius1 = diameter / 2
        if radius2 is None and diameter2 is None:
            radius2 = 0
    if diameter1 is not None:
        radius1 = diameter1 / 2
    if diameter2 is not None:
        radius2 = diameter2 / 2
    # Defaults: unit circle with no hole.
    if radius1 is None:
        radius1 = 1
    if radius2 is None:
        radius2 = 0
    # Circle is created centered on the XY plane
    filter_xml = ''.join([
        ' <filter name="Annulus">\n',
        ' <Param name="externalRadius" ',
        'value="%s" ' % radius1,
        'description="External Radius" ',
        'type="RichFloat" ',
        '/>\n',
        ' <Param name="internalRadius" ',
        'value="%s" ' % radius2,
        'description="Internal Radius" ',
        'type="RichFloat" ',
        '/>\n',
        ' <Param name="sides" ',
        'value="%d" ' % cir_segments,
        'description="Sides" ',
        'type="RichInt" ',
        '/>\n',
        ' </filter>\n'])
    util.write_filter(script, filter_xml)
    if isinstance(script, FilterScript):
        script.add_layer('Annulus', change_layer=True)
    if color is not None:
        vert_color.function(script, color=color)
    return None
Create a 2D (surface) circle or annulus radius1=1 # Outer radius of the circle radius2=0 # Inner radius of the circle (if non-zero it creates an annulus) color="" # specify a color name to apply vertex colors to the newly created mesh OpenSCAD: parameters: diameter overrides radius, radius1 & radius2 override radius
def _is_nmrstar(string): """Test if input string is in NMR-STAR format. :param string: Input string. :type string: :py:class:`str` or :py:class:`bytes` :return: Input string if in NMR-STAR format or False otherwise. :rtype: :py:class:`str` or :py:obj:`False` """ if (string[0:5] == u"data_" and u"save_" in string) or (string[0:5] == b"data_" and b"save_" in string): return string return False
Test if input string is in NMR-STAR format. :param string: Input string. :type string: :py:class:`str` or :py:class:`bytes` :return: Input string if in NMR-STAR format or False otherwise. :rtype: :py:class:`str` or :py:obj:`False`
def _get_config_fname():
    """Return the path of the vispy config file, or None without an app dir.

    Under the _VISPY_CONFIG_TESTING environment flag a throwaway
    temporary directory is used instead of the real app directory.
    """
    directory = _get_vispy_app_dir()
    if directory is None:
        return None
    if os.environ.get('_VISPY_CONFIG_TESTING', None) is not None:
        return op.join(_TempDir(), 'vispy.json')
    return op.join(directory, 'vispy.json')
Helper for the vispy config file
def current_site_url():
    """Returns fully qualified URL (no trailing slash) for the current site."""
    protocol = getattr(settings, 'MY_SITE_PROTOCOL', 'https')
    port = getattr(settings, 'MY_SITE_PORT', '')
    base = '%s://%s' % (protocol, settings.SITE_DOMAIN)
    # Append the port only when one is configured.
    return base + ':%s' % port if port else base
Returns fully qualified URL (no trailing slash) for the current site.
def rej(vec, vec_onto):
    """ Vector rejection.

    Calculated by subtracting from `vec` the projection of `vec` onto
    `vec_onto`:

    .. math::

         \\mathsf{vec} - \\mathrm{proj}\\left(\\mathsf{vec}, \\
         \\mathsf{vec\\_onto}\\right)

    Parameters
    ----------
    vec
        length-R |npfloat_| --
        Vector to reject

    vec_onto
        length-R |npfloat_| --
        Vector onto which `vec` is to be rejected

    Returns
    -------
    rej_vec
        length-R |npfloat_| --
        Rejection of `vec` onto `vec_onto`

    """
    # Fix: removed an unused local `import numpy as np`; the arithmetic
    # only needs array subtraction, which `vec` itself provides.
    rej_vec = vec - proj(vec, vec_onto)
    return rej_vec
Vector rejection. Calculated by subtracting from `vec` the projection of `vec` onto `vec_onto`: .. math:: \\mathsf{vec} - \\mathrm{proj}\\left(\\mathsf{vec}, \\ \\mathsf{vec\\_onto}\\right) Parameters ---------- vec length-R |npfloat_| -- Vector to reject vec_onto length-R |npfloat_| -- Vector onto which `vec` is to be rejected Returns ------- rej_vec length-R |npfloat_| -- Rejection of `vec` onto `vec_onto`
async def set_as_boot_disk(self):
    """Set as boot disk for this node."""
    node_id = self.node.system_id
    await self._handler.set_boot_disk(system_id=node_id, id=self.id)
Set as boot disk for this node.
def is_network_source_fw(cls, nwk, nwk_name):
    """Check if SOURCE is FIREWALL, if yes return TRUE.

    If source is None or entry not in NWK DB, check from Name.
    Name should have constant AND length should match.
    """
    if nwk is not None:
        return bool(nwk.source == fw_const.FW_CONST)
    # NOTE(review): the membership direction (`nwk_name in CONSTANT`) is
    # preserved from the original — confirm it is intended, since the
    # length check expects nwk_name to be LONGER than the constant.
    service_nwk_consts = (fw_const.DUMMY_SERVICE_NWK,
                          fw_const.IN_SERVICE_NWK,
                          fw_const.OUT_SERVICE_NWK)
    for base in service_nwk_consts:
        expected_len = len(base) + fw_const.SERVICE_NAME_EXTRA_LEN
        if nwk_name in base and len(nwk_name) == expected_len:
            return True
    return False
Check if SOURCE is FIREWALL, if yes return TRUE. If source is None or entry not in NWK DB, check from Name. Name should have constant AND length should match.