code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def mi(x, y, bins_x=None, bins_y=None, bins_xy=None, method='nearest-neighbors', units='bits'):
    ''' compute and return the mutual information between x and y

    MI(X, Y) = H(X) + H(Y) - H(X, Y), each entropy estimated by
    ``entropy`` with the requested method.

    inputs:
    -------
        x, y: numpy arrays of shape samples x dimension
        bins_x, bins_y, bins_xy: bin specifications forwarded to entropy()
        method: 'nearest-neighbors', 'gaussian', or 'bin'
        units: 'bits' or 'nats'

    output:
    -------
        mi: float

    Notes:
    ------
        if you are trying to mix several symbols together as in
        mi(x, (y0,y1,...)), try
        info[p] = _info.mi(x, info.combine_symbols(y0, y1, ...) )
    '''
    # zip objects are single-pass iterators, so materialize them before use.
    # Under python2 `zip` is a function and isinstance() raises TypeError,
    # which we deliberately ignore (the original used a bare except here,
    # which also swallowed KeyboardInterrupt/SystemExit).
    try:
        if isinstance(x, zip):
            x = list(x)
        if isinstance(y, zip):
            y = list(y)
    except TypeError:
        pass

    # Promote 1d arrays to shape (samples, 1); inputs without a .shape
    # attribute (e.g. plain lists) raise AttributeError and pass through.
    try:
        if len(x.shape) == 1:
            x = np.expand_dims(x, 1)
        if len(y.shape) == 1:
            y = np.expand_dims(y, 1)
    except AttributeError:
        pass

    HX = entropy(data=x, bins=bins_x, method=method, units=units)
    HY = entropy(data=y, bins=bins_y, method=method, units=units)
    HXY = entropy(data=np.concatenate([x, y], axis=1), bins=bins_xy,
                  method=method, units=units)

    return HX + HY - HXY
compute and return the mutual information between x and y inputs: ------- x, y: numpy arrays of shape samples x dimension method: 'nearest-neighbors', 'gaussian', or 'bin' units: 'bits' or 'nats' output: ------- mi: float Notes: ------ if you are trying to mix several symbols together as in mi(x, (y0,y1,...)), try info[p] = _info.mi(x, info.combine_symbols(y0, y1, ...) )
def get_mapping_client(self, max_concurrency=64, auto_batch=None):
    """Returns a thread unsafe mapping client.  This client works
    similar to a redis pipeline and returns eventual result objects.
    It needs to be joined on to work properly.  Instead of using this
    directly you should use the :meth:`map` context manager which
    automatically joins.

    :param max_concurrency: maximum number of concurrent requests the
        returned client may have in flight.
    :param auto_batch: whether commands are automatically batched;
        falls back to this client's ``auto_batch`` setting when ``None``.

    Returns an instance of :class:`MappingClient`.
    """
    if auto_batch is None:
        auto_batch = self.auto_batch
    return MappingClient(connection_pool=self.connection_pool,
                         max_concurrency=max_concurrency,
                         auto_batch=auto_batch)
Returns a thread unsafe mapping client. This client works similar to a redis pipeline and returns eventual result objects. It needs to be joined on to work properly. Instead of using this directly you should use the :meth:`map` context manager which automatically joins. Returns an instance of :class:`MappingClient`.
def unzip(self, payload):
    """
    Unzips a file

    :param payload:
        zip_with_rel_path: string
        remove_original_zip: boolean
    :return: (object)
        unzipped_path: string
    """
    # The zip's path becomes part of the URL; any remaining payload
    # entries (e.g. remove_original_zip) travel in the POST body.
    zip_with_rel_path = payload.pop('zip_with_rel_path')
    url = "{url_base}/resource/{pid}/functions/unzip/{path}/".format(
        url_base=self.hs.url_base,
        path=zip_with_rel_path,
        pid=self.pid)
    r = self.hs._request('POST', url, None, payload)
    return r
Unzips a file :param payload: zip_with_rel_path: string remove_original_zip: boolean :return: (object) unzipped_path: string
def get_preview_kwargs(self, **kwargs):
    """
    Gets the url keyword arguments to pass to the `preview_view`
    callable.

    If the `pass_through_kwarg` attribute is set the value of
    `pass_through_attr` will be looked up on the object. So if you are
    previewing an item Obj<id=2> and ::

        self.pass_through_kwarg = 'object_id'
        self.pass_through_attr = 'pk'

    This will return ::

        { 'object_id' : 2 }
    """
    kwarg_name = self.pass_through_kwarg
    if not kwarg_name:
        return {}
    target = self.get_object()
    return {kwarg_name: getattr(target, self.pass_through_attr)}
Gets the url keyword arguments to pass to the `preview_view` callable. If the `pass_through_kwarg` attribute is set the value of `pass_through_attr` will be looked up on the object. So if you are previewing an item Obj<id=2> and :: self.pass_through_kwarg = 'object_id' self.pass_through_attr = 'pk' This will return :: { 'object_id' : 2 }
def _build_cmdargs(argv):
    """ Build command line arguments dict to use;
    - displaying usages
    - vint.linting.env.build_environment

    This method take an argv parameter to make function pure.
    """
    # argv[0] is the program name, so only the real arguments are parsed.
    namespace = _build_arg_parser().parse_args(argv[1:])
    return vars(namespace)
Build command line arguments dict to use; - displaying usages - vint.linting.env.build_environment This method take an argv parameter to make function pure.
def fetch_host_ip_and_country(host: str) -> Tuple:
    """ Fetch ip and country by host

    :param host: hostname to resolve; an empty/falsy host short-circuits.
    :return: (ip, country) tuple, ('', '') when host is empty.
    """
    # Guard BEFORE resolving: the original checked `host` only after
    # fetch_host_ip(host) had already run, doing a pointless (and
    # possibly failing) lookup for empty input.
    if not host:
        return '', ''
    ip = fetch_host_ip(host)
    country = fetch_country_by_ip(ip)
    return ip, country
Fetch ip and country by host
def install(ctx, services, delete_after_install=False):
    """Install a honeypot service from the online library, local path or zipfile.

    Installs each requested service; already-installed services are
    reported and, if any were skipped, the command exits with EEXIST.
    """
    logger.debug("running command %s (%s)", ctx.command.name, ctx.params,
                 extra={"command": ctx.command.name, "params": ctx.params})
    home = ctx.obj["HOME"]
    services_path = os.path.join(home, SERVICES)

    installed_all_plugins = True
    for service in services:
        try:
            plugin_utils.install_plugin(service, SERVICE, services_path, register_service)
        except exceptions.PluginAlreadyInstalled as exc:
            click.echo(exc)
            installed_all_plugins = False

    if not installed_all_plugins:
        # click's Context.exit raises on its own; the original's
        # `raise ctx.exit(...)` re-raise was unreachable and misleading.
        ctx.exit(errno.EEXIST)
Install a honeypot service from the online library, local path or zipfile.
def path_to_node(tree, path):
    """FST node located at the given path (None path yields None)."""
    if path is None:
        return None
    current = tree
    # Walk the path one key at a time, descending into child nodes.
    for step in path:
        current = child_by_key(current, step)
    return current
FST node located at the given path
def parse_individual(self, individual):
    """Converts a deap individual into a full list of parameters.

    Parameters
    ----------
    individual: deap individual from optimization
        Details vary according to type of optimization, but
        parameters within deap individual are always between -1 and 1.
        This function converts them into the values used to actually
        build the model

    Returns
    -------
    fullpars: list
        Full parameter list for model building.
    """
    means = self._params['value_means']
    ranges = self._params['value_ranges']
    # Map each [-1, 1] gene onto its real value: mean + gene * range.
    scaled = [means[idx] + individual[idx] * ranges[idx]
              for idx in range(len(means))]
    fullpars = list(self._params['arrangement'])
    # Substitute every occurrence of each variable-parameter marker
    # with its scaled value.
    for marker_idx in range(len(self._params['variable_parameters'])):
        marker = self._params['variable_parameters'][marker_idx]
        fullpars = [scaled[marker_idx] if entry == marker else entry
                    for entry in fullpars]
    return fullpars
Converts a deap individual into a full list of parameters. Parameters ---------- individual: deap individual from optimization Details vary according to type of optimization, but parameters within deap individual are always between -1 and 1. This function converts them into the values used to actually build the model Returns ------- fullpars: list Full parameter list for model building.
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ # extracting dictionary of coefficients specific to required # intensity measure type. C = self.COEFFS_SSLAB[imt] # cap magnitude values at 8.0, see page 1709 mag = rup.mag if mag >= 8.0: mag = 8.0 # compute PGA on rock (needed for site amplification calculation) G = 10 ** (0.301 - 0.01 * mag) pga_rock = self._compute_mean(self.COEFFS_SSLAB[PGA()], G, mag, rup.hypo_depth, dists.rrup, sites.vs30, # by passing pga_rock > 500 the soil # amplification is 0 np.zeros_like(sites.vs30) + 600, PGA()) pga_rock = 10 ** (pga_rock) # compute actual mean and convert from log10 to ln and units from # cm/s**2 to g mean = self._compute_mean(C, G, mag, rup.hypo_depth, dists.rrup, sites.vs30, pga_rock, imt) mean = np.log((10 ** mean) * 1e-2 / g) if imt.period == 4.0: mean /= 0.550 stddevs = self._get_stddevs(C, stddev_types, sites.vs30.shape[0]) return mean, stddevs
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
def _read(self, source):
    """
    Reads and parses the config source

    :param file/str source: Config source string, file name, or file pointer.
                            If file name does not exist, it is ignored.
    :return: True if source was successfully read, otherwise False
    """
    # Accept, in order: a raw config string, an already-open file-like
    # object, or a path to an existing file.  Anything else is skipped.
    if isinstance(source, str) and is_config(source):
        source_fp = StringIO(source)
    elif isinstance(source, IOBase) or isinstance(source, StringIO):
        source_fp = source
    elif os.path.exists(source):
        source_fp = open(source)
    else:
        return False
    self._parser.read_file(source_fp)
    self._parse_extra(source_fp)
    return True
Reads and parses the config source :param file/str source: Config source string, file name, or file pointer. If file name does not exist, it is ignored. :return: True if source was successfully read, otherwise False
def optimize_batch(self, batchsize=10, returns='best', paralell=True):
    """
    Run multiple optimizations using different starting coordinates.

    Args:
        batchsize (`int`): Number of optimizations to run.
        returns (`str`): If ``'all'``, return results of all
            optimizations, ordered by stress, ascending. If ``'best'``
            return the projection with the lowest stress.
        paralell (`bool`): If ``True``, run optimizations in parallel.
            NOTE(review): the parameter is (mis)spelled ``paralell`` in
            the signature; renaming it would break keyword callers, so
            it is documented as-is.

    Examples:
        .. doctest::

            >>> import pandas as pd
            >>> from pymds import DistanceMatrix
            >>> dist = pd.DataFrame({
            ...     'a': [0.0, 1.0, 2.0],
            ...     'b': [1.0, 0.0, 3 ** 0.5],
            ...     'c': [2.0, 3 ** 0.5, 0.0]} , index=['a', 'b', 'c'])
            >>> dm = DistanceMatrix(dist)
            >>> batch = dm.optimize_batch(batchsize=3, returns='all')
            >>> len(batch)
            3
            >>> type(batch[0])
            <class 'pymds.mds.Projection'>

    Returns:
        `list` or :py:class:`pymds.Projection`:

        `list`: Length batchsize, containing instances of
        :py:class:`pymds.Projection`. Sorted by stress, ascending.

        or

        :py:class:`pymds.Projection`: Projection with the lowest stress.
    """
    if returns not in ('best', 'all'):
        raise ValueError('returns must be either "best" or "all"')
    # Random starting coordinates in [0, 10) for each of the m samples
    # (two dimensions per sample).
    starts = [np.random.rand(self.m * 2) * 10 for i in range(batchsize)]
    if paralell:
        with Pool() as p:
            results = p.map(self.optimize, starts)
    else:
        results = map(self.optimize, starts)
    results = sorted(results, key=lambda x: x.stress)
    return results if returns == 'all' else results[0]
Run multiple optimizations using different starting coordinates. Args: batchsize (`int`): Number of optimizations to run. returns (`str`): If ``'all'``, return results of all optimizations, ordered by stress, ascending. If ``'best'`` return the projection with the lowest stress. parallel (`bool`): If ``True``, run optimizations in parallel. Examples: .. doctest:: >>> import pandas as pd >>> from pymds import DistanceMatrix >>> dist = pd.DataFrame({ ... 'a': [0.0, 1.0, 2.0], ... 'b': [1.0, 0.0, 3 ** 0.5], ... 'c': [2.0, 3 ** 0.5, 0.0]} , index=['a', 'b', 'c']) >>> dm = DistanceMatrix(dist) >>> batch = dm.optimize_batch(batchsize=3, returns='all') >>> len(batch) 3 >>> type(batch[0]) <class 'pymds.mds.Projection'> Returns: `list` or :py:class:`pymds.Projection`: `list`: Length batchsize, containing instances of :py:class:`pymds.Projection`. Sorted by stress, ascending. or :py:class:`pymds.Projection`: Projection with the lowest stress.
def _try_dump_cnt(self): '''Dump counters every 60 seconds''' now = time.time() if now - self._last_dump_cnt > 60: self._last_dump_cnt = now self._dump_cnt() self._print_counter_log()
Dump counters every 60 seconds
def l1_regularizer(weight=1.0, scope=None):
    """Define a L1 regularizer.

    Args:
        weight: scale the loss by this factor.
        scope: Optional scope for name_scope.

    Returns:
        a regularizer function.
    """
    def regularizer(tensor):
        # Group the ops under an (optional) name scope; the weight is
        # converted to the tensor's base dtype to avoid mixed-dtype
        # multiplication.
        with tf.name_scope(scope, 'L1Regularizer', [tensor]):
            l1_weight = tf.convert_to_tensor(weight,
                                             dtype=tensor.dtype.base_dtype,
                                             name='weight')
            return tf.multiply(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value')
    return regularizer
Define a L1 regularizer. Args: weight: scale the loss by this factor. scope: Optional scope for name_scope. Returns: a regularizer function.
def expand_defaults(schema, features):
    """Add to features any default transformations.

    Not every column in the schema has an explicit feature transformation
    listed in the features file. For these columns, add a default
    transformation based on the schema's type. The features dict is
    modified by this function call. After this function call, every column
    in schema is used in a feature, and every feature uses a column in
    the schema.

    Args:
        schema: schema list
        features: features dict

    Raises:
        ValueError: if transform cannot be applied given schema type.
    """
    schema_names = [x['name'] for x in schema]

    # Add missing source columns
    for name, transform in six.iteritems(features):
        if 'source_column' not in transform:
            transform['source_column'] = name

    # Check source columns are in the schema and collect which are used.
    used_schema_columns = []
    for name, transform in six.iteritems(features):
        if transform['source_column'] not in schema_names:
            raise ValueError('source column %s is not in the schema for transform %s'
                             % (transform['source_column'], name))
        used_schema_columns.append(transform['source_column'])

    # Update default transformation based on schema.
    for col_schema in schema:
        schema_name = col_schema['name']
        schema_type = col_schema['type'].lower()

        if schema_type not in constant.NUMERIC_SCHEMA + [constant.STRING_SCHEMA]:
            raise ValueError(('Only the following schema types are supported: %s'
                              % ' '.join(constant.NUMERIC_SCHEMA + [constant.STRING_SCHEMA])))

        if schema_name not in used_schema_columns:
            # add the default transform to the features
            if schema_type in constant.NUMERIC_SCHEMA:
                features[schema_name] = {
                    'transform': constant.DEFAULT_NUMERIC_TRANSFORM,
                    'source_column': schema_name}
            elif schema_type == constant.STRING_SCHEMA:
                features[schema_name] = {
                    'transform': constant.DEFAULT_CATEGORICAL_TRANSFORM,
                    'source_column': schema_name}
            else:
                raise NotImplementedError('Unknown type %s' % schema_type)
Add to features any default transformations. Not every column in the schema has an explicit feature transformation listed in the features file. For these columns, add a default transformation based on the schema's type. The features dict is modified by this function call. After this function call, every column in schema is used in a feature, and every feature uses a column in the schema. Args: schema: schema list features: features dict Raises: ValueError: if transform cannot be applied given schema type.
def get_groupby_statistic(data):
    """Calculate value counts and distinct count of a variable (technically a Series).
    The result is cached by column name in a global variable to avoid recomputing.

    Parameters
    ----------
    data : Series
        The data type of the Series.

    Returns
    -------
    list
        value count and distinct count
    """
    cache_key = data.name
    if cache_key is not None and cache_key in _VALUE_COUNTS_MEMO:
        return _VALUE_COUNTS_MEMO[cache_key]

    counts_all = data.value_counts(dropna=False)
    # Strip the NaN row (if any) while keeping counts of real values.
    counts_no_nan = counts_all.reset_index().dropna().set_index('index').iloc[:, 0]
    n_distinct = counts_all.count()

    # A "mixed" inferred index type usually means unhashable values
    # such as tuples, dicts or lists ended up in the series.
    if counts_no_nan.index.inferred_type == "mixed":
        raise TypeError('Not supported mixed type')

    result = [counts_no_nan, n_distinct]
    if cache_key is not None:
        _VALUE_COUNTS_MEMO[cache_key] = result
    return result
Calculate value counts and distinct count of a variable (technically a Series). The result is cached by column name in a global variable to avoid recomputing. Parameters ---------- data : Series The data type of the Series. Returns ------- list value count and distinct count
def filter(func):
    """Filters out unwanted items using the specified function.

    Supports both dicts and sequences, key/value pairs are expanded when
    applied to a dict.

    NOTE(review): intentionally shadows the builtin ``filter`` in this
    module; the underlying ``_filter`` is presumably an alias of the
    builtin imported elsewhere — confirm at the import site.
    """
    def expand_kv(kv):
        # Unpack a (key, value) pair into separate arguments for func.
        return func(*kv)

    def filter_values(value):
        # Rebuild the filtered result using the same container type as
        # the input.
        cls = type(value)
        if isinstance(value, dict):
            return cls(_filter(expand_kv, value.items()))
        else:
            return cls(_filter(func, value))

    return transform(filter_values)
Filters out unwanted items using the specified function. Supports both dicts and sequences, key/value pairs are expanded when applied to a dict.
def coarse_grain(self, user_sets): r"""Coarse-grains the flux onto user-defined sets. Parameters ---------- user_sets : list of int-iterables sets of states that shall be distinguished in the coarse-grained flux. Returns ------- (sets, tpt) : (list of int-iterables, tpt-object) sets contains the sets tpt is computed on. The tpt states of the new tpt object correspond to these sets of states in this order. Sets might be identical, if the user has already provided a complete partition that respects the boundary between A, B and the intermediates. If not, Sets will have more members than provided by the user, containing the "remainder" states and reflecting the splitting at the A and B boundaries. tpt contains a new tpt object for the coarse-grained flux. All its quantities (gross_flux, net_flux, A, B, committor, backward_committor) are coarse-grained to sets. Notes ----- All user-specified sets will be split (if necessary) to preserve the boundary between A, B and the intermediate states. 
""" # coarse-grain sets (tpt_sets, Aindexes, Bindexes) = self._compute_coarse_sets(user_sets) nnew = len(tpt_sets) # coarse-grain fluxHere we should branch between sparse and dense implementations, but currently there is only a F_coarse = tptapi.coarsegrain(self._gross_flux, tpt_sets) Fnet_coarse = tptapi.to_netflux(F_coarse) # coarse-grain stationary probability and committors - this can be done all dense pstat_coarse = np.zeros((nnew)) forward_committor_coarse = np.zeros((nnew)) backward_committor_coarse = np.zeros((nnew)) for i in range(0, nnew): I = list(tpt_sets[i]) muI = self._mu[I] pstat_coarse[i] = np.sum(muI) partialI = muI / pstat_coarse[i] # normalized stationary probability over I forward_committor_coarse[i] = np.dot(partialI, self._qplus[I]) backward_committor_coarse[i] = np.dot(partialI, self._qminus[I]) res = ReactiveFlux(Aindexes, Bindexes, Fnet_coarse, mu=pstat_coarse, qminus=backward_committor_coarse, qplus=forward_committor_coarse, gross_flux=F_coarse) return (tpt_sets, res)
r"""Coarse-grains the flux onto user-defined sets. Parameters ---------- user_sets : list of int-iterables sets of states that shall be distinguished in the coarse-grained flux. Returns ------- (sets, tpt) : (list of int-iterables, tpt-object) sets contains the sets tpt is computed on. The tpt states of the new tpt object correspond to these sets of states in this order. Sets might be identical, if the user has already provided a complete partition that respects the boundary between A, B and the intermediates. If not, Sets will have more members than provided by the user, containing the "remainder" states and reflecting the splitting at the A and B boundaries. tpt contains a new tpt object for the coarse-grained flux. All its quantities (gross_flux, net_flux, A, B, committor, backward_committor) are coarse-grained to sets. Notes ----- All user-specified sets will be split (if necessary) to preserve the boundary between A, B and the intermediate states.
def serve_static(filename=None, prefix='', basedir=None):
    """Handler for static files: serve filename in basedir/prefix.
    If not specified then basedir defaults to the local third_party
    directory.
    """
    if not basedir:
        basedir = os.path.join(os.path.dirname(__file__), 'third_party')
    return send_from_directory(os.path.join(basedir, prefix), filename)
Handler for static files: serve filename in basedir/prefix. If not specified then basedir defaults to the local third_party directory.
def get_translation(self, lang, field): """ Return the translation string of an specific field in a Translatable istance @type lang: string @param lang: a string with the name of the language @type field: string @param field: a string with the name that we try to get @rtype: string @return: Returns a translation string """ # Read from cache key = self._get_translation_cache_key(lang, field) trans = cache.get(key, '') if not trans: trans_obj = self.get_translation_obj(lang, field) trans = getattr(trans_obj, 'translation', '') # if there's no translation text fall back to the model field if not trans: trans = getattr(self, field, '') # update cache cache.set(key, trans) return trans
Return the translation string of a specific field in a Translatable instance @type lang: string @param lang: a string with the name of the language @type field: string @param field: a string with the name that we try to get @rtype: string @return: Returns a translation string
def est_propensity(self, lin='all', qua=None):
    """
    Estimates the propensity scores given list of covariates to
    include linearly or quadratically.

    The propensity score is the conditional probability of
    receiving the treatment given the observed covariates.
    Estimation is done via a logistic regression.

    Parameters
    ----------
    lin: string or list, optional
        Column numbers (zero-based) of variables of
        the original covariate matrix X to include
        linearly. Defaults to the string 'all', which
        uses whole covariate matrix.
    qua: list, optional
        Tuples indicating which columns of the original
        covariate matrix to multiply and include. E.g.,
        [(1,1), (2,3)] indicates squaring the 2nd column
        and including the product of the 3rd and 4th
        columns. Default is to not include any
        quadratic terms.
    """
    lin_terms = parse_lin_terms(self.raw_data['K'], lin)
    qua_terms = parse_qua_terms(self.raw_data['K'], qua)

    # Fit the logistic model and expose the fitted scores on the raw
    # data so downstream estimators can read them as 'pscore'.
    self.propensity = Propensity(self.raw_data, lin_terms, qua_terms)
    self.raw_data._dict['pscore'] = self.propensity['fitted']
    self._post_pscore_init()
Estimates the propensity scores given list of covariates to include linearly or quadratically. The propensity score is the conditional probability of receiving the treatment given the observed covariates. Estimation is done via a logistic regression. Parameters ---------- lin: string or list, optional Column numbers (zero-based) of variables of the original covariate matrix X to include linearly. Defaults to the string 'all', which uses whole covariate matrix. qua: list, optional Tuples indicating which columns of the original covariate matrix to multiply and include. E.g., [(1,1), (2,3)] indicates squaring the 2nd column and including the product of the 3rd and 4th columns. Default is to not include any quadratic terms.
def expand_time(str_time, default_unit='s', multiplier=1):
    """
    helper for above functions

    Parse strings like ``'1h30m'`` into a whole number of seconds,
    scaled by ``multiplier`` and truncated to int.  Bare numbers take
    ``default_unit``.  Raises ValueError on an unknown unit.
    """
    # seconds-per-unit dispatch table; '' falls back to default_unit.
    seconds_per_unit = {
        'ms': 0.001,
        's': 1,
        'm': 60,
        'h': 60 * 60,
        'd': 60 * 60 * 24,
        'w': 60 * 60 * 24 * 7,
    }
    total = 0.0
    for value, unit in re.compile(r'(\d+)([a-zA-Z]*)').findall(str_time):
        unit = unit.lower() or default_unit
        if unit not in seconds_per_unit:
            raise ValueError(
                "String contains unsupported unit %s: %s" % (unit, str_time))
        total += int(value) * seconds_per_unit[unit]
    return int(total * multiplier)
helper for above functions
def form_valid(self, form, formsets): """ Response for valid form. In one transaction this will save the current form and formsets, log the action and message the user. Returns the results of calling the `success_response` method. """ # check if it's a new object before it save the form new_object = False if not self.object: new_object = True instance = getattr(form, 'instance', None) auto_tags, changed_tags, old_tags = tag_handler.get_tags_from_data( form.data, self.get_tags(instance)) tag_handler.set_auto_tags_for_form(form, auto_tags) with transaction.commit_on_success(): self.object = self.save_form(form) self.save_formsets(form, formsets, auto_tags=auto_tags) url = self.get_object_url() self.log_action(self.object, CMSLog.SAVE, url=url) msg = self.write_message() # get old and new tags if not new_object and changed_tags and old_tags: tag_handler.update_changed_tags(changed_tags, old_tags) return self.success_response(msg)
Response for valid form. In one transaction this will save the current form and formsets, log the action and message the user. Returns the results of calling the `success_response` method.
def addRnaQuantMetadata(self, fields):
    """
    data elements are:
    Id, annotations, description, name, readGroupId
    where annotations is a comma separated list
    """
    self._featureSetIds = fields["feature_set_ids"].split(',')
    self._description = fields["description"]
    self._name = fields["name"]
    self._biosampleId = fields.get("biosample_id", "")
    read_group_ids = fields["read_group_ids"]
    self._readGroupIds = read_group_ids.split(',') if read_group_ids != "" else []
    # Need to use program Id's here to generate a list of Programs;
    # for now always set to empty (the key must still be present).
    _ = fields["programs"]
    self._programs = []
data elements are: Id, annotations, description, name, readGroupId where annotations is a comma separated list
def bind(nodemask):
    """
    Binds the current thread and its children to the nodes specified in
    nodemask.  They will only run on the CPUs of the specified nodes and
    only be able to allocate memory from them.

    @param nodemask: node mask
    @type nodemask: C{set}
    """
    mask = set_to_numa_nodemask(nodemask)
    bitmask = libnuma.numa_allocate_nodemask()
    # Copy the converted mask into the libnuma-owned bitmask, bind, and
    # free the allocation so it does not leak.
    libnuma.copy_nodemask_to_bitmask(byref(mask), bitmask)
    libnuma.numa_bind(bitmask)
    libnuma.numa_bitmask_free(bitmask)
Binds the current thread and its children to the nodes specified in nodemask. They will only run on the CPUs of the specified nodes and only be able to allocate memory from them. @param nodemask: node mask @type nodemask: C{set}
def cover_update(self, photo, **kwds):
    """
    Endpoint: /album/<album_id>/cover/<photo_id>/update.json

    Update the cover photo of this album.
    """
    result = self._client.album.cover_update(self, photo, **kwds)
    # Refresh this object's fields from the server response so the
    # local state stays in sync with the remote album.
    self._replace_fields(result.get_fields())
    self._update_fields_with_objects()
Endpoint: /album/<album_id>/cover/<photo_id>/update.json Update the cover photo of this album.
def transcription(ref, est, **kwargs):
    r'''Note transcription evaluation

    Parameters
    ----------
    ref : jams.Annotation
        Reference annotation object
    est : jams.Annotation
        Estimated annotation object
    kwargs
        Additional keyword arguments

    Returns
    -------
    scores : dict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.

    See Also
    --------
    mir_eval.transcription.evaluate

    Examples
    --------
    >>> # Load in the JAMS objects
    >>> ref_jam = jams.load('reference.jams')
    >>> est_jam = jams.load('estimated.jams')
    >>> # Select the first relevant annotations. You can use any annotation
    >>> # type that can be converted to pitch_contour (such as pitch_midi)
    >>> ref_ann = ref_jam.search(namespace='pitch_contour')[0]
    >>> est_ann = est_jam.search(namespace='note_hz')[0]
    >>> scores = jams.eval.transcription(ref_ann, est_ann)
    '''
    namespace = 'pitch_contour'
    ref = coerce_annotation(ref, namespace)
    est = coerce_annotation(est, namespace)
    ref_intervals, ref_p = ref.to_interval_values()
    est_intervals, est_p = est.to_interval_values()
    # (-1)**(~voiced) flips the sign for unvoiced observations:
    # voiced -> +frequency, unvoiced -> -frequency.
    ref_pitches = np.asarray([p['frequency'] * (-1)**(~p['voiced']) for p in ref_p])
    est_pitches = np.asarray([p['frequency'] * (-1)**(~p['voiced']) for p in est_p])
    return mir_eval.transcription.evaluate(
        ref_intervals, ref_pitches, est_intervals, est_pitches, **kwargs)
r'''Note transcription evaluation Parameters ---------- ref : jams.Annotation Reference annotation object est : jams.Annotation Estimated annotation object kwargs Additional keyword arguments Returns ------- scores : dict Dictionary of scores, where the key is the metric name (str) and the value is the (float) score achieved. See Also -------- mir_eval.transcription.evaluate Examples -------- >>> # Load in the JAMS objects >>> ref_jam = jams.load('reference.jams') >>> est_jam = jams.load('estimated.jams') >>> # Select the first relevant annotations. You can use any annotation >>> # type that can be converted to pitch_contour (such as pitch_midi) >>> ref_ann = ref_jam.search(namespace='pitch_contour')[0] >>> est_ann = est_jam.search(namespace='note_hz')[0] >>> scores = jams.eval.transcription(ref_ann, est_ann)
def iso_register(iso_code):
    """
    Registers Calendar class as country or region in IsoRegistry.

    Registered country must set class variables ``iso`` using this
    decorator.

    >>> from workalendar.core import Calendar
    >>> @iso_register('MC-MR')
    >>> class MyRegion(Calendar):
    >>>     'My Region'

    Region calendar is then retrievable from registry:

    >>> calendar = registry.get_calendar_class('MC-MR')
    """
    def decorator(calendar_class):
        # Side effect: record the class in the global ISO registry,
        # then hand the class back unchanged.
        registry.register(iso_code, calendar_class)
        return calendar_class
    return decorator
Registers Calendar class as country or region in IsoRegistry. Registered country must set class variables ``iso`` using this decorator. >>> from workalendar.core import Calendar >>> @iso_register('MC-MR') >>> class MyRegion(Calendar): >>> 'My Region' Region calendar is then retrievable from registry: >>> calendar = registry.get_calendar_class('MC-MR')
def get_success_url(self):
    """
    Returns the URL to redirect the user to upon valid form processing.
    """
    messages.success(self.request, self.success_message)
    # A post that is both head and tail is presumably the only post in
    # its topic, so redirect to the forum page instead of the topic —
    # TODO confirm against the enclosing view's semantics.
    if self.object.is_topic_head and self.object.is_topic_tail:
        return reverse(
            'forum:forum',
            kwargs={
                'slug': self.object.topic.forum.slug,
                'pk': self.object.topic.forum.pk,
            },
        )
    return reverse(
        'forum_conversation:topic',
        kwargs={
            'forum_slug': self.object.topic.forum.slug,
            'forum_pk': self.object.topic.forum.pk,
            'slug': self.object.topic.slug,
            'pk': self.object.topic.pk,
        },
    )
Returns the URL to redirect the user to upon valid form processing.
def replace_param_occurrences(string, params):
    """replace occurrences of the tuning params with their current value"""
    result = string
    # Substitute every parameter name with the string form of its value.
    for name in params:
        result = result.replace(name, str(params[name]))
    return result
replace occurrences of the tuning params with their current value
def IsFile(v):
    """Verify the file exists.

    >>> os.path.basename(IsFile()(__file__)).startswith('validators.py')
    True
    >>> with raises(FileInvalid, 'not a file'):
    ...   IsFile()("random_filename_goes_here.py")
    >>> with raises(FileInvalid, 'Not a file'):
    ...   IsFile()(None)

    NOTE(review): the doctests above appear to assume decorator wrappers
    (as in voluptuous) that turn a falsy return into FileInvalid; this
    bare function returns False for a nonexistent path rather than
    raising — confirm against the original library.
    """
    try:
        if v:
            v = str(v)
            return os.path.isfile(v)
        else:
            # Falsy input (None, '') is rejected outright.
            raise FileInvalid('Not a file')
    except TypeError:
        raise FileInvalid('Not a file')
Verify the file exists. >>> os.path.basename(IsFile()(__file__)).startswith('validators.py') True >>> with raises(FileInvalid, 'not a file'): ... IsFile()("random_filename_goes_here.py") >>> with raises(FileInvalid, 'Not a file'): ... IsFile()(None)
def update_ca_bundle(
        target=None,
        source=None,
        opts=None,
        merge_files=None,
):
    '''
    Attempt to update the CA bundle file from a URL

    If not specified, the local location on disk (``target``) will be
    auto-detected, if possible. If it is not found, then a new location on
    disk will be created and updated.

    The default ``source`` is:

        http://curl.haxx.se/ca/cacert.pem

    This is based on the information at:

        http://curl.haxx.se/docs/caextract.html

    A string or list of strings representing files to be appended to the end
    of the CA bundle file may also be passed through as ``merge_files``.
    '''
    if opts is None:
        opts = {}

    # Auto-detect the on-disk bundle location when not given explicitly.
    if target is None:
        target = get_ca_bundle(opts)

    if target is None:
        log.error('Unable to detect location to write CA bundle to')
        return

    # Allow the download URL to be overridden via configuration.
    if source is None:
        source = opts.get('ca_bundle_url', 'http://curl.haxx.se/ca/cacert.pem')

    log.debug('Attempting to download %s to %s', source, target)
    # Download the bundle straight to ``target`` (text_out writes the body).
    query(
        source,
        text=True,
        decode=False,
        headers=False,
        status=False,
        text_out=target
    )

    if merge_files is not None:
        # Normalize a single path to a one-element list.
        if isinstance(merge_files, six.string_types):
            merge_files = [merge_files]

        if not isinstance(merge_files, list):
            log.error('A value was passed as merge_files which was not either '
                      'a string or a list')
            return

        merge_content = ''

        # Collect the contents of every existing merge file; read errors are
        # logged but do not abort the remaining files.
        for cert_file in merge_files:
            if os.path.exists(cert_file):
                log.debug(
                    'Queueing up %s to be appended to %s',
                    cert_file, target
                )
                try:
                    with salt.utils.files.fopen(cert_file, 'r') as fcf:
                        merge_content = '\n'.join((merge_content, fcf.read()))
                except IOError as exc:
                    log.error(
                        'Reading from %s caused the following error: %s',
                        cert_file, exc
                    )

        if merge_content:
            # Append (not overwrite) the collected certificates to the bundle.
            log.debug('Appending merge_files to %s', target)
            try:
                with salt.utils.files.fopen(target, 'a') as tfp:
                    tfp.write('\n')
                    tfp.write(merge_content)
            except IOError as exc:
                log.error(
                    'Writing to %s caused the following error: %s',
                    target, exc
                )
Attempt to update the CA bundle file from a URL If not specified, the local location on disk (``target``) will be auto-detected, if possible. If it is not found, then a new location on disk will be created and updated. The default ``source`` is: http://curl.haxx.se/ca/cacert.pem This is based on the information at: http://curl.haxx.se/docs/caextract.html A string or list of strings representing files to be appended to the end of the CA bundle file may also be passed through as ``merge_files``.
def union_q(token):
    """Combine all the Q() objects found in *token* into one query."""
    query = Q()
    operation = 'and'
    negation = False
    for item in token:
        if type(item) is ParseResults:
            # Nested group: fold it in recursively.
            query &= union_q(item)
        elif item in ('or', 'and'):
            # Remember the operator for the following terms.
            operation = item
        elif item == '-':
            # The following terms are negated.
            negation = True
        else:
            term = ~item if negation else item
            if operation == 'or':
                query |= term
            else:
                query &= term
    return query
Appends all the Q() objects.
def _get_error(self, code, errors, indentation=0):
    """Get error and show the faulty line + some context

    Other GLIR implementations may omit this.
    """
    # Pre-split the source so faulty lines can be quoted next to the error.
    lines = None
    if code is not None:
        lines = [src_line.strip() for src_line in code.split('\n')]

    results = []
    for err in errors.split('\n'):
        err = err.strip()
        if not err:
            continue
        # Try to split "line number" from "description".
        linenr, err = self._parse_error(err)
        if linenr is None or lines is None:
            results.append('%s' % err)
        else:
            results.append('on line %i: %s' % (linenr, err))
            if 0 < linenr < len(lines):
                results.append(' %s' % lines[linenr - 1])

    pad = ' ' * indentation
    return '\n'.join(pad + entry for entry in results)
Get error and show the faulty line + some context Other GLIR implementations may omit this.
def new(self, string=None, *args, **kwargs):
    """Return a fresh hash object for this algorithm chain.

    A `_ChainedHashAlgorithm` is built when the underlying tuple of
    algorithms is non-empty; otherwise a `_NopHashAlgorithm` instance is
    returned. If *string* is given, the new object is updated with it.
    """
    hobj = (_ChainedHashAlgorithm(self, *args, **kwargs) if len(self)
            else _NopHashAlgorithm(*args, **kwargs))
    if string is not None:
        hobj.update(string)
    return hobj
Returns a `_ChainedHashAlgorithm` if the underlying tuple (specifying the list of algorithms) is not empty, otherwise a `_NopHashAlgorithm` instance is returned.
def extract_subjects(cert_pem):
    """Extract subjects from a DataONE PEM (Base64) encoded X.509 v3 certificate.

    Args:
      cert_pem: str or bytes
        PEM (Base64) encoded X.509 v3 certificate

    Returns:
      2-tuple:
        - The primary subject string, extracted from the certificate DN.
        - A set of equivalent identities, group memberships and inferred
          symbolic subjects extracted from the SubjectInfo (if present.)
        - All returned subjects are DataONE compliant serializations.
        - A copy of the primary subject is always included in the set of
          equivalent identities.
    """
    primary_str, subject_info_xml = d1_common.cert.x509.extract_subjects(cert_pem)
    # The primary subject plus the two symbolic subjects every authenticated
    # caller implicitly holds.
    equivalents = {
        primary_str,
        d1_common.const.SUBJECT_AUTHENTICATED,
        d1_common.const.SUBJECT_PUBLIC,
    }
    if subject_info_xml is not None:
        equivalents.update(
            d1_common.cert.subject_info.extract_subjects(subject_info_xml, primary_str)
        )
    return primary_str, equivalents
Extract subjects from a DataONE PEM (Base64) encoded X.509 v3 certificate. Args: cert_pem: str or bytes PEM (Base64) encoded X.509 v3 certificate Returns: 2-tuple: - The primary subject string, extracted from the certificate DN. - A set of equivalent identities, group memberships and inferred symbolic subjects extracted from the SubjectInfo (if present.) - All returned subjects are DataONE compliant serializations. - A copy of the primary subject is always included in the set of equivalent identities.
def playlist(self, playlist_id, *, include_songs=False):
    """Get information about a playlist.

    Parameters:
        playlist_id (str): A playlist ID.
        include_songs (bool, Optional): Include songs from
            the playlist in the returned dict.
            Default: ``False``

    Returns:
        dict: Playlist information, or ``None`` when no playlist matches.
    """
    for candidate in self.playlists(include_songs=include_songs):
        if candidate['id'] == playlist_id:
            return candidate
    return None
Get information about a playlist. Parameters: playlist_id (str): A playlist ID. include_songs (bool, Optional): Include songs from the playlist in the returned dict. Default: ``False`` Returns: dict: Playlist information.
def validatePrepare(self, prepare: Prepare, sender: str) -> bool:
    """
    Return whether the PREPARE specified is valid.

    :param prepare: the PREPARE to validate
    :param sender: the name of the node that sent the PREPARE
    :return: True if PREPARE is valid, False otherwise
    :raises SuspiciousNode: when the PREPARE indicates faulty or malicious
        behavior by the sender (duplicate, from primary, mismatched digests,
        plugin validation failure, ...)
    """
    key = (prepare.viewNo, prepare.ppSeqNo)
    primaryStatus = self.isPrimaryForMsg(prepare)

    # Look up the matching PRE-PREPARE (may be None if not yet received).
    ppReq = self.getPrePrepare(*key)

    # If a non primary replica and receiving a PREPARE request before a
    # PRE-PREPARE request, then proceed

    # PREPARE should not be sent from primary
    if self.isMsgFromPrimary(prepare, sender):
        raise SuspiciousNode(sender, Suspicions.PR_FRM_PRIMARY, prepare)

    # If non primary replica
    if primaryStatus is False:
        if self.prepares.hasPrepareFrom(prepare, sender):
            raise SuspiciousNode(
                sender, Suspicions.DUPLICATE_PR_SENT, prepare)
        # If PRE-PREPARE not received for the PREPARE, might be slow
        # network: park the PREPARE for later processing.
        if not ppReq:
            self.enqueue_prepare(prepare, sender)
            self._setup_last_ordered_for_non_master()
            return False

    # If primary replica
    if primaryStatus is True:
        if self.prepares.hasPrepareFrom(prepare, sender):
            raise SuspiciousNode(
                sender, Suspicions.DUPLICATE_PR_SENT, prepare)
        # If PRE-PREPARE was not sent for this PREPARE, certainly
        # malicious behavior (the primary itself issues PRE-PREPAREs).
        elif not ppReq:
            raise SuspiciousNode(
                sender, Suspicions.UNKNOWN_PR_SENT, prepare)

    # Primary status unknown and no PRE-PREPARE yet: defer, same as the
    # non-primary slow-network case above.
    if primaryStatus is None and not ppReq:
        self.enqueue_prepare(prepare, sender)
        self._setup_last_ordered_for_non_master()
        return False

    # The PREPARE must agree with the PRE-PREPARE on all hashes/digests.
    if prepare.digest != ppReq.digest:
        raise SuspiciousNode(sender, Suspicions.PR_DIGEST_WRONG, prepare)
    elif prepare.stateRootHash != ppReq.stateRootHash:
        raise SuspiciousNode(sender, Suspicions.PR_STATE_WRONG,
                             prepare)
    elif prepare.txnRootHash != ppReq.txnRootHash:
        raise SuspiciousNode(sender, Suspicions.PR_TXN_WRONG,
                             prepare)
    elif prepare.auditTxnRootHash != ppReq.auditTxnRootHash:
        raise SuspiciousNode(sender, Suspicions.PR_AUDIT_TXN_ROOT_HASH_WRONG,
                             prepare)

    # Give registered plugin hooks a chance to veto; any hook exception is
    # converted into a suspicion against the sender.
    try:
        self.execute_hook(ReplicaHooks.VALIDATE_PR, prepare, ppReq)
    except Exception as ex:
        self.logger.warning('{} encountered exception in replica '
                            'hook {} : {}'.
                            format(self, ReplicaHooks.VALIDATE_PR, ex))
        raise SuspiciousNode(sender, Suspicions.PR_PLUGIN_EXCEPTION,
                             prepare)

    # BLS multi-sig:
    self._bls_bft_replica.validate_prepare(prepare, sender)

    return True
Return whether the PREPARE specified is valid. :param prepare: the PREPARE to validate :param sender: the name of the node that sent the PREPARE :return: True if PREPARE is valid, False otherwise
def create_from_tree(cls, repo, tree, message, parent_commits=None, head=False, author=None,
                     committer=None, author_date=None, commit_date=None):
    """Commit the given tree, creating a commit object.

    :param repo: Repo object the commit should be part of
    :param tree: Tree object or hex or bin sha the tree of the new commit
    :param message: Commit message. It may be an empty string if no message is provided.
        It will be converted to a string in any case.
    :param parent_commits:
        Optional Commit objects to use as parents for the new commit.
        If empty list, the commit will have no parents at all and become
        a root commit.
        If None , the current head commit will be the parent of the
        new commit object
    :param head:
        If True, the HEAD will be advanced to the new commit automatically.
        Else the HEAD will remain pointing on the previous commit. This could
        lead to undesired results when diffing files.
    :param author: The name of the author, optional. If unset, the repository
        configuration is used to obtain this value.
    :param committer: The name of the committer, optional. If unset, the
        repository configuration is used to obtain this value.
    :param author_date: The timestamp for the author field
    :param commit_date: The timestamp for the committer field

    :return: Commit object representing the new commit

    :note:
        Additional information about the committer and Author are taken from the
        environment or from the git configuration, see git-commit-tree for
        more information"""
    if parent_commits is None:
        try:
            parent_commits = [repo.head.commit]
        except ValueError:
            # empty repositories have no head commit
            parent_commits = []
        # END handle parent commits
    else:
        for p in parent_commits:
            if not isinstance(p, cls):
                raise ValueError("Parent commit '%r' must be of type %s" % (p, cls))
        # end check parent commit types
    # END if parent commits are unset

    # retrieve all additional information, create a commit object, and
    # serialize it
    # Generally:
    # * Environment variables override configuration values
    # * Sensible defaults are set according to the git documentation

    # COMMITER AND AUTHOR INFO
    cr = repo.config_reader()
    env = os.environ

    committer = committer or Actor.committer(cr)
    author = author or Actor.author(cr)

    # PARSE THE DATES
    unix_time = int(time())
    is_dst = daylight and localtime().tm_isdst > 0
    offset = altzone if is_dst else timezone

    # Author date: explicit argument wins, then env var, then "now".
    author_date_str = env.get(cls.env_author_date, '')
    if author_date:
        author_time, author_offset = parse_date(author_date)
    elif author_date_str:
        author_time, author_offset = parse_date(author_date_str)
    else:
        author_time, author_offset = unix_time, offset
    # END set author time

    # Committer date resolved with the same precedence as the author date.
    committer_date_str = env.get(cls.env_committer_date, '')
    if commit_date:
        committer_time, committer_offset = parse_date(commit_date)
    elif committer_date_str:
        committer_time, committer_offset = parse_date(committer_date_str)
    else:
        committer_time, committer_offset = unix_time, offset
    # END set committer time

    # assume utf8 encoding
    enc_section, enc_option = cls.conf_encoding.split('.')
    conf_encoding = cr.get_value(enc_section, enc_option, cls.default_encoding)

    # if the tree is no object, make sure we create one - otherwise
    # the created commit object is invalid
    if isinstance(tree, str):
        tree = repo.tree(tree)
    # END tree conversion

    # CREATE NEW COMMIT
    # binsha starts as the null sha; it is filled in below once the object
    # has been serialized and stored in the object database.
    new_commit = cls(repo, cls.NULL_BIN_SHA, tree,
                     author, author_time, author_offset,
                     committer, committer_time, committer_offset,
                     message, parent_commits, conf_encoding)

    stream = BytesIO()
    new_commit._serialize(stream)
    streamlen = stream.tell()
    stream.seek(0)

    istream = repo.odb.store(IStream(cls.type, streamlen, stream))
    new_commit.binsha = istream.binsha

    if head:
        # need late import here, importing git at the very beginning throws
        # as well ...
        import git.refs
        try:
            repo.head.set_commit(new_commit, logmsg=message)
        except ValueError:
            # head is not yet set to the ref our HEAD points to
            # Happens on first commit
            master = git.refs.Head.create(repo, repo.head.ref, new_commit,
                                          logmsg="commit (initial): %s" % message)
            repo.head.set_reference(master, logmsg='commit: Switching to %s' % master)
        # END handle empty repositories
    # END advance head handling

    return new_commit
Commit the given tree, creating a commit object. :param repo: Repo object the commit should be part of :param tree: Tree object or hex or bin sha the tree of the new commit :param message: Commit message. It may be an empty string if no message is provided. It will be converted to a string in any case. :param parent_commits: Optional Commit objects to use as parents for the new commit. If empty list, the commit will have no parents at all and become a root commit. If None , the current head commit will be the parent of the new commit object :param head: If True, the HEAD will be advanced to the new commit automatically. Else the HEAD will remain pointing on the previous commit. This could lead to undesired results when diffing files. :param author: The name of the author, optional. If unset, the repository configuration is used to obtain this value. :param committer: The name of the committer, optional. If unset, the repository configuration is used to obtain this value. :param author_date: The timestamp for the author field :param commit_date: The timestamp for the committer field :return: Commit object representing the new commit :note: Additional information about the committer and Author are taken from the environment or from the git configuration, see git-commit-tree for more information
def generate_cot_body(context):
    """Generate the chain of trust dictionary.

    This is the unsigned and unformatted chain of trust artifact contents.

    Args:
        context (scriptworker.context.Context): the scriptworker context.

    Returns:
        dict: the unsigned and unformatted chain of trust artifact contents.

    Raises:
        ScriptWorkerException: on error.
    """
    claim = context.claim_task
    try:
        return {
            'artifacts': get_cot_artifacts(context),
            'chainOfTrustVersion': 1,
            'runId': claim['runId'],
            'task': context.task,
            'taskId': claim['status']['taskId'],
            'workerGroup': claim['workerGroup'],
            'workerId': context.config['worker_id'],
            'workerType': context.config['worker_type'],
            'environment': get_cot_environment(context),
        }
    except KeyError as exc:
        # A missing claim/config key means we cannot produce a valid artifact.
        raise ScriptWorkerException("Can't generate chain of trust! {}".format(str(exc)))
Generate the chain of trust dictionary. This is the unsigned and unformatted chain of trust artifact contents. Args: context (scriptworker.context.Context): the scriptworker context. Returns: dict: the unsigned and unformatted chain of trust artifact contents. Raises: ScriptWorkerException: on error.
def _one_to_many_query(cls, query_obj, search4, model_attrib):
    """extends and returns a SQLAlchemy query object to allow one-to-many queries

    :param query_obj: SQL Alchemy query object
    :param str search4: search string
    :param model_attrib: attribute in model
    """
    model = model_attrib.parent.class_

    # Only join the related model once per query.
    already_joined = [mapper.class_ for mapper in query_obj._join_entities]
    if model not in already_joined and isinstance(search4, (str, int, Iterable)):
        query_obj = query_obj.join(model)

    # Build the filter criterion matching the search value's type; note the
    # order matters (str before Iterable, since str is itself iterable).
    if isinstance(search4, str):
        criterion = model_attrib.like(search4)
    elif isinstance(search4, int):
        criterion = model_attrib == search4
    elif isinstance(search4, Iterable):
        criterion = model_attrib.in_(search4)
    else:
        criterion = None

    if criterion is not None:
        query_obj = query_obj.filter(criterion)
    return query_obj
extends and returns a SQLAlchemy query object to allow one-to-many queries :param query_obj: SQL Alchemy query object :param str search4: search string :param model_attrib: attribute in model
def getContactTypes(self):
    """
    Return an iterator of L{IContactType} providers available to this
    organizer's store.
    """
    # Built-in contact types; each one is constructed lazily, only when the
    # consumer advances the generator to it.
    yield VIPPersonContactType()
    yield EmailContactType(self.store)
    yield PostalContactType()
    yield PhoneNumberContactType()
    yield NotesContactType()
    # Plugin-provided contact types are validated before being yielded.
    for pluginGetContactTypes in self._gatherPluginMethods('getContactTypes'):
        for pluginContactType in pluginGetContactTypes():
            self._checkContactType(pluginContactType)
            yield pluginContactType
Return an iterator of L{IContactType} providers available to this organizer's store.
def get_params(self, deep=True):
    """
    Get parameters for the estimator.

    Args:
        deep (boolean, optional) : If True, will return the parameters for
            this estimator and contained subobjects that are estimators.

    Returns:
        params : mapping of string to any contained subobjects that are
            estimators.
    """
    params = {'weights': self.coef_, 'bias': self.intercept_}
    if deep:
        params.update(('b_' + str(key), value) for key, value in self.B.items())
    return params
Get parameters for the estimator. Args: deep (boolean, optional) : If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns: params : mapping of string to any contained subobjects that are estimators.
def list_gewesten(self, sort=1):
    '''
    List all `gewesten` in Belgium.

    :param integer sort: What field to sort on.
    :rtype: A :class`list` of class: `Gewest`.
    '''
    def creator():
        res = crab_gateway_request(self.client, 'ListGewesten', sort)
        # Group the per-language names by gewest id before instantiating.
        names_by_id = {}
        for item in res.GewestItem:
            names_by_id.setdefault(item.GewestId, {})[item.TaalCodeGewestNaam] = item.GewestNaam
        return [Gewest(gewest_id, names) for gewest_id, names in names_by_id.items()]

    if self.caches['permanent'].is_configured:
        cache_key = 'ListGewesten#%s' % sort
        gewesten = self.caches['permanent'].get_or_create(cache_key, creator)
    else:
        gewesten = creator()

    # (Re)attach the gateway, since cached instances may have lost it.
    for gewest in gewesten:
        gewest.set_gateway(self)
    return gewesten
List all `gewesten` in Belgium. :param integer sort: What field to sort on. :rtype: A :class`list` of class: `Gewest`.
def hook_wrapper_23(stdin, stdout, prompt):
    u'''Wrap a Python readline so it behaves like GNU readline.

    NOTE: this is Python 2 code (``raise TypeError, ...`` and
    ``print >>sys.stderr`` syntax). It is invoked as a C callback, so it
    must return either 0 or a C-allocated string pointer.
    '''
    try:
        # call the Python hook
        res = ensure_str(readline_hook(prompt))
        # make sure it returned the right sort of thing
        if res and not isinstance(res, str):
            raise TypeError, u'readline must return a string.'
    except KeyboardInterrupt:
        # GNU readline returns 0 on keyboard interrupt
        return 0
    except EOFError:
        # It returns an empty string on EOF
        res = u''
    except:
        # Any other failure in the hook is reported but must not propagate
        # into the C caller; fall back to a bare newline.
        print >>sys.stderr, u'Readline internal error'
        traceback.print_exc()
        res = u'\n'
    # we have to make a copy because the caller expects to free the result
    n = len(res)
    p = Console.PyMem_Malloc(n + 1)
    _strncpy(cast(p, c_char_p), res, n + 1)
    return p
Wrap a Python readline so it behaves like GNU readline.
def _contained_parameters(expression):
    """
    Determine which parameters are contained in this expression.

    :param Expression expression: expression involving parameters
    :return: set of parameters contained in this expression
    :rtype: set
    """
    # Walk the expression tree, collecting Parameter leaves.
    if isinstance(expression, BinaryExp):
        return (_contained_parameters(expression.op1)
                | _contained_parameters(expression.op2))
    if isinstance(expression, Function):
        return _contained_parameters(expression.expression)
    if isinstance(expression, Parameter):
        return {expression}
    # Constants and anything else contribute no parameters.
    return set()
Determine which parameters are contained in this expression. :param Expression expression: expression involving parameters :return: set of parameters contained in this expression :rtype: set
def compress(self, input_path):
    """
    Compress the contents of the given directory.

    :param string input_path: path of the input directory
    :raises: TypeError: if the container path has not been set
    :raises: ValueError: if ``input_path`` is not an existing directory
    :raises: OSError: if an error occurred compressing the
                      given container (e.g., empty file, damaged file, etc.)
    """
    self.log([u"Compressing '%s' into this container", input_path])

    # Guard clauses: log_exc presumably logs and raises the given exception
    # type (TypeError / ValueError) — the method does not continue past them.
    if self.file_path is None:
        self.log_exc(u"The container path has not been set", None, True, TypeError)

    if self.actual_container is None:
        self.log_exc(u"The actual container object has not been set", None, True, TypeError)

    if not gf.directory_exists(input_path):
        self.log_exc(u"The input path is not an existing directory", None, True, ValueError)

    # NOTE(review): this ensures the parent of the *input* directory exists,
    # even though input_path was just verified to exist; presumably the
    # intent was the parent of self.file_path — confirm upstream.
    gf.ensure_parent_directory(input_path)

    self.actual_container.compress(input_path)
Compress the contents of the given directory. :param string input_path: path of the input directory :raises: TypeError: if the container path has not been set :raises: ValueError: if ``input_path`` is not an existing directory :raises: OSError: if an error occurred compressing the given container (e.g., empty file, damaged file, etc.)
def predict_density(self, Xnew, Ynew):
    """
    Compute the (log) density of the data Ynew at the points Xnew

    Note that this computes the log density of the data individually,
    ignoring correlations between them. The result is a matrix the same
    shape as Ynew containing the log densities.
    """
    mean, var = self._build_predict(Xnew)
    return self.likelihood.predict_density(mean, var, Ynew)
Compute the (log) density of the data Ynew at the points Xnew Note that this computes the log density of the data individually, ignoring correlations between them. The result is a matrix the same shape as Ynew containing the log densities.
def num(value):
    """Convert a value from one of several bases to an int."""
    # Hex strings are recognized by the module-level ``re_hex_num`` pattern;
    # everything else is parsed as a plain decimal literal.
    return int(value, base=16) if re_hex_num.match(value) else int(value)
Convert a value from one of several bases to an int.
def get_requirements_transform_cfme(config):
    """Return requirement transformation function for CFME."""
    def requirement_transform(requirement):
        """Requirements transform for CFME."""
        # Work on a deep copy so the caller's dict is never mutated.
        transformed = copy.deepcopy(requirement)
        transformed.pop("id", None)
        return transformed

    return requirement_transform
Return requirement transformation function for CFME.
def iter(self, match="*", count=1000):
    """ :see::meth:RedisMap.iter """
    # Only the field names are decoded and yielded; values are ignored.
    for field, _ in self._client.hscan_iter(
            self.key_prefix, match=match, count=count):
        yield self._decode(field)
:see::meth:RedisMap.iter
def is_inbound_presence_filter(cb):
    """
    Return true if `cb` has been decorated with
    :func:`inbound_presence_filter`.
    """
    try:
        handlers = get_magic_attr(cb)
    except AttributeError:
        # Undecorated callables carry no magic attribute at all.
        return False
    return HandlerSpec((_apply_inbound_presence_filter, ())) in handlers
Return true if `cb` has been decorated with :func:`inbound_presence_filter`.
def process_module(self, node):
    """process a module

    the module's content is accessible via the stream object

    stream must implement the readlines method
    """
    # Record the module's raw stream under the current linter name,
    # presumably for later cross-module processing — confirm with the
    # checker's append_stream implementation.
    with node.stream() as stream:
        self.append_stream(self.linter.current_name, stream, node.file_encoding)
process a module the module's content is accessible via the stream object stream must implement the readlines method
def _delete_network(self, request, network):
    """Delete the created network when subnet creation failed.

    Rolls back the network created earlier so a failed subnet creation does
    not leave an orphaned network behind, then redirects to the failure URL.
    """
    try:
        api.neutron.network_delete(request, network.id)
        LOG.debug('Delete the created network %s '
                  'due to subnet creation failure.', network.id)
        msg = _('Delete the created network "%s" '
                'due to subnet creation failure.') % network.name
        redirect = self.get_failure_url()
        messages.info(request, msg)
        # NOTE(review): if Http302 subclasses Exception, this raise is caught
        # by the ``except Exception`` below instead of propagating, so the
        # "Failed to delete" path runs even after a successful deletion —
        # confirm whether exceptions.handle() re-raises Http302.
        raise exceptions.Http302(redirect)
    except Exception as e:
        LOG.info('Failed to delete network %(id)s: %(exc)s',
                 {'id': network.id, 'exc': e})
        msg = _('Failed to delete network "%s"') % network.name
        redirect = self.get_failure_url()
        exceptions.handle(request, msg, redirect=redirect)
Delete the created network when subnet creation failed.
def enable_compression(self,
                       force: Optional[Union[bool, ContentCoding]]=None
                       ) -> None:
    """Enables response compression encoding.

    :param force: ``None`` to negotiate the coding with the client,
        a :class:`ContentCoding` member to force a specific coding, or
        (deprecated) a bool mapped to deflate/identity.
    """
    # Backwards compatibility for when force was a bool <0.17.
    # isinstance is the idiomatic check; bool cannot be subclassed, so this
    # accepts exactly the same values as the previous ``type(force) == bool``.
    if isinstance(force, bool):
        force = ContentCoding.deflate if force else ContentCoding.identity
        warnings.warn("Using boolean for force is deprecated #3318",
                      DeprecationWarning)
    elif force is not None:
        assert isinstance(force, ContentCoding), ("force should one of "
                                                  "None, bool or "
                                                  "ContentEncoding")

    self._compression = True
    self._compression_force = force
Enables response compression encoding.
def load_config(config_file=None, profile=None, client=None,
                endpoint=None, token=None, solver=None, proxy=None):
    """Load D-Wave Cloud Client configuration based on a configuration file.

    Configuration values are resolved with the following precedence
    (highest first):

    1. Keyword arguments passed to this function. They replace values read
       from a configuration file, and therefore must be **strings**,
       including float values for timeouts, boolean flags (tested for
       "truthiness"), and solver feature constraints (a dictionary encoded
       as JSON).
    2. Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``,
       ``DWAVE_API_CLIENT``, ``DWAVE_API_ENDPOINT``, ``DWAVE_API_TOKEN``,
       ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``.
    3. Values specified in the configuration file (format described in
       :mod:`dwave.cloud.config`).

    Args:
        config_file (str/[str]/None/False/True, default=None):
            Path(s) to the configuration file. ``None`` defers to the
            ``DWAVE_CONFIG_FILE`` environment variable and then to
            auto-detection via :func:`get_configfile_paths`; ``False`` skips
            file loading entirely; ``True`` forces auto-detection regardless
            of the environment variable.

        profile (str, default=None):
            Profile name (section in the configuration file). Defaults to
            the ``DWAVE_PROFILE`` environment variable, then to the file's
            default/first section.

        client (str, default=None):
            Client type used for accessing the API (``qpu`` or ``sw``).

        endpoint (str, default=None):
            API endpoint URL.

        token (str, default=None):
            API authorization token.

        solver (str, default=None):
            :term:`solver` features, as a JSON-encoded dictionary of feature
            constraints. A plain solver name is also accepted for backward
            compatibility.

        proxy (str, default=None):
            URL for proxy to use in connections to D-Wave API.

    Returns:
        dict: Mapping of configuration keys to values for the selected
        profile, optionally overridden by environment values and keyword
        arguments. Always contains the `client`, `endpoint`, `token`,
        `solver`, and `proxy` keys.

    Raises:
        :exc:`ValueError`:
            Invalid (non-existing) profile name.

        :exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
            Config file specified or detected could not be opened or read.

        :exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
            Config file parse failed.
    """
    if profile is None:
        profile = os.getenv("DWAVE_PROFILE")

    # Note: equality (not identity) comparisons below mirror the documented
    # contract for the False/True sentinels.
    if config_file == False:
        # skip loading from file altogether
        section = {}
    elif config_file == True:
        # force auto-detection, disregarding DWAVE_CONFIG_FILE
        section = load_profile_from_files(None, profile)
    else:
        if config_file is None:
            # both empty and undefined DWAVE_CONFIG_FILE are treated as None
            config_file = os.getenv("DWAVE_CONFIG_FILE")

        # normalize ''/None/str/[str] to None or a list of paths
        filenames = None
        if config_file:
            filenames = ([config_file]
                         if isinstance(config_file, six.string_types)
                         else config_file)

        section = load_profile_from_files(filenames, profile)

    # kwargs take precedence over env vars, which take precedence over the
    # file; all other file values pass through unmodified
    overrides = (('client', client, "DWAVE_API_CLIENT"),
                 ('endpoint', endpoint, "DWAVE_API_ENDPOINT"),
                 ('token', token, "DWAVE_API_TOKEN"),
                 ('solver', solver, "DWAVE_API_SOLVER"),
                 ('proxy', proxy, "DWAVE_API_PROXY"))
    for key, value, envvar in overrides:
        section[key] = value or os.getenv(envvar, section.get(key))

    return section
Load D-Wave Cloud Client configuration based on a configuration file. Configuration values can be specified in multiple ways, ranked in the following order (with 1 the highest ranked): 1. Values specified as keyword arguments in :func:`load_config()`. These values replace values read from a configuration file, and therefore must be **strings**, including float values for timeouts, boolean flags (tested for "truthiness"), and solver feature constraints (a dictionary encoded as JSON). 2. Values specified as environment variables. 3. Values specified in the configuration file. Configuration-file format is described in :mod:`dwave.cloud.config`. If the location of the configuration file is not specified, auto-detection searches for existing configuration files in the standard directories of :func:`get_configfile_paths`. If a configuration file explicitly specified, via an argument or environment variable, does not exist or is unreadable, loading fails with :exc:`~dwave.cloud.exceptions.ConfigFileReadError`. Loading fails with :exc:`~dwave.cloud.exceptions.ConfigFileParseError` if the file is readable but invalid as a configuration file. Similarly, if a profile explicitly specified, via an argument or environment variable, is not present in the loaded configuration, loading fails with :exc:`ValueError`. Explicit profile selection also fails if the configuration file is not explicitly specified, detected on the system, or defined via an environment variable. Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``, ``DWAVE_API_CLIENT``, ``DWAVE_API_ENDPOINT``, ``DWAVE_API_TOKEN``, ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``. Environment variables are described in :mod:`dwave.cloud.config`. Args: config_file (str/[str]/None/False/True, default=None): Path to configuration file(s). If `None`, the value is taken from `DWAVE_CONFIG_FILE` environment variable if defined. 
If the environment variable is undefined or empty, auto-detection searches for existing configuration files in the standard directories of :func:`get_configfile_paths`. If `False`, loading from file(s) is skipped; if `True`, forces auto-detection (regardless of the `DWAVE_CONFIG_FILE` environment variable). profile (str, default=None): Profile name (name of the profile section in the configuration file). If undefined, inferred from `DWAVE_PROFILE` environment variable if defined. If the environment variable is undefined or empty, a profile is selected in the following order: 1. From the default section if it includes a profile key. 2. The first section (after the default section). 3. If no other section is defined besides `[defaults]`, the defaults section is promoted and selected. client (str, default=None): Client type used for accessing the API. Supported values are `qpu` for :class:`dwave.cloud.qpu.Client` and `sw` for :class:`dwave.cloud.sw.Client`. endpoint (str, default=None): API endpoint URL. token (str, default=None): API authorization token. solver (str, default=None): :term:`solver` features, as a JSON-encoded dictionary of feature constraints, the client should use. See :meth:`~dwave.cloud.client.Client.get_solvers` for semantics of supported feature constraints. If undefined, the client uses a solver definition from environment variables, a configuration file, or falls back to the first available online solver. For backward compatibility, solver name in string format is accepted and converted to ``{"name": <solver name>}``. proxy (str, default=None): URL for proxy to use in connections to D-Wave API. Can include username/password, port, scheme, etc. If undefined, client uses the system-level proxy, if defined, or connects directly to the API. Returns: dict: Mapping of configuration keys to values for the profile (section), as read from the configuration file and optionally overridden by environment values and specified keyword arguments. 
Always contains the `client`, `endpoint`, `token`, `solver`, and `proxy` keys. Raises: :exc:`ValueError`: Invalid (non-existing) profile name. :exc:`~dwave.cloud.exceptions.ConfigFileReadError`: Config file specified or detected could not be opened or read. :exc:`~dwave.cloud.exceptions.ConfigFileParseError`: Config file parse failed. Examples This example loads the configuration from an auto-detected configuration file in the home directory of a Windows system user. >>> import dwave.cloud as dc >>> dc.config.load_config() {'client': u'qpu', 'endpoint': u'https://url.of.some.dwavesystem.com/sapi', 'proxy': None, 'solver': u'EXAMPLE_2000Q_SYSTEM_A', 'token': u'DEF-987654321987654321987654321'} >>> See which configuration file was loaded >>> dc.config.get_configfile_paths() [u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf'] Additional examples are given in :mod:`dwave.cloud.config`.
def percbend(x, y, beta=.2):
    """Percentage bend correlation (Wilcox 1994).

    Parameters
    ----------
    x, y : array_like
        First and second set of observations. x and y must be independent.
    beta : float
        Bending constant for omega (0 <= beta <= 0.5).

    Returns
    -------
    r : float
        Percentage bend correlation coefficient.
    pval : float
        Two-tailed p-value.

    Notes
    -----
    Code inspired by Matlab code from Cyril Pernet and Guillaume Rousselet.

    References
    ----------
    .. [1] Wilcox, R.R., 1994. The percentage bend correlation coefficient.
       Psychometrika 59, 601-616. https://doi.org/10.1007/BF02294395
    .. [2] Pernet CR, Wilcox R, Rousselet GA. Robust Correlation Analyses:
       False Positive and Power Validation Using a New Open Source Matlab
       Toolbox. Frontiers in Psychology. 2012;3:606.
       doi:10.3389/fpsyg.2012.00606.
    """
    from scipy.stats import t

    data = np.column_stack((x, y))
    n = data.shape[0]

    # Deviations from the column medians.
    med = np.tile(np.median(data, axis=0), n).reshape(data.shape)
    dev = data - med

    # omega: the (1 - beta) empirical quantile of the absolute deviations,
    # taken per column; it controls how far a point may stray before bending.
    sorted_abs = np.sort(np.abs(dev), axis=0)
    cutoff = int((1 - beta) * n)
    omega = sorted_abs[cutoff - 1, :]

    # Standardized deviations; inf/nan (possible when omega == 0) zeroed out.
    psi_all = dev / omega
    psi_all[np.isinf(psi_all)] = 0
    psi_all[np.isnan(psi_all)] = 0

    bent = np.zeros((2, n))
    for col in (0, 1):
        psi = psi_all[:, col]
        n_low = np.where(psi < -1)[0].size
        n_high = np.where(psi > 1)[0].size

        # Robust location estimate: drop bent observations, compensate with
        # +/- omega for each one dropped on either side.
        trimmed = data[:, col].copy()
        trimmed[np.where(psi < -1)[0]] = 0
        trimmed[np.where(psi > 1)[0]] = 0
        pbos = (np.sum(trimmed) + omega[col] * (n_high - n_low)) \
            / (trimmed.size - n_low - n_high)
        bent[col] = (data[:, col] - pbos) / omega[col]

    # Bend (clip) the standardized scores into [-1, 1].
    bent[bent <= -1] = -1
    bent[bent >= 1] = 1

    # Correlation of the bent scores, with a classical t-based p-value.
    a, b = bent
    r = (a * b).sum() / np.sqrt((a**2).sum() * (b**2).sum())
    tval = r * np.sqrt((n - 2) / (1 - r**2))
    pval = 2 * t.sf(abs(tval), n - 2)
    return r, pval
Percentage bend correlation (Wilcox 1994). Parameters ---------- x, y : array_like First and second set of observations. x and y must be independent. beta : float Bending constant for omega (0 <= beta <= 0.5). Returns ------- r : float Percentage bend correlation coefficient. pval : float Two-tailed p-value. Notes ----- Code inspired by Matlab code from Cyril Pernet and Guillaume Rousselet. References ---------- .. [1] Wilcox, R.R., 1994. The percentage bend correlation coefficient. Psychometrika 59, 601–616. https://doi.org/10.1007/BF02294395 .. [2] Pernet CR, Wilcox R, Rousselet GA. Robust Correlation Analyses: False Positive and Power Validation Using a New Open Source Matlab Toolbox. Frontiers in Psychology. 2012;3:606. doi:10.3389/fpsyg.2012.00606.
def _add_gateway_node(self, gw_type, routing_node_gateway, network=None):
    """
    Add a gateway node to existing routing tree. Gateways are only
    added if they do not already exist. If they do exist, check the
    destinations of the existing gateway and add destinations that
    are not already there. A current limitation is that if a gateway
    doesn't exist and the destinations specified do not have IP
    addresses that are valid, they are still added (i.e. IPv4 gateway
    with IPv6 destination is considered invalid).

    :param Routing self: the routing node, should be the interface
        routing node
    :param str gw_type: type of gateway, i.e. netlink, ospfv2_area, etc
    :param RoutingNodeGateway routing_node_gateway: gateway element
    :param str network: network to bind to. If none, all networks
    :return: Whether a change was made or not
    :rtype: bool
    """
    # Gateways can only hang off an interface-level node; anything else is
    # a caller error.
    if self.level != 'interface':
        raise ModificationAborted('You must make this change from the '
            'interface routing level. Current node: {}'.format(self))

    # Tunnel interfaces have their own, simpler insertion logic.
    if self.related_element_type == 'tunnel_interface':
        return self._add_gateway_node_on_tunnel(routing_node_gateway)

    # Find any existing gateways of this type (optionally narrowed to one
    # network). Each entry is an (interface, network, gateway) triple.
    routing_node = list(gateway_by_type(self, type=gw_type, on_network=network))

    # Candidate networks: either the single matching network or all child
    # networks of this interface node.
    _networks = [netwk for netwk in self if netwk.ip == network] if network is \
        not None else list(self)

    # Routing Node Gateway to add as Element
    gateway_element_type = routing_node_gateway.routing_node_element

    modified = False
    for network in _networks:
        # Short circuit for dynamic interfaces: no address comparison is
        # possible, so the gateway is appended unconditionally and only to
        # this first dynamic network.
        if getattr(network, 'dynamic_classid', None):
            network.data.setdefault('routing_node', []).append(
                routing_node_gateway)
            modified = True
            break

        # Used for comparison against the previously found gateways.
        this_network_node = network.routing_node_element

        if routing_node and any(netwk for _intf, netwk, gw in routing_node
                                if netwk.routing_node_element == this_network_node and
                                gateway_element_type == gw.routing_node_element):
            # A gateway exists on this network; merge in only the missing,
            # protocol-compatible destinations.
            for gw in network:
                if gw.routing_node_element == gateway_element_type:
                    existing_dests = [node.routing_node_element for node in gw]
                    for destination in routing_node_gateway.destinations:
                        is_valid_destination = False
                        if destination not in existing_dests:
                            dest_ipv4, dest_ipv6 = _which_ip_protocol(destination)
                            # Networks with ':' in the address are IPv6; the
                            # destination must speak the same protocol.
                            if len(network.ip.split(':')) > 1:
                                # IPv6
                                if dest_ipv6:
                                    is_valid_destination = True
                            else:
                                if dest_ipv4:
                                    is_valid_destination = True

                            if is_valid_destination:
                                gw.data.setdefault('routing_node', []).append(
                                    {'level': 'any', 'href': destination.href,
                                     'name': destination.name})
                                modified = True
        else:
            # Gateway doesn't exist on this network yet; add it whole if its
            # protocol matches the network's (destinations are NOT filtered
            # here -- see the limitation noted in the docstring).
            gw_ipv4, gw_ipv6 = _which_ip_protocol(gateway_element_type)
            # ipv4, ipv6 or both
            if len(network.ip.split(':')) > 1:
                if gw_ipv6:
                    network.data.setdefault('routing_node', []).append(
                        routing_node_gateway)
                    modified = True
            else:
                # IPv4
                if gw_ipv4:
                    network.data.setdefault('routing_node', []).append(
                        routing_node_gateway)
                    modified = True

    # Persist only when something actually changed.
    if modified:
        self.update()
    return modified
Add a gateway node to existing routing tree. Gateways are only added if they do not already exist. If they do exist, check the destinations of the existing gateway and add destinations that are not already there. A current limitation is that if a gateway doesn't exist and the destinations specified do not have IP addresses that are valid, they are still added (i.e. IPv4 gateway with IPv6 destination is considered invalid). :param Routing self: the routing node, should be the interface routing node :param str gw_type: type of gateway, i.e. netlink, ospfv2_area, etc :param RoutingNodeGateway routing_node_gateway: gateway element :param str network: network to bind to. If none, all networks :return: Whether a change was made or not :rtype: bool
def _make_passage_kwargs(urn, reference):
    """Build the keyword arguments CapitainsCtsPassage expects from its parents.

    :param urn: URN string (may be None)
    :param reference: Reference string (may be None)
    :return: Dictionary of arguments whose "urn" entry combines the
        identifier and the reference
    """
    if urn is None:
        return {}
    if reference is None:
        return {"urn": urn}
    return {"urn": URN("{}:{}".format(urn.upTo(URN.VERSION), reference))}
Little helper used by CapitainsCtsPassage here to comply with parents args :param urn: URN String :param reference: Reference String :return: Dictionary of arguments with URN based on identifier and reference
def stick_perm(presenter, egg, dist_dict, strategy):
    """Compute feature weights for one stick-breaking reordering of an egg."""
    # Re-seed from OS entropy so each (possibly forked) worker draws an
    # independent random stream.
    np.random.seed()

    # Original presentation order, used below to recover item positions.
    orig_pres = list(parse_egg(egg)[0])

    # Reorder the egg using the stick-breaking strategy.
    reordered = order_stick(presenter, egg, dist_dict, strategy)
    re_pres, _, re_features, _ = parse_egg(reordered)
    re_pres = list(re_pres)

    # Position of each reordered item within the original presentation list.
    orders = [orig_pres.index(item) for item in re_pres]

    # NOTE(review): both the "presented" and "recalled" arguments here are
    # the reordered presentation list -- confirm this is intentional.
    weights = compute_feature_weights_dict(
        list(re_pres), list(re_pres), list(re_features), dist_dict)

    return weights, orders
Computes weights for one reordering using stick-breaking method
def Luv_to_LCHuv(cobj, *args, **kwargs):
    """
    Convert from CIE Luv to LCH(uv).
    """
    lightness = cobj.luv_l
    # Chroma is the Euclidean length of the (u, v) vector.
    chroma = math.sqrt(math.pow(cobj.luv_u, 2.0) + math.pow(cobj.luv_v, 2.0))

    # atan2 yields radians in (-pi, pi]; map to degrees in [0, 360).
    hue = math.atan2(float(cobj.luv_v), float(cobj.luv_u))
    hue = (hue / math.pi) * 180 if hue > 0 else 360 - (math.fabs(hue) / math.pi) * 180

    return LCHuvColor(
        lightness, chroma, hue,
        observer=cobj.observer, illuminant=cobj.illuminant)
Convert from CIE Luv to LCH(uv).
def _setPrivate(self, private):
    """Directly set the private key (test hook) and derive the public key."""
    self.private = private
    # public = generator ** private mod modulus (modular exponentiation).
    self.public = pow(self.generator, self.private, self.modulus)
This is here to make testing easier
def from_file(path):
    """
    Return an AudioSegment object loaded from the given file.

    The audio format is inferred from the file extension; a wrong
    extension will cause the underlying decoder to raise an error.

    :param path: The path to the file, including the file extension.
    :returns: An AudioSegment instance from the file.
    """
    # Extension without the leading dot, lower-cased (e.g. "wav", "mp3").
    extension = os.path.splitext(path)[1][1:].lower()
    segment = pydub.AudioSegment.from_file(path, extension)
    return AudioSegment(segment, path)
Returns an AudioSegment object from the given file based on its file extension. If the extension is wrong, this will throw some sort of error. :param path: The path to the file, including the file extension. :returns: An AudioSegment instance from the file.
def example_delete_topics(a, topics):
    """Delete the given topics using the AdminClient ``a``."""
    # delete_topics() is asynchronous and returns {topic: future}. The broker
    # acknowledges immediately; operation_timeout=30 gives the deletion up to
    # 30 seconds to propagate through the cluster before the futures resolve.
    futures = a.delete_topics(topics, operation_timeout=30)

    # Block on each future and report per-topic success or failure.
    for topic, future in futures.items():
        try:
            future.result()  # The result itself is None
            print("Topic {} deleted".format(topic))
        except Exception as e:
            print("Failed to delete topic {}: {}".format(topic, e))
delete topics
def ConsultarDepositosAcopio(self, sep="||"):
    "Return the collection (acopio) depots belonging to the taxpayer"
    ret = self.client.consultarDepositosAcopio(
        auth={
            'token': self.Token,
            'sign': self.Sign,
            'cuit': self.Cuit,
        },
    )['respuesta']
    self.__analizar_errores(ret)
    array = ret.get('acopio', [])
    if sep is None:
        return array
    # Build "sep field sep field ... sep" rows; the %%s placeholders survive
    # the first substitution and receive the record fields in the second.
    template = "%s %%s %s %%s %s %%s %s %%s %s" % ((sep,) * 5)
    return [template % (it['codigo'], it['direccion'],
                        it['localidad'], it['codigoPostal'])
            for it in array]
Retorna los depósitos de acopio pertenencientes al contribuyente
def _checkMemberName(name):
    """
    See if a member name indicates that it should be private.

    Names beginning with a double underscore (but not ending in one) are
    labeled 'private'; names beginning with a single underscore are, by
    common convention, labeled 'protected'. Anything else -- including
    dunder names -- gets no restriction level (None).
    """
    assert isinstance(name, str)
    # Dunder names (trailing '__') are special methods, never restricted.
    if name.endswith('__'):
        return None
    if name.startswith('__'):
        return 'private'
    if name.startswith('_'):
        return 'protected'
    return None
See if a member name indicates that it should be private. Private variables in Python (starting with a double underscore but not ending in a double underscore) and bed lumps (variables that are not really private but are by common convention treated as protected because they begin with a single underscore) get Doxygen tags labeling them appropriately.
def copy(self, target_parent, name=None, include_children=True, include_instances=True):
    """Copy the `Part` to target parent, both of them having the same category.

    .. versionadded:: 2.3

    :param target_parent: `Part` object under which the desired `Part` is copied
    :type target_parent: :class:`Part`
    :param name: how the copied top-level `Part` should be called
    :type name: basestring
    :param include_children: True to copy also the descendants of `Part`.
    :type include_children: bool
    :param include_instances: True to copy also the instances of `Part` to ALL the
        instances of target_parent.
    :type include_instances: bool
    :returns: copied :class:`Part` model.
    :raises IllegalArgumentError: if part and target_parent have different `Category`
    :raises IllegalArgumentError: if part and target_parent are identical

    Example
    -------
    >>> model_to_copy = client.model(name='Model to be copied')
    >>> bike = client.model('Bike')
    >>> model_to_copy.copy(target_parent=bike, name='Copied model',
    >>>                    include_children=True,
    >>>                    include_instances=True)
    """
    if self.category == Category.MODEL and target_parent.category == Category.MODEL:
        # Cannot add a model under an instance or vice versa
        copied_model = relocate_model(part=self, target_parent=target_parent,
                                      name=name, include_children=include_children)
        if include_instances:
            # Replicate the copied model's instances under every instance of
            # the target parent.
            instances_to_be_copied = list(self.instances())
            parent_instances = list(target_parent.instances())
            for parent_instance in parent_instances:
                for instance in instances_to_be_copied:
                    instance.populate_descendants()
                    move_part_instance(part_instance=instance,
                                       target_parent=parent_instance,
                                       part_model=self,
                                       name=instance.name,
                                       include_children=include_children)
        return copied_model

    elif self.category == Category.INSTANCE and target_parent.category == Category.INSTANCE:
        copied_instance = relocate_instance(part=self, target_parent=target_parent,
                                            name=name, include_children=include_children)
        return copied_instance

    else:
        # BUG FIX: the message previously contained '{}' placeholders but was
        # never passed through str.format(), so callers saw the raw braces.
        raise IllegalArgumentError(
            'part "{}" and target parent "{}" must have the same category'.format(
                self.name, target_parent.name))
Copy the `Part` to target parent, both of them having the same category. .. versionadded:: 2.3 :param target_parent: `Part` object under which the desired `Part` is copied :type target_parent: :class:`Part` :param name: how the copied top-level `Part` should be called :type name: basestring :param include_children: True to copy also the descendants of `Part`. :type include_children: bool :param include_instances: True to copy also the instances of `Part` to ALL the instances of target_parent. :type include_instances: bool :returns: copied :class:`Part` model. :raises IllegalArgumentError: if part and target_parent have different `Category` :raises IllegalArgumentError: if part and target_parent are identical Example ------- >>> model_to_copy = client.model(name='Model to be copied') >>> bike = client.model('Bike') >>> model_to_copy.copy(target_parent=bike, name='Copied model', >>> include_children=True, >>> include_instances=True)
def _update_val(self, val, result_score_list):
    """Update values in the list of result scores and self._pbar during
    pipeline evaluation.

    Parameters
    ----------
    val: float or "Timeout"
        CV scores
    result_score_list: list
        A list of CV scores

    Returns
    -------
    result_score_list: list
        A updated list of CV scores
    """
    self._update_pbar()
    if val == 'Timeout':
        # Record a sentinel score so the timed-out pipeline is never selected.
        msg = ('Skipped pipeline #{0} due to time out. '
               'Continuing to the next pipeline.'.format(self._pbar.n))
        self._update_pbar(pbar_msg=msg)
        result_score_list.append(-float('inf'))
    else:
        result_score_list.append(val)
    return result_score_list
Update values in the list of result scores and self._pbar during pipeline evaluation. Parameters ---------- val: float or "Timeout" CV scores result_score_list: list A list of CV scores Returns ------- result_score_list: list A updated list of CV scores
def keyed_ordering(cls):
    """Class decorator deriving all six rich comparison methods from a
    ``__key__`` method.

    Comparisons are performed on the return value of ``__key__``, in much
    the same way as the ``key`` argument to ``sorted``. For example::

        @keyed_ordering
        class TimeSpan(object):
            def __init__(self, start, end):
                self.start = start
                self.end = end

            def __key__(self):
                return (self.start, self.end)

    Each generated method returns ``NotImplemented`` for operands that are
    not instances of the decorated class, so subclasses still compare
    correctly (unlike ``total_ordering`` before 3.4, which also does not
    derive ``__ne__`` from ``__eq__``). Any of the six methods already
    defined on the decorated class itself are left untouched.
    """
    if '__key__' not in cls.__dict__:
        raise TypeError("keyed_ordering requires a __key__ method")

    for method_name in ('__eq__', '__ne__', '__lt__', '__gt__', '__le__', '__ge__'):
        # Respect any comparison the class defines itself.
        if method_name in cls.__dict__:
            continue
        setattr(cls, method_name, _keyed_ordering_impl(method_name, cls))

    return cls
Class decorator to generate all six rich comparison methods, based on a ``__key__`` method. Many simple classes are wrappers for very simple data, and want to defer comparisons to that data. Rich comparison is very flexible and powerful, but makes this simple case tedious to set up. There's the standard library's ``total_ordering`` decorator, but it still requires you to write essentially the same method twice, and doesn't correctly handle ``NotImplemented`` before 3.4. It also doesn't automatically generate ``__ne__`` from ``__eq__``, which is a common gotcha. With this decorator, comparisons will be done on the return value of ``__key__``, in much the same way as the ``key`` argument to ``sorted``. For example, if you have a class representing a span of time: .. code-block:: python @keyed_ordering class TimeSpan(object): def __init__(self, start, end): self.start = start self.end = end def __key__(self): return (self.start, self.end) This is equivalent to the following, assuming 3.4's ``total_ordering``: .. code-block:: python @total_ordering class TimeSpan(object): def __init__(self, start, end): self.start = start self.end = end def __eq__(self, other): if not isinstance(other, TimeSpan): return NotImplemented return (self.start, self.end) == (other.start, other.end) def __ne__(self, other): if not isinstance(other, TimeSpan): return NotImplemented return (self.start, self.end) != (other.start, other.end) def __lt__(self, other): if not isinstance(other, TimeSpan): return NotImplemented return (self.start, self.end) < (other.start, other.end) The ``NotImplemented`` check is based on the class being decorated, so subclassses can still be correctly compared. You may also implement some of the rich comparison methods in the decorated class, in which case they'll be left alone.
def discover(timeout=1, retries=1):
    """Discover Raumfeld devices in the network.

    Sends an SSDP M-SEARCH multicast per retry and collects the Location
    headers of the responses.

    :param timeout: The timeout in seconds
    :param retries: How often the search should be retried
    :returns: A list of raumfeld devices, sorted by name
    """
    locations = []
    group = ('239.255.255.250', 1900)
    service = 'ssdp:urn:schemas-upnp-org:device:MediaRenderer:1'  # 'ssdp:all'
    message = '\r\n'.join(['M-SEARCH * HTTP/1.1',
                           'HOST: {group[0]}:{group[1]}',
                           'MAN: "ssdp:discover"',
                           'ST: {st}',
                           'MX: 1', '', '']).format(group=group, st=service)

    # NOTE: this changes the process-wide default socket timeout.
    socket.setdefaulttimeout(timeout)
    for _ in range(retries):
        sock = socket.socket(socket.AF_INET,
                             socket.SOCK_DGRAM,
                             socket.IPPROTO_UDP)
        try:
            # socket options
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)

            # send group multicast
            sock.sendto(message.encode('utf-8'), group)

            # Collect responses until the socket times out.
            while True:
                try:
                    response = sock.recv(2048).decode('utf-8')
                except socket.timeout:
                    break
                for line in response.split('\r\n'):
                    if line.startswith('Location: '):
                        location = line.split(' ')[1].strip()
                        if location not in locations:
                            locations.append(location)
        finally:
            # BUG FIX: sockets were previously never closed, leaking one file
            # descriptor per retry.
            sock.close()

    devices = [RaumfeldDevice(location) for location in locations]

    # only return 'Virtual Media Player' and sort the list
    return sorted([device for device in devices
                   if device.model_description == 'Virtual Media Player'],
                  key=lambda device: device.friendly_name)
Discover Raumfeld devices in the network :param timeout: The timeout in seconds :param retries: How often the search should be retried :returns: A list of raumfeld devices, sorted by name
def get_term_freq_mat(self):
    '''
    Returns
    -------
    np.array with columns as categories and rows as terms
    '''
    num_categories = self.get_num_categories()
    num_terms = self.get_num_terms()
    freq_mat = np.zeros(shape=(num_terms, num_categories), dtype=int)
    # Each column holds the per-term counts summed over the documents
    # belonging to that category.
    for category_idx in range(num_categories):
        category_rows = self._X[self._y == category_idx, :]
        freq_mat[:, category_idx] = category_rows.sum(axis=0)
    return freq_mat
Returns ------- np.array with columns as categories and rows as terms
def set_bot_permissions(calendar_id, userid, level):
    """
    :param calendar_id: an integer representing calendar ID
    :param userid: a string representing the user's UW NetID
    :param level: a string representing the permission level
    :return: True if request is successful, False otherwise.

    raise DataFailureException or a corresponding TrumbaException
    if the request failed or an error code has been returned.
    """
    url = _make_set_permissions_url(calendar_id, userid, level)
    resource = get_bot_resource(url)
    return _process_resp(url, resource, _is_permission_set)
:param calendar_id: an integer representing calendar ID :param userid: a string representing the user's UW NetID :param level: a string representing the permission level :return: True if request is successful, False otherwise. raise DataFailureException or a corresponding TrumbaException if the request failed or an error code has been returned.
def _is_current_user(self, some_user):
    """
    Is the specified user the current user?

    :param some_user: RemoteUser we want to check against the current user
    :return: boolean: True if the current user is the passed in user
    """
    # Compare by id rather than object identity.
    return self.remote_store.get_current_user().id == some_user.id
Is the specified user the current user? :param some_user: RemoteUser user we want to check against the current user :return: boolean: True if the current user is the passed in user
def scale(val, src, dst):
    """
    Scale ``val`` from the ``src`` range to the ``dst`` range.

    Values outside ``src`` are clipped to the corresponding ``dst`` bound.

    Ex:
        scale(0, (0.0, 99.0), (-1.0, 1.0)) == -1.0
        scale(-5, (0.0, 99.0), (-1.0, 1.0)) == -1.0
    """
    src_lo, src_hi = src
    dst_lo, dst_hi = dst
    if val < src_lo:
        return dst_lo
    if val > src_hi:
        return dst_hi
    # Linear interpolation: position within src mapped onto dst.
    fraction = (val - src_lo) / (src_hi - src_lo)
    return fraction * (dst_hi - dst_lo) + dst_lo
Scale value from src range to dst range. If value outside bounds, it is clipped and set to the low or high bound of dst. Ex: scale(0, (0.0, 99.0), (-1.0, 1.0)) == -1.0 scale(-5, (0.0, 99.0), (-1.0, 1.0)) == -1.0
def xml2object(xml, verbose=False):
    """Generate XML object model from XML file or XML text

    This is the inverse operation to the __str__ representation
    (up to whitespace).

    Input xml can be either an
      * xml file
      * open xml file object

    Return XML_document instance.
    """
    # BUG FIX: the original used the Python-2-only statement form
    # ``raise Exception, msg`` (a SyntaxError on Python 3). The call form
    # ``raise Exception(msg)`` works on both 2 and 3. ``basestring`` is
    # likewise guarded so the function runs on either major version.
    try:
        string_types = basestring  # Python 2
    except NameError:
        string_types = str  # Python 3

    opened_here = isinstance(xml, string_types)
    fid = open(xml) if opened_here else xml

    try:
        try:
            dom = parse(fid)
        except Exception as e:
            # Throw filename into dom exception
            msg = 'XML file "%s" could not be parsed.\n' % fid.name
            msg += 'Error message from parser: "%s"' % str(e)
            raise Exception(msg)

        try:
            xml_object = dom2object(dom)
        except Exception as e:
            msg = 'Could not convert %s into XML object.\n' % fid.name
            msg += str(e)
            raise Exception(msg)
    finally:
        # Close the file only if this function opened it (fid.name remains
        # readable on a closed file, so the error paths above are safe).
        if opened_here:
            fid.close()

    return xml_object
Generate XML object model from XML file or XML text This is the inverse operation to the __str__ representation (up to whitespace). Input xml can be either an * xml file * open xml file object Return XML_document instance.
def modified_lu(q):
    """Perform a modified LU decomposition of a matrix.

    This takes a matrix q with orthonormal columns, returns l, u, s such that
    q - s = l * u.

    Args:
        q: A two dimensional orthonormal matrix q.

    Returns:
        A tuple of a lower triangular matrix l, an upper triangular matrix u,
        and a a vector representing a diagonal matrix s such that
        q - s = l * u.
    """
    # NOTE(review): q appears to be a distributed array; assemble()
    # presumably gathers it into a local numpy array -- confirm.
    q = q.assemble()
    m, b = q.shape[0], q.shape[1]
    S = np.zeros(b)

    # Work on a copy; the elimination below overwrites entries in place.
    q_work = np.copy(q)

    for i in range(b):
        # Shift the i-th diagonal entry away from zero by its own sign; for
        # orthonormal columns |q[i, i]| <= 1, so the shifted pivot has
        # magnitude >= 1, keeping the elimination well conditioned.
        S[i] = -1 * np.sign(q_work[i, i])
        q_work[i, i] -= S[i]

        # Scale ith column of L by diagonal element.
        q_work[(i + 1):m, i] /= q_work[i, i]

        # Perform Schur complement update.
        q_work[(i + 1):m, (i + 1):b] -= np.outer(q_work[(i + 1):m, i],
                                                 q_work[i, (i + 1):b])

    # q_work now holds L (strictly below the diagonal) and U (on and above).
    L = np.tril(q_work)
    for i in range(b):
        L[i, i] = 1  # L has a unit diagonal by construction.

    U = np.triu(q_work)[:b, :]

    # L is returned as a distributed array again; U and S stay local.
    # TODO(rkn): Get rid of the put below.
    return ray.get(core.numpy_to_dist.remote(ray.put(L))), U, S
Perform a modified LU decomposition of a matrix. This takes a matrix q with orthonormal columns, returns l, u, s such that q - s = l * u. Args: q: A two dimensional orthonormal matrix q. Returns: A tuple of a lower triangular matrix l, an upper triangular matrix u, and a a vector representing a diagonal matrix s such that q - s = l * u.
def save(store, *args, **kwargs):
    """Convenience function to save an array or group of arrays to the local
    file system.

    Parameters
    ----------
    store : MutableMapping or string
        Store or path to directory in file system or name of zip file.
    args : ndarray
        NumPy arrays with data to save.
    kwargs
        NumPy arrays with data to save.

    Notes
    -----
    A single unnamed array is saved directly (via :func:`save_array`);
    several arrays, or any passed by keyword, are stored as members of a
    group (via :func:`save_group`). A path ending in ``.zip`` uses a
    :class:`ZipStore`, a directory path a :class:`DirectoryStore`.

    Examples
    --------
    >>> import zarr
    >>> import numpy as np
    >>> a1 = np.arange(10000)
    >>> a2 = np.arange(10000, 0, -1)
    >>> zarr.save('data/example.zarr', foo=a1, bar=a2)
    >>> loader = zarr.load('data/example.zarr')
    >>> loader['foo']
    array([   0,    1,    2, ..., 9997, 9998, 9999])

    See Also
    --------
    save_array, save_group
    """
    if not args and not kwargs:
        raise ValueError('at least one array must be provided')
    if len(args) == 1 and not kwargs:
        save_array(store, args[0])
    else:
        save_group(store, *args, **kwargs)
Convenience function to save an array or group of arrays to the local file system. Parameters ---------- store : MutableMapping or string Store or path to directory in file system or name of zip file. args : ndarray NumPy arrays with data to save. kwargs NumPy arrays with data to save. Examples -------- Save an array to a directory on the file system (uses a :class:`DirectoryStore`):: >>> import zarr >>> import numpy as np >>> arr = np.arange(10000) >>> zarr.save('data/example.zarr', arr) >>> zarr.load('data/example.zarr') array([ 0, 1, 2, ..., 9997, 9998, 9999]) Save an array to a Zip file (uses a :class:`ZipStore`):: >>> zarr.save('data/example.zip', arr) >>> zarr.load('data/example.zip') array([ 0, 1, 2, ..., 9997, 9998, 9999]) Save several arrays to a directory on the file system (uses a :class:`DirectoryStore` and stores arrays in a group):: >>> import zarr >>> import numpy as np >>> a1 = np.arange(10000) >>> a2 = np.arange(10000, 0, -1) >>> zarr.save('data/example.zarr', a1, a2) >>> loader = zarr.load('data/example.zarr') >>> loader <LazyLoader: arr_0, arr_1> >>> loader['arr_0'] array([ 0, 1, 2, ..., 9997, 9998, 9999]) >>> loader['arr_1'] array([10000, 9999, 9998, ..., 3, 2, 1]) Save several arrays using named keyword arguments:: >>> zarr.save('data/example.zarr', foo=a1, bar=a2) >>> loader = zarr.load('data/example.zarr') >>> loader <LazyLoader: bar, foo> >>> loader['foo'] array([ 0, 1, 2, ..., 9997, 9998, 9999]) >>> loader['bar'] array([10000, 9999, 9998, ..., 3, 2, 1]) Store several arrays in a single zip file (uses a :class:`ZipStore`):: >>> zarr.save('data/example.zip', foo=a1, bar=a2) >>> loader = zarr.load('data/example.zip') >>> loader <LazyLoader: bar, foo> >>> loader['foo'] array([ 0, 1, 2, ..., 9997, 9998, 9999]) >>> loader['bar'] array([10000, 9999, 9998, ..., 3, 2, 1]) See Also -------- save_array, save_group
def format_argspec_plus(fn, grouped=True):
    """Returns a dictionary of formatted, introspected function arguments.

    A enhanced variant of inspect.formatargspec to support code generation.

    NOTE(review): relies on ``inspect.getargspec``/``formatargspec``, which
    are deprecated since Python 3.0/3.5 and removed in 3.11 -- this function
    only runs on older interpreters; confirm the supported Python range.

    fn
       An inspectable callable or tuple of inspect getargspec() results.
    grouped
      Defaults to True; include (parens, around, argument) lists

    Returns:

    args
      Full inspect.formatargspec for fn
    self_arg
      The name of the first positional argument, varargs[0], or None
      if the function defines no positional arguments.
    apply_pos
      args, re-written in calling rather than receiving syntax.  Arguments are
      passed positionally.
    apply_kw
      Like apply_pos, except keyword-ish args are passed as keywords.

    Example::

      >>> format_argspec_plus(lambda self, a, b, c=3, **d: 123)
      {'args': '(self, a, b, c=3, **d)',
       'self_arg': 'self',
       'apply_kw': '(self, a, b, c=c, **d)',
       'apply_pos': '(self, a, b, c, **d)'}

    """
    # Pre-computed argspec tuples may be passed instead of a callable
    # (the old ``and/or`` conditional-expression idiom).
    spec = callable(fn) and inspect.getargspec(fn) or fn
    args = inspect.formatargspec(*spec)
    if spec[0]:
        # First named positional argument (conventionally ``self``).
        self_arg = spec[0][0]
    elif spec[1]:
        # No named positionals but *varargs exist: use its first element.
        self_arg = '%s[0]' % spec[1]
    else:
        self_arg = None
    # Calling syntax with everything positional (defaults omitted).
    apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2])
    # The last len(defaults) named args are the defaulted ones;
    # ``0-len(...)`` is just ``-len(...)``.
    defaulted_vals = spec[3] is not None and spec[0][0-len(spec[3]):] or ()
    # Calling syntax with defaulted args passed as name=name keywords.
    apply_kw = inspect.formatargspec(spec[0], spec[1], spec[2], defaulted_vals,
                                     formatvalue=lambda x: '=' + x)
    if grouped:
        return dict(args=args, self_arg=self_arg,
                    apply_pos=apply_pos, apply_kw=apply_kw)
    else:
        # Strip the surrounding parentheses from each formatted list.
        return dict(args=args[1:-1], self_arg=self_arg,
                    apply_pos=apply_pos[1:-1], apply_kw=apply_kw[1:-1])
Returns a dictionary of formatted, introspected function arguments. A enhanced variant of inspect.formatargspec to support code generation. fn An inspectable callable or tuple of inspect getargspec() results. grouped Defaults to True; include (parens, around, argument) lists Returns: args Full inspect.formatargspec for fn self_arg The name of the first positional argument, varargs[0], or None if the function defines no positional arguments. apply_pos args, re-written in calling rather than receiving syntax. Arguments are passed positionally. apply_kw Like apply_pos, except keyword-ish args are passed as keywords. Example:: >>> format_argspec_plus(lambda self, a, b, c=3, **d: 123) {'args': '(self, a, b, c=3, **d)', 'self_arg': 'self', 'apply_kw': '(self, a, b, c=c, **d)', 'apply_pos': '(self, a, b, c, **d)'}
def build_tables(
        table_names_to_dataframes,
        table_names_to_primary_keys=None,
        table_names_to_indices=None):
    """
    Parameters
    ----------
    table_names_to_dataframes : dict
        Dictionary mapping each table name to a DataFrame

    table_names_to_primary_keys : dict, optional
        Dictionary mapping each table to its primary key

    table_names_to_indices : dict, optional
        Dictionary mapping each table to a set of indices

    Returns list of DatabaseTable objects
    """
    # BUG FIX (anti-pattern): the optional dicts used mutable default
    # arguments (``={}``), which are shared across calls; use a None
    # sentinel instead. Behavior for all existing callers is unchanged.
    if table_names_to_primary_keys is None:
        table_names_to_primary_keys = {}
    if table_names_to_indices is None:
        table_names_to_indices = {}

    tables = []
    for table_name, df in table_names_to_dataframes.items():
        table = DatabaseTable.from_dataframe(
            name=table_name,
            df=df,
            indices=table_names_to_indices.get(table_name, []),
            primary_key=table_names_to_primary_keys.get(table_name))
        tables.append(table)
    return tables
Parameters ---------- table_names_to_dataframes : dict Dictionary mapping each table name to a DataFrame table_names_to_primary_keys : dict Dictionary mapping each table to its primary key table_names_to_indices : dict Dictionary mapping each table to a set of indices Returns list of DatabaseTable objects
def teams(self, page=None, year=None, simple=False, keys=False):
    """Get list of teams.

    :param page: Page of teams to view. Each page contains 500 teams.
    :param year: View teams from a specific year.
    :param simple: Get only vital data.
    :param keys: Set to true if you only want the teams' keys rather than
        full data on them.
    :return: List of Team objects or string keys.
    """
    if page is None:
        # No specific page requested: walk pages from 0 upward and merge
        # results until an empty page signals the end.
        all_teams = []
        current = 0
        while True:
            fetched = self.teams(page=current, year=year,
                                 simple=simple, keys=keys)
            if not fetched:
                break
            all_teams.extend(fetched)
            current += 1
        return all_teams

    suffix = '/simple' if simple else ''
    if year:
        if keys:
            return self._get('teams/%s/%s/keys' % (year, page))
        raw_teams = self._get('teams/%s/%s%s' % (year, page, suffix))
        return [Team(raw) for raw in raw_teams]
    if keys:
        return self._get('teams/%s/keys' % page)
    raw_teams = self._get('teams/%s%s' % (page, suffix))
    return [Team(raw) for raw in raw_teams]
Get list of teams. :param page: Page of teams to view. Each page contains 500 teams. :param year: View teams from a specific year. :param simple: Get only vital data. :param keys: Set to true if you only want the teams' keys rather than full data on them. :return: List of Team objects or string keys.
def rmtree_p(self):
    """Like :meth:`rmtree`, but does not raise an exception if the
    directory does not exist.

    Returns self so calls can be chained.
    """
    try:
        self.rmtree()
    except OSError as e:
        # Only a missing directory is tolerated; re-raise anything else
        # (permission errors, etc.). The old `_, e, _ = sys.exc_info()`
        # dance was a Python-2 compatibility shim and is no longer needed.
        if e.errno != errno.ENOENT:
            raise
    return self
Like :meth:`rmtree`, but does not raise an exception if the directory does not exist.
def _remove_hidden_parts(projected_surface):
    """Blank out the portions of a projected surface that are not visible.

    Args:
        projected_surface (surface): the surface to use

    Returns:
        surface: A copy of the projected surface with occluded entries
        set to NaN.
    """
    visible = _make_occlusion_mask(projected_surface)
    result = np.copy(projected_surface)
    result[np.logical_not(visible)] = np.nan
    return result
Removes parts of a projected surface that are not visible. Args: projected_surface (surface): the surface to use Returns: surface: A projected surface.
def pbkdf2(password, salt, outlen, digesttype="sha1", iterations=2000):
    """Derive key material from a password via OpenSSL's PKCS5_PBKDF2_HMAC.

    Parameters:

    @param password - password to derive key from (text is UTF-8 encoded)
    @param salt - random salt to use for key derivation
    @param outlen - number of bytes to derive
    @param digesttype - name of digest to use (default sha1)
    @param iterations - number of iterations to use

    @returns outlen bytes of key material derived from password and salt
    """
    digest = DigestType(digesttype)
    buf = create_string_buffer(outlen)
    # The C API wants bytes; encode text passwords first.
    if isinstance(password, chartype):
        pwd = password.encode("utf-8")
    else:
        pwd = password
    status = libcrypto.PKCS5_PBKDF2_HMAC(pwd, len(pwd), salt, len(salt),
                                         iterations, digest.digest,
                                         outlen, buf)
    if status <= 0:
        raise LibCryptoError("error computing PBKDF2")
    return buf.raw
Interface to PKCS5_PBKDF2_HMAC function
    Parameters:

    @param password - password to derive key from
    @param salt - random salt to use for key derivation
    @param outlen - number of bytes to derive
    @param digesttype - name of digest to use (default sha1)
    @param iterations - number of iterations to use

    @returns outlen bytes of key material derived from password and salt
def hdel(self, key, field, *fields):
    """Delete one or more hash fields."""
    command_args = (key, field) + fields
    return self.execute(b'HDEL', *command_args)
Delete one or more hash fields.
def _get_operation_input_field_values(self, metadata, file_input):
    """Returns a dictionary of envs or file inputs for an operation.

    Args:
      metadata: operation metadata field
      file_input: True to return a dict of file inputs, False to return envs.

    Returns:
      A dictionary of input field name value pairs
    """
    # Parameter *types* come from the pipeline's inputParameters; the
    # *values* come from the pipelineArgs inputs.
    input_args = metadata['request']['ephemeralPipeline']['inputParameters']
    vals_dict = metadata['request']['pipelineArgs']['inputs']

    # A parameter is a file input iff it declares a localCopy; select the
    # requested kind and keep only those that actually have a value.
    result = {}
    for arg in input_args:
        if ('localCopy' in arg) == file_input and arg['name'] in vals_dict:
            result[arg['name']] = vals_dict[arg['name']]
    return result
Returns a dictionary of envs or file inputs for an operation. Args: metadata: operation metadata field file_input: True to return a dict of file inputs, False to return envs. Returns: A dictionary of input field name value pairs
def wait(self, timeout=-1):
    """Wait for the child to exit.

    Wait for at most *timeout* seconds, or indefinitely if *timeout* is
    None. The default of ``-1`` means "use this instance's configured
    timeout". Return the value of the :attr:`returncode` attribute.
    """
    if self._process is None:
        raise RuntimeError('no child process')
    effective_timeout = self._timeout if timeout == -1 else timeout
    if not self._child_exited.wait(effective_timeout):
        raise Timeout('timeout waiting for child to exit')
    return self.returncode
Wait for the child to exit. Wait for at most *timeout* seconds, or indefinitely if *timeout* is None. Return the value of the :attr:`returncode` attribute.
def deploy(self, *lambdas):
    """Deploys this instance's lambda (self.lambda_name) to AWS.

    Packages the code, then either updates the existing function's code
    or creates a new function, and logs the outcome.

    Raises:
        ArgumentsError: if no AWS role is configured on this instance.
    """
    # NOTE(review): the *lambdas varargs are never used below -- only
    # self.lambda_name is deployed. Confirm whether that is intentional.
    if not self.role:
        logger.error('Missing AWS Role')
        raise ArgumentsError('Role required')

    logger.debug('Deploying lambda {}'.format(self.lambda_name))

    # Build the deployment zip (in-memory file-like object).
    zfh = self.package()

    if self.lambda_name in self.get_function_names():
        # Function already exists: push new code and publish a version.
        logger.info('Updating {} lambda'.format(self.lambda_name))
        response = self.client.update_function_code(
            FunctionName=self.lambda_name,
            ZipFile=zfh.getvalue(),
            Publish=True
        )
    else:
        # First deployment: create the function, pulling runtime, handler,
        # timeout and memory settings from the environment with defaults.
        logger.info('Adding new {} lambda'.format(self.lambda_name))
        response = self.client.create_function(
            FunctionName=self.lambda_name,
            Runtime=yaep.env(
                'LAMBDA_RUNTIME',
                'python2.7'
            ),
            Role=self.role,
            Handler=yaep.env(
                'LAMBDA_HANDLER',
                'lambda_function.lambda_handler'
            ),
            Code={
                'ZipFile': zfh.getvalue(),
            },
            Description=yaep.env(
                'LAMBDA_DESCRIPTION',
                'Lambda code for {}'.format(self.lambda_name)
            ),
            Timeout=yaep.env(
                'LAMBDA_TIMEOUT',
                3,
                convert_booleans=False,
                type_class=int
            ),
            MemorySize=yaep.env(
                'LAMBDA_MEMORY_SIZE',
                128,
                convert_booleans=False,
                type_class=int
            ),
            Publish=True
        )

    status_code = response.get(
        'ResponseMetadata', {}
    ).get('HTTPStatusCode')

    if status_code in [200, 201]:
        # NOTE(review): 'Unkown' is a typo in the log message; left
        # unchanged here because it is runtime output.
        logger.info('Successfully deployed {} version {}'.format(
            self.lambda_name,
            response.get('Version', 'Unkown')
        ))
    else:
        logger.error('Error deploying {}: {}'.format(
            self.lambda_name,
            response
        ))
Deploys lambdas to AWS
def get_tags(self, name):
    """Returns a list of tag values registered under the given name.

    @param str name: The name of the tag.

    :rtype: list[str]
    """
    return [tag[1] for tag in self._tags if tag[0] == name]
Returns a list of tags. @param str name: The name of the tag. :rtype: list[str]
def run(cmd):
    """Run a shell command and return its output (stdout and stderr merged).

    Args:
        cmd: sequence of program arguments; each element is shell-quoted.

    Returns:
        The command's combined output, decoded as UTF-8 and stripped.
    """
    # `pipes.quote` was deprecated and removed in Python 3.13 (PEP 594);
    # `shlex.quote` is the same function under its modern name. Imported
    # locally so this block carries its own dependency.
    import shlex
    quoted = " ".join(shlex.quote(part) for part in cmd)
    # Append `exit 0` so a failing command does not raise
    # CalledProcessError; callers get whatever output was produced.
    quoted += "; exit 0"
    try:
        output = subprocess.check_output(quoted, stderr=subprocess.STDOUT,
                                         shell=True)
    except subprocess.CalledProcessError as e:
        output = e.output
    return output.decode('utf-8').strip()
Run a shell command
def state_probability(self, direction, repertoire, purview,):
    """Compute the probability of the purview in its current state given
    the repertoire.

    Collapses the dimensions of the repertoire that correspond to the
    purview nodes onto their state. All other dimensions are already
    singular and thus receive 0 as the conditioning index.

    Returns:
        float: A single probability.
    """
    purview_state = self.purview_state(direction)
    # Condition purview dimensions on their state; everything else on 0.
    conditioning_index = []
    for node, node_state in enumerate(purview_state):
        conditioning_index.append(node_state if node in purview else 0)
    return repertoire[tuple(conditioning_index)]
Compute the probability of the purview in its current state given
        the repertoire.

        Collapses the dimensions of the repertoire that correspond to the
        purview nodes onto their state. All other dimension are already
        singular and thus receive 0 as the conditioning index.

        Returns:
            float: A single probability.
def add_auth_hook(self, event):
    """Register event hook on reception of add_auth_hook-event"""
    hook_name = event.authenticator_name
    self.log('Adding authentication hook for', hook_name)
    self.auth_hooks[hook_name] = event.event
Register event hook on reception of add_auth_hook-event
def F_oneway(*lists):
    """Performs a 1-way ANOVA, returning an F-value and probability given
    any number of groups.  From Heiman, pp.394-7.

    Usage:   F_oneway(*lists)    where *lists is any number of lists, one per
                                      treatment group
    Returns: F value, one-tailed p-value
    """
    # Removed dead code from the original: `means`, `vars` and `ns` were
    # built with lazy map() and never consumed (no-ops on Python 3), and
    # locals shadowed the builtins `vars` and `list`.
    a = len(lists)  # number of treatment groups
    # Pool every observation into one flat list.
    alldata = []
    for group in lists:
        alldata = alldata + group
    bign = len(alldata)
    # Total sum of squares around the grand mean.
    sstot = ss(alldata) - (square_of_sums(alldata) / float(bign))
    # Between-groups sum of squares.
    ssbn = 0
    for group in lists:
        ssbn = ssbn + square_of_sums(group) / float(len(group))
    ssbn = ssbn - (square_of_sums(alldata) / float(bign))
    # Within-groups sum of squares and the degrees of freedom.
    sswn = sstot - ssbn
    dfbn = a - 1
    dfwn = bign - a
    msb = ssbn / float(dfbn)
    msw = sswn / float(dfwn)
    f = msb / msw
    prob = fprob(dfbn, dfwn, f)
    return f, prob
Performs a 1-way ANOVA, returning an F-value and probability given any number of groups. From Heiman, pp.394-7. Usage: F_oneway(*lists) where *lists is any number of lists, one per treatment group Returns: F value, one-tailed p-value
def get_args(self):
    """Gets the args for the query which will be escaped when being executed
    by the db. All inner queries are inspected and their args are combined
    with this query's args.

    :return: all args for this query as a dict
    :rtype: dict
    """
    all_tables = self.tables + self.with_tables
    for table in all_tables:
        if type(table) is QueryTable:
            # Merge args from the nested query so its placeholders resolve.
            self._where.args.update(table.query.get_args())
    return self._where.args
Gets the args for the query which will be escaped when being executed by the db. All inner queries are inspected and their args are combined with this query's args. :return: all args for this query as a dict :rtype: dict
def get_assignable_gradebook_ids(self, gradebook_id): """Gets a list of gradebooks including and under the given gradebook node in which any grade system can be assigned. arg: gradebook_id (osid.id.Id): the ``Id`` of the ``Gradebook`` return: (osid.id.IdList) - list of assignable gradebook ``Ids`` raise: NullArgument - ``gradebook_id`` is ``null`` raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids # This will likely be overridden by an authorization adapter mgr = self._get_provider_manager('GRADING', local=True) lookup_session = mgr.get_gradebook_lookup_session(proxy=self._proxy) gradebooks = lookup_session.get_gradebooks() id_list = [] for gradebook in gradebooks: id_list.append(gradebook.get_id()) return IdList(id_list)
Gets a list of gradebooks including and under the given gradebook node in which any grade system can be assigned. arg: gradebook_id (osid.id.Id): the ``Id`` of the ``Gradebook`` return: (osid.id.IdList) - list of assignable gradebook ``Ids`` raise: NullArgument - ``gradebook_id`` is ``null`` raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.*
def format_subject(subject):
    """Prepends 'Re:' to the subject. To avoid multiple 'Re:'s a counter
    is added.

    NOTE: Currently unused. First step to fix Issue #48.
    FIXME: Any hints how to make this i18n aware are very welcome.
    """
    # Already-counted replies look like 'Re[<n>]: ...'.
    counted = re.match(r'^Re\[(\d*)\]:\ ', subject, re.U)
    prefix = u""
    if subject.startswith('Re: '):
        # Plain 'Re: ' reply -> second round.
        prefix = u"[2]"
        subject = subject[4:]
    elif counted is not None:
        try:
            num = int(counted.group(1))
            prefix = u"[%d]" % (num+1)
            # Strip 'Re[<num>]: ' (6 fixed chars + the digits).
            subject = subject[6+len(str(num)):]
        except:
            # If anything fails here, fall back to the old mechanism.
            pass
    return ugettext(u"Re%(prefix)s: %(subject)s") % {
        'subject': subject,
        'prefix': prefix
    }
Prepends 'Re:' to the subject. To avoid multiple 'Re:'s a counter is added. NOTE: Currently unused. First step to fix Issue #48. FIXME: Any hints how to make this i18n aware are very welcome.
def get_window_size(self, windowHandle='current'):
    """Gets the width and height of the current window.

    :Usage:
        ::

            driver.get_window_size()
    """
    command = Command.GET_WINDOW_SIZE
    if not self.w3c:
        size = self.execute(command, {'windowHandle': windowHandle})
    else:
        if windowHandle != 'current':
            warnings.warn("Only 'current' window is supported for W3C compatibile browsers.")
        size = self.get_window_rect()

    # Some drivers wrap the payload in a 'value' envelope; unwrap it.
    if size.get('value', None) is not None:
        size = size['value']

    return {'width': size['width'], 'height': size['height']}
Gets the width and height of the current window. :Usage: :: driver.get_window_size()
def create_detector(self, detector):
    """Creates a new detector.

    Args:
        detector (object): the detector model object. Will be serialized
            as JSON.

    Returns:
        dictionary of the response (created detector model).
    """
    endpoint = self._u(self._DETECTOR_ENDPOINT_SUFFIX)
    response = self._post(endpoint, data=detector)
    response.raise_for_status()
    return response.json()
Creates a new detector. Args: detector (object): the detector model object. Will be serialized as JSON. Returns: dictionary of the response (created detector model).
def _set_fcoe(self, v, load=False):
    """
    Setter method for fcoe, mapped from YANG variable /hardware/custom_profile/kap_custom_profile/fcoe (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_fcoe is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_fcoe() directly.
    """
    # Auto-generated pyangbind setter: do not hand-edit the YANGDynClass
    # arguments; regenerate from the YANG model instead.
    if hasattr(v, "_utype"):
        # Unwrap values that carry their own YANG type converter.
        v = v._utype(v)
    try:
        # Coerce the value into the typed container; raises on mismatch.
        t = YANGDynClass(v,base=fcoe.fcoe, is_container='container', presence=False, yang_name="fcoe", rest_name="fcoe", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure FCOE protocol KAP parameters', u'cli-compact-syntax': None}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """fcoe must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=fcoe.fcoe, is_container='container', presence=False, yang_name="fcoe", rest_name="fcoe", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure FCOE protocol KAP parameters', u'cli-compact-syntax': None}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True)""",
        })

    self.__fcoe = t
    # Trigger the parent container's change hook when present.
    if hasattr(self, '_set'):
        self._set()
Setter method for fcoe, mapped from YANG variable /hardware/custom_profile/kap_custom_profile/fcoe (container) If this variable is read-only (config: false) in the source YANG file, then _set_fcoe is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_fcoe() directly.
def SetFlushInterval(self, flush_interval):
    """Set the flush interval.

    Args:
        flush_interval (int): number of events to buffer before doing a
            bulk insert.
    """
    self._flush_interval = flush_interval
    message = 'Elasticsearch flush interval: {0:d}'.format(flush_interval)
    logger.debug(message)
Set the flush interval. Args: flush_interval (int): number of events to buffer before doing a bulk insert.
def modelshort(self):
    """Short version of model name

    Dictionary defined in ``populations.py``::

        SHORT_MODELNAMES = {'Planets':'pl',
                    'EBs':'eb',
                    'HEBs':'heb',
                    'BEBs':'beb',
                    'Blended Planets':'bpl',
                    'Specific BEB':'sbeb',
                    'Specific HEB':'sheb'}
    """
    try:
        short_name = SHORT_MODELNAMES[self.model]
        # Indexed specific models carry their index in the short name.
        if hasattr(self, 'index'):
            short_name = '{}-{}'.format(short_name, self.index)
        return short_name
    except KeyError:
        raise KeyError('No short name for model: %s' % self.model)
Short version of model name Dictionary defined in ``populations.py``:: SHORT_MODELNAMES = {'Planets':'pl', 'EBs':'eb', 'HEBs':'heb', 'BEBs':'beb', 'Blended Planets':'bpl', 'Specific BEB':'sbeb', 'Specific HEB':'sheb'}