def _check_log_scale(base, sides, scales, coord):
    """
    Check the log transforms

    Parameters
    ----------
    base : float or None
        Base of the logarithm in which the ticks will be
        calculated. If ``None``, the base of the log transform
        of the scale will be used.
    sides : str (default: bl)
        Sides onto which to draw the marks. Any combination
        chosen from the characters ``btlr``, for *bottom*, *top*,
        *left* or *right* side marks. If ``coord_flip()`` is used,
        these are the sides *after* the flip.
    scales : SimpleNamespace
        ``x`` and ``y`` scales.
    coord : coord
        Coordinate (e.g. coord_cartesian) system of the geom.

    Returns
    -------
    out : tuple
        The bases (base_x, base_y) to use when generating the ticks.
    """
    def is_log(trans):
        return (trans.__class__.__name__.startswith('log')
                and hasattr(trans, 'base'))

    base_x, base_y = base, base
    x_is_log = is_log(scales.x.trans)
    y_is_log = is_log(scales.y.trans)
    if isinstance(coord, coord_flip):
        x_is_log, y_is_log = y_is_log, x_is_log

    if 't' in sides or 'b' in sides:
        if base_x is None:
            base_x = scales.x.trans.base
        if not x_is_log:
            warnings.warn(
                "annotation_logticks for x-axis which does not have "
                "a log scale. The logticks may not make sense.",
                PlotnineWarning)
        elif x_is_log and base_x != scales.x.trans.base:
            warnings.warn(
                "The x-axis is log transformed in base {}, "
                "but the annotation_logticks are computed in base {}"
                "".format(base_x, scales.x.trans.base),
                PlotnineWarning)

    if 'l' in sides or 'r' in sides:
        if base_y is None:
            base_y = scales.y.trans.base
        if not y_is_log:
            warnings.warn(
                "annotation_logticks for y-axis which does not have "
                "a log scale. The logticks may not make sense.",
                PlotnineWarning)
        elif y_is_log and base_y != scales.y.trans.base:
            warnings.warn(
                "The y-axis is log transformed in base {}, "
                "but the annotation_logticks are computed in base {}"
                "".format(base_y, scales.y.trans.base),
                PlotnineWarning)

    return base_x, base_y
def primers(dna, tm=65, min_len=10, tm_undershoot=1, tm_overshoot=3,
            end_gc=False, tm_parameters='cloning', overhangs=None,
            structure=False):
    '''Design primers for PCR amplifying any arbitrary sequence.

    :param dna: Input sequence.
    :type dna: coral.DNA
    :param tm: Ideal primer Tm in degrees C.
    :type tm: float
    :param min_len: Minimum primer length.
    :type min_len: int
    :param tm_undershoot: Allowed Tm undershoot.
    :type tm_undershoot: float
    :param tm_overshoot: Allowed Tm overshoot.
    :type tm_overshoot: float
    :param end_gc: Obey the 'end on G or C' rule.
    :type end_gc: bool
    :param tm_parameters: Melting temp calculator method to use.
    :type tm_parameters: string
    :param overhangs: 2-tuple of overhang sequences.
    :type overhangs: tuple
    :param structure: Evaluate each primer for structure, with warning for
                      high structure.
    :type structure: bool
    :returns: A list of primers (the output of primer).
    :rtype: list

    '''
    if not overhangs:
        overhangs = [None, None]
    templates = [dna, dna.reverse_complement()]
    primer_list = []
    for template, overhang in zip(templates, overhangs):
        primer_i = primer(template, tm=tm, min_len=min_len,
                          tm_undershoot=tm_undershoot,
                          tm_overshoot=tm_overshoot, end_gc=end_gc,
                          tm_parameters=tm_parameters, overhang=overhang,
                          structure=structure)
        primer_list.append(primer_i)
    return primer_list
def init_layer(self):
    """
    Initialize a layer object.
    """
    self.layer = self.vector.GetLayer()
    self.__features = [None] * self.nfeatures
def new_bundle(self, name: str, created_at: dt.datetime = None) -> models.Bundle:
    """Create a new file bundle."""
    new_bundle = self.Bundle(name=name, created_at=created_at)
    return new_bundle
def describe_usage_plans(name=None, plan_id=None, region=None, key=None,
                         keyid=None, profile=None):
    '''
    Returns a list of existing usage plans, optionally filtered to match a
    given plan name.

    .. versionadded:: 2017.7.0

    CLI Example:

    .. code-block:: bash

        salt myminion boto_apigateway.describe_usage_plans
        salt myminion boto_apigateway.describe_usage_plans name='usage plan name'
        salt myminion boto_apigateway.describe_usage_plans plan_id='usage plan id'

    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        plans = _multi_call(conn.get_usage_plans, 'items')
        if name:
            plans = _filter_plans('name', name, plans)
        if plan_id:
            plans = _filter_plans('id', plan_id, plans)

        return {'plans': [_convert_datetime_str(plan) for plan in plans]}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
def _calc_thumb_filename(self, thumb_name):
    """
    Calculates the correct filename for a would-be (or potentially
    existing) thumbnail of the given size.

    NOTE: This includes the path leading up to the thumbnail, e.g.
    uploads/cbid_images/photo.png

    thumb_name: (str) The name of the thumbnail size, used as a filename
        suffix.

    Returns a string filename.
    """
    filename_split = self.name.rsplit('.', 1)
    file_name = filename_split[0]
    file_extension = self.get_thumbnail_format()

    return '%s_%s.%s' % (file_name, thumb_name, file_extension)
def calculate(self):
    """
    Calculates the estimated happiness of a person living in a world.
    """
    self.rating = 0
    for f in self.factors:
        self._update_pref(f.min, f.max, self.world.tax_rate)
def values(self):
    """
    Yield the stored values in order.
    """
    tmp = self
    while tmp is not None:
        yield tmp.data
        tmp = tmp.next
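A minimal sketch of the traversal above in use, assuming a simple singly linked node class with `data` and `next` attributes (the Node class here is hypothetical, written just for illustration):

class Node:
    """Minimal singly linked list node matching the traversal above."""
    def __init__(self, data, next=None):
        self.data = data
        self.next = next

    def values(self):
        # Same generator as above: walk the chain, yielding each payload.
        tmp = self
        while tmp is not None:
            yield tmp.data
            tmp = tmp.next

head = Node(1, Node(2, Node(3)))
print(list(head.values()))  # [1, 2, 3]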
def run_type(self):
    """
    Returns the run type. Currently supports LDA, GGA, vdW-DF and HF calcs.

    TODO: Fix for other functional types like PW91, other vdW types, etc.
    """
    METAGGA_TYPES = {"TPSS", "RTPSS", "M06L", "MBJL", "SCAN", "MS0", "MS1",
                     "MS2"}

    if self.parameters.get("LHFCALC", False):
        rt = "HF"
    elif self.parameters.get("METAGGA", "").strip().upper() in METAGGA_TYPES:
        rt = self.parameters["METAGGA"].strip().upper()
    elif self.parameters.get("LUSE_VDW", False):
        vdw_gga = {"RE": "DF", "OR": "optPBE", "BO": "optB88",
                   "MK": "optB86b", "ML": "DF2"}
        gga = self.parameters.get("GGA").upper()
        rt = "vdW-" + vdw_gga[gga]
    elif self.potcar_symbols[0].split()[0] == 'PAW':
        rt = "LDA"
    else:
        rt = "GGA"

    if self.is_hubbard:
        rt += "+U"

    return rt
def check_rdn_deposits(raiden, user_deposit_proxy: UserDeposit):
    """ Check periodically for RDN deposits in the user-deposits contract """
    while True:
        rei_balance = user_deposit_proxy.effective_balance(raiden.address, "latest")
        rdn_balance = to_rdn(rei_balance)
        if rei_balance < MIN_REI_THRESHOLD:
            click.secho(
                (
                    f'WARNING\n'
                    f'Your account\'s RDN balance of {rdn_balance} is below the '
                    f'minimum threshold. Provided that you have either a monitoring '
                    f'service or a path finding service activated, your node is not going '
                    f'to be able to pay those services which may lead to denial of service or '
                    f'loss of funds.'
                ),
                fg='red',
            )
        gevent.sleep(CHECK_RDN_MIN_DEPOSIT_INTERVAL)
def _variants_fills(fields, fills, info_types):
    """Utility function to determine fill values for variants fields with
    missing values."""

    if fills is None:
        # no fills specified by user
        fills = dict()

    for f, vcf_type in zip(fields, info_types):
        if f == 'FILTER':
            fills[f] = False
        elif f not in fills:
            if f in config.STANDARD_VARIANT_FIELDS:
                fills[f] = config.DEFAULT_VARIANT_FILL[f]
            else:
                fills[f] = config.DEFAULT_FILL_MAP[vcf_type]

    # convert to tuple for zipping with fields
    fills = tuple(fills[f] for f in fields)

    return fills
def add_message(self, msg_content, folder, **kwargs):
    """ Inject a message

    :param string msg_content: The entire message's content.
    :param string folder: Folder pathname (starts with '/') or folder ID
    """
    content = {'m': kwargs}
    content['m']['l'] = str(folder)
    content['m']['content'] = {'_content': msg_content}

    return self.request('AddMsg', content)
def randomize_args(self):
    '''Get new parameters for spirograph generation near agent's current
    location (*spiro_args*).
    '''
    args = self.spiro_args + np.random.normal(0, self.move_radius,
                                              self.spiro_args.shape)
    np.clip(args, -199, 199, args)
    while args[0] == 0 or args[1] == 0:
        args = self.spiro_args + np.random.normal(0, self.move_radius,
                                                  self.spiro_args.shape)
        np.clip(args, -199, 199, args)
    return args
def _read_output(self, stream, callback, output_file):
    """ Read the output of the process, execute the callback and save the output.

    Args:
        stream: A file object pointing to the output stream that should be read.
        callback(callable, None): A callback function that is called for
            each new line of output.
        output_file: A file object to which the full output is written.

    Returns:
        bool: True if a line was read from the output, otherwise False.
    """
    if (callback is None and output_file is None) or stream.closed:
        return False

    line = stream.readline()
    if line:
        if callback is not None:
            callback(line.decode(), self._data, self._store,
                     self._signal, self._context)

        if output_file is not None:
            output_file.write(line)

        return True
    else:
        return False
def _get_lsun(directory, category, split_name):
    """Downloads all lsun files to directory unless they are there."""
    generator_utils.maybe_download(
        directory,
        _LSUN_DATA_FILENAME % (category, split_name),
        _LSUN_URL % (category, split_name))
def evaluator(evaluate):
    """Return an inspyred evaluator function based on the given function.

    This function generator takes a function that evaluates only one
    candidate. The generator handles the iteration over each candidate
    to be evaluated.

    The given function ``evaluate`` must have the following signature::

        fitness = evaluate(candidate, args)

    This function is most commonly used as a function decorator with
    the following usage::

        @evaluator
        def evaluate(candidate, args):
            # Implementation of evaluation
            pass

    The generated function also contains an attribute named
    ``single_evaluation`` which holds the original evaluation function.
    In this way, the original single-candidate function can be
    retrieved if necessary.

    """
    @functools.wraps(evaluate)
    def inspyred_evaluator(candidates, args):
        fitness = []
        for candidate in candidates:
            fitness.append(evaluate(candidate, args))
        return fitness
    inspyred_evaluator.single_evaluation = evaluate
    return inspyred_evaluator
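A self-contained sketch of the decorator in use; the `sum_fitness` function below is hypothetical, not part of inspyred:

import functools

def evaluator(evaluate):
    # Same wrapper as above: lift a single-candidate evaluator to a batch one.
    @functools.wraps(evaluate)
    def inspyred_evaluator(candidates, args):
        return [evaluate(candidate, args) for candidate in candidates]
    inspyred_evaluator.single_evaluation = evaluate
    return inspyred_evaluator

@evaluator
def sum_fitness(candidate, args):
    # Hypothetical fitness: just sum the candidate's genes.
    return sum(candidate)

print(sum_fitness([[1, 2], [3, 4]], {}))          # [3, 7]
print(sum_fitness.single_evaluation([1, 2], {}))  # 3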
def info(self, channel_name):
    """ https://api.slack.com/methods/channels.info """
    channel_id = self.get_channel_id(channel_name)
    self.params.update({'channel': channel_id})
    return FromUrl('https://slack.com/api/channels.info',
                   self._requests)(data=self.params).get()
async def resume_dialog(self, dc, reason: DialogReason, result: object):
    """
    Method called when an instance of the dialog is being returned to from
    another dialog that was started by the current instance using
    `begin_dialog()`. If this method is NOT implemented then the dialog will
    be automatically ended with a call to `end_dialog()`. Any result passed
    from the called dialog will be passed to the current dialog's parent.

    :param dc: The dialog context for the current turn of conversation.
    :param reason: Reason why the dialog resumed.
    :param result: (Optional) value returned from the dialog that was
        called. The type of the value returned is dependent on the dialog
        that was called.
    :return:
    """
    # By default just end the current dialog.
    return await dc.EndDialog(result)
def _years_in_date_range_within_decade(self, decade, begin_date, end_date):
    """Return a list of the years in one decade that are covered by the
    date range."""
    begin_year = begin_date.year
    end_year = end_date.year
    if begin_year < decade:
        begin_year = decade
    if end_year > decade + 9:
        end_year = decade + 9
    return list(range(begin_year, end_year + 1))
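A quick worked example of the clamping logic, assuming `decade` is the decade's first year (e.g. 2010) and the dates are `datetime.date` objects:

import datetime as dt

def years_in_decade(decade, begin_date, end_date):
    # Clamp the range's years to [decade, decade + 9], as above.
    begin_year = max(begin_date.year, decade)
    end_year = min(end_date.year, decade + 9)
    return list(range(begin_year, end_year + 1))

# A range from 2008 to 2012 overlaps the 2010s only in 2010-2012.
print(years_in_decade(2010, dt.date(2008, 5, 1), dt.date(2012, 3, 1)))
# [2010, 2011, 2012]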
def select(self, select, table_name, where=None, extra=None):
    """
    Send a SELECT query to the database.

    :param str select: Attribute for the ``SELECT`` query.
    :param str table_name: |arg_select_table_name|
    :param where: |arg_select_where|
    :type where: |arg_where_type|
    :param str extra: |arg_select_extra|
    :return: Result of the query execution.
    :rtype: sqlite3.Cursor
    :raises simplesqlite.NullDatabaseConnectionError:
        |raises_check_connection|
    :raises simplesqlite.TableNotFoundError:
        |raises_verify_table_existence|
    :raises simplesqlite.OperationalError: |raises_operational_error|
    """
    self.verify_table_existence(table_name)

    return self.execute_query(
        six.text_type(Select(select, table_name, where, extra)),
        logging.getLogger().findCaller(),
    )
def chunk(self, seek=None, lenient=False):
    """
    Read the next PNG chunk from the input file; returns a
    (*chunk_type*, *data*) tuple. *chunk_type* is the chunk's type as a
    byte string (all PNG chunk types are 4 bytes long). *data* is the
    chunk's data content, as a byte string.

    If the optional `seek` argument is specified then it will keep
    reading chunks until it either runs out of file or finds the
    chunk_type specified by the argument. Note that in general the order
    of chunks in PNGs is unspecified, so using `seek` can cause you to
    miss chunks.

    If the optional `lenient` argument evaluates to `True`, checksum
    failures will raise warnings rather than exceptions.
    """
    self.validate_signature()

    while True:
        # http://www.w3.org/TR/PNG/#5Chunk-layout
        if not self.atchunk:
            self.atchunk = self.chunklentype()
        length, chunk_type = self.atchunk
        self.atchunk = None

        data = self.file.read(length)
        if len(data) != length:
            raise ChunkError('Chunk %s too short for required %i octets.'
                             % (chunk_type, length))
        checksum = self.file.read(4)
        if len(checksum) != 4:
            raise ChunkError('Chunk %s too short for checksum.' % chunk_type)
        if seek and chunk_type != seek:
            continue
        verify = zlib.crc32(strtobytes(chunk_type))
        verify = zlib.crc32(data, verify)
        # Whether the output from zlib.crc32 is signed or not varies
        # according to hideous implementation details, see
        # http://bugs.python.org/issue1202 .
        # We coerce it to be positive here (in a way which works on
        # Python 2.3 and older).
        verify &= 2**32 - 1
        verify = struct.pack('!I', verify)
        if checksum != verify:
            (a, ) = struct.unpack('!I', checksum)
            (b, ) = struct.unpack('!I', verify)
            message = ("Checksum error in %s chunk: 0x%08X != 0x%08X."
                       % (chunk_type, a, b))
            if lenient:
                warnings.warn(message, RuntimeWarning)
            else:
                raise ChunkError(message)
        return chunk_type, data
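For reference, the CRC rule used above (the checksum covers the chunk type plus the chunk data, but not the length field) can be reproduced with only the standard library:

import struct
import zlib

def png_crc_ok(chunk_type: bytes, data: bytes, checksum: bytes) -> bool:
    # Per the PNG spec, the CRC is computed over type + data.
    crc = zlib.crc32(chunk_type)
    crc = zlib.crc32(data, crc) & 0xFFFFFFFF
    return struct.pack('!I', crc) == checksum

# IEND chunks carry no data; their CRC is the well-known constant 0xAE426082.
print(png_crc_ok(b'IEND', b'', struct.pack('!I', 0xAE426082)))  # True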
def _check_section_option(self, section, option):
    """
    Private method to check section and option types
    """
    if section is None:
        section = self.DEFAULT_SECTION_NAME
    elif not is_text_string(section):
        raise RuntimeError("Argument 'section' must be a string")
    if not is_text_string(option):
        raise RuntimeError("Argument 'option' must be a string")
    return section
def defaultAutoRangeMethods(inspector, intialItems=None):
    """ Creates an ordered dict with default autorange methods for an inspector.

        :param inspector: the inspector whose sliced array the range
            methods will work on.
        :param intialItems: will be passed on to the OrderedDict constructor.
    """
    rangeFunctions = OrderedDict({} if intialItems is None else intialItems)
    rangeFunctions['use all data'] = partial(inspectorDataRange, inspector, 0.0)
    for percentage in [0.1, 0.2, 0.5, 1, 2, 5, 10, 20]:
        label = "discard {}%".format(percentage)
        rangeFunctions[label] = partial(inspectorDataRange, inspector, percentage)

    return rangeFunctions
def get_freesasa_annotations(self, include_hetatms=False,
                             representatives_only=True, force_rerun=False):
    """Run freesasa on structures and store calculations.

    Annotations are stored in the protein structure's chain sequence at:
    ``<chain_prop>.seq_record.letter_annotations['*-freesasa']``

    Args:
        include_hetatms (bool): If HETATMs should be included in
            calculations. Defaults to ``False``.
        representatives_only (bool): If analysis should only be run on the
            representative structure.
        force_rerun (bool): If calculations should be rerun even if an
            output file exists.

    """
    for g in tqdm(self.genes):
        g.protein.get_freesasa_annotations(include_hetatms=include_hetatms,
                                           representative_only=representatives_only,
                                           force_rerun=force_rerun)
def rec2csv(r, filename):
    """Export a recarray *r* to a CSV file *filename*"""
    names = r.dtype.names

    def translate(x):
        if x is None or str(x).lower() == "none":
            x = ""
        return str(x)

    with open(filename, "w") as csv:
        csv.write(",".join([str(x) for x in names]) + "\n")
        for data in r:
            csv.write(",".join([translate(x) for x in data]) + "\n")
    return filename
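A small usage sketch with a NumPy record array (field names and data are made up for illustration), assuming the `rec2csv` above is in scope:

import numpy as np

r = np.rec.fromrecords([('ala', 0.5), ('gly', 1.5)],
                       names=['resname', 'score'])
rec2csv(r, 'table.csv')
# table.csv now contains:
# resname,score
# ala,0.5
# gly,1.5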
def __process_by_python(self):
    """!
    @brief Performs processing using python code.

    """
    self.__scores = {}

    for k in range(self.__kmin, self.__kmax):
        clusters = self.__calculate_clusters(k)
        if len(clusters) != k:
            self.__scores[k] = float('nan')
            continue

        score = silhouette(self.__data, clusters).process().get_score()
        self.__scores[k] = sum(score) / len(score)

        if self.__scores[k] > self.__score:
            self.__score = self.__scores[k]
            self.__amount = k
def _prerun(self):
    """ To execute before running message
    """
    self.check_required_params()
    self._set_status("RUNNING")
    logger.debug(
        "{}.PreRun: {}[{}]: running...".format(
            self.__class__.__name__, self.__class__.path, self.uuid
        ),
        extra=dict(
            kmsg=Message(
                self.uuid, entrypoint=self.__class__.path, params=self.params
            ).dump()
        )
    )
    return self.prerun()
def owner(self):
    """
    Returns the owner of these capabilities, if any.

    :return: the owner, can be None
    :rtype: JavaObject
    """
    obj = javabridge.call(
        self.jobject, "getOwner", "()Lweka/core/CapabilitiesHandler;")
    if obj is None:
        return None
    else:
        return JavaObject(jobject=obj)
def findItems(self, data, cls=None, initpath=None, **kwargs):
    """ Load the specified data to find and build all items with the
        specified tag and attrs. See :func:`~plexapi.base.PlexObject.fetchItem`
        for more details on how this is used.
    """
    # filter on cls attrs if specified
    if cls and cls.TAG and 'tag' not in kwargs:
        kwargs['etag'] = cls.TAG
    if cls and cls.TYPE and 'type' not in kwargs:
        kwargs['type'] = cls.TYPE
    # loop through all data elements to find matches
    items = []
    for elem in data:
        if self._checkAttrs(elem, **kwargs):
            item = self._buildItemOrNone(elem, cls, initpath)
            if item is not None:
                items.append(item)
    return items
def create_apirack(self):
    """Get an instance of Api Rack Variables services facade."""
    return ApiRack(
        self.networkapi_url,
        self.user,
        self.password,
        self.user_ldap)
def get_users_batch(self, ids):
    """
    ids: a list of ids that we want to return
    """
    # Allowed maximum number of ids is 50
    assert len(ids) <= 50
    ids_ = ','.join(ids)
    url = _USERS_BATCH.format(c_api=_C_API_BEGINNING,
                              api=_API_VERSION,
                              ids=ids_,
                              at=self.access_token)
    return _get_request(url)
def source(self):
    """Return the source code for the definition."""
    full_src = self._source[self._slice]

    def is_empty_or_comment(line):
        return line.strip() == '' or line.strip().startswith('#')

    filtered_src = dropwhile(is_empty_or_comment, reversed(full_src))
    return ''.join(reversed(list(filtered_src)))
def not_(self):
    ''' Negates this instance's query expression using MongoDB's ``$not``
        operator

        **Example**: ``(User.name == 'Jeff').not_()``

        .. note:: Another usage is via an operator, but parens are needed
            to get past precedence issues: ``~ (User.name == 'Jeff')``
    '''
    ret_obj = {}
    for k, v in self.obj.items():
        if not isinstance(v, dict):
            ret_obj[k] = {'$ne': v}
            continue
        num_ops = len([x for x in v if x[0] == '$'])
        if num_ops != len(v) and num_ops != 0:
            raise BadQueryException('$ operator used in field name')

        if num_ops == 0:
            ret_obj[k] = {'$ne': v}
            continue

        for op, value in v.items():
            k_dict = ret_obj.setdefault(k, {})
            not_dict = k_dict.setdefault('$not', {})
            not_dict[op] = value

    return QueryExpression(ret_obj)
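To make the rewrite concrete, here is a self-contained sketch of the same negation logic on bare query dicts; the `QueryExpression` wrapper and the mixed-operator error check are deliberately omitted:

def negate(query):
    negated = {}
    for field, cond in query.items():
        if not isinstance(cond, dict):
            negated[field] = {'$ne': cond}     # plain equality -> $ne
            continue
        ops = [key for key in cond if key.startswith('$')]
        if not ops:
            negated[field] = {'$ne': cond}     # sub-document match -> $ne
            continue
        negated[field] = {'$not': dict(cond)}  # operator query -> $not
    return negated

print(negate({'name': 'Jeff'}))      # {'name': {'$ne': 'Jeff'}}
print(negate({'age': {'$gt': 18}}))  # {'age': {'$not': {'$gt': 18}}}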
def minimize(grad_and_hessian_loss_fn,
             x_start,
             tolerance,
             l1_regularizer,
             l2_regularizer=None,
             maximum_iterations=1,
             maximum_full_sweeps_per_iteration=1,
             learning_rate=None,
             name=None):
  """Minimize using Hessian-informed proximal gradient descent.

  This function solves the regularized minimization problem

  ```none
  argmin{ Loss(x)
            + l1_regularizer * ||x||_1
            + l2_regularizer * ||x||_2**2
          : x in R^n }
  ```

  where `Loss` is a convex C^2 function (typically, `Loss` is the negative
  log likelihood of a model and `x` is a vector of model coefficients). The
  `Loss` function does not need to be supplied directly, but this optimizer
  does need a way to compute the gradient and Hessian of the Loss function
  at a given value of `x`. The gradient and Hessian are often computationally
  expensive, and this optimizer calls them relatively few times compared with
  other algorithms.

  Args:
    grad_and_hessian_loss_fn: callable that takes as input a (batch of)
      `Tensor` of the same shape and dtype as `x_start` and returns the triple
      `(gradient_unregularized_loss, hessian_unregularized_loss_outer,
      hessian_unregularized_loss_middle)` as defined in the argument spec of
      `minimize_one_step`.
    x_start: (Batch of) vector-shaped, `float` `Tensor` representing the
      initial value of the argument to the `Loss` function.
    tolerance: scalar, `float` `Tensor` representing the tolerance for each
      optimization step; see the `tolerance` argument of `minimize_one_step`.
    l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1
      regularization term (see equation above).
    l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2
      regularization term (see equation above).
      Default value: `None` (i.e., no L2 regularization).
    maximum_iterations: Python integer specifying the maximum number of
      iterations of the outer loop of the optimizer. After this many
      iterations of the outer loop, the algorithm will terminate even if the
      return value `optimal_x` has not converged.
      Default value: `1`.
    maximum_full_sweeps_per_iteration: Python integer specifying the maximum
      number of sweeps allowed in each iteration of the outer loop of the
      optimizer. Passed as the `maximum_full_sweeps` argument to
      `minimize_one_step`.
      Default value: `1`.
    learning_rate: scalar, `float` `Tensor` representing a multiplicative
      factor used to dampen the proximal gradient descent steps.
      Default value: `None` (i.e., factor is conceptually `1`).
    name: Python string representing the name of the TensorFlow operation.
      The default name is `"minimize"`.

  Returns:
    x: `Tensor` of the same shape and dtype as `x_start`, representing the
      (batches of) computed values of `x` which minimizes `Loss(x)`.
    is_converged: scalar, `bool` `Tensor` indicating whether the minimization
      procedure converged within the specified number of iterations across
      all batches. Here convergence means that an iteration of the inner loop
      (`minimize_one_step`) returns `True` for its `is_converged` output
      value.
    iter: scalar, `int` `Tensor` indicating the actual number of iterations
      of the outer loop of the optimizer completed (i.e., number of calls to
      `minimize_one_step` before achieving convergence).

  #### References

  [1]: Jerome Friedman, Trevor Hastie and Rob Tibshirani. Regularization
       Paths for Generalized Linear Models via Coordinate Descent. _Journal
       of Statistical Software_, 33(1), 2010.
       https://www.jstatsoft.org/article/view/v033i01/v33i01.pdf

  [2]: Guo-Xun Yuan, Chia-Hua Ho and Chih-Jen Lin. An Improved GLMNET for
       L1-regularized Logistic Regression. _Journal of Machine Learning
       Research_, 13, 2012.
       http://www.jmlr.org/papers/volume13/yuan12a/yuan12a.pdf
  """
  graph_deps = [
      x_start,
      l1_regularizer,
      l2_regularizer,
      maximum_iterations,
      maximum_full_sweeps_per_iteration,
      tolerance,
      learning_rate,
  ]
  with tf.compat.v1.name_scope(name, 'minimize', graph_deps):
    def _loop_cond(x_start, converged, iter_):
      del x_start
      return tf.logical_and(iter_ < maximum_iterations,
                            tf.logical_not(converged))

    def _loop_body(x_start, converged, iter_):  # pylint: disable=missing-docstring
      g, h_outer, h_middle = grad_and_hessian_loss_fn(x_start)
      x_start, converged, _ = minimize_one_step(
          gradient_unregularized_loss=g,
          hessian_unregularized_loss_outer=h_outer,
          hessian_unregularized_loss_middle=h_middle,
          x_start=x_start,
          l1_regularizer=l1_regularizer,
          l2_regularizer=l2_regularizer,
          maximum_full_sweeps=maximum_full_sweeps_per_iteration,
          tolerance=tolerance,
          learning_rate=learning_rate)
      return x_start, converged, iter_ + 1

    return tf.while_loop(
        cond=_loop_cond,
        body=_loop_body,
        loop_vars=[
            x_start,
            tf.zeros([], np.bool, name='converged'),
            tf.zeros([], np.int32, name='iter'),
        ])
def to_unicode(string):
    """Convert a string (bytes, str or unicode) to unicode."""
    assert isinstance(string, basestring)
    if sys.version_info[0] >= 3:
        if isinstance(string, bytes):
            return string.decode('utf-8')
        else:
            return string
    else:
        if isinstance(string, str):
            return string.decode('utf-8')
        else:
            return string
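On Python 3 only the bytes branch does any work, which makes the function easy to sanity-check:

def to_unicode_py3(string):
    # str is already unicode on Python 3; only bytes need decoding.
    if isinstance(string, bytes):
        return string.decode('utf-8')
    return string

print(to_unicode_py3(b'caf\xc3\xa9'))  # café
print(to_unicode_py3('café'))          # café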
def _replace_placeholder_with(self, element):
    """
    Substitute *element* for this placeholder element in the shapetree.
    This placeholder's `._element` attribute is set to |None| and its
    original element is free for garbage collection. Any attribute access
    (including a method call) on this placeholder after this call raises
    |AttributeError|.
    """
    element._nvXxPr.nvPr._insert_ph(self._element.ph)
    self._element.addprevious(element)
    self._element.getparent().remove(self._element)
    self._element = None
def connect(cls, host, public_key, private_key, verbose=0, use_cache=True):
    """
    Connect the client with the given host and the provided credentials.

    Parameters
    ----------
    host : str
        The Cytomine host (without protocol).
    public_key : str
        The Cytomine public key.
    private_key : str
        The Cytomine private key.
    verbose : int
        The verbosity level of the client.
    use_cache : bool
        True to use HTTP cache, False otherwise.

    Returns
    -------
    client : Cytomine
        A connected Cytomine client.
    """
    return cls(host, public_key, private_key, verbose, use_cache)
def process_jwt(jwt):
    """
    Process a JSON Web Token without verifying it.

    Call this before :func:`verify_jwt` if you need access to the header or
    claims in the token before verifying it. For example, the claims might
    identify the issuer such that you can retrieve the appropriate public
    key.

    :param jwt: The JSON Web Token to verify.
    :type jwt: str or unicode

    :rtype: tuple
    :returns: ``(header, claims)``
    """
    header, claims, _ = jwt.split('.')
    parsed_header = json_decode(base64url_decode(header))
    parsed_claims = json_decode(base64url_decode(claims))
    return parsed_header, parsed_claims
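The same unverified decode can be sketched with the standard library alone; note the base64url padding fix-up, and that nothing here checks the signature:

import base64
import json

def peek_jwt(token):
    header_b64, claims_b64, _sig = token.split('.')

    def b64url_decode(segment):
        # base64url may omit '=' padding; restore it before decoding.
        segment += '=' * (-len(segment) % 4)
        return base64.urlsafe_b64decode(segment)

    header = json.loads(b64url_decode(header_b64))
    claims = json.loads(b64url_decode(claims_b64))
    return header, claims

# A made-up token whose signature part is ignored by this helper.
tok = ('eyJhbGciOiJIUzI1NiJ9.'     # {"alg":"HS256"}
       'eyJpc3MiOiJleGFtcGxlIn0.'  # {"iss":"example"}
       'sig')
print(peek_jwt(tok))  # ({'alg': 'HS256'}, {'iss': 'example'})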
def getAllViewsAsDict(self):
    """Return all the stats views (dict)."""
    return {p: self._plugins[p].get_views() for p in self._plugins}
def add_z(xy: np.ndarray, z: float) -> np.ndarray:
    """
    Turn a 2-D transform matrix into a 3-D transform matrix (scale/shift
    only, no rotation).

    :param xy: A two-dimensional transform matrix (a 3x3 numpy ndarray)
        in the following form:
            [ 1 0 x ]
            [ 0 1 y ]
            [ 0 0 1 ]
    :param z: a float for the z component
    :return: a three-dimensional transformation matrix (a 4x4 numpy
        ndarray) with x, y, and z from the function parameters, in the
        following form:
            [ 1 0 0 x ]
            [ 0 1 0 y ]
            [ 0 0 1 z ]
            [ 0 0 0 1 ]
    """
    # First, insert a column of zeros into the input matrix
    interm = insert(xy, 2, [0, 0, 0], axis=1)
    # Result:
    # [ 1 0 0 x ]
    # [ 0 1 0 y ]
    # [ 0 0 0 1 ]

    # Then, insert the z row to create a properly formed 3-D transform matrix:
    xyz = insert(interm, 2, [0, 0, 1, z], axis=0)
    # Result:
    # [ 1 0 0 x ]
    # [ 0 1 0 y ]
    # [ 0 0 1 z ]
    # [ 0 0 0 1 ]
    return xyz.round(11)
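A quick check of the two np.insert steps on a concrete translation matrix:

import numpy as np

xy = np.array([[1.0, 0.0, 5.0],
               [0.0, 1.0, 7.0],
               [0.0, 0.0, 1.0]])

interm = np.insert(xy, 2, [0, 0, 0], axis=1)        # new zero column at index 2
xyz = np.insert(interm, 2, [0, 0, 1, 9.0], axis=0)  # new row [0 0 1 z]
print(xyz)
# [[1. 0. 0. 5.]
#  [0. 1. 0. 7.]
#  [0. 0. 1. 9.]
#  [0. 0. 0. 1.]]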
def get_transcript_ids(ensembl, gene_id):
    """ gets transcript IDs for a gene.

    Args:
        ensembl: EnsemblRequest object to request data from ensembl
        gene_id: HGNC symbol for gene

    Returns:
        dictionary of transcript ID: transcript lengths for all transcripts
        for a given HGNC symbol.
    """
    ensembl_genes = ensembl.get_genes_for_hgnc_id(gene_id)
    transcript_ids = ensembl.get_transcript_ids_for_ensembl_gene_ids(ensembl_genes, [gene_id])

    # sometimes we get HGNC symbols that do not match the ensembl rest version
    # that we are currently using. We can look for earlier HGNC symbols for
    # the gene using the service at rest.genenames.org
    alt_symbols = []
    if len(transcript_ids) == 0:
        alt_symbols = ensembl.get_previous_symbol(gene_id)
        genes = [ensembl.get_genes_for_hgnc_id(symbol) for symbol in alt_symbols]
        genes = [item for sublist in genes for item in sublist]
        ensembl_genes += genes

        symbols = [gene_id] + alt_symbols
        transcript_ids = ensembl.get_transcript_ids_for_ensembl_gene_ids(ensembl_genes, symbols)

    return get_transcript_lengths(ensembl, transcript_ids)
def capture(self, event_type, date=None, context=None, custom=None,
            stack=None, handled=True, **kwargs):
    """
    Captures and processes an event and pipes it off to Client.send.
    """
    if event_type == "Exception":
        # never gather log stack for exceptions
        stack = False
    data = self._build_msg_for_logging(
        event_type, date=date, context=context, custom=custom,
        stack=stack, handled=handled, **kwargs
    )

    if data:
        # queue data, and flush the queue if this is an unhandled exception
        self.queue(ERROR, data, flush=not handled)
        return data["id"]
def exists(self, table_id):
    """ Check if a table exists in Google BigQuery

    Parameters
    ----------
    table_id : str
        Name of table to be verified

    Returns
    -------
    boolean
        true if table exists, otherwise false
    """
    from google.api_core.exceptions import NotFound

    table_ref = self.client.dataset(self.dataset_id).table(table_id)
    try:
        self.client.get_table(table_ref)
        return True
    except NotFound:
        return False
    except self.http_error as ex:
        self.process_http_error(ex)
def build_header(self, title):
    """Generate the header for the Markdown file."""
    header = ['---',
              'title: ' + title,
              'author(s): ' + self.user,
              'tags: ',
              'created_at: ' + str(self.date_created),
              'updated_at: ' + str(self.date_updated),
              'tldr: ',
              'thumbnail: ',
              '---']
    self.out = header + self.out
def gpg_app_delete_key(blockchain_id, appname, keyname, txid=None,
                       immutable=False, proxy=None, wallet_keys=None,
                       config_dir=None):
    """
    Remove an application GPG key.
    Unstash the local private key.
    Return {'status': True, ...} on success
    Return {'error': ...} on error

    If immutable is True, then remove the data from the user's zonefile,
    not profile. The delete may take on the order of an hour to complete
    on the blockchain. A transaction ID will be returned to you on
    successful deletion, and it will be up to you to wait for the
    transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None

    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable(blockchain_id, fq_key_name,
                                              proxy=proxy, wallet_keys=wallet_keys)
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable(blockchain_id, None,
                                                data_id=fq_key_name, proxy=proxy)
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv

    dead_pubkey = dead_pubkey_kv[dead_pubkey_kv.keys()[0]]
    key_id = gpg_key_fingerprint(dead_pubkey, config_dir=config_dir)
    assert key_id is not None, "Failed to load pubkey fingerprint"

    # actually delete
    if not immutable:
        result = client.delete_mutable(blockchain_id, fq_key_name,
                                       proxy=proxy, wallet_keys=wallet_keys)
    else:
        result = client.delete_immutable(blockchain_id, None, data_id=fq_key_name,
                                         wallet_keys=wallet_keys, proxy=proxy)

    if 'error' in result:
        return result

    # unstash
    try:
        rc = gpg_unstash_key(appname, key_id, config_dir=config_dir)
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id)
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise

    return result
def _timedatectl():
    '''
    get the output of timedatectl
    '''
    ret = __salt__['cmd.run_all'](['timedatectl'], python_shell=False)

    if ret['retcode'] != 0:
        msg = 'timedatectl failed: {0}'.format(ret['stderr'])
        raise CommandExecutionError(msg)

    return ret
def _lincomb(self, a, x1, b, x2, out):
    """Raw linear combination."""
    self.tspace._lincomb(a, x1.tensor, b, x2.tensor, out.tensor)
def format2(self, raw, out=None, scheme=''):
    """ Parse and send the colored source.

    If out and scheme are not specified, the defaults (given to
    constructor) are used.

    out should be a file-type object. Optionally, out can be given as the
    string 'str' and the parser will automatically return the output in a
    string."""

    string_output = 0
    if out == 'str' or self.out == 'str' or \
       isinstance(self.out, StringIO.StringIO):
        # XXX - I don't really like this state handling logic, but at this
        # point I don't want to make major changes, so adding the
        # isinstance() check is the simplest I can do to ensure correct
        # behavior.
        out_old = self.out
        self.out = StringIO.StringIO()
        string_output = 1
    elif out is not None:
        self.out = out

    # Fast return of the unmodified input for NoColor scheme
    if scheme == 'NoColor':
        error = False
        self.out.write(raw)
        if string_output:
            return raw, error
        else:
            return None, error

    # local shorthands
    colors = self.color_table[scheme].colors
    self.colors = colors  # put in object so __call__ sees it

    # Remove trailing whitespace and normalize tabs
    self.raw = raw.expandtabs().rstrip()

    # store line offsets in self.lines
    self.lines = [0, 0]
    pos = 0
    raw_find = self.raw.find
    lines_append = self.lines.append
    while 1:
        pos = raw_find('\n', pos) + 1
        if not pos:
            break
        lines_append(pos)
    lines_append(len(self.raw))

    # parse the source and write it
    self.pos = 0
    text = StringIO.StringIO(self.raw)

    error = False
    try:
        for atoken in generate_tokens(text.readline):
            self(*atoken)
    except tokenize.TokenError as ex:
        msg = ex.args[0]
        line = ex.args[1][0]
        self.out.write("%s\n\n*** ERROR: %s%s%s\n" %
                       (colors[token.ERRORTOKEN],
                        msg, self.raw[self.lines[line]:],
                        colors.normal))
        error = True
    self.out.write(colors.normal + '\n')

    if string_output:
        output = self.out.getvalue()
        self.out = out_old
        return (output, error)

    return (None, error)
def _ref_prop_matches(prop, target_classname, ref_classname,
                      resultclass_names, role):
    """
    Test filters for a reference property.
    Returns `True` if it matches the criteria; returns `False` if it
    does not.

    The match criteria are:
      - target_classname == prop_reference_class
      - if result_classes are not None, ref_classname is in result_classes
      - if role is not None, prop name matches role
    """
    assert prop.type == 'reference'
    if prop.reference_class.lower() == target_classname.lower():
        if resultclass_names and ref_classname not in resultclass_names:
            return False
        if role and prop.name.lower() != role:
            return False
        return True
    return False
def _make_tuple(self, env):
    """Instantiate the Tuple based on this TupleNode."""
    t = runtime.Tuple(self, env, dict2tuple)

    # A tuple also provides its own schema spec
    schema = schema_spec_from_tuple(t)
    t.attach_schema(schema)
    return t
def select(self):
    """ Select the current bitmap into this wxDC instance """
    if sys.platform == 'win32':
        self.dc.SelectObject(self.bitmap)
        self.IsSelected = True
def volume_present(name, volume_name=None, volume_id=None, instance_name=None,
                   instance_id=None, device=None, size=None, snapshot_id=None,
                   volume_type=None, iops=None, encrypted=False,
                   kms_key_id=None, region=None, key=None, keyid=None,
                   profile=None):
    '''
    Ensure the EC2 volume is present and attached.

    name
        State definition name.

    volume_name
        The Name tag value for the volume. If no volume with that matching
        name tag is found, a new volume will be created. If multiple volumes
        are matched, the state will fail.

    volume_id
        Resource ID of the volume. Exclusive with 'volume_name'.

    instance_name
        Attach volume to instance with this Name tag.
        Exclusive with 'instance_id'.

    instance_id
        Attach volume to instance with this ID.
        Exclusive with 'instance_name'.

    device
        The device on the instance through which the volume is exposed
        (e.g. /dev/sdh)

    size
        The size of the new volume, in GiB. If you're creating the volume
        from a snapshot and don't specify a volume size, the default is the
        snapshot size. Optionally specified at volume creation time; will be
        ignored afterward. Requires 'volume_name'.

    snapshot_id
        The snapshot ID from which the new Volume will be created. Optionally
        specified at volume creation time; will be ignored afterward.
        Requires 'volume_name'.

    volume_type
        The type of the volume. Optionally specified at volume creation time;
        will be ignored afterward. Requires 'volume_name'. Valid volume types
        for AWS can be found here:
        http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html

    iops
        The provisioned IOPS you want to associate with this volume.
        Optionally specified at volume creation time; will be ignored
        afterward. Requires 'volume_name'.

    encrypted
        Specifies whether the volume should be encrypted. Optionally
        specified at volume creation time; will be ignored afterward.
        Requires 'volume_name'.

    kms_key_id
        If encrypted is True, this KMS Key ID may be specified to encrypt
        volume with this key. Optionally specified at volume creation time;
        will be ignored afterward. Requires 'volume_name'.
        e.g.: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}}
    old_dict = {}
    new_dict = {}
    running_states = ('running', 'stopped')

    if not salt.utils.data.exactly_one((volume_name, volume_id)):
        raise SaltInvocationError("Exactly one of 'volume_name' or "
                                  "'volume_id' must be provided.")
    if not salt.utils.data.exactly_one((instance_name, instance_id)):
        raise SaltInvocationError("Exactly one of 'instance_name' or "
                                  "'instance_id' must be provided.")
    if device is None:
        raise SaltInvocationError("Parameter 'device' is required.")
    args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
    if instance_name:
        instance_id = __salt__['boto_ec2.get_id'](
            name=instance_name, in_states=running_states, **args)
        if not instance_id:
            raise SaltInvocationError(
                'Instance with Name {0} not found.'.format(instance_name))

    instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id,
                                                    return_objs=True, **args)
    instance = instances[0]
    if volume_name:
        filters = {}
        filters.update({'tag:Name': volume_name})
        vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args)
        if len(vols) > 1:
            msg = ("More than one volume matched volume name {0}, "
                   "can't continue in state {1}".format(volume_name, name))
            raise SaltInvocationError(msg)
        if not vols:
            if __opts__['test']:
                ret['comment'] = ('The volume with name {0} is set to be created and attached'
                                  ' on {1}({2}).'.format(volume_name, instance_id, device))
                ret['result'] = None
                return ret
            _rt = __salt__['boto_ec2.create_volume'](zone_name=instance.placement,
                                                     size=size,
                                                     snapshot_id=snapshot_id,
                                                     volume_type=volume_type,
                                                     iops=iops,
                                                     encrypted=encrypted,
                                                     kms_key_id=kms_key_id,
                                                     wait_for_creation=True,
                                                     **args)
            if 'result' in _rt:
                volume_id = _rt['result']
            else:
                raise SaltInvocationError(
                    'Error creating volume with name {0}.'.format(volume_name))
            _rt = __salt__['boto_ec2.set_volumes_tags'](tag_maps=[{
                'filters': {'volume_ids': [volume_id]},
                'tags': {'Name': volume_name}
            }], **args)
            if _rt['success'] is False:
                raise SaltInvocationError(
                    'Error updating requested volume {0} with name {1}. '
                    '{2}'.format(volume_id, volume_name, _rt['comment']))
            old_dict['volume_id'] = None
            new_dict['volume_id'] = volume_id
        else:
            volume_id = vols[0]
    vols = __salt__['boto_ec2.get_all_volumes'](volume_ids=[volume_id],
                                                return_objs=True, **args)
    if not vols:
        raise SaltInvocationError(
            'Volume {0} does not exist.'.format(volume_id))
    vol = vols[0]
    if vol.zone != instance.placement:
        raise SaltInvocationError(
            ('Volume {0} in {1} cannot attach to instance'
             ' {2} in {3}.').format(volume_id, vol.zone,
                                    instance_id, instance.placement))
    attach_data = vol.attach_data
    if attach_data is not None and attach_data.instance_id is not None:
        if instance_id == attach_data.instance_id and device == attach_data.device:
            ret['comment'] = 'The volume {0} is attached on {1}({2}).'.format(
                volume_id, instance_id, device)
            return ret
        else:
            if __opts__['test']:
                ret['comment'] = ('The volume {0} is set to be detached'
                                  ' from {1}({2}) and attached on'
                                  ' {3}({4}).').format(volume_id,
                                                       attach_data.instance_id,
                                                       attach_data.device,
                                                       instance_id, device)
                ret['result'] = None
                return ret
            if __salt__['boto_ec2.detach_volume'](volume_id=volume_id,
                                                  wait_for_detachement=True,
                                                  **args):
                ret['comment'] = 'Volume {0} is detached from {1}({2}).'.format(
                    volume_id, attach_data.instance_id, attach_data.device)
                old_dict['instance_id'] = attach_data.instance_id
                old_dict['device'] = attach_data.device
            else:
                raise SaltInvocationError(
                    ('The volume {0} is already attached on instance {1}({2}).'
                     ' Failed to detach').format(volume_id,
                                                 attach_data.instance_id,
                                                 attach_data.device))
    else:
        old_dict['instance_id'] = instance_id
        old_dict['device'] = None
    if __opts__['test']:
        ret['comment'] = 'The volume {0} is set to be attached on {1}({2}).'.format(
            volume_id, instance_id, device)
        ret['result'] = None
        return ret
    if __salt__['boto_ec2.attach_volume'](volume_id=volume_id,
                                          instance_id=instance_id,
                                          device=device, **args):
        ret['comment'] = ' '.join([
            ret['comment'],
            'Volume {0} is attached on {1}({2}).'.format(volume_id,
                                                         instance_id, device)])
        new_dict['instance_id'] = instance_id
        new_dict['device'] = device
        ret['changes'] = {'old': old_dict, 'new': new_dict}
    else:
        ret['comment'] = 'Error attaching volume {0} to instance {1}({2}).'.format(
            volume_id, instance_id, device)
        ret['result'] = False
    return ret
def parse(
    data = None,
    template = None,
    data_file = None,
    template_file = None,
    interp = None,
    debug = False,
    predefines = True,
    int3 = True,
    keep_successful = False,
    printf = True,
):
    """Parse the data stream using the supplied template. The data stream
    WILL NOT be automatically closed.

    :data: Input data, can be either a string or a file-like object (StringIO, file, etc)
    :template: template contents (str)
    :data_file: PATH to the data to be used as the input stream
    :template_file: template file path
    :interp: the interpreter to be used (a default one will be created if ``None``)
    :debug: if debug information should be printed while interpreting the template (false)
    :predefines: if built-in type information should be inserted (true)
    :int3: if debugger breaks are allowed while interpreting the template (true)
    :keep_successful: return any successfully parsed data instead of raising an error.
        If an error occurred and ``keep_successful`` is True, then ``_pfp__error`` will
        contain the exception object
    :printf: if ``False``, all calls to ``Printf`` (:any:`pfp.native.compat_interface.Printf`)
        will be noops. (default=``True``)
    :returns: pfp DOM
    """
    if data is None and data_file is None:
        raise Exception("No input data was specified")

    if data is not None and data_file is not None:
        raise Exception("Only one input data may be specified")

    if isinstance(data, six.string_types):
        data = six.StringIO(data)

    if data_file is not None:
        data = open(os.path.expanduser(data_file), "rb")

    if template is None and template_file is None:
        raise Exception("No template specified!")

    if template is not None and template_file is not None:
        raise Exception("Only one template may be specified!")

    orig_filename = "string"
    if template_file is not None:
        orig_filename = template_file

        try:
            with open(os.path.expanduser(template_file), "r") as f:
                template = f.read()
        except Exception as e:
            raise Exception("Could not open template file '{}': {}".format(template_file, e))

    # the user may specify their own instance of PfpInterp to be
    # used
    if interp is None:
        interp = pfp.interp.PfpInterp(
            debug = debug,
            parser = PARSER,
            int3 = int3,
        )

    # so we can consume single bits at a time
    data = BitwrappedStream(data)

    dom = interp.parse(
        data,
        template,
        predefines = predefines,
        orig_filename = orig_filename,
        keep_successful = keep_successful,
        printf = printf,
    )

    # close the data stream if a data_file was specified
    if data_file is not None:
        data.close()

    return dom
Parse the data stream using the supplied template. The data stream
WILL NOT be automatically closed.

:data: Input data, can be either a string or a file-like object (StringIO, file, etc)
:template: template contents (str)
:data_file: PATH to the data to be used as the input stream
:template_file: template file path
:interp: the interpreter to be used (a default one will be created if ``None``)
:debug: if debug information should be printed while interpreting the template (false)
:predefines: if built-in type information should be inserted (true)
:int3: if debugger breaks are allowed while interpreting the template (true)
:keep_successful: return any successfully parsed data instead of raising an error.
    If an error occurred and ``keep_successful`` is True, then ``_pfp__error`` will
    contain the exception object
:printf: if ``False``, all calls to ``Printf`` (:any:`pfp.native.compat_interface.Printf`)
    will be noops. (default=``True``)
:returns: pfp DOM
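A short, hedged example of calling the parser with an inline template; the template text is an assumption chosen for illustration, not taken from the module.

import pfp

# Parse four bytes as one little-endian int using an inline 010-style template.
dom = pfp.parse(data="\x01\x00\x00\x00", template="int value;")
print(dom.value)  # -> 1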
def get_current_price(crypto, fiat, services=None, convert_to=None, helper_prices=None, **modes):
    """
    High level function for getting the current exchange rate for a cryptocurrency.
    If no service explicitly supports the given fiat currency, the wildcard
    service is tried. If that does not work, it tries converting through an
    intermediate cryptocurrency if available.
    """
    fiat = fiat.lower()
    args = {'crypto': crypto, 'fiat': fiat, 'convert_to': convert_to}

    if not services:
        services = get_optimal_services(crypto, 'current_price')

    # Pre-seed ``result`` with a fallback error so the trailing ``raise result``
    # is always defined, even when no fetch path below was applicable.
    result = Exception("Unable to find price for {0}->{1}".format(crypto, fiat))

    if fiat in services:
        # first, try service with explicit fiat support
        try_services = services[fiat]
        result = _try_price_fetch(try_services, args, modes)
        if not isinstance(result, Exception):
            return result

    if '*' in services:
        # then try wildcard service
        try_services = services['*']
        result = _try_price_fetch(try_services, args, modes)
        if not isinstance(result, Exception):
            return result

    def _do_composite_price_fetch(crypto, convert_crypto, fiat, helpers, modes):
        before = modes.get('report_services', False)
        modes['report_services'] = True
        services1, converted_price = get_current_price(crypto, convert_crypto, **modes)
        # fall back to a live fetch when no helper price covers this pair
        if not helpers or fiat not in helpers or convert_crypto not in helpers[fiat]:
            services2, fiat_price = get_current_price(convert_crypto, fiat, **modes)
        else:
            services2, fiat_price = helpers[fiat][convert_crypto]
        modes['report_services'] = before

        if modes.get('report_services', False):
            #print("composite service:", crypto, fiat, services1, services2)
            serv = CompositeService(services1, services2, convert_crypto)
            return [serv], converted_price * fiat_price
        else:
            return converted_price * fiat_price

    all_composite_cryptos = ['btc', 'ltc', 'doge', 'uno']
    if crypto in all_composite_cryptos:
        all_composite_cryptos.remove(crypto)

    for composite_attempt in all_composite_cryptos:
        if composite_attempt in services and services[composite_attempt]:
            result = _do_composite_price_fetch(
                crypto, composite_attempt, fiat, helper_prices, modes
            )
            if not isinstance(result, Exception):
                return result

    raise result
High level function for getting the current exchange rate for a cryptocurrency.
If no service explicitly supports the given fiat currency, the wildcard
service is tried. If that does not work, it tries converting through an
intermediate cryptocurrency if available.
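A hedged usage sketch; it assumes network access and at least one working price service.

# Fetch the USD price of litecoin, letting the library pick optimal services.
price = get_current_price('ltc', 'usd')
print("1 LTC = {0:.2f} USD".format(price))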
def add_email_address(self, email, hidden=None):
    """Add email address.

    Args:
        :param email: email of the author.
        :type email: string

        :param hidden: whether the email address should be hidden (not public).
        :type hidden: boolean
    """
    existing_emails = get_value(self.obj, 'email_addresses', [])
    found_email = next(
        (existing_email for existing_email in existing_emails
         if existing_email.get('value') == email),
        None
    )

    if found_email is None:
        new_email = {'value': email}
        if hidden is not None:
            new_email['hidden'] = hidden
        self._append_to('email_addresses', new_email)
    elif hidden is not None:
        found_email['hidden'] = hidden
Add email address.

Args:
    :param email: email of the author.
    :type email: string

    :param hidden: whether the email address should be hidden (not public).
    :type hidden: boolean
def brokers(self): """Get all BrokerMetadata Returns: set: {BrokerMetadata, ...} """ return set(self._brokers.values()) or set(self._bootstrap_brokers.values())
Get all BrokerMetadata Returns: set: {BrokerMetadata, ...}
def bfx(value, msb, lsb): """! @brief Extract a value from a bitfield.""" mask = bitmask((msb, lsb)) return (value & mask) >> lsb
! @brief Extract a value from a bitfield.
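A worked example, assuming pyOCD's ``bitmask`` helper accepts an ``(msb, lsb)`` tuple as the function body implies:

# bitmask((6, 4)) == 0b01110000; masking 0b11011010 gives 0b01010000,
# and shifting right by lsb=4 leaves the 3-bit field 0b101 == 5.
assert bfx(0b11011010, msb=6, lsb=4) == 0b101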
def _to_bel_lines_header(graph) -> Iterable[str]: """Iterate the lines of a BEL graph's corresponding BEL script's header. :param pybel.BELGraph graph: A BEL graph """ yield '# This document was created by PyBEL v{} and bel-resources v{} on {}\n'.format( VERSION, bel_resources.constants.VERSION, time.asctime() ) yield from make_knowledge_header( namespace_url=graph.namespace_url, namespace_patterns=graph.namespace_pattern, annotation_url=graph.annotation_url, annotation_patterns=graph.annotation_pattern, annotation_list=graph.annotation_list, **graph.document )
Iterate the lines of a BEL graph's corresponding BEL script's header. :param pybel.BELGraph graph: A BEL graph
def _lu_reconstruct_assertions(lower_upper, perm, validate_args): """Returns list of assertions related to `lu_reconstruct` assumptions.""" assertions = [] message = 'Input `lower_upper` must have at least 2 dimensions.' if lower_upper.shape.ndims is not None: if lower_upper.shape.ndims < 2: raise ValueError(message) elif validate_args: assertions.append( tf.compat.v1.assert_rank_at_least(lower_upper, rank=2, message=message)) message = '`rank(lower_upper)` must equal `rank(perm) + 1`' if lower_upper.shape.ndims is not None and perm.shape.ndims is not None: if lower_upper.shape.ndims != perm.shape.ndims + 1: raise ValueError(message) elif validate_args: assertions.append( tf.compat.v1.assert_rank( lower_upper, rank=tf.rank(perm) + 1, message=message)) message = '`lower_upper` must be square.' if lower_upper.shape[:-2].is_fully_defined(): if lower_upper.shape[-2] != lower_upper.shape[-1]: raise ValueError(message) elif validate_args: m, n = tf.split(tf.shape(input=lower_upper)[-2:], num_or_size_splits=2) assertions.append(tf.compat.v1.assert_equal(m, n, message=message)) return assertions
Returns list of assertions related to `lu_reconstruct` assumptions.
def is_model_mpttmeta_subclass(node): """Checks that node is derivative of MPTTMeta class.""" if node.name != 'MPTTMeta' or not isinstance(node.parent, ClassDef): return False parents = ('django.db.models.base.Model', '.Model', # for the transformed version used in this plugin 'django.forms.forms.Form', '.Form', 'django.forms.models.ModelForm', '.ModelForm') return node_is_subclass(node.parent, *parents)
Checks that node is derivative of MPTTMeta class.
def _remap_input(self, operation, path, *args, **kw): """Called for path inputs""" if operation in self.write_ops and not self._ok(path): self._violation(operation, os.path.realpath(path), *args, **kw) return path
Called for path inputs
def from_path_by_criterion(dir_path, criterion, keepboth=False):
    """Create a new FileCollection, and select some files from ``dir_path``.

    How to construct your own criterion function::

        def filter_image(winfile):
            if winfile.ext in [".jpg", ".png", ".bmp"]:
                return True
            else:
                return False

        fc = FileCollection.from_path_by_criterion(dir_path, filter_image)

    :param dir_path: path of a directory
    :type dir_path: string

    :param criterion: customize filter function
    :type criterion: function

    :param keepboth: if True, returns two file collections, one is files
      with criterion=True, another is False.
    :type keepboth: boolean

    Select all files under ``dir_path`` and build a FileCollection from
    those matching the rules defined in ``criterion``.
    """
    if keepboth:
        fc_yes, fc_no = FileCollection(), FileCollection()
        for winfile in FileCollection.yield_all_winfile(dir_path):
            if criterion(winfile):
                fc_yes.files.setdefault(winfile.abspath, winfile)
            else:
                fc_no.files.setdefault(winfile.abspath, winfile)
        return fc_yes, fc_no
    else:
        fc = FileCollection()
        for winfile in FileCollection.yield_all_winfile(dir_path):
            if criterion(winfile):
                fc.files.setdefault(winfile.abspath, winfile)
        return fc
Create a new FileCollection, and select some files from ``dir_path``.

How to construct your own criterion function::

    def filter_image(winfile):
        if winfile.ext in [".jpg", ".png", ".bmp"]:
            return True
        else:
            return False

    fc = FileCollection.from_path_by_criterion(dir_path, filter_image)

:param dir_path: path of a directory
:type dir_path: string

:param criterion: customize filter function
:type criterion: function

:param keepboth: if True, returns two file collections, one is files
  with criterion=True, another is False.
:type keepboth: boolean

Select all files under ``dir_path`` and build a FileCollection from those
matching the rules defined in ``criterion``.
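A small sketch of the ``keepboth`` mode, reusing the docstring's own filter; the directory path is a placeholder.

def filter_image(winfile):
    return winfile.ext in [".jpg", ".png", ".bmp"]

# One pass over a (hypothetical) folder yields both partitions at once.
fc_images, fc_other = FileCollection.from_path_by_criterion(
    "C:/Users/demo/Pictures", filter_image, keepboth=True)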
def pkt_check(*args, func=None): """Check if arguments are valid packets.""" func = func or inspect.stack()[2][3] for var in args: dict_check(var, func=func) dict_check(var.get('frame'), func=func) enum_check(var.get('protocol'), func=func) real_check(var.get('timestamp'), func=func) ip_check(var.get('src'), var.get('dst'), func=func) bool_check(var.get('syn'), var.get('fin'), func=func) int_check(var.get('srcport'), var.get('dstport'), var.get('index'), func=func)
Check if arguments are valid packets.
def fit(self, index, n_nodes, tau_matrix, previous_tree, edges=None):
    """Fits tree object.

    Args:
        :param index: index of the tree
        :param n_nodes: number of nodes in the tree
        :param tau_matrix: kendall's tau matrix of the data
        :param previous_tree: tree object of previous level
        :param edges: (optional) list of edges to initialize the tree with
        :type index: int
        :type n_nodes: int
        :type tau_matrix: np.ndarray of size n_nodes*n_nodes
    """
    self.level = index + 1
    self.n_nodes = n_nodes
    self.tau_matrix = tau_matrix
    self.previous_tree = previous_tree
    self.edges = edges or []

    if not self.edges:
        if self.level == 1:
            self.u_matrix = previous_tree
            self._build_first_tree()

        else:
            self._build_kth_tree()

        self.prepare_next_tree()

    self.fitted = True
Fits tree object.

Args:
    :param index: index of the tree
    :param n_nodes: number of nodes in the tree
    :param tau_matrix: kendall's tau matrix of the data
    :param previous_tree: tree object of previous level
    :param edges: (optional) list of edges to initialize the tree with
    :type index: int
    :type n_nodes: int
    :type tau_matrix: np.ndarray of size n_nodes*n_nodes
def validate_empty_values(self, data): """ Validate empty values, and either: * Raise `ValidationError`, indicating invalid data. * Raise `SkipField`, indicating that the field should be ignored. * Return (True, data), indicating an empty value that should be returned without any further validation being applied. * Return (False, data), indicating a non-empty value, that should have validation applied as normal. """ if self.read_only: return (True, self.get_default()) if data is empty: if getattr(self.root, 'partial', False): raise SkipField() if self.required: self.fail('required') return (True, self.get_default()) if data is None: if not self.allow_null: self.fail('null') return (True, None) return (False, data)
Validate empty values, and either: * Raise `ValidationError`, indicating invalid data. * Raise `SkipField`, indicating that the field should be ignored. * Return (True, data), indicating an empty value that should be returned without any further validation being applied. * Return (False, data), indicating a non-empty value, that should have validation applied as normal.
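A sketch of how a caller typically consumes this contract, based only on the documented return values; ``field`` and ``raw_value`` are hypothetical names standing in for a serializer field and its incoming primitive datum.

is_empty_value, data = field.validate_empty_values(raw_value)
if is_empty_value:
    validated = data  # default or None, passed through untouched
else:
    validated = field.to_internal_value(data)  # normal validation path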
def collect_gaps(blast, use_subject=False): """ Collect the gaps between adjacent HSPs in the BLAST file. """ key = lambda x: x.sstart if use_subject else x.qstart blast.sort(key=key) for a, b in zip(blast, blast[1:]): if use_subject: if a.sstop < b.sstart: yield b.sstart - a.sstop else: if a.qstop < b.qstart: yield b.qstart - a.qstop
Collect the gaps between adjacent HSPs in the BLAST file.
def activate(request, activation_key,
             template_name='accounts/activate_fail.html',
             success_url=None, extra_context=None):
    """
    Activate a user with an activation key.

    The key is a SHA1 string. When the SHA1 is found with an
    :class:`AccountsSignup`, the :class:`User` of that account will be
    activated. After a successful activation the view will redirect to
    ``success_url``. If the SHA1 is not found, the user will be shown the
    ``template_name`` template displaying a fail message.

    :param activation_key:
        String of a SHA1 string of 40 characters long. A SHA1 is always 160bit
        long, with 4 bits per character this makes it --160/4-- 40 characters
        long.

    :param template_name:
        String containing the template name that is used when the
        ``activation_key`` is invalid and the activation fails. Defaults to
        ``accounts/activate_fail.html``.

    :param success_url:
        String containing the URL where the user should be redirected to after
        a successful activation. Will replace ``%(username)s`` with string
        formatting if supplied. If ``success_url`` is left empty, will direct
        to ``accounts_profile_detail`` view.

    :param extra_context:
        Dictionary containing variables which could be added to the template
        context. Defaults to an empty dictionary.

    """
    user = AccountsSignup.objects.activate_user(activation_key)
    if user:
        # Sign the user in.
        auth_user = authenticate(identification=user.email,
                                 check_password=False)
        login(request, auth_user)

        if accounts_settings.ACCOUNTS_USE_MESSAGES:
            messages.success(request,
                             _('Your account has been activated and you have been signed in.'),
                             fail_silently=True)

        if success_url:
            redirect_to = success_url % {'username': user.username}
        else:
            redirect_to = reverse('accounts_profile_detail',
                                  kwargs={'username': user.username})
        return redirect(redirect_to)
    else:
        if not extra_context:
            extra_context = dict()
        return ExtraContextTemplateView.as_view(template_name=template_name,
                                                extra_context=extra_context)(request)
Activate a user with an activation key.

The key is a SHA1 string. When the SHA1 is found with an
:class:`AccountsSignup`, the :class:`User` of that account will be activated.
After a successful activation the view will redirect to ``success_url``. If
the SHA1 is not found, the user will be shown the ``template_name`` template
displaying a fail message.

:param activation_key:
    String of a SHA1 string of 40 characters long. A SHA1 is always 160bit
    long, with 4 bits per character this makes it --160/4-- 40 characters
    long.

:param template_name:
    String containing the template name that is used when the
    ``activation_key`` is invalid and the activation fails. Defaults to
    ``accounts/activate_fail.html``.

:param success_url:
    String containing the URL where the user should be redirected to after a
    successful activation. Will replace ``%(username)s`` with string
    formatting if supplied. If ``success_url`` is left empty, will direct to
    ``accounts_profile_detail`` view.

:param extra_context:
    Dictionary containing variables which could be added to the template
    context. Defaults to an empty dictionary.
def newDocText(self, content): """Creation of a new text node within a document. """ ret = libxml2mod.xmlNewDocText(self._o, content) if ret is None:raise treeError('xmlNewDocText() failed') __tmp = xmlNode(_obj=ret) return __tmp
Creation of a new text node within a document.
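A minimal sketch using the libxml2 Python bindings, with method names as commonly documented for those bindings:

import libxml2

doc = libxml2.newDoc("1.0")
root = doc.newChild(None, "greeting", None)
root.addChild(doc.newDocText("hello world"))
print(doc.serialize())  # <?xml ...?><greeting>hello world</greeting>
doc.freeDoc()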
def create_info_endpoint(self, name, data):
    """Create an endpoint to serve info GET requests."""
    # make sure data is serializable
    data = make_serializable(data)

    # create generic restful resource to serve static JSON data
    class InfoBase(Resource):
        @staticmethod
        def get():
            return data

    def info_factory(name):
        """Return an Info derivative resource."""
        class NewClass(InfoBase):
            pass
        NewClass.__name__ = "{}_{}".format(name, InfoBase.__name__)
        return NewClass

    path = '/info/{}'.format(name)
    self.api.add_resource(info_factory(name), path)
    logger.info('Registered informational resource to {} (available via GET)'.format(path))
    logger.debug('Endpoint {} will now serve the following static data:\n{}'.format(path, data))
Create an endpoint to serve info GET requests.
async def set_tz(self): """ set the environment timezone to the timezone set in your twitter settings """ settings = await self.api.account.settings.get() tz = settings.time_zone.tzinfo_name os.environ['TZ'] = tz time.tzset()
set the environment timezone to the timezone set in your twitter settings
def _set_opts(self, schema=None, **options): """ Set named options (filter out those the value is None) """ if schema is not None: self.schema(schema) for k, v in options.items(): if v is not None: self.option(k, v)
Set named options (filter out those the value is None)
def compare(string1, string2): """Compare two strings while protecting against timing attacks :param str string1: the first string :param str string2: the second string :returns: True if the strings are equal, False if not :rtype: :obj:`bool` """ if len(string1) != len(string2): return False result = True for c1, c2 in izip(string1, string2): result &= c1 == c2 return result
Compare two strings while protecting against timing attacks :param str string1: the first string :param str string2: the second string :returns: True if the strings are equal, False if not :rtype: :obj:`bool`
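A self-contained usage sketch; the tokens are placeholders. The point is that the loop always runs over the full length, so timing does not reveal where a mismatch occurs.

stored_token = "3f786850e387550fdab836ed7e6dc881de23001b"     # hypothetical secret
submitted_token = "3f786850e387550fdab836ed7e6dc881de23001b"  # hypothetical input
authorized = compare(submitted_token, stored_token)  # constant-time equality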
def get_cost_per_mol(self, comp): """ Get best estimate of minimum cost/mol based on known data Args: comp: Composition as a pymatgen.core.structure.Composition Returns: float of cost/mol """ comp = comp if isinstance(comp, Composition) else Composition(comp) decomp = self.get_lowest_decomposition(comp) return sum(k.energy_per_atom * v * comp.num_atoms for k, v in decomp.items())
Get best estimate of minimum cost/mol based on known data Args: comp: Composition as a pymatgen.core.structure.Composition Returns: float of cost/mol
def prune_neighbors(self):
    """
    If the CellDataFrame has been subsetted, some of the cell-cell contacts
    may no longer be part of the dataset. This prunes those no-longer-existent
    connections.

    Returns:
        CellDataFrame: A CellDataFrame with only valid cell-cell contacts
    """
    def _neighbor_check(neighbors, valid):
        # NaN != NaN, so this detects rows with a missing neighbor dict
        if not neighbors == neighbors:
            return np.nan
        valid_keys = set(valid) & set(neighbors.keys())
        d = dict([(k, v) for k, v in neighbors.items() if k in valid_keys])
        return d
    fixed = self.copy()
    valid = self.get_valid_cell_indecies()
    valid = pd.DataFrame(self).merge(valid, on=self.frame_columns).set_index(self.frame_columns + ['cell_index'])
    valid = valid.apply(lambda x: _neighbor_check(x['neighbors'], x['valid']), 1).reset_index().\
        rename(columns={0: 'new_neighbors'})
    fixed = fixed.merge(valid, on=self.frame_columns + ['cell_index']).drop(columns='neighbors').\
        rename(columns={'new_neighbors': 'neighbors'})
    fixed.microns_per_pixel = self.microns_per_pixel
    fixed.db = self.db
    #fixed.loc[:,'neighbors'] = list(new_neighbors)
    return fixed
If the CellDataFrame has been subsetted, some of the cell-cell contacts may
no longer be part of the dataset. This prunes those no-longer-existent
connections.

Returns:
    CellDataFrame: A CellDataFrame with only valid cell-cell contacts
def html_error_template(): """Provides a template that renders a stack trace in an HTML format, providing an excerpt of code as well as substituting source template filenames, line numbers and code for that of the originating source template, as applicable. The template's default ``encoding_errors`` value is ``'htmlentityreplace'``. The template has two options. With the ``full`` option disabled, only a section of an HTML document is returned. With the ``css`` option disabled, the default stylesheet won't be included. """ import mako.template return mako.template.Template(r""" <%! from mako.exceptions import RichTraceback, syntax_highlight,\ pygments_html_formatter %> <%page args="full=True, css=True, error=None, traceback=None"/> % if full: <html> <head> <title>Mako Runtime Error</title> % endif % if css: <style> body { font-family:verdana; margin:10px 30px 10px 30px;} .stacktrace { margin:5px 5px 5px 5px; } .highlight { padding:0px 10px 0px 10px; background-color:#9F9FDF; } .nonhighlight { padding:0px; background-color:#DFDFDF; } .sample { padding:10px; margin:10px 10px 10px 10px; font-family:monospace; } .sampleline { padding:0px 10px 0px 10px; } .sourceline { margin:5px 5px 10px 5px; font-family:monospace;} .location { font-size:80%; } .highlight { white-space:pre; } .sampleline { white-space:pre; } % if pygments_html_formatter: ${pygments_html_formatter.get_style_defs()} .linenos { min-width: 2.5em; text-align: right; } pre { margin: 0; } .syntax-highlighted { padding: 0 10px; } .syntax-highlightedtable { border-spacing: 1px; } .nonhighlight { border-top: 1px solid #DFDFDF; border-bottom: 1px solid #DFDFDF; } .stacktrace .nonhighlight { margin: 5px 15px 10px; } .sourceline { margin: 0 0; font-family:monospace; } .code { background-color: #F8F8F8; width: 100%; } .error .code { background-color: #FFBDBD; } .error .syntax-highlighted { background-color: #FFBDBD; } % endif </style> % endif % if full: </head> <body> % endif <h2>Error !</h2> <% tback = RichTraceback(error=error, traceback=traceback) src = tback.source line = tback.lineno if src: lines = src.split('\n') else: lines = None %> <h3>${tback.errorname}: ${tback.message|h}</h3> % if lines: <div class="sample"> <div class="nonhighlight"> % for index in range(max(0, line-4),min(len(lines), line+5)): <% if pygments_html_formatter: pygments_html_formatter.linenostart = index + 1 %> % if index + 1 == line: <% if pygments_html_formatter: old_cssclass = pygments_html_formatter.cssclass pygments_html_formatter.cssclass = 'error ' + old_cssclass %> ${lines[index] | syntax_highlight(language='mako')} <% if pygments_html_formatter: pygments_html_formatter.cssclass = old_cssclass %> % else: ${lines[index] | syntax_highlight(language='mako')} % endif % endfor </div> </div> % endif <div class="stacktrace"> % for (filename, lineno, function, line) in tback.reverse_traceback: <div class="location">${filename}, line ${lineno}:</div> <div class="nonhighlight"> <% if pygments_html_formatter: pygments_html_formatter.linenostart = lineno %> <div class="sourceline">${line | syntax_highlight(filename)}</div> </div> % endfor </div> % if full: </body> </html> % endif """, output_encoding=sys.getdefaultencoding(), encoding_errors='htmlentityreplace')
Provides a template that renders a stack trace in an HTML format, providing an excerpt of code as well as substituting source template filenames, line numbers and code for that of the originating source template, as applicable. The template's default ``encoding_errors`` value is ``'htmlentityreplace'``. The template has two options. With the ``full`` option disabled, only a section of an HTML document is returned. With the ``css`` option disabled, the default stylesheet won't be included.
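The standard Mako pattern this template supports: render normally and fall back to the rich HTML traceback page on failure (the template filename here is a placeholder).

from mako.template import Template
from mako import exceptions

try:
    template = Template(filename="hello.html")  # hypothetical template path
    print(template.render())
except Exception:
    print(exceptions.html_error_template().render())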
def data(self, index, role): """QAbstractItemModel method implementation """ if role == Qt.DisplayRole and \ index.row() < len(self.words): text = self.words[index.row()] typed = text[:len(self._typedText)] canComplete = text[len(self._typedText):len(self._typedText) + len(self.canCompleteText)] rest = text[len(self._typedText) + len(self.canCompleteText):] if canComplete: # NOTE foreground colors are hardcoded, but I can't set background color of selected item (Qt bug?) # might look bad on some color themes return '<html>' \ '%s' \ '<font color="#e80000">%s</font>' \ '%s' \ '</html>' % (typed, canComplete, rest) else: return typed + rest else: return None
QAbstractItemModel method implementation
def kill_pane(self, pane): """ Kill the given pane, and remove it from the arrangement. """ assert isinstance(pane, Pane) # Send kill signal. if not pane.process.is_terminated: pane.process.kill() # Remove from layout. self.arrangement.remove_pane(pane)
Kill the given pane, and remove it from the arrangement.
def _getconf(self, rscpath, logger=None, conf=None): """Get specific conf from one driver path. :param str rscpath: resource path. :param Logger logger: logger to use. """ result = None resource = self.pathresource(rscpath=rscpath, logger=logger) if resource is not None: for cname in self._cnames(resource=resource): category = Category(name=cname) if result is None: result = Configuration() result += category for param in self._params(resource=resource, cname=cname): if conf is not None: confparam = None if cname in conf and param.name in conf[cname]: confparam = conf[cname][param.name] else: confparam = conf.param(pname=param.name) if confparam is not None: svalue = param.svalue param.update(confparam) if svalue is not None: param.svalue = svalue param.resolve() category += param return result
Get specific conf from one driver path. :param str rscpath: resource path. :param Logger logger: logger to use.
def connect_mturk(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): """ :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.mturk.connection.MTurkConnection` :return: A connection to MTurk """ from boto.mturk.connection import MTurkConnection return MTurkConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
:type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.mturk.connection.MTurkConnection` :return: A connection to MTurk
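A hedged sketch: pointing the connection at the MTurk sandbox via boto's ``host`` keyword, which flows through ``**kwargs``; the credentials are placeholders.

conn = connect_mturk(
    aws_access_key_id='AKIA...',  # placeholder
    aws_secret_access_key='...',  # placeholder
    host='mechanicalturk.sandbox.amazonaws.com',
)
print(conn.get_account_balance())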
def delete_editor(userid): """ :param userid: a string representing the user's UW NetID :return: True if request is successful, False otherwise. raise DataFailureException or a corresponding TrumbaException if the request failed or an error code has been returned. """ url = _make_del_account_url(userid) return _process_resp(url, get_sea_resource(url), _is_editor_deleted )
:param userid: a string representing the user's UW NetID :return: True if request is successful, False otherwise. raise DataFailureException or a corresponding TrumbaException if the request failed or an error code has been returned.
def send_to_observer(self, msg, frm): """ Send the message to the observer. :param msg: the message to send :param frm: the name of the node which sent this `msg` """ logger.debug("{} sending message to observer: {}". format(self, (msg, frm))) self._observer.append_input(msg, frm)
Send the message to the observer. :param msg: the message to send :param frm: the name of the node which sent this `msg`
def subsample(self, proposals, targets): """ This method performs the positive/negative sampling, and return the sampled proposals. Note: this function keeps a state. Arguments: proposals (list[BoxList]) targets (list[BoxList]) """ labels, regression_targets = self.prepare_targets(proposals, targets) sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels) proposals = list(proposals) # add corresponding label and regression_targets information to the bounding boxes for labels_per_image, regression_targets_per_image, proposals_per_image in zip( labels, regression_targets, proposals ): proposals_per_image.add_field("labels", labels_per_image) proposals_per_image.add_field( "regression_targets", regression_targets_per_image ) # distributed sampled proposals, that were obtained on all feature maps # concatenated via the fg_bg_sampler, into individual feature map levels for img_idx, (pos_inds_img, neg_inds_img) in enumerate( zip(sampled_pos_inds, sampled_neg_inds) ): img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1) proposals_per_image = proposals[img_idx][img_sampled_inds] proposals[img_idx] = proposals_per_image self._proposals = proposals return proposals
This method performs the positive/negative sampling, and return the sampled proposals. Note: this function keeps a state. Arguments: proposals (list[BoxList]) targets (list[BoxList])
def get_item_project(self, eitem):
    """ Get the project mapping enrichment field.

    The Twitter mapping is pretty special, so it needs a special implementation.
    """
    project = None
    eitem_project = {}
    ds_name = self.get_connector_name()  # data source name in projects map

    if ds_name not in self.prjs_map:
        return eitem_project

    for tag in eitem['hashtags_analyzed']:
        # lcanas: hashtag provided in projects.json file should not be case sensitive T6876
        tags2project = CaseInsensitiveDict(self.prjs_map[ds_name])
        if tag in tags2project:
            project = tags2project[tag]
            break

    if project is None:
        project = DEFAULT_PROJECT

    eitem_project = {"project": project}

    eitem_project.update(self.add_project_levels(project))

    return eitem_project
Get the project mapping enrichment field.

The Twitter mapping is pretty special, so it needs a special implementation.
def ListNames(self): """List the names of all keys and values.""" # TODO: This check is flawed, because the current definition of # "IsDirectory" is the negation of "is a file". One registry path can # actually refer to a key ("directory"), a value of the same name ("file") # and the default value of the key at the same time. if not self.IsDirectory(): return # Handle the special case where no hive is specified and just list the hives if self.hive is None: for name in dir(winreg): if name.startswith("HKEY_"): yield name return try: with OpenKey(self.hive, self.local_path) as key: (self.number_of_keys, self.number_of_values, self.last_modified) = QueryInfoKey(key) found_keys = set() # First keys for i in range(self.number_of_keys): try: key_name = EnumKey(key, i) found_keys.add(key_name) yield key_name except OSError: pass # Now Values for i in range(self.number_of_values): try: name, unused_value, unused_value_type = EnumValue(key, i) # A key might contain a sub-key and value of the same name. Do not # yield the same name twice in this case. With only the name, # the caller cannot differentiate between a key and a value anyway. if name not in found_keys: yield name except OSError: pass except OSError as e: raise IOError("Unable to list key %s: %s" % (self.key_name, e))
List the names of all keys and values.
def print_help(self):
    """
    Print the help menu.
    """
    print('\n  %s %s' % (self._title or self._name, self._version or ''))
    if self._usage:
        print('\n  %s' % self._usage)
    else:
        cmd = self._name
        if hasattr(self, '_parent') and isinstance(self._parent, Command):
            cmd = '%s %s' % (self._parent._name, cmd)
        if self._command_list:
            usage = 'Usage: %s <command> [option]' % cmd
        else:
            usage = 'Usage: %s [option]' % cmd
        pos = ' '.join(['<%s>' % name for name in self._positional_list])
        print('\n  %s %s' % (usage, pos))

    arglen = max(len(o.name) for o in self._option_list)
    arglen += 2

    self.print_title('\n  Options:\n')
    for o in self._option_list:
        print('    %s %s' % (_pad(o.name, arglen), o.description or ''))

    print('')

    if self._command_list:
        self.print_title('  Commands:\n')
        for cmd in self._command_list:
            if isinstance(cmd, Command):
                # pad the raw name once (padding an already-padded string
                # would misalign the description column)
                desc = cmd._description or ''
                print('    %s %s' % (_pad(cmd._name, arglen), desc))
        print('')

    if self._help_footer:
        print(self._help_footer)
        print('')
    return self
Print the help menu.
def _complete_type_chain(self, symbol, fullsymbol): """Suggests completion for the end of a type chain.""" target, targmod = self._get_chain_parent_symbol(symbol, fullsymbol) if target is None: return {} result = {} #We might know what kind of symbol to limit the completion by depending on whether #it was preceded by a "call " for example. Check the context's el_call if symbol != "": if self.context.el_call != "sub": for mkey in target.members: if self._symbol_in(symbol, mkey): result[mkey] = target.members[mkey] for ekey in target.executables: if (self._symbol_in(symbol, ekey)): if self.context.el_call == "sub": if (isinstance(target.executables[ekey], Subroutine)): result[ekey] = target.executables[ekey] else: if (isinstance(target.executables[ekey], Function)): result[ekey] = target.executables[ekey] else: if self.context.el_call != "sub": result.update(target.members) subdict = {k: target.executables[k] for k in target.executables if isinstance(target.executables[k].target, Function)} result.update(subdict) else: subdict = {k: target.executables[k] for k in target.executables if isinstance(target.executables[k].target, Subroutine)} result.update(subdict) return result
Suggests completion for the end of a type chain.
def device_query_update(self, query_id, body, **kwargs):  # noqa: E501
    """Update a device query  # noqa: E501

    Update a specific device query.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.device_query_update(query_id, body, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str query_id: (required)
    :param DeviceQueryPostPutRequest body: Device query update object. (required)
    :return: DeviceQuery
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.device_query_update_with_http_info(query_id, body, **kwargs)  # noqa: E501
    else:
        (data) = self.device_query_update_with_http_info(query_id, body, **kwargs)  # noqa: E501
        return data
Update a device query  # noqa: E501

Update a specific device query.  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True

>>> thread = api.device_query_update(query_id, body, asynchronous=True)
>>> result = thread.get()

:param asynchronous bool
:param str query_id: (required)
:param DeviceQueryPostPutRequest body: Device query update object. (required)
:return: DeviceQuery
         If the method is called asynchronously,
         returns the request thread.
def compute_Pi_V_given_J(self, CDR3_seq, V_usage_mask, J_usage_mask):
    """Compute Pi_V conditioned on J.

    This function returns the Pi array from the model factors of the V genomic
    contributions, P(V, J)*P(delV|V). This corresponds to V(J)_{x_1}.

    For clarity in parsing the algorithm implementation, we include which
    instance attributes are used in the method as 'parameters.'

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by codons_dict).
    V_usage_mask : list
        Indices of the V alleles to be considered in the Pgen computation
    J_usage_mask : list
        Indices of the J alleles to be considered in the Pgen computation

    self.cutV_genomic_CDR3_segs : list of strings
        List of all the V genomic nucleotide sequences trimmed to begin at the
        conserved C residue and with the maximum number of palindromic
        insertions appended.
    self.PVdelV_nt_pos_vec : list of ndarrays
        For each V allele, format P(delV|V) into the correct form for a Pi
        array or V(J)_{x_1}. This is only done for the first and last
        position in each codon.
    self.PVdelV_2nd_nt_pos_per_aa_vec : list of dicts
        For each V allele, and each 'amino acid', format P(V)*P(delV|V) for
        positions in the middle of a codon into the correct form for a Pi
        array or V(J)_{x_1} given the 'amino acid'.
    self.PVJ : ndarray
        Joint probability distribution of V and J, P(V, J).

    Returns
    -------
    Pi_V_given_J : list
        List of (4, 3L) ndarrays corresponding to V(J)_{x_1}.
    max_V_align: int
        Maximum alignment of the CDR3_seq to any genomic V allele allowed by
        V_usage_mask.

    """
    #Note, the cutV_genomic_CDR3_segs INCLUDE the palindromic insertions and thus are max_palindrome nts longer than the template.
    #furthermore, the genomic sequence should be pruned to start at the conserved C

    Pi_V_given_J = [np.zeros((4, len(CDR3_seq)*3)) for i in J_usage_mask] #Holds the aggregate weight for each nt possibility and position
    alignment_lengths = []
    for V_in in V_usage_mask:
        try:
            cutV_gen_seg = self.cutV_genomic_CDR3_segs[V_in]
        except IndexError:
            print 'Check provided V usage mask. Contains indices out of allowed range.'
            continue
        current_alignment_length = self.max_nt_to_aa_alignment_left(CDR3_seq, cutV_gen_seg)
        alignment_lengths += [current_alignment_length]
        current_Pi_V = np.zeros((4, len(CDR3_seq)*3))

        if current_alignment_length > 0:
            #For first and last nt in a codon use PVdelV_nt_pos_vec
            current_Pi_V[:, :current_alignment_length] = self.PVdelV_nt_pos_vec[V_in][:, :current_alignment_length]
            for pos in range(1, current_alignment_length, 3): #for middle nt use PVdelV_2nd_nt_pos_per_aa_vec
                current_Pi_V[:, pos] = self.PVdelV_2nd_nt_pos_per_aa_vec[V_in][CDR3_seq[pos/3]][:, pos]
            for j, J_in in enumerate(J_usage_mask):
                Pi_V_given_J[j][:, :current_alignment_length] += self.PVJ[V_in, J_in]*current_Pi_V[:, :current_alignment_length]

    return Pi_V_given_J, max(alignment_lengths)
Compute Pi_V conditioned on J. This function returns the Pi array from the model factors of the V genomic contributions, P(V, J)*P(delV|V). This corresponds to V(J)_{x_1}. For clarity in parsing the algorithm implementation, we include which instance attributes are used in the method as 'parameters.' Parameters ---------- CDR3_seq : str CDR3 sequence composed of 'amino acids' (single character symbols each corresponding to a collection of codons as given by codons_dict). V_usage_mask : list Indices of the V alleles to be considered in the Pgen computation J_usage_mask : list Indices of the J alleles to be considered in the Pgen computation self.cutV_genomic_CDR3_segs : list of strings List of all the V genomic nucleotide sequences trimmed to begin at the conserved C residue and with the maximum number of palindromic insertions appended. self.PVdelV_nt_pos_vec : list of ndarrays For each V allele, format P(delV|V) into the correct form for a Pi array or V(J)_{x_1}. This is only done for the first and last position in each codon. self.PVdelV_2nd_nt_pos_per_aa_vec : list of dicts For each V allele, and each 'amino acid', format P(V)*P(delV|V) for positions in the middle of a codon into the correct form for a Pi array or V(J)_{x_1} given the 'amino acid'. self.PVJ : ndarray Joint probability distribution of V and J, P(V, J). Returns ------- Pi_V_given_J : list List of (4, 3L) ndarrays corresponding to V(J)_{x_1}. max_V_align: int Maximum alignment of the CDR3_seq to any genomic V allele allowed by V_usage_mask.
def validate_url(self, url):
    """Validate the :class:`~urllib.parse.ParseResult` object.

    This method will make sure the :meth:`~brownant.app.BrownAnt.parse_url`
    could work as expected even when it meets an unexpected URL string.

    :param url: the parsed url.
    :type url: :class:`~urllib.parse.ParseResult`
    """
    # fix up the non-ascii path
    url_path = to_bytes_safe(url.path)
    url_path = urllib.parse.quote(url_path, safe=b"/%")

    # fix up the non-ascii query
    url_query = to_bytes_safe(url.query)
    url_query = urllib.parse.quote(url_query, safe=b"?=&")

    url = urllib.parse.ParseResult(url.scheme, url.netloc, url_path,
                                   url.params, url_query, url.fragment)

    # validate the components of URL
    has_hostname = url.hostname is not None and len(url.hostname) > 0
    has_http_scheme = url.scheme in ("http", "https")
    has_path = not len(url.path) or url.path.startswith("/")

    if not (has_hostname and has_http_scheme and has_path):
        raise NotSupported("invalid url: %s" % repr(url))

    return url
Validate the :class:`~urllib.parse.ParseResult` object.

This method will make sure the :meth:`~brownant.app.BrownAnt.parse_url`
could work as expected even when it meets an unexpected URL string.

:param url: the parsed url.
:type url: :class:`~urllib.parse.ParseResult`
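A short sketch of the two documented behaviors; ``ant`` stands in for whatever object exposes this method.

from urllib.parse import urlparse

url = ant.validate_url(urlparse("http://example.com/路径?q=值"))
print(url.path)  # percent-quoted, ASCII-safe form of the non-ascii path

ant.validate_url(urlparse("ftp://example.com/file"))  # raises NotSupported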
def _persisted_last_epoch(self) -> int: """ Return number of last epoch already calculated """ epoch_number = 0 self._make_sure_dir_exists() for x in os.listdir(self.model_config.checkpoint_dir()): match = re.match('checkpoint_(\\d+)\\.data', x) if match: idx = int(match[1]) if idx > epoch_number: epoch_number = idx return epoch_number
Return number of last epoch already calculated
def init(FILE):
    """
    Read config file

    :param FILE: Absolute path to config file (incl. filename)
    :type FILE: str
    """
    global _loaded
    # configparser's read() silently skips missing files and returns the list
    # of files it actually parsed, so check that result rather than relying on
    # an exception being raised.
    try:
        read_ok = cfg.read(FILE)
    except Exception:
        read_ok = []
    if read_ok:
        _loaded = True
    else:
        file_not_found_message(FILE)
Read config file :param FILE: Absolute path to config file (incl. filename) :type FILE: str
def list_themes(directory=None): """Gets a list of the installed themes.""" repo = require_repo(directory) path = os.path.join(repo, themes_dir) return os.listdir(path) if os.path.isdir(path) else None
Gets a list of the installed themes.
def fire_lasers( self, modules: Optional[List[str]] = None, verbose_report: bool = False, transaction_count: Optional[int] = None, ) -> Report: """ :param modules: The analysis modules which should be executed :param verbose_report: Gives out the transaction sequence of the vulnerability :param transaction_count: The amount of transactions to be executed :return: The Report class which contains the all the issues/vulnerabilities """ all_issues = [] # type: List[Issue] SolverStatistics().enabled = True exceptions = [] for contract in self.contracts: StartTime() # Reinitialize start time for new contracts try: sym = SymExecWrapper( contract, self.address, self.strategy, dynloader=DynLoader( self.eth, storage_loading=self.onchain_storage_access, contract_loading=self.dynld, ), max_depth=self.max_depth, execution_timeout=self.execution_timeout, create_timeout=self.create_timeout, transaction_count=transaction_count, modules=modules, compulsory_statespace=False, enable_iprof=self.enable_iprof, ) issues = fire_lasers(sym, modules) except KeyboardInterrupt: log.critical("Keyboard Interrupt") issues = retrieve_callback_issues(modules) except Exception: log.critical( "Exception occurred, aborting analysis. Please report this issue to the Mythril GitHub page.\n" + traceback.format_exc() ) issues = retrieve_callback_issues(modules) exceptions.append(traceback.format_exc()) for issue in issues: issue.add_code_info(contract) all_issues += issues log.info("Solver statistics: \n{}".format(str(SolverStatistics()))) source_data = Source() source_data.get_source_from_contracts_list(self.contracts) # Finally, output the results report = Report(verbose_report, contracts=self.contracts, exceptions=exceptions) for issue in all_issues: report.append_issue(issue) return report
:param modules: The analysis modules which should be executed :param verbose_report: Gives out the transaction sequence of the vulnerability :param transaction_count: The amount of transactions to be executed :return: The Report class which contains the all the issues/vulnerabilities
def finalize(self):
    """finalize for TransposedConsumer"""
    super(TransposedConsumer, self).finalize()
    # wrap in list() so the transpose is materialized on Python 3 as well
    self.result = list(map(list, zip(*self.result)))
finalize for TransposedConsumer
def handler(event, context): """ Historical {{cookiecutter.technology_name}} event collector. This collector is responsible for processing Cloudwatch events and polling events. """ records = deserialize_records(event['Records']) # Split records into two groups, update and delete. # We don't want to query for deleted records. update_records, delete_records = group_records_by_type(records) capture_delete_records(delete_records) # filter out error events update_records = [e for e in update_records if not e['detail'].get('errorCode')] # group records by account for more efficient processing log.debug('Update Records: {records}'.format(records=records)) capture_update_records(update_records)
Historical {{cookiecutter.technology_name}} event collector. This collector is responsible for processing Cloudwatch events and polling events.
def get_mac(self): ''' Obtain the device's mac address. ''' ifreq = struct.pack('16sH14s', self.name, AF_UNIX, b'\x00'*14) res = fcntl.ioctl(sockfd, SIOCGIFHWADDR, ifreq) address = struct.unpack('16sH14s', res)[2] mac = struct.unpack('6B8x', address) return ":".join(['%02X' % i for i in mac])
Obtain the device's mac address.
def setPermanences(self, segments, presynapticCellsBySource, permanence): """ Set the permanence of a specific set of synapses. Any synapses that don't exist will be initialized. Any existing synapses will be overwritten. Conceptually, this method takes a list of [segment, presynapticCell] pairs and initializes their permanence. For each segment, one synapse is added (although one might be added for each "source"). To add multiple synapses to a segment, include it in the list multiple times. The total number of affected synapses is len(segments)*number_of_sources*1. @param segments (numpy array) One segment for each synapse that should be added @param presynapticCellsBySource (dict of numpy arrays) One presynaptic cell for each segment. Example: {"customInputName1": np.array([42, 69])} @param permanence (float) The permanence to assign the synapse """ permanences = np.repeat(np.float32(permanence), len(segments)) for source, connections in self.connectionsBySource.iteritems(): if source in presynapticCellsBySource: connections.matrix.setElements(segments, presynapticCellsBySource[source], permanences)
Set the permanence of a specific set of synapses. Any synapses that don't exist will be initialized. Any existing synapses will be overwritten. Conceptually, this method takes a list of [segment, presynapticCell] pairs and initializes their permanence. For each segment, one synapse is added (although one might be added for each "source"). To add multiple synapses to a segment, include it in the list multiple times. The total number of affected synapses is len(segments)*number_of_sources*1. @param segments (numpy array) One segment for each synapse that should be added @param presynapticCellsBySource (dict of numpy arrays) One presynaptic cell for each segment. Example: {"customInputName1": np.array([42, 69])} @param permanence (float) The permanence to assign the synapse
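A sketch using the docstring's own example values; ``connections`` is a hypothetical instance of the containing class.

import numpy as np

segments = np.array([0, 0], dtype=np.uint32)
presynaptic = {"customInputName1": np.array([42, 69], dtype=np.uint32)}
connections.setPermanences(segments, presynaptic, 0.21)  # both synapses get 0.21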
def load_indexed_audio(self, indexed_audio_file_abs_path): """ Parameters ---------- indexed_audio_file_abs_path : str """ with open(indexed_audio_file_abs_path, "rb") as f: self.__timestamps = pickle.load(f)
Parameters ---------- indexed_audio_file_abs_path : str
def forward(self, inputs, context, inference=False): """ Execute the decoder. :param inputs: tensor with inputs to the decoder :param context: state of encoder, encoder sequence lengths and hidden state of decoder's LSTM layers :param inference: if True stores and repackages hidden state """ self.inference = inference enc_context, enc_len, hidden = context hidden = self.init_hidden(hidden) x = self.embedder(inputs) x, h, attn, scores = self.att_rnn(x, hidden[0], enc_context, enc_len) self.append_hidden(h) x = torch.cat((x, attn), dim=2) x = self.dropout(x) x, h = self.rnn_layers[0](x, hidden[1]) self.append_hidden(h) for i in range(1, len(self.rnn_layers)): residual = x x = torch.cat((x, attn), dim=2) x = self.dropout(x) x, h = self.rnn_layers[i](x, hidden[i + 1]) self.append_hidden(h) x = x + residual x = self.classifier(x) hidden = self.package_hidden() return x, scores, [enc_context, enc_len, hidden]
Execute the decoder. :param inputs: tensor with inputs to the decoder :param context: state of encoder, encoder sequence lengths and hidden state of decoder's LSTM layers :param inference: if True stores and repackages hidden state
def search_kv_store(self, key): """Search for a key in the key-value store. :param key: string :rtype: string """ data = { 'operation': 'RETRIEVE', 'key': key } return self.post_json(self.make_url("/useragent-kv"), data)['value']
Search for a key in the key-value store. :param key: string :rtype: string