code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def create_logstore(self, project_name, logstore_name, ttl=30, shard_count=2,
                    enable_tracking=False, append_meta=False, auto_split=True,
                    max_split_shard=64, preserve_storage=False):
    """ create log store
    Unsuccessful operation will cause a LogException.

    :type project_name: string
    :param project_name: the Project name

    :type logstore_name: string
    :param logstore_name: the logstore name

    :type ttl: int
    :param ttl: the life cycle of log in the logstore in days, default 30, up to 3650

    :type shard_count: int
    :param shard_count: the shard count of the logstore to create, default 2

    :type enable_tracking: bool
    :param enable_tracking: enable web tracking, default is False

    :type append_meta: bool
    :param append_meta: allow to append meta info (server received time and IP for external IP to each received log)

    :type auto_split: bool
    :param auto_split: auto split shard, max_split_shard will be 64 by default is True

    :type max_split_shard: int
    :param max_split_shard: max shard to split, up to 64

    :type preserve_storage: bool
    :param preserve_storage: if always persist data, TTL will be ignored.

    :return: CreateLogStoreResponse

    :raise: LogException
    """
    # Clamp an out-of-range split limit to the documented maximum of 64.
    if auto_split and (max_split_shard <= 0 or max_split_shard >= 64):
        max_split_shard = 64
    # "Permanent" storage is expressed to the service as the maximum TTL (3650 days).
    if preserve_storage:
        ttl = 3650

    params = {}
    resource = "/logstores"
    headers = {"x-log-bodyrawsize": '0', "Content-Type": "application/json"}
    body = {"logstoreName": logstore_name, "ttl": int(ttl), "shardCount": int(shard_count),
            "enable_tracking": enable_tracking, "autoSplit": auto_split,
            "maxSplitShard": max_split_shard, "appendMeta": append_meta
            }

    body_str = six.b(json.dumps(body))

    try:
        (resp, header) = self._send("POST", project_name, body_str, resource, params, headers)
    except LogException as ex:
        # Older service versions reject the newer optional keys ("autoSplit" etc.);
        # retry once with only the basic parameter set.
        if ex.get_error_code() == "LogStoreInfoInvalid" and ex.get_error_message() == "redundant key exist in json":
            logger.warning("LogStoreInfoInvalid, will retry with basic parameters. \ndetail: {0}".format(ex))
            body = {"logstoreName": logstore_name, "ttl": int(ttl),
                    "shardCount": int(shard_count), "enable_tracking": enable_tracking
                    }
            body_str = six.b(json.dumps(body))
            (resp, header) = self._send("POST", project_name, body_str, resource, params, headers)
        else:
            raise

    return CreateLogStoreResponse(header, resp)
create log store Unsuccessful opertaion will cause an LogException. :type project_name: string :param project_name: the Project name :type logstore_name: string :param logstore_name: the logstore name :type ttl: int :param ttl: the life cycle of log in the logstore in days, default 30, up to 3650 :type shard_count: int :param shard_count: the shard count of the logstore to create, default 2 :type enable_tracking: bool :param enable_tracking: enable web tracking, default is False :type append_meta: bool :param append_meta: allow to append meta info (server received time and IP for external IP to each received log) :type auto_split: bool :param auto_split: auto split shard, max_split_shard will be 64 by default is True :type max_split_shard: int :param max_split_shard: max shard to split, up to 64 :type preserve_storage: bool :param preserve_storage: if always persist data, TTL will be ignored. :return: CreateLogStoreResponse :raise: LogException
def normalize_underscore_case(name):
    """Turn an underscore-separated descriptor into a readable title.

    e.g. 'NAGIOS_SERVER' becomes 'Nagios Server', and 'host_components'
    becomes 'Host Components'.
    """
    lowered = name.lower()

    def _space_and_capitalize(match):
        # each '_x' pair becomes ' X'
        return ' ' + match.group(1).upper()

    spaced = re.sub(r'_(\w)', _space_and_capitalize, lowered)
    # capitalize the very first character as well
    return spaced[0].upper() + spaced[1:]
Normalize an underscore-separated descriptor to something more readable. i.e. 'NAGIOS_SERVER' becomes 'Nagios Server', and 'host_components' becomes 'Host Components'
def socks_username(self, value):
    """
    Sets socks proxy username setting.

    :Args:
     - value: The socks proxy username value.
    """
    # Setting any socks option forces the proxy into manual configuration;
    # the compatibility check raises if the current type conflicts with MANUAL.
    self._verify_proxy_type_compatibility(ProxyType.MANUAL)
    self.proxyType = ProxyType.MANUAL
    self.socksUsername = value
Sets socks proxy username setting. :Args: - value: The socks proxy username value.
def _add_thousand_g(self, variant_obj, info_dict):
    """Add the thousand genomes frequency

        Args:
            variant_obj (puzzle.models.Variant)
            info_dict (dict): A info dictionary
    """
    thousand_g = info_dict.get('1000GAF')
    if thousand_g:
        logger.debug("Updating thousand_g to: {0}".format(
            thousand_g))
        variant_obj.thousand_g = float(thousand_g)
        # NOTE(review): reads the value back via variant_obj.get('thousand_g')
        # rather than using the attribute just assigned — presumably Variant is
        # dict-like so both resolve to the same float; confirm against the model.
        variant_obj.add_frequency('1000GAF', variant_obj.get('thousand_g'))
Add the thousand genomes frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary
def to_snake(camel):
    """TimeSkill -> time_skill"""
    if not camel:
        return camel
    pieces = []
    for ch in camel:
        # prefix every uppercase letter with an underscore
        if 'A' <= ch <= 'Z':
            pieces.append('_')
        pieces.append(ch)
    snake = ''.join(pieces).lower()
    # drop the spurious leading underscore when the input started uppercase
    return snake[1:] if camel[0].isupper() else snake
TimeSkill -> time_skill
def create_type_variant(cls, type_name, type_converter):
    """
    Create type variants for types with a cardinality field.
    The new type converters are based on the type converter with
    cardinality=1.

    .. code-block:: python

        import parse

        @parse.with_pattern(r'\d+')
        def parse_number(text):
            return int(text)

        new_type = CardinalityFieldTypeBuilder.create_type_variant(
                        "Number+", parse_number)
        new_type = CardinalityFieldTypeBuilder.create_type_variant(
                        "Number+", dict(Number=parse_number))

    :param type_name:  Type name with cardinality field suffix.
    :param type_converter:  Type converter or type dictionary.
    :return: Type converter variant (function).
    :raises: ValueError, if type_name does not end with CardinalityField
    :raises: MissingTypeError, if type_converter is missing in type_dict
    """
    assert isinstance(type_name, six.string_types)
    if not CardinalityField.matches_type(type_name):
        message = "type_name='%s' has no CardinalityField" % type_name
        raise ValueError(message)

    # Split e.g. "Number+" into the base name "Number" and its cardinality.
    primary_name, cardinality = CardinalityField.split_type(type_name)
    if isinstance(type_converter, dict):
        # A type dictionary was passed: look up the base converter by name.
        type_dict = type_converter
        type_converter = type_dict.get(primary_name, None)
        if not type_converter:
            raise MissingTypeError(primary_name)

    assert callable(type_converter)
    # Wrap the cardinality=1 converter into one honoring the requested cardinality.
    type_variant = TypeBuilder.with_cardinality(cardinality,
                                                type_converter,
                                                listsep=cls.listsep)
    type_variant.name = type_name
    return type_variant
Create type variants for types with a cardinality field. The new type converters are based on the type converter with cardinality=1. .. code-block:: python import parse @parse.with_pattern(r'\d+') def parse_number(text): return int(text) new_type = CardinalityFieldTypeBuilder.create_type_variant( "Number+", parse_number) new_type = CardinalityFieldTypeBuilder.create_type_variant( "Number+", dict(Number=parse_number)) :param type_name: Type name with cardinality field suffix. :param type_converter: Type converter or type dictionary. :return: Type converter variant (function). :raises: ValueError, if type_name does not end with CardinalityField :raises: MissingTypeError, if type_converter is missing in type_dict
def array_to_csv(array_like):
    # type: (np.array or Iterable or int or float) -> str
    """Serialize an array-like object to a CSV string.

    For what counts as "array like", see:
    https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays

    Args:
        array_like (np.array or Iterable or int or float): object to serialize.

    Returns:
        (str): the object rendered as CSV text.
    """
    buffer = StringIO()
    # '%s' lets numpy stringify each element regardless of dtype
    np.savetxt(buffer, array_like, delimiter=',', fmt='%s')
    return buffer.getvalue()
Convert an array like object to CSV. To understand better what an array like object is see: https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays Args: array_like (np.array or Iterable or int or float): array like object to be converted to CSV. Returns: (str): object serialized to CSV
def get_view(self, columns: Sequence[str], query: str=None) -> PopulationView:
    """Return a configured PopulationView.

    The view is always restricted to tracked simulants unless the caller
    explicitly asks for the 'tracked' column.

    Notes
    -----
    Client code should only need this (and only through the version exposed as
    ``population_view`` on the builder during setup) if it uses dynamically
    generated column names that aren't known at definition time. Otherwise
    components should use ``uses_columns``.
    """
    if 'tracked' not in columns:
        # BUG FIX: the original concatenated query + 'and tracked == True'
        # without a separating space, yielding an unparseable pandas query
        # such as "age > 5and tracked == True".
        query_with_track = query + ' and tracked == True' if query else 'tracked == True'
        return PopulationView(self, columns, query_with_track)
    return PopulationView(self, columns, query)
Return a configured PopulationView Notes ----- Client code should only need this (and only through the version exposed as ``population_view`` on the builder during setup) if it uses dynamically generated column names that aren't known at definition time. Otherwise components should use ``uses_columns``.
def yzy_to_zyz(xi, theta1, theta2, eps=1e-9):  # pylint: disable=invalid-name
    """Express a Y.Z.Y single qubit gate as a Z.Y.Z gate.

    Solve the equation

    .. math::

    Ry(theta1).Rz(xi).Ry(theta2) = Rz(phi).Ry(theta).Rz(lambda)

    for theta, phi, and lambda. Return a solution theta, phi, and lambda.
    """
    # Represent the Y.Z.Y rotation as a quaternion, then re-read it as Z.Y.Z angles.
    quaternion_yzy = quaternion_from_euler([theta1, xi, theta2], 'yzy')
    euler = quaternion_yzy.to_zyz()
    quaternion_zyz = quaternion_from_euler(euler, 'zyz')
    # output order different than rotation order
    out_angles = (euler[1], euler[0], euler[2])
    # |<q1, q2>| == 1 iff both quaternions describe the same rotation (up to sign).
    abs_inner = abs(quaternion_zyz.data.dot(quaternion_yzy.data))
    if not np.allclose(abs_inner, 1, eps):
        raise TranspilerError('YZY and ZYZ angles do not give same rotation matrix.')
    # Snap numerically-tiny angles to exactly zero.
    out_angles = tuple(0 if np.abs(angle) < _CHOP_THRESHOLD else angle
                       for angle in out_angles)
    return out_angles
Express a Y.Z.Y single qubit gate as a Z.Y.Z gate. Solve the equation .. math:: Ry(theta1).Rz(xi).Ry(theta2) = Rz(phi).Ry(theta).Rz(lambda) for theta, phi, and lambda. Return a solution theta, phi, and lambda.
def host(environ):  # pragma: no cover
    """
    Reconstruct host from environment. A modified version
    of http://www.python.org/dev/peps/pep-0333/#url-reconstruction
    """
    scheme = environ['wsgi.url_scheme']
    parts = [scheme, '://']

    http_host = environ.get('HTTP_HOST')
    if http_host:
        # HTTP_HOST already carries any non-default port
        parts.append(http_host)
    else:
        parts.append(environ['SERVER_NAME'])
        port = environ['SERVER_PORT']
        # only append the port when it differs from the scheme's default
        default_port = '443' if scheme == 'https' else '80'
        if port != default_port:
            parts.append(':' + port)

    parts.append(quote(environ.get('SCRIPT_NAME', '')))
    return ''.join(parts)
Reconstruct host from environment. A modified version of http://www.python.org/dev/peps/pep-0333/#url-reconstruction
def zip_built(outdir):
    """Packages the build folder into a zip.

    The archive is named '<website_name>-build-<date>.zip' and written next to
    the project's config.py (in ``cwd``). Exits the process with a message when
    no configuration exists or it is missing ``website_name``.
    """
    print("Zipping the built files!")

    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
        try:
            from config import website_name
        except:
            sys.exit(
                "Some of the configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")

    # Locate the build folder and compose the destination zip path
    # (shutil.make_archive appends the '.zip' extension itself).
    build_dir = os.path.join(cwd, outdir)
    zip_dir = os.path.join(cwd, website_name.replace(" ", "_") +
                           "-build-" + str(datetime.now().date()))
    if os.path.exists(build_dir):
        shutil.make_archive(zip_dir, 'zip', build_dir)
    else:
        print("The " + outdir +
              "/ folder could not be found! Have you run 'blended build' yet?")
Packages the build folder into a zip
def count_empty(self, field):
    """
    Count empty ('') values in the given column and report the result.

    :param field: column name to inspect in ``self.df``
    """
    try:
        df2 = self.df[[field]]
        # indices of cells whose value is the empty string
        vals = where(df2.applymap(lambda x: x == ''))
        num = len(vals[0])
    except Exception as e:
        self.err(e, "Can not count empty values")
        # BUG FIX: without this return, execution fell through to self.ok()
        # below and raised NameError because `num` was never assigned.
        return
    self.ok("Found", num, "empty rows in column " + field)
List of empty row indices
def tryMatchedAnchor(self, block, autoIndent):
    """
    Handle Enter pressed directly before a closing bracket: find out whether
    we pressed return in something like {} or () or [] and indent properly:
    {}
    becomes:
    {
      |
    }

    Returns the indentation string for the current block, or None when this
    rule does not apply.
    """
    oposite = { ')': '(', '}': '{', ']': '['}

    char = self._firstNonSpaceChar(block)
    if not char in oposite.keys():
        return None

    # we pressed enter in e.g. ()
    try:
        foundBlock, foundColumn = self.findBracketBackward(block, 0, oposite[char])
    except ValueError:
        return None

    if autoIndent:
        # when aligning only, don't be too smart and just take the indent level of the open anchor
        return self._blockIndent(foundBlock)

    lastChar = self._lastNonSpaceChar(block.previous())
    charsMatch = ( lastChar == '(' and char == ')' ) or \
                 ( lastChar == '{' and char == '}' ) or \
                 ( lastChar == '[' and char == ']' )

    indentation = None
    if (not charsMatch) and char != '}':
        # otherwise check whether the last line has the expected
        # indentation, if not use it instead and place the closing
        # anchor on the level of the opening anchor
        expectedIndentation = self._increaseIndent(self._blockIndent(foundBlock))
        actualIndentation = self._increaseIndent(self._blockIndent(block.previous()))
        indentation = None
        if len(expectedIndentation) <= len(actualIndentation):
            if lastChar == ',':
                # use indentation of last line instead and place closing anchor
                # in same column of the opening anchor
                self._qpart.insertText((block.blockNumber(), self._firstNonSpaceColumn(block.text())), '\n')
                self._qpart.cursorPosition = (block.blockNumber(), len(actualIndentation))
                # indent closing anchor
                self._setBlockIndent(block.next(), self._makeIndentAsColumn(foundBlock, foundColumn))
                indentation = actualIndentation
            elif expectedIndentation == self._blockIndent(block.previous()):
                # otherwise don't add a new line, just use indentation of closing anchor line
                indentation = self._blockIndent(foundBlock)
            else:
                # otherwise don't add a new line, just align on closing anchor
                indentation = self._makeIndentAsColumn(foundBlock, foundColumn)
            dbg("tryMatchedAnchor: success in line %d" % foundBlock.blockNumber())
            return indentation

    # otherwise we i.e. pressed enter between (), [] or when we enter before curly brace
    # increase indentation and place closing anchor on the next line
    indentation = self._blockIndent(foundBlock)
    self._qpart.replaceText((block.blockNumber(), 0), len(self._blockIndent(block)), "\n")
    self._qpart.cursorPosition = (block.blockNumber(), len(indentation))
    # indent closing brace
    self._setBlockIndent(block.next(), indentation)
    dbg("tryMatchedAnchor: success in line %d" % foundBlock.blockNumber())
    return self._increaseIndent(indentation)
find out whether we pressed return in something like {} or () or [] and indent properly: {} becomes: { | }
def _transform_variable_to_expression(expression, node, context):
    """Transform a Variable compiler expression into its SQLAlchemy expression representation.

    Args:
        expression: expression, Variable compiler expression.
        node: SqlNode, the SqlNode the expression applies to.
        context: CompilationContext, global compilation state and metadata.

    Returns:
        Expression, SQLAlchemy expression.
    """
    name = expression.variable_name
    if name.startswith(u'$'):
        # strip the '$' prefix; the remainder is the bind-parameter key
        return bindparam(name[1:])
    raise AssertionError(u'Unexpectedly received variable name {} that is not '
                         u'prefixed with "$"'.format(name))
Transform a Variable compiler expression into its SQLAlchemy expression representation. Args: expression: expression, Variable compiler expression. node: SqlNode, the SqlNode the expression applies to. context: CompilationContext, global compilation state and metadata. Returns: Expression, SQLAlchemy expression.
def _get_template_texts(source_list=None,
                        template='jinja',
                        defaults=None,
                        context=None,
                        **kwargs):
    '''
    Iterate a list of sources and process them as templates.
    Returns a list of 'chunks' containing the rendered templates.

    :param source_list: iterable of (source, source_hash) pairs to render
    :param template: templating engine name passed to cp.get_template
    :param defaults: base context values for rendering
    :param context: values merged over ``defaults`` before rendering
    :return: state-style dict; rendered chunks are in ret['data']
    '''
    ret = {'name': '_get_template_texts',
           'changes': {},
           'result': True,
           'comment': '',
           'data': []}

    if source_list is None:
        return _error(ret,
                      '_get_template_texts called with empty source_list')

    txtl = []

    for (source, source_hash) in source_list:
        # merge caller context over the defaults for this source
        context_dict = defaults if defaults else {}
        if context:
            context_dict = salt.utils.dictupdate.merge(context_dict, context)
        # render the template to a temporary file on the minion
        rndrd_templ_fn = __salt__['cp.get_template'](
            source,
            '',
            template=template,
            saltenv=__env__,
            context=context_dict,
            **kwargs
        )
        log.debug('cp.get_template returned %s (Called with: %s)',
                  rndrd_templ_fn, source)
        if rndrd_templ_fn:
            tmplines = None
            with salt.utils.files.fopen(rndrd_templ_fn, 'rb') as fp_:
                tmplines = fp_.read()
                tmplines = salt.utils.stringutils.to_unicode(tmplines)
                tmplines = tmplines.splitlines(True)
            if not tmplines:
                msg = 'Failed to read rendered template file {0} ({1})'.format(
                    rndrd_templ_fn, source)
                log.debug(msg)
                ret['name'] = source
                return _error(ret, msg)
            txtl.append(''.join(tmplines))
        else:
            msg = 'Failed to load template file {0}'.format(source)
            log.debug(msg)
            ret['name'] = source
            return _error(ret, msg)

    ret['data'] = txtl
    return ret
Iterate a list of sources and process them as templates. Returns a list of 'chunks' containing the rendered templates.
def bottom(self):
    """
    The row index that marks the bottom extent of the vertical span of this
    cell. This is one greater than the index of the bottom-most row of the
    span, similar to how a slice of the cell's rows would be specified.
    """
    if self.vMerge is not None:
        # walk down through cells that CONTINUE this vertical merge
        tc_below = self._tc_below
        if tc_below is not None and tc_below.vMerge == ST_Merge.CONTINUE:
            return tc_below.bottom
    # non-merged cell (or last cell of the span): span ends after this row
    return self._tr_idx + 1
The row index that marks the bottom extent of the vertical span of this cell. This is one greater than the index of the bottom-most row of the span, similar to how a slice of the cell's rows would be specified.
def frequency(data, output='spectraldensity', scaling='power', sides='one',
              taper=None, halfbandwidth=3, NW=None, duration=None,
              overlap=0.5, step=None, detrend='linear', n_fft=None,
              log_trans=False, centend='mean'):
    """Compute the power spectral density (PSD, output='spectraldensity',
    scaling='power'), or energy spectral density (ESD,
    output='spectraldensity', scaling='energy') or the complex fourier
    transform (output='complex', sides='two')

    Parameters
    ----------
    data : instance of ChanTime
        one of the datatypes
    detrend : str
        None (no detrending), 'constant' (remove mean), 'linear' (remove
        linear trend)
    output : str
        'spectraldensity' or 'csd' or 'complex'
        'spectraldensity' meaning the autospectrum or auto-spectral density,
        a special case of 'csd' (cross-spectral density), where the signal is
        cross-correlated with itself
        if 'csd', both channels in data are used as input
    sides : str
        'one' or 'two', where 'two' implies negative frequencies
    scaling : str
        'power' (units: V ** 2 / Hz), 'energy' (units: V ** 2), 'fieldtrip',
        'chronux'
    taper : str
        Taper to use, commonly used tapers are 'boxcar', 'hann', 'dpss'
    halfbandwidth : int
        (only if taper='dpss') Half bandwidth (in Hz), frequency smoothing
        will be from +halfbandwidth to -halfbandwidth
    NW : int
        (only if taper='dpss') Normalized half bandwidth
        (NW = halfbandwidth * dur). Number of DPSS tapers is 2 * NW - 1.
        If specified, NW takes precedence over halfbandwidth
    duration : float, in s
        If not None, it divides the signal in epochs of this length (in
        seconds) and then average over the PSD / ESD (not the complex result)
    overlap : float, between 0 and 1
        The amount of overlap between epochs (0.5 = 50%, 0.95 = almost
        complete overlap).
    step : float, in s
        step in seconds between epochs (alternative to overlap)
    n_fft: int
        Length of FFT, in samples. If less than input axis, input is cropped.
        If longer than input axis, input is padded with zeros. If None, FFT
        length set to axis length.
    log_trans : bool
        If True, spectral values will be natural log-transformed. The
        transformation is applied before averaging (or taking the median).
    centend : str
        (only if duration is not None). Central tendency measure to use,
        either mean (arithmetic) or median.

    Returns
    -------
    instance of ChanFreq
        If output='complex', there is an additional dimension ('taper') which
        is useful for 'dpss' but it's also present for all the other tapers.

    Raises
    ------
    TypeError
        If the data does not have a 'time' axis. It might work in the future
        on other axes, but I cannot imagine how.
    ValueError
        If you use duration (to create multiple epochs) and output='complex',
        because it does not average the complex output of multiple epochs.

    Notes
    -----
    See extensive notes at wonambi.trans.frequency._fft

    It uses sampling frequency as specified in s_freq, it does not
    recompute the sampling frequency based on the time axis.

    Use of log or median for Welch's method is included based on
    recommendations from Izhikevich et al., bioRxiv, 2018.
    """
    if output not in ('spectraldensity', 'complex', 'csd'):
        # BUG FIX: the second literal was not an f-string, so '{output}' was
        # emitted verbatim instead of the offending value.
        raise TypeError(f'output can be "spectraldensity", "complex" or "csd",'
                        f' not "{output}"')
    if 'time' not in data.list_of_axes:
        raise TypeError('\'time\' is not in the axis ' + str(data.list_of_axes))
    if len(data.list_of_axes) != data.index_of('time') + 1:
        raise TypeError('\'time\' should be the last axis')  # this might be improved

    if duration is not None and output == 'complex':
        raise ValueError('cannot average the complex spectrum over multiple epochs')

    if output == 'csd' and data.number_of('chan') != 2:
        raise ValueError('CSD can only be computed between two channels')

    if duration is not None:
        # epoch length and hop size in samples
        nperseg = int(duration * data.s_freq)
        if step is not None:
            nstep = int(step * data.s_freq)
        else:
            nstep = nperseg - int(overlap * nperseg)

    freq = ChanFreq()
    freq.attr = deepcopy(data.attr)
    freq.s_freq = data.s_freq
    freq.start_time = data.start_time
    freq.axis['chan'] = copy(data.axis['chan'])
    freq.axis['freq'] = empty(data.number_of('trial'), dtype='O')
    if output == 'complex':
        freq.axis['taper'] = empty(data.number_of('trial'), dtype='O')
    freq.data = empty(data.number_of('trial'), dtype='O')

    for i in range(data.number_of('trial')):
        x = data(trial=i)
        if duration is not None:
            x = _create_subepochs(x, nperseg, nstep)

        f, Sxx = _fft(x,
                      s_freq=data.s_freq,
                      detrend=detrend,
                      taper=taper,
                      output=output,
                      sides=sides,
                      scaling=scaling,
                      halfbandwidth=halfbandwidth,
                      NW=NW,
                      n_fft=n_fft)

        if log_trans:
            Sxx = log(Sxx)

        if duration is not None:
            # collapse the epoch axis with the chosen central tendency
            if centend == 'mean':
                Sxx = Sxx.mean(axis=-2)
            elif centend == 'median':
                Sxx = median(Sxx, axis=-2)
            else:
                raise ValueError('Invalid central tendency measure. '
                                 'Use mean or median.')

        freq.axis['freq'][i] = f
        if output == 'complex':
            freq.axis['taper'][i] = arange(Sxx.shape[-1])
        if output == 'csd':
            # the CSD result is a single pseudo-channel named "chanA * chanB"
            newchan = ' * '.join(freq.axis['chan'][i])
            freq.axis['chan'][i] = asarray([newchan], dtype='U')
        freq.data[i] = Sxx

    return freq
Compute the power spectral density (PSD, output='spectraldensity', scaling='power'), or energy spectral density (ESD, output='spectraldensity', scaling='energy') or the complex fourier transform (output='complex', sides='two') Parameters ---------- data : instance of ChanTime one of the datatypes detrend : str None (no detrending), 'constant' (remove mean), 'linear' (remove linear trend) output : str 'spectraldensity' or 'csd' or 'complex' 'spectraldensity' meaning the autospectrum or auto-spectral density, a special case of 'csd' (cross-spectral density), where the signal is cross-correlated with itself if 'csd', both channels in data are used as input sides : str 'one' or 'two', where 'two' implies negative frequencies scaling : str 'power' (units: V ** 2 / Hz), 'energy' (units: V ** 2), 'fieldtrip', 'chronux' taper : str Taper to use, commonly used tapers are 'boxcar', 'hann', 'dpss' halfbandwidth : int (only if taper='dpss') Half bandwidth (in Hz), frequency smoothing will be from +halfbandwidth to -halfbandwidth NW : int (only if taper='dpss') Normalized half bandwidth (NW = halfbandwidth * dur). Number of DPSS tapers is 2 * NW - 1. If specified, NW takes precedence over halfbandwidth duration : float, in s If not None, it divides the signal in epochs of this length (in seconds) and then average over the PSD / ESD (not the complex result) overlap : float, between 0 and 1 The amount of overlap between epochs (0.5 = 50%, 0.95 = almost complete overlap). step : float, in s step in seconds between epochs (alternative to overlap) n_fft: int Length of FFT, in samples. If less than input axis, input is cropped. If longer than input axis, input is padded with zeros. If None, FFT length set to axis length. log_trans : bool If True, spectral values will be natural log-transformed. The transformation is applied before averaging (or taking the median). centend : str (only if duration is not None). Central tendency measure to use, either mean (arithmetic) or median. 
Returns ------- instance of ChanFreq If output='complex', there is an additional dimension ('taper') which is useful for 'dpss' but it's also present for all the other tapers. Raises ------ TypeError If the data does not have a 'time' axis. It might work in the future on other axes, but I cannot imagine how. ValueError If you use duration (to create multiple epochs) and output='complex', because it does not average the complex output of multiple epochs. Notes ----- See extensive notes at wonambi.trans.frequency._fft It uses sampling frequency as specified in s_freq, it does not recompute the sampling frequency based on the time axis. Use of log or median for Welch's method is included based on recommendations from Izhikevich et al., bioRxiv, 2018.
def choose(self):
    """Marks the item as the one the user is in."""
    if not self.choosed:
        self.choosed = True
        # shift the item by (5, 0) to visually highlight it
        # NOTE(review): assumes Sep is a 2D offset type supporting '+' — confirm
        self.pos = self.pos + Sep(5, 0)
Marks the item as the one the user is in.
def pkcs7_unpad(buf):
    # type: (bytes) -> bytes
    """Removes PKCS7 padding a decrypted object

    :param bytes buf: buffer to remove padding
    :rtype: bytes
    :return: buffer without PKCS7_PADDING
    """
    # PKCS7 padding is defined relative to the cipher block size (AES here)
    block_size = cryptography.hazmat.primitives.ciphers.algorithms.AES.block_size
    unpadder = cryptography.hazmat.primitives.padding.PKCS7(block_size).unpadder()
    return unpadder.update(buf) + unpadder.finalize()
Removes PKCS7 padding a decrypted object :param bytes buf: buffer to remove padding :rtype: bytes :return: buffer without PKCS7_PADDING
def _validate_gain_A_value(self, gain_A):
    """
    validate a given value for gain_A

    :type gain_A: int
    :raises: ParameterValidationError
    """
    # NOTE(review): docstring previously claimed ValueError, but the code
    # raises ParameterValidationError — corrected above.
    if gain_A not in self._valid_gains_for_channel_A:
        raise ParameterValidationError("{gain_A} is not a valid gain".format(gain_A=gain_A))
validate a given value for gain_A :type gain_A: int :raises: ParameterValidationError
def server_prepare_root_bin_dir():
    '''Install custom commands for user root at '/root/bin/'.

    Each command file is installed with mode 755; 'run_backup' is additionally
    linked into /etc/cron.daily so it runs once a day.
    '''
    commands = ['run_backup']
    for command in commands:
        install_file_legacy(flo('/root/bin/{command}'), sudo=True)
        sudo(flo('chmod 755 /root/bin/{command}'))
        if command == 'run_backup':
            # -snf: symbolic, overwrite existing link, never dereference
            sudo('ln -snf /root/bin/run_backup /etc/cron.daily/run_backup')
Install custom commands for user root at '/root/bin/'.
def opt(self, x_init, f_fp=None, f=None, fp=None):
    """
    Run the TNC optimizer.

    :param x_init: initial parameter vector
    :param f_fp: callable returning (objective_value, gradient); required by TNC
    :param f: objective-only callable (unused by this optimizer)
    :param fp: gradient-only callable (unused by this optimizer)

    Results are stored on self: x_opt, f_opt, funct_eval, status.
    """
    # human-readable status strings indexed by fmin_tnc's return code
    tnc_rcstrings = ['Local minimum', 'Converged', 'XConverged',
                     'Maximum number of f evaluations reached',
                     'Line search failed', 'Function is constant']

    # FIX: identity comparison with None (was `f_fp != None`)
    assert f_fp is not None, "TNC requires f_fp"

    opt_dict = {}
    if self.xtol is not None:
        opt_dict['xtol'] = self.xtol
    if self.ftol is not None:
        opt_dict['ftol'] = self.ftol
    if self.gtol is not None:
        opt_dict['pgtol'] = self.gtol

    opt_result = optimize.fmin_tnc(f_fp, x_init, messages=self.messages,
                                   maxfun=self.max_f_eval, **opt_dict)
    self.x_opt = opt_result[0]
    self.f_opt = f_fp(self.x_opt)[0]
    self.funct_eval = opt_result[1]
    self.status = tnc_rcstrings[opt_result[2]]
Run the TNC optimizer
def indent(text, num=4):
    """Indent every line of the given string by ``num`` spaces.

    :param text: the (possibly multi-line) string to indent
    :param num: number of spaces to prepend to each line (default 4)
    :return: the indented string
    """
    # DOC FIX: docstring previously read "Indet the given string"
    str_indent = ' ' * num
    return str_indent + ('\n' + str_indent).join(text.splitlines())
Indent the given string
def bezier_point(p, t):
    """Evaluates the Bezier curve given by it's control points, p, at t.
    Note: Uses Horner's rule for cubic and lower order Bezier curves.
    Warning:  Be concerned about numerical stability when using this function
    with high order curves.

    :param p: sequence of control points, or an Arc-like object exposing
        ``large_arc`` and ``point``
    :param t: curve parameter in [0, 1]
    """
    # begin arc support block ########################
    try:
        # Arc objects expose `large_arc`; plain control-point sequences do not.
        p.large_arc
        return p.point(t)
    except AttributeError:
        # FIX: was a bare `except:` which silently swallowed every error
        # (including bugs inside p.point and KeyboardInterrupt).
        pass
    # end arc support block ##########################

    deg = len(p) - 1
    if deg == 3:
        # Horner's rule for the cubic Bernstein polynomial
        return p[0] + t*(
            3*(p[1] - p[0]) + t*(
                3*(p[0] + p[2]) - 6*p[1] + t*(
                    -p[0] + 3*(p[1] - p[2]) + p[3])))
    elif deg == 2:
        return p[0] + t*(
            2*(p[1] - p[0]) + t*(
                p[0] - 2*p[1] + p[2]))
    elif deg == 1:
        return p[0] + t*(p[1] - p[0])
    elif deg == 0:
        return p[0]
    else:
        # general case: explicit Bernstein basis evaluation
        bern = bernstein(deg, t)
        return sum(bern[k]*p[k] for k in range(deg+1))
Evaluates the Bezier curve given by it's control points, p, at t. Note: Uses Horner's rule for cubic and lower order Bezier curves. Warning: Be concerned about numerical stability when using this function with high order curves.
def set_socket_address(self):
    """
    Set a random port to be used by zmq.

    Picks one port in [5001, 5999) and derives both the subscriber and the
    publisher socket addresses from it.
    """
    Global.LOGGER.debug('defining socket addresses for zmq')
    random.seed()
    default_port = random.randrange(5001, 5999)

    internal_0mq_address = "tcp://127.0.0.1"
    # NOTE(review): subscriber and publisher intentionally share the same
    # port here — confirm this is the desired topology.
    internal_0mq_port_subscriber = str(default_port)
    internal_0mq_port_publisher = str(default_port)

    # FIX: dropped the redundant str.format(...) wrapper around the
    # already-formatted f-strings (the log output is byte-identical).
    Global.LOGGER.info(
        f"zmq subsystem subscriber on {internal_0mq_port_subscriber} port")
    Global.LOGGER.info(
        f"zmq subsystem publisher on {internal_0mq_port_publisher} port")

    self.subscriber_socket_address = f"{internal_0mq_address}:{internal_0mq_port_subscriber}"
    self.publisher_socket_address = f"{internal_0mq_address}:{internal_0mq_port_publisher}"
Set a random port to be used by zmq
def filter_since_tag(self, all_tags):
    """
    Filter tags according since_tag option.

    :param list(dict) all_tags: All tags.
    :rtype: list(dict)
    :return: Filtered tags.
    """
    tag = self.detect_since_tag()
    # no usable anchor tag -> nothing to filter out
    if not tag or tag == REPO_CREATED_TAG_NAME:
        return copy.deepcopy(all_tags)

    names = [entry["name"] for entry in all_tags]
    try:
        anchor_idx = names.index(tag)
    except ValueError:
        self.warn_if_tag_not_found(tag, "since-tag")
        return copy.deepcopy(all_tags)

    cutoff = self.get_time_of_tag(all_tags[anchor_idx])
    # keep every tag at or after the anchor tag's timestamp
    return [entry for entry in all_tags
            if cutoff <= self.get_time_of_tag(entry)]
Filter tags according since_tag option. :param list(dict) all_tags: All tags. :rtype: list(dict) :return: Filtered tags.
def parse_function(fn):
    """Get the source of a function and return its AST.

    :param fn: the function object to parse
    :raises ValueError: when the function's source is unavailable
        (interpreter-defined functions, C extensions, ...)
    """
    try:
        return parse_string(inspect.getsource(fn))
    except (IOError, OSError) as e:
        # FIX: chain the original error so the real cause stays visible
        raise ValueError(
            'Cannot differentiate function: %s. Tangent must be able to access the '
            'source code of the function. Functions defined in a Python '
            'interpreter and functions backed by C extension modules do not '
            'have accessible source code.' % e) from e
Get the source of a function and return its AST.
def extractall(self, directory, auto_create_dir=False, patool_path=None):
    '''
    Extract the archive into ``directory``.

    :param directory: directory to extract to
    :param auto_create_dir: auto create directory
    :param patool_path: the path to the patool backend
    :raises ValueError: missing archive/directory, non-zip file for the
        'zipfile' backend, or an unknown backend
    '''
    log.debug('extracting %s into %s (backend=%s)',
              self.filename, directory, self.backend)
    is_zipfile = zipfile.is_zipfile(self.filename)
    directory = _fullpath(directory)
    if not os.path.exists(self.filename):
        raise ValueError(
            'archive file does not exist:' + str(self.filename))
    if not os.path.exists(directory):
        if auto_create_dir:
            os.makedirs(directory)
        else:
            raise ValueError('directory does not exist:' + str(directory))

    if self.backend == 'auto':
        if is_zipfile:
            self.extractall_zipfile(directory)
        else:
            self.extractall_patool(directory, patool_path)
    elif self.backend == 'zipfile':
        if not is_zipfile:
            raise ValueError('file is not zip file:' + str(self.filename))
        self.extractall_zipfile(directory)
    elif self.backend == 'patool':
        self.extractall_patool(directory, patool_path)
    else:
        # FIX: an unrecognized backend previously fell through all the
        # independent if-statements and silently extracted nothing.
        raise ValueError('unknown backend: ' + str(self.backend))
:param directory: directory to extract to :param auto_create_dir: auto create directory :param patool_path: the path to the patool backend
def get_object(self, obj_class, data=None, subset=None):
    """Return a subclassed JSSObject instance by querying for existing
    objects or posting a new object.

    Args:
        obj_class: The JSSObject subclass type to search for or create.
        data: None (list operation), int (lookup by ID), str (lookup by
            name), or xml.etree.ElementTree.Element (create from XML).
        subset: List of XML subelement tags to request, or an
            '&' delimited string. Not supported for all JSSObjects.

    Returns:
        JSSObjectList for empty/None data; otherwise a JSSObject of
        type obj_class.

    Raises:
        TypeError: if subset is not formatted properly.
        ValueError: if data is of an unsupported type.
    """
    # Normalize subset to a list of tags up front.
    if subset and not isinstance(subset, list):
        if not isinstance(subset, basestring):
            raise TypeError
        subset = subset.split("&")

    # Dispatch on the type of `data` with guard clauses.
    if data is None:
        return self.get_list(obj_class, data, subset)
    if isinstance(data, (basestring, int)):
        return self.get_individual_object(obj_class, data, subset)
    if isinstance(data, ElementTree.Element):
        return self.get_new_object(obj_class, data)
    raise ValueError
Return a subclassed JSSObject instance by querying for existing objects or posting a new object. Args: obj_class: The JSSObject subclass type to search for or create. data: The data parameter performs different operations depending on the type passed. None: Perform a list operation, or for non-container objects, return all data. int: Retrieve an object with ID of <data>. str: Retrieve an object with name of <str>. For some objects, this may be overridden to include searching by other criteria. See those objects for more info. xml.etree.ElementTree.Element: Create a new object from xml. subset: A list of XML subelement tags to request (e.g. ['general', 'purchasing']), OR an '&' delimited string (e.g. 'general&purchasing'). This is not supported for all JSSObjects. Returns: JSSObjectList: for empty or None arguments to data. JSSObject: Returns an object of type obj_class for searches and new objects. (FUTURE) Will return None if nothing is found that match the search criteria. Raises: TypeError: if subset not formatted properly. JSSMethodNotAllowedError: if you try to perform an operation not supported by that object type. JSSGetError: If object searched for is not found. JSSPostError: If attempted object creation fails.
def getHeight(self):
    '''
    Gets the height.

    Uses the UiAutomator bounds when available; otherwise falls back to
    the mapped height property, returning 0 when it is missing or not
    convertible to an int.
    '''
    if self.useUiAutomator:
        # bounds is ((left, top), (right, bottom)); height = bottom - top
        return self.map['bounds'][1][1] - self.map['bounds'][0][1]
    try:
        return int(self.map[self.heightProperty])
    except (KeyError, TypeError, ValueError):
        # Was a bare ``except:`` which hid every error; narrowed to the
        # failures the lookup and int() conversion can actually raise,
        # keeping the best-effort "return 0" fallback.
        return 0
Gets the height.
def _head(self, client_kwargs):
    """
    Returns object HTTP header.

    Args:
        client_kwargs (dict): Client arguments.

    Returns:
        dict: HTTP header.
    """
    response = self.client.request(
        'HEAD', timeout=self._TIMEOUT, **client_kwargs)
    # _handle_http_errors raises on error statuses before we touch headers.
    return _handle_http_errors(response).headers
Returns object HTTP header. Args: client_kwargs (dict): Client arguments. Returns: dict: HTTP header.
def reduced_chi2(self, model, error_map=0):
    """
    returns reduced chi2

    :param model:
    :param error_map:
    :return:
    """
    residuals = self.reduced_residuals(model, error_map)
    return np.sum(np.square(residuals)) / self.num_data_evaluate()
returns reduced chi2 :param model: :param error_map: :return:
def set_value(self, index, value):
    """Set value"""
    # One row lookup serves all four updates below.
    row = index.row()
    key = self.keys[row]
    self._data[key] = value
    self.showndata[key] = value
    self.sizes[row] = get_size(value)
    self.types[row] = get_human_readable_type(value)
    self.sig_setting_data.emit()
Set value
def destroy_dns(app='', env='dev', **_):
    """Destroy DNS records.

    Args:
        app (str): Spinnaker Application name.
        env (str): Deployment environment.
        regions (str): AWS region.

    Returns:
        bool: True upon successful completion.

    Raises:
        RuntimeError: If a matching record could not be destroyed.
    """
    client = boto3.Session(profile_name=env).client('route53')
    generated = get_details(app=app, env=env)
    record = generated.dns_elb()
    zone_ids = get_dns_zone_ids(env=env, facing='external')
    for zone_id in zone_ids:
        record_sets = client.list_resource_record_sets(
            HostedZoneId=zone_id,
            StartRecordName=record,
            StartRecordType='CNAME',
            MaxItems='1')
        for found_record in record_sets['ResourceRecordSets']:
            # Previously an ``assert`` guarded this call; asserts are
            # stripped under ``python -O``, so fail explicitly instead.
            if not destroy_record(client=client, found_record=found_record,
                                  record=record, zone_id=zone_id):
                raise RuntimeError(
                    'Failed to destroy record {0} in zone {1}'.format(
                        record, zone_id))
    return True
Destroy DNS records. Args: app (str): Spinnaker Application name. env (str): Deployment environment. regions (str): AWS region. Returns: bool: True upon successful completion.
def enable(self, ospf_profile=None, router_id=None):
    """
    Enable OSPF on this engine. For master engines, enable
    OSPF on the virtual firewall.

    Once enabled on the engine, add an OSPF area to an interface::

        engine.dynamic_routing.ospf.enable()
        interface = engine.routing.get(0)
        interface.add_ospf_area(OSPFArea('myarea'))

    :param str,OSPFProfile ospf_profile: OSPFProfile element or str
        href; if None, use default profile
    :param str router_id: single IP address router ID
    :raises ElementNotFound: OSPF profile not found
    :return: None
    """
    if ospf_profile:
        profile_ref = element_resolver(ospf_profile)
    else:
        # Fall back to the system default OSPFv2 profile.
        profile_ref = OSPFProfile('Default OSPFv2 Profile').href
    self.data.update(
        enabled=True,
        ospfv2_profile_ref=profile_ref,
        router_id=router_id)
Enable OSPF on this engine. For master engines, enable OSPF on the virtual firewall. Once enabled on the engine, add an OSPF area to an interface:: engine.dynamic_routing.ospf.enable() interface = engine.routing.get(0) interface.add_ospf_area(OSPFArea('myarea')) :param str,OSPFProfile ospf_profile: OSPFProfile element or str href; if None, use default profile :param str router_id: single IP address router ID :raises ElementNotFound: OSPF profile not found :return: None
def compute_deflections_at_next_plane(plane_index, total_planes):
    """Decide whether the tracer should compute deflections at the next plane.

    True if there is another plane after this plane, else False.

    Parameters
    -----------
    plane_index : int
        The index of the plane we are deciding if we should compute its deflections.
    total_planes : int
        The total number of planes."""
    last_plane_index = total_planes - 1
    if plane_index == last_plane_index:
        return False
    if plane_index < last_plane_index:
        return True
    # An index past the final plane means redshift allocation went wrong.
    raise exc.RayTracingException(
        'A galaxy was not correctly allocated its previous / next redshifts')
This function determines whether the tracer should compute the deflections at the next plane. This is True if there is another plane after this plane, else it is False.. Parameters ----------- plane_index : int The index of the plane we are deciding if we should compute its deflections. total_planes : int The total number of planes.
def append(self, row):
    """Append a result row and check its length.

    >>> x = Results(['title', 'type'])
    >>> x.append(('Konosuba', 'TV'))
    >>> x
    Results(['title', 'type'], [('Konosuba', 'TV')])
    >>> x.append(('Konosuba',))
    Traceback (most recent call last):
        ...
    ValueError: Wrong result row length
    """
    normalized = tuple(row)
    if len(normalized) != self.table_width:
        raise ValueError('Wrong result row length')
    self.results.append(normalized)
Append a result row and check its length. >>> x = Results(['title', 'type']) >>> x.append(('Konosuba', 'TV')) >>> x Results(['title', 'type'], [('Konosuba', 'TV')]) >>> x.append(('Konosuba',)) Traceback (most recent call last): ... ValueError: Wrong result row length
def takeChild(self, index):
    """
    Removes the child at the given index from this item.

    :param      index | <int>
    """
    child = super(XGanttWidgetItem, self).takeChild(index)
    if not child:
        return child
    # Keep the scene in sync with the tree when a real item was removed.
    child.removeFromScene()
    return child
Removes the child at the given index from this item. :param index | <int>
def _new_pivot_query(self):
    """
    Create a new query builder for the pivot table.

    Narrows the base pivot query to rows whose morph-type column
    matches this relation's morph class.

    :rtype: eloquent.orm.Builder
    """
    query = super(MorphToMany, self)._new_pivot_query()

    return query.where(self._morph_type, self._morph_class)
Create a new query builder for the pivot table. :rtype: eloquent.orm.Builder
def style_defs(cls):
    """
    Return the CSS style definitions required by the formatted snippet.
    """
    html_formatter = HtmlFormatter()
    # Highlight violation lines with the class-configured color.
    html_formatter.style.highlight_color = cls.VIOLATION_COLOR
    return html_formatter.get_style_defs()
Return the CSS style definitions required by the formatted snippet.
def pub_view(request, docid, configuration):
    """The initial view, does not provide the document content yet"""
    config = settings.CONFIGURATIONS[configuration]
    if 'autodeclare' in config:
        # BUG FIX: previously this indexed CONFIGURATIONS with the literal
        # string 'configuration' instead of the ``configuration`` argument
        # that was just tested above, raising KeyError (or reading the
        # wrong config) for every configured autodeclare.
        for annotationtype, annotationset in config['autodeclare']:
            try:
                r = flat.comm.query(
                    request,
                    "USE pub/" + docid + " DECLARE " + annotationtype +
                    " OF " + annotationset)
            except Exception as e:
                return fatalerror(request, e)
    return initdoc(request, 'pub', docid, 'editor', 'editor.html',
                   configuration=configuration)
The initial view, does not provide the document content yet
def set_debug(self, debuglevel):
    """
    Change the debug level of the API

    :param debuglevel: int verbosity. 1 = INFO; 2 = DEBUG plus cookie
        debugging; >=3 additionally enables urllib3 DEBUG logging;
        any other value resets logging to defaults (WARNING).

      **Returns:** No item returned.
    """
    # Only accept ints; a non-int argument silently keeps the previous
    # level but still (re)applies the handler configuration below.
    if isinstance(debuglevel, int):
        self._debuglevel = debuglevel
    if self._debuglevel == 1:
        logging.basicConfig(level=logging.INFO,
                            format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s")
        api_logger.setLevel(logging.INFO)
    elif self._debuglevel == 2:
        logging.basicConfig(level=logging.DEBUG,
                            format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s")
        # Surface cookie handling inside the requests session.
        requests.cookies.cookielib.debug = True
        api_logger.setLevel(logging.DEBUG)
    elif self._debuglevel >= 3:
        logging.basicConfig(level=logging.DEBUG,
                            format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s")
        requests.cookies.cookielib.debug = True
        api_logger.setLevel(logging.DEBUG)
        # Also trace the underlying HTTP connection pool.
        urllib3_logger = logging.getLogger("requests.packages.urllib3")
        urllib3_logger.setLevel(logging.DEBUG)
        urllib3_logger.propagate = True
    else:
        # Remove all handlers
        for handler in logging.root.handlers[:]:
            logging.root.removeHandler(handler)
        # set logging level to default
        requests.cookies.cookielib.debug = False
        api_logger.setLevel(logging.WARNING)
    return
Change the debug level of the API **Returns:** No item returned.
def _lsm_load_pages(self): """Load and fix all pages from LSM file.""" # cache all pages to preserve corrected values pages = self.pages pages.cache = True pages.useframes = True # use first and second page as keyframes pages.keyframe = 1 pages.keyframe = 0 # load remaining pages as frames pages._load(keyframe=None) # fix offsets and bytecounts first self._lsm_fix_strip_offsets() self._lsm_fix_strip_bytecounts() # assign keyframes for data and thumbnail series keyframe = pages[0] for page in pages[::2]: page.keyframe = keyframe keyframe = pages[1] for page in pages[1::2]: page.keyframe = keyframe
Load and fix all pages from LSM file.
def get_end_date(self, obj):
    """
    Returns the end date for a model instance
    """
    value = getattr(obj, self.get_end_date_field())
    try:
        # Reduce a datetime to its date component.
        return value.date()
    except AttributeError:
        # It's a date rather than datetime, so we use it as is
        return value
Returns the end date for a model instance
def generate_api_doc(self, uri): '''Make autodoc documentation template string for a module Parameters ---------- uri : string python location of module - e.g 'sphinx.builder' Returns ------- head : string Module name, table of contents. body : string Function and class docstrings. ''' # get the names of all classes and functions functions, classes = self._parse_module_with_import(uri) if not len(functions) and not len(classes) and DEBUG: print('WARNING: Empty -', uri) # dbg # Make a shorter version of the uri that omits the package name for # titles uri_short = re.sub(r'^%s\.' % self.package_name,'',uri) head = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n' body = '' # Set the chapter title to read 'module' for all modules except for the # main packages if '.' in uri_short: title = 'Module: :mod:`' + uri_short + '`' head += title + '\n' + self.rst_section_levels[2] * len(title) else: title = ':mod:`' + uri_short + '`' head += title + '\n' + self.rst_section_levels[1] * len(title) head += '\n.. automodule:: ' + uri + '\n' head += '\n.. currentmodule:: ' + uri + '\n' body += '\n.. currentmodule:: ' + uri + '\n' for c in classes: body += '\n:class:`' + c + '`\n' \ + self.rst_section_levels[3] * \ (len(c)+9) + '\n\n' body += '\n.. autoclass:: ' + c + '\n' # must NOT exclude from index to keep cross-refs working body += ' :members:\n' \ ' :undoc-members:\n' \ ' :show-inheritance:\n' \ '\n' \ ' .. automethod:: __init__\n\n' head += '.. autosummary::\n\n' for f in classes + functions: head += ' ' + f + '\n' head += '\n' for f in functions: # must NOT exclude from index to keep cross-refs working body += f + '\n' body += self.rst_section_levels[3] * len(f) + '\n' body += '\n.. autofunction:: ' + f + '\n\n' return head, body
Make autodoc documentation template string for a module Parameters ---------- uri : string python location of module - e.g 'sphinx.builder' Returns ------- head : string Module name, table of contents. body : string Function and class docstrings.
def _double_fork(self):
    """Do the UNIX double-fork magic.

    See Stevens' "Advanced Programming in the UNIX Environment" for
    details (ISBN 0201563177)
    http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
    """
    try:
        pid = os.fork()
        if pid > 0:
            # Exit first parent.
            sys.exit(0)
            # Unreachable after sys.exit(); defensive only.
            return None
    except OSError as err:
        LOG.exception(
            "Fork #1 failed: {0} ({1})".format(
                err.errno,
                err.strerror,
            ),
        )
        sys.exit(exit.DAEMONIZE_FAILED)
        return None
    # Decouple from parent environment.
    os.chdir("/")
    os.setsid()
    os.umask(0)
    # Do second fork.
    try:
        pid = os.fork()
        if pid > 0:
            # Exit from second parent.
            sys.exit(0)
    except OSError as err:
        LOG.exception(
            "Fork #2 failed: {0} ({1})".format(
                err.errno,
                err.strerror,
            ),
        )
        sys.exit(exit.DAEMONIZE_FAILED)
    # Only the grandchild (the daemon process) reaches this point.
    return None
Do the UNIX double-fork magic. See Stevens' "Advanced Programming in the UNIX Environment" for details (ISBN 0201563177) http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
def ip_shell_after_exception(frame): """ Launches an IPython embedded shell in the namespace where an exception occurred. :param frame: :return: """ # let the user know, where this shell is 'waking up' # construct frame list # this will be printed in the header frame_info_list = [] frame_list = [] original_frame = frame = frame or inspect.currentframe() local_ns = frame.f_locals # global_ns = frame.f_globals # this is deprecated by IPython dummy_module = DummyMod() dummy_module.__dict__ = frame.f_globals while frame is not None: frame_list.append(frame) info = inspect.getframeinfo(frame) frame_info_list.append(info) frame = frame.f_back frame_info_list.reverse() frame_info_str_list = [format_frameinfo(fi) for fi in frame_info_list] custom_header1 = "----- frame list -----\n\n" frame_info_str = "\n--\n".join(frame_info_str_list[:-1]) custom_header2 = "\n----- ERROR -----\n" custom_header = "{0}{1}{2}".format(custom_header1, frame_info_str, custom_header2) # prevent IPython shell to be launched in IP-Notebook if len(frame_info_list) >= 2: test_str = str(frame_info_list[0]) + str(frame_info_list[1]) if 'IPython' in test_str and 'zmq' in test_str: print("\n- Not entering IPython embedded shell -\n") return # copied (and modified) from IPython/terminal/embed.py config = load_default_config() config.InteractiveShellEmbed = config.TerminalInteractiveShell # these two lines prevent problems in related to the initialization # of ultratb.FormattedTB below InteractiveShellEmbedWithoutBanner.clear_instance() InteractiveShellEmbedWithoutBanner._instance = None shell = InteractiveShellEmbedWithoutBanner.instance() shell(header=custom_header, stack_depth=2, local_ns=local_ns, module=dummy_module) # if `diff_index` is not None it will be interpreted as index increment for the frame_list in the except hook # "__mu" means "move up" diff_index = local_ns.get("__mu") if not isinstance(diff_index, int): diff_index = None return diff_index
Launches an IPython embedded shell in the namespace where an exception occurred. :param frame: :return:
def business_hours_schedule_delete(self, id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/schedules#delete-a-schedule"
    endpoint = "/api/v2/business_hours/schedules/{id}.json".format(id=id)
    return self.call(endpoint, method="DELETE", **kwargs)
https://developer.zendesk.com/rest_api/docs/core/schedules#delete-a-schedule
def _convertEntities(self, match):
    """Used in a call to re.sub to replace HTML, XML, and numeric
    entities with the appropriate Unicode characters. If HTML
    entities are being converted, any unrecognized entities are
    escaped.

    NOTE: uses Python 2 ``unichr``; ``match.group(1)`` is the entity
    name/number without the surrounding ``&``/``;``.
    """
    x = match.group(1)
    if self.convertHTMLEntities and x in name2codepoint:
        # Named HTML entity, e.g. 'amp' -> '&'.
        return unichr(name2codepoint[x])
    elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
        if self.convertXMLEntities:
            return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
        else:
            # Leave the XML entity untouched.
            return u'&%s;' % x
    elif len(x) > 0 and x[0] == '#':
        # Handle numeric entities
        if len(x) > 1 and x[1] == 'x':
            # Hexadecimal form, e.g. '#x2014'.
            return unichr(int(x[2:], 16))
        else:
            # Decimal form, e.g. '#8212'.
            return unichr(int(x[1:]))
    elif self.escapeUnrecognizedEntities:
        # Escape the ampersand so the output stays well-formed.
        return u'&amp;%s;' % x
    else:
        return u'&%s;' % x
Used in a call to re.sub to replace HTML, XML, and numeric entities with the appropriate Unicode characters. If HTML entities are being converted, any unrecognized entities are escaped.
def train(X_train, y_train, **kwargs):
    '''
    >>> corpus = CorpusReader('annot.opcorpora.xml')
    >>> X_train, x_test, y_train, y_test = get_train_data(corpus, test_size=0.33, random_state=42)
    >>> crf = train(X_train, y_train)
    '''
    trainer = Trainer()
    trainer.set_params({
        'c1': 1.0,
        'c2': 0.001,
        'max_iterations': 200,
        'feature.possible_transitions': True,
    })
    # Feed every (features, labels) sequence pair to the trainer.
    for feature_seq, label_seq in zip(X_train, y_train):
        trainer.append(feature_seq, label_seq)
    trainer.train(TOKENIZATION_MODEL_PATH)
    return trainer
>>> corpus = CorpusReader('annot.opcorpora.xml') >>> X_train, x_test, y_train, y_test = get_train_data(corpus, test_size=0.33, random_state=42) >>> crf = train(X_train, y_train)
def getDatastreamProfile(self, dsid, date=None):
    """Get information about a particular datastream belonging to this
    object.

    :param dsid: datastream id
    :rtype: :class:`DatastreamProfile`
    """
    # NOTE: used by DatastreamObject
    if self._create:
        # Object not yet ingested; no profile exists to fetch.
        return None

    response = self.api.getDatastream(self.pid, dsid, asOfDateTime=date)
    return parse_xml_object(DatastreamProfile, response.content, response.url)
Get information about a particular datastream belonging to this object. :param dsid: datastream id :rtype: :class:`DatastreamProfile`
def _explore(self, node, visited, skip_father=None):
    """
    Explore the CFG and look for re-entrancy
    Heuristic: There is a re-entrancy if a state variable is written
    after an external call

    node.context will contains the external calls executed
    It contains the calls executed in father nodes

    if node.context is not empty, and variables are written, a
    re-entrancy is possible
    """
    if node in visited:
        return

    # Copy, not mutate: each DFS path owns its own visited list.
    visited = visited + [node]

    # First we add the external calls executed in previous nodes
    # send_eth returns the list of calls sending value
    # calls returns the list of calls that can callback
    # read returns the variable read
    # read_prior_calls returns the variable read prior a call
    fathers_context = {'send_eth': set(), 'calls': set(), 'read': set(), 'read_prior_calls': {}}

    for father in node.fathers:
        if self.KEY in father.context:
            # skip_father excludes the branch we are currently exploring
            # from (set when recursing into an if/loop arm).
            fathers_context['send_eth'] |= set([s for s in father.context[self.KEY]['send_eth'] if s != skip_father])
            fathers_context['calls'] |= set([c for c in father.context[self.KEY]['calls'] if c != skip_father])
            fathers_context['read'] |= set(father.context[self.KEY]['read'])
            fathers_context['read_prior_calls'] = union_dict(fathers_context['read_prior_calls'], father.context[self.KEY]['read_prior_calls'])

    # Exclude path that dont bring further information
    if node in self.visited_all_paths:
        if all(call in self.visited_all_paths[node]['calls'] for call in fathers_context['calls']):
            if all(send in self.visited_all_paths[node]['send_eth'] for send in fathers_context['send_eth']):
                if all(read in self.visited_all_paths[node]['read'] for read in fathers_context['read']):
                    if dict_are_equal(self.visited_all_paths[node]['read_prior_calls'], fathers_context['read_prior_calls']):
                        return
    else:
        self.visited_all_paths[node] = {'send_eth': set(), 'calls': set(), 'read': set(), 'read_prior_calls': {}}

    # Accumulate everything ever seen at this node across all paths.
    self.visited_all_paths[node]['send_eth'] = set(self.visited_all_paths[node]['send_eth'] | fathers_context['send_eth'])
    self.visited_all_paths[node]['calls'] = set(self.visited_all_paths[node]['calls'] | fathers_context['calls'])
    self.visited_all_paths[node]['read'] = set(self.visited_all_paths[node]['read'] | fathers_context['read'])
    self.visited_all_paths[node]['read_prior_calls'] = union_dict(self.visited_all_paths[node]['read_prior_calls'], fathers_context['read_prior_calls'])

    node.context[self.KEY] = fathers_context

    state_vars_read = set(node.state_variables_read)

    # All the state variables written
    state_vars_written = set(node.state_variables_written)
    slithir_operations = []
    # Add the state variables written in internal calls
    for internal_call in node.internal_calls:
        # Filter to Function, as internal_call can be a solidity call
        if isinstance(internal_call, Function):
            state_vars_written |= set(internal_call.all_state_variables_written())
            state_vars_read |= set(internal_call.all_state_variables_read())
            slithir_operations += internal_call.all_slithir_operations()

    contains_call = False
    node.context[self.KEY]['written'] = set(state_vars_written)
    if self._can_callback(node.irs + slithir_operations):
        node.context[self.KEY]['calls'] = set(node.context[self.KEY]['calls'] | {node})
        # Record which variables were read before this callback-capable call.
        node.context[self.KEY]['read_prior_calls'][node] = set(node.context[self.KEY]['read_prior_calls'].get(node, set()) | node.context[self.KEY]['read'] | state_vars_read)
        contains_call = True
    if self._can_send_eth(node.irs + slithir_operations):
        node.context[self.KEY]['send_eth'] = set(node.context[self.KEY]['send_eth'] | {node})

    node.context[self.KEY]['read'] = set(node.context[self.KEY]['read'] | state_vars_read)

    sons = node.sons
    if contains_call and node.type in [NodeType.IF, NodeType.IFLOOP]:
        # Explore one conditional arm with this node excluded as a
        # father (skip_father), then the other arm normally.
        if self._filter_if(node):
            son = sons[0]
            self._explore(son, visited, node)
            sons = sons[1:]
        else:
            son = sons[1]
            self._explore(son, visited, node)
            sons = [sons[0]]

    for son in sons:
        self._explore(son, visited)
Explore the CFG and look for re-entrancy Heuristic: There is a re-entrancy if a state variable is written after an external call node.context will contains the external calls executed It contains the calls executed in father nodes if node.context is not empty, and variables are written, a re-entrancy is possible
def remove_item(self, **kwargs):
    """
    Delete movies from a list that the user created.

    A valid session id is required.

    Args:
        media_id: A movie id.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    path = self._get_id_path('remove_item')

    # session_id travels in the query string; media_id in the POST body.
    kwargs.update({'session_id': self.session_id})

    payload = {
        'media_id': kwargs.pop('media_id', None),
    }

    response = self._POST(path, kwargs, payload)
    self._set_attrs_to_values(response)
    return response
Delete movies from a list that the user created. A valid session id is required. Args: media_id: A movie id. Returns: A dict representation of the JSON returned from the API.
def _proxy(self):
    """
    Generate an instance context for the instance, the context is
    capable of performing various actions. All instance actions are
    proxied to the context

    :returns: RecordingSettingsContext for this RecordingSettingsInstance
    :rtype: twilio.rest.video.v1.recording_settings.RecordingSettingsContext
    """
    # Lazily build the context once and reuse it afterwards.
    context = self._context
    if context is None:
        context = RecordingSettingsContext(self._version, )
        self._context = context
    return context
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: RecordingSettingsContext for this RecordingSettingsInstance :rtype: twilio.rest.video.v1.recording_settings.RecordingSettingsContext
def _check_is_chained_assignment_possible(self):
    """
    Check if we are a view, have a cacher, and are of mixed type.
    If so, then force a setitem_copy check.

    Should be called just near setting a value

    Will return a boolean if it we are a view and are cached, but a
    single-dtype meaning that the cacher should be updated following
    setting.
    """
    if self._is_view and self._is_cached:
        ref = self._get_cacher()
        # Mixed-type cacher: warn/raise now, the write cannot propagate.
        if ref is not None and ref._is_mixed_type:
            self._check_setitem_copy(stacklevel=4, t='referant',
                                     force=True)
        # Cached single-dtype view: caller must refresh the cacher.
        return True
    elif self._is_copy:
        self._check_setitem_copy(stacklevel=4, t='referant')
    return False
Check if we are a view, have a cacher, and are of mixed type. If so, then force a setitem_copy check. Should be called just near setting a value Will return a boolean if it we are a view and are cached, but a single-dtype meaning that the cacher should be updated following setting.
def add_pos_indicator(self):
    """Adds a new position indicator."""
    indicator = new_body(name="pos_indicator")
    # Non-colliding translucent red sphere (contype/conaffinity 0).
    marker_geom = new_geom(
        "sphere",
        [0.03],
        rgba=[1, 0, 0, 0.5],
        group=1,
        contype="0",
        conaffinity="0",
    )
    indicator.append(marker_geom)
    indicator.append(new_joint(type="free", name="pos_indicator"))
    self.worldbody.append(indicator)
Adds a new position indicator.
def set(self, **kargs):
    """ Reset default keyword parameters.

    Assigns new default values from dictionary ``kargs`` to the fitter's
    keyword parameters. Keywords for the underlying :mod:`lsqfit` fitters
    can also be included (or grouped together in dictionary
    ``fitterargs``).

    Returns tuple ``(kargs, oldkargs)`` where ``kargs`` is a dictionary
    containing all :class:`lsqfit.MultiFitter` keywords after they have
    been updated, and ``oldkargs`` contains the original values for these
    keywords. Use ``fitter.set(**oldkargs)`` to restore the original
    values.
    """
    # Recognized MultiFitter keywords; anything else is routed to the
    # underlying fitter via fitterargs.
    kwords = set([
        'mopt', 'fast', 'ratio', 'wavg_kargs',
        'wavg_all', 'fitterargs', 'fitname',
        ])
    kargs = dict(kargs)
    oldkargs = {}
    fargs = {}
    # changed
    for k in list(kargs.keys()):
        # list() needed since changing kargs
        if k in kwords:
            oldkargs[k] = getattr(self, k)
            setattr(self, k, kargs[k])
            kwords.remove(k)
        else:
            # Unrecognized keyword: collect for fitterargs instead.
            fargs[k] = kargs[k]
            del kargs[k]
    # unchanged
    for k in kwords:
        kargs[k] = getattr(self, k)
    # manage fitterargs
    if 'fitterargs' in kwords:
        # means wasn't in kargs initially
        oldkargs['fitterargs'] = self.fitterargs
        self.fitterargs = dict(self.fitterargs)
    if len(fargs) > 0:
        self.fitterargs.update(fargs)
        kargs['fitterargs'] = dict(self.fitterargs)
    return kargs, oldkargs
Reset default keyword parameters. Assigns new default values from dictionary ``kargs`` to the fitter's keyword parameters. Keywords for the underlying :mod:`lsqfit` fitters can also be included (or grouped together in dictionary ``fitterargs``). Returns tuple ``(kargs, oldkargs)`` where ``kargs`` is a dictionary containing all :class:`lsqfit.MultiFitter` keywords after they have been updated, and ``oldkargs`` contains the original values for these keywords. Use ``fitter.set(**oldkargs)`` to restore the original values.
def _make_parser_func(sep):
    """Creates a parser function from the given sep.

    Args:
        sep: The separator default to use for the parser.

    Returns:
        A function object.
    """

    def parser_func(
        filepath_or_buffer,
        sep=sep,
        delimiter=None,
        header="infer",
        names=None,
        index_col=None,
        usecols=None,
        squeeze=False,
        prefix=None,
        mangle_dupe_cols=True,
        dtype=None,
        engine=None,
        converters=None,
        true_values=None,
        false_values=None,
        skipinitialspace=False,
        skiprows=None,
        nrows=None,
        na_values=None,
        keep_default_na=True,
        na_filter=True,
        verbose=False,
        skip_blank_lines=True,
        parse_dates=False,
        infer_datetime_format=False,
        keep_date_col=False,
        date_parser=None,
        dayfirst=False,
        iterator=False,
        chunksize=None,
        compression="infer",
        thousands=None,
        decimal=b".",
        lineterminator=None,
        quotechar='"',
        quoting=0,
        escapechar=None,
        comment=None,
        encoding=None,
        dialect=None,
        tupleize_cols=None,
        error_bad_lines=True,
        warn_bad_lines=True,
        skipfooter=0,
        doublequote=True,
        delim_whitespace=False,
        low_memory=True,
        memory_map=False,
        float_precision=None,
    ):
        # Capture every argument (including defaults) as a kwargs dict
        # without spelling the full parameter list out again.
        _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
        # A falsy separator (None/empty) falls back to tab.
        if not kwargs.get("sep", sep):
            kwargs["sep"] = "\t"
        return _read(**kwargs)

    return parser_func
Creates a parser function from the given sep. Args: sep: The separator default to use for the parser. Returns: A function object.
def beautify(string, *args, **kwargs):
    """
    Convenient interface to the ecstasy package.

    Arguments:
        string (str): The string to beautify with ecstasy.
        args (list): The positional arguments.
        kwargs (dict): The keyword ('always') arguments.
    """
    return Parser(args, kwargs).beautify(string)
Convenient interface to the ecstasy package. Arguments: string (str): The string to beautify with ecstasy. args (list): The positional arguments. kwargs (dict): The keyword ('always') arguments.
def get_environ(self, key, default=None, cast=None):
    """Get value from environment variable using os.environ.get

    :param key: The name of the setting value, will always be upper case
    :param default: In case of not found it will be returned
    :param cast: Should cast in to @int, @float, @bool or @json ?
                 or cast must be true to use cast inference
    :return: The value if found, default or None
    """
    value = self.environ.get(key.upper(), default)
    # Falsy values (None, empty string) are returned untouched.
    if value:
        if cast in converters:
            value = converters.get(cast)(value)
        if cast is True:
            value = parse_conf_data(value, tomlfy=True)
    return value
Get value from environment variable using os.environ.get :param key: The name of the setting value, will always be upper case :param default: In case of not found it will be returned :param cast: Should cast in to @int, @float, @bool or @json ? or cast must be true to use cast inference :return: The value if found, default or None
def randomLocation(cls, radius, width, height, origin=None):
    '''
    :param: radius - float
    :param: width - float
    :param: height - float
    :param: origin - optional Point subclass
    :return: Rectangle
    '''
    random_origin = Point.randomLocation(radius, origin)
    return cls(width, height, random_origin)
:param: radius - float :param: width - float :param: height - float :param: origin - optional Point subclass :return: Rectangle
def _set_medians_and_extremes(self): """ Sets median values for rtt and the offset of result packets. """ rtts = sorted([p.rtt for p in self.packets if p.rtt is not None]) if rtts: self.rtt_min = rtts[0] self.rtt_max = rtts[-1] self.rtt_median = self.calculate_median(rtts) offsets = sorted( [p.offset for p in self.packets if p.offset is not None] ) if offsets: self.offset_min = offsets[0] self.offset_max = offsets[-1] self.offset_median = self.calculate_median(offsets)
Sets median values for rtt and the offset of result packets.
def backtick(cmd, encoding='utf-8'):
    """Return decoded output from command."""
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    stdout, _ = process.communicate()
    return stdout.decode(encoding)
Return decoded output from command.
def batch_qs(qs, batch_size=1000):
    """
    Returns a (start, end, total, queryset) tuple for each batch in the given
    queryset.

    Usage:
        # Make sure to order your queryset
        article_qs = Article.objects.order_by('id')
        for start, end, total, qs in batch_qs(article_qs):
            print "Now processing %s - %s of %s" % (start + 1, end, total)
            for article in qs:
                print article.body
    """
    total = qs.count()
    for lower in range(0, total, batch_size):
        # Clamp the final batch to the queryset size.
        upper = min(lower + batch_size, total)
        yield lower, upper, total, qs[lower:upper]
Returns a (start, end, total, queryset) tuple for each batch in the given queryset. Usage: # Make sure to order your queryset article_qs = Article.objects.order_by('id') for start, end, total, qs in batch_qs(article_qs): print "Now processing %s - %s of %s" % (start + 1, end, total) for article in qs: print article.body
def receiveds_parsing(receiveds):
    """
    Parse a list of raw ``Received`` headers.

    Args:
        receiveds (list): list of raw receiveds headers

    Returns:
        a list of parsed receiveds headers with first hop in first position
    """
    cleaned = [re.sub(JUNK_PATTERN, " ", raw).strip() for raw in receiveds]
    total = len(cleaned)
    log.debug("Nr. of receiveds. {}".format(total))

    parsed = []
    for idx, received in enumerate(cleaned):
        log.debug("Parsing received {}/{}".format(idx + 1, total))
        log.debug("Try to parse {!r}".format(received))
        try:
            # Prefer the structured clause-by-clause parse; fall back to
            # the raw header when the parser cannot handle it.
            parsed.append(parse_received(received))
        except MailParserReceivedParsingError:
            parsed.append({'raw': received})

    log.debug("len(receiveds) %s, len(parsed) %s" % (len(cleaned), len(parsed)))

    if len(cleaned) != len(parsed):
        # something really bad happened,
        # so just return raw receiveds with hop indices
        log.error("len(receiveds): %s, len(parsed): %s, receiveds: %s, \
parsed: %s" % (len(cleaned), len(parsed), cleaned, parsed))
        return receiveds_not_parsed(cleaned)

    # all's good! we have parsed or raw receiveds for each received header
    return receiveds_format(parsed)
This function parses the receiveds headers. Args: receiveds (list): list of raw receiveds headers Returns: a list of parsed receiveds headers with first hop in first position
def fetch(context, data):
    """Do an HTTP GET on the ``url`` specified in the inbound data.

    On success the serialized response is merged into ``data`` and
    re-emitted; on request failure the task is re-queued with exponential
    backoff until the configured retry count is exhausted.
    """
    url = data.get('url')
    attempt = data.pop('retry_attempt', 1)
    try:
        result = context.http.get(url, lazy=True)
        rules = context.get('rules', {'match_all': {}})
        # Skip responses that the configured rule set rejects.
        if not Rule.get_rule(rules).apply(result):
            context.log.info('Fetch skip: %r', result.url)
            return

        if not result.ok:
            err = (result.url, result.status_code)
            context.emit_warning("Fetch fail [%s]: HTTP %s" % err)
            # Failed responses are dropped unless emit_errors is set.
            if not context.params.get('emit_errors', False):
                return
        else:
            context.log.info("Fetched [%s]: %r",
                             result.status_code, result.url)

        data.update(result.serialize())
        if url != result.url:
            # The request was redirected; tag the original URL so it is
            # not fetched again in this run.
            tag = make_key(context.run_id, url)
            context.set_tag(tag, None)
        context.emit(data=data)
    except RequestException as ce:
        retries = int(context.get('retry', 3))
        if retries >= attempt:
            context.log.warn("Retry: %s (error: %s)", url, ce)
            data['retry_attempt'] = attempt + 1
            # Exponential backoff: 2, 4, 8, ... seconds.
            context.recurse(data=data, delay=2 ** attempt)
        else:
            context.emit_warning("Fetch fail [%s]: %s" % (url, ce))
Do an HTTP GET on the ``url`` specified in the inbound data.
def decode_jwt_token(token, secret):
    """
    Validates and decodes the JWT token

    Token checked for
    - signature of JWT token
    - token issued date is valid

    :param token: jwt token
    :param secret: client specific secret
    :return boolean: True if valid token, False otherwise
    :raises TokenIssuerError: if iss field not present
    :raises TokenIssuedAtError: if iat field not present
    :raises jwt.DecodeError: If signature validation fails
    """
    try:
        # check signature of the token
        # NOTE(review): `verify=True` is the PyJWT 1.x API; PyJWT >= 2.0
        # removed this keyword -- confirm the pinned dependency version.
        decoded_token = jwt.decode(
            token,
            key=secret.encode(),
            verify=True,
            algorithms=[__algorithm__],
            leeway=__bound__
        )
        # token has all the required fields
        if 'iss' not in decoded_token:
            raise TokenIssuerError
        if 'iat' not in decoded_token:
            raise TokenIssuedAtError

        # check iat time is within bounds
        now = epoch_seconds()
        iat = int(decoded_token['iat'])
        if now > (iat + __bound__):
            raise TokenExpiredError("Token has expired", decoded_token)
        if iat > (now + __bound__):
            raise TokenExpiredError("Token can not be in the future", decoded_token)
        return True
    except jwt.InvalidIssuedAtError:
        # NOTE(review): this calls `decode_token` (not `decode_jwt_token`);
        # if no such sibling helper exists this branch raises NameError --
        # verify against the module.
        raise TokenExpiredError("Token has invalid iat field", decode_token(token))
    except jwt.DecodeError:
        raise TokenDecodeError
Validates and decodes the JWT token Token checked for - signature of JWT token - token issued date is valid :param token: jwt token :param secret: client specific secret :return boolean: True if valid token, False otherwise :raises TokenIssuerError: if iss field not present :raises TokenIssuedAtError: if iat field not present :raises jwt.DecodeError: If signature validation fails
def _get_version_from_git_tag(path):
    """Return a PEP440-compliant version derived from ``git describe``.

    Returns None when the describe output does not match
    GIT_DESCRIBE_REGEX. (The historical docstring claimed a changeset-hash
    fallback, but no such fallback is implemented here.)
    """
    m = GIT_DESCRIBE_REGEX.match(_git_describe_tags(path) or '')
    if m is None:
        return None
    version, post_commit, hash = m.groups()
    # Exactly on a tag -> plain version; otherwise a PEP440 post-release
    # carrying the commit hash as the local version label.
    return version if post_commit == '0' else "{0}.post{1}+{2}".format(version, post_commit, hash)
Return a PEP440-compliant version derived from the git status. If that fails for any reason, return the changeset hash.
def assign_params(sess, params, network):
    """Assign the given parameters to the TensorLayer network.

    Parameters
    ----------
    sess : Session
        TensorFlow Session; if None the assign ops are built but not run.
    params : list of array
        A list of parameters (array) in order.
    network : :class:`Layer`
        The network to be assigned.

    Returns
    -------
    list of operations
        A list of tf ops in order that assign params. Support sess.run(ops) manually.

    Examples
    --------
    - See ``tl.files.save_npz``

    References
    ----------
    - `Assign value to a TensorFlow variable <http://stackoverflow.com/questions/34220532/how-to-assign-value-to-a-tensorflow-variable>`__
    """
    ops = [network.all_params[idx].assign(param)
           for idx, param in enumerate(params)]
    if sess is not None:
        sess.run(ops)
    return ops
Assign the given parameters to the TensorLayer network. Parameters ---------- sess : Session TensorFlow Session. params : list of array A list of parameters (array) in order. network : :class:`Layer` The network to be assigned. Returns -------- list of operations A list of tf ops in order that assign params. Support sess.run(ops) manually. Examples -------- - See ``tl.files.save_npz`` References ---------- - `Assign value to a TensorFlow variable <http://stackoverflow.com/questions/34220532/how-to-assign-value-to-a-tensorflow-variable>`__
def send_messages(self, email_messages):
    """
    Queue one or more EmailMessage objects for sending.

    NOTE(review): despite the Django email-backend convention of
    returning the number of messages sent, this implementation returns
    None.
    """
    from .mail import create
    from .utils import create_attachments

    if not email_messages:
        return

    for email_message in email_messages:
        subject = email_message.subject
        from_email = email_message.from_email
        message = email_message.body
        headers = email_message.extra_headers

        # Check whether email has 'text/html' alternative
        alternatives = getattr(email_message, 'alternatives', ())
        for alternative in alternatives:
            if alternative[1].startswith('text/html'):
                html_message = alternative[0]
                break
        else:
            html_message = ''

        attachment_files = {}
        for attachment in email_message.attachments:
            if isinstance(attachment, MIMEBase):
                # MIME object attachment: keep payload, mimetype and headers.
                attachment_files[attachment.get_filename()] = {
                    'file': ContentFile(attachment.get_payload()),
                    'mimetype': attachment.get_content_type(),
                    'headers': OrderedDict(attachment.items()),
                }
            else:
                # (filename, content, ...) tuple form.
                attachment_files[attachment[0]] = ContentFile(attachment[1])

        email = create(sender=from_email,
                       recipients=email_message.to, cc=email_message.cc,
                       bcc=email_message.bcc, subject=subject,
                       message=message, html_message=html_message,
                       headers=headers)

        if attachment_files:
            attachments = create_attachments(attachment_files)
            email.attachments.add(*attachments)

        # Dispatch immediately when the configured default priority is 'now'.
        if get_default_priority() == 'now':
            email.dispatch()
Queue one or more EmailMessage objects and returns the number of email messages sent.
def check_for_launchpad(old_vendor, name, urls):
    """Check if the project is hosted on launchpad.

    :param old_vendor: str, name of the vendor the project came from
    :param name: str, name of the project
    :param urls: set, urls to check.
    :return: the name of the project on launchpad, or an empty string.
    """
    if old_vendor != "pypi":
        # XXX This might work for other starting vendors
        # XXX but I didn't check. For now only allow
        # XXX pypi -> launchpad.
        return ''

    pattern = re.compile(r"https?://launchpad.net/([\w.\-]+)")
    for url in urls:
        match = pattern.match(url)
        if match is not None:
            return match.groups()[0]
    return ''
Check if the project is hosted on launchpad. :param name: str, name of the project :param urls: set, urls to check. :return: the name of the project on launchpad, or an empty string.
def _get_asconv_headers(mosaic):
    """ Getter for the asconv headers (ascii header info stored in the dicom) """
    # The ASCCONV block lives in the Siemens private tag (0029, 1020).
    raw = mosaic[Tag(0x0029, 0x1020)].value.decode(encoding='ISO-8859-1')
    matches = re.findall(r'### ASCCONV BEGIN(.*)### ASCCONV END ###',
                         raw,
                         re.DOTALL)
    return matches[0]
Getter for the asconv headers (asci header info stored in the dicom)
def _activate_organization(organization):
    """
    Activates an inactivated (soft-deleted) organization as well as any
    inactive organization-course relationships.
    """
    # Plain for-loops: the original built throwaway lists via comprehensions
    # purely for their side effects.
    for record in internal.OrganizationCourse.objects.filter(
            organization_id=organization.id, active=False):
        _activate_organization_course_relationship(record)

    for record in internal.Organization.objects.filter(
            id=organization.id, active=False):
        _activate_record(record)
Activates an inactivated (soft-deleted) organization as well as any inactive relationships
def from_compact(cls: Type[TransactionType], currency: str, compact: str) -> TransactionType:
    """
    Return Transaction instance from compact string format

    :param currency: Name of the currency
    :param compact: Compact format string
    :return: the parsed Transaction
    :raises MalformedDocumentError: if any section of the compact
        document cannot be parsed
    """
    lines = compact.splitlines(True)
    n = 0

    # Header line carries the version, the size of every section,
    # a comment flag and the locktime.
    header_data = Transaction.re_header.match(lines[n])
    if header_data is None:
        raise MalformedDocumentError("Compact TX header")
    version = int(header_data.group(1))
    issuers_num = int(header_data.group(2))
    inputs_num = int(header_data.group(3))
    unlocks_num = int(header_data.group(4))
    outputs_num = int(header_data.group(5))
    has_comment = int(header_data.group(6))
    locktime = int(header_data.group(7))
    n += 1

    # A blockstamp line is only present for document version >= 3.
    blockstamp = None  # type: Optional[BlockUID]
    if version >= 3:
        blockstamp = BlockUID.from_str(Transaction.parse_field("CompactBlockstamp", lines[n]))
        n += 1

    issuers = []
    inputs = []
    unlocks = []
    outputs = []
    signatures = []
    # Fixed-size sections follow in document order:
    # issuers, inputs, unlocks, outputs.
    for i in range(0, issuers_num):
        issuer = Transaction.parse_field("Pubkey", lines[n])
        issuers.append(issuer)
        n += 1

    for i in range(0, inputs_num):
        input_source = InputSource.from_inline(version, lines[n])
        inputs.append(input_source)
        n += 1

    for i in range(0, unlocks_num):
        unlock = Unlock.from_inline(lines[n])
        unlocks.append(unlock)
        n += 1

    for i in range(0, outputs_num):
        output_source = OutputSource.from_inline(lines[n])
        outputs.append(output_source)
        n += 1

    comment = ""
    if has_comment == 1:
        data = Transaction.re_compact_comment.match(lines[n])
        if data:
            comment = data.group(1)
            n += 1
        else:
            raise MalformedDocumentError("Compact TX Comment")

    # Every remaining line must be a signature.
    while n < len(lines):
        data = Transaction.re_signature.match(lines[n])
        if data:
            signatures.append(data.group(1))
            n += 1
        else:
            raise MalformedDocumentError("Compact TX Signatures")

    return cls(version, currency, blockstamp, locktime, issuers, inputs, unlocks, outputs, comment, signatures)
Return Transaction instance from compact string format :param currency: Name of the currency :param compact: Compact format string :return:
def _all_indexes_same(indexes): """ Determine if all indexes contain the same elements. Parameters ---------- indexes : list of Index objects Returns ------- bool True if all indexes contain the same elements, False otherwise. """ first = indexes[0] for index in indexes[1:]: if not first.equals(index): return False return True
Determine if all indexes contain the same elements. Parameters ---------- indexes : list of Index objects Returns ------- bool True if all indexes contain the same elements, False otherwise.
def listen_tta(self, target, timeout):
    """Listen as Type A Target in 106 kbps.

    Restrictions:

    * It is not possible to send short frames that are required for
      ACK and NAK responses. This means that a Type 2 Tag emulation
      can only implement a single sector memory model.

    * It can not be avoided that the chipset responds to SENSF_REQ
      commands. The driver configures the SENSF_RES response to all
      zero and ignores all Type F communication but eventually it
      depends on the remote device whether Type A Target activation
      will still be attempted.
    """
    # Validate the requested target configuration before touching hardware.
    if not target.brty == '106A':
        info = "unsupported target bitrate: %r" % target.brty
        raise nfc.clf.UnsupportedTargetError(info)
    if target.rid_res:
        info = "listening for type 1 tag activation is not supported"
        raise nfc.clf.UnsupportedTargetError(info)
    if target.sens_res is None:
        raise ValueError("sens_res is required")
    if target.sdd_res is None:
        raise ValueError("sdd_res is required")
    if target.sel_res is None:
        raise ValueError("sel_res is required")
    if len(target.sens_res) != 2:
        raise ValueError("sens_res must be 2 byte")
    if len(target.sdd_res) != 4:
        raise ValueError("sdd_res must be 4 byte")
    if len(target.sel_res) != 1:
        raise ValueError("sel_res must be 1 byte")
    if target.sdd_res[0] != 0x08:
        raise ValueError("sdd_res[0] must be 08h")

    # NFC-A parameter block handed to the chipset: SENS_RES (2 bytes),
    # NFCID1 bytes 1-3, SEL_RES (1 byte).
    nfca_params = target.sens_res + target.sdd_res[1:4] + target.sel_res
    log.debug("nfca_params %s", hexlify(nfca_params))

    self.chipset.tg_set_rf("106A")
    self.chipset.tg_set_protocol(self.chipset.tg_set_protocol_defaults)
    self.chipset.tg_set_protocol(rf_off_error=False)

    time_to_return = time.time() + timeout
    tg_comm_rf_args = {'mdaa': True, 'nfca_params': nfca_params}
    # Chipset timeout field is a 16-bit millisecond value.
    tg_comm_rf_args['recv_timeout'] = min(int(1000 * timeout), 0xFFFF)

    def listen_tta_tt2():
        # Wait for a Type 2 Tag command frame until the deadline passes.
        recv_timeout = tg_comm_rf_args['recv_timeout']
        while recv_timeout > 0:
            log.debug("wait %d ms for Type 2 Tag activation", recv_timeout)
            try:
                data = self.chipset.tg_comm_rf(**tg_comm_rf_args)
            except CommunicationError as error:
                log.debug(error)
            else:
                brty = ('106A', '212F', '424F')[data[0]-11]
                # NOTE(review): `buffer` is Python 2 only -- this code path
                # predates a py3 port; confirm before reuse.
                log.debug("%s rcvd %s", brty, hexlify(buffer(data, 7)))
                if brty == "106A" and data[2] & 0x03 == 3:
                    self.chipset.tg_set_protocol(rf_off_error=True)
                    return nfc.clf.LocalTarget(
                        "106A", sens_res=nfca_params[0:2],
                        sdd_res=b'\x08'+nfca_params[2:5],
                        sel_res=nfca_params[5:6], tt2_cmd=data[7:])
                else:
                    log.debug("not a 106A Type 2 Tag command")
            finally:
                # Shrink the chipset timeout to whatever wall time remains.
                recv_timeout = int(1000 * (time_to_return - time.time()))
                tg_comm_rf_args['recv_timeout'] = recv_timeout

    def listen_tta_tt4():
        # Wait for ISO-DEP (Type 4A) activation: first a RATS exchange,
        # then the first TT4 command.
        rats_cmd = rats_res = None
        recv_timeout = tg_comm_rf_args['recv_timeout']
        while recv_timeout > 0:
            log.debug("wait %d ms for 106A TT4 command", recv_timeout)
            try:
                data = self.chipset.tg_comm_rf(**tg_comm_rf_args)
                tg_comm_rf_args['transmit_data'] = None
            except CommunicationError as error:
                tg_comm_rf_args['transmit_data'] = None
                rats_cmd = rats_res = None
                log.debug(error)
            else:
                brty = ('106A', '212F', '424F')[data[0]-11]
                log.debug("%s rcvd %s", brty, hexlify(buffer(data, 7)))
                if brty == "106A" and data[2] == 3 and data[7] == 0xE0:
                    # RATS received; answer with the configured (or a
                    # default) ATS.
                    (rats_cmd, rats_res) = (data[7:], target.rats_res)
                    log.debug("rcvd RATS_CMD %s", hexlify(rats_cmd))
                    if rats_res is None:
                        rats_res = bytearray.fromhex("05 78 80 70 02")
                    log.debug("send RATS_RES %s", hexlify(rats_res))
                    tg_comm_rf_args['transmit_data'] = rats_res
                elif brty == "106A" and data[7] != 0xF0 and rats_cmd:
                    did = rats_cmd[1] & 0x0F
                    cmd = data[7:]
                    # Decode the ATS interface bytes TA/TB/TC, presence
                    # flagged in the format byte.
                    ta_tb_tc = rats_res[2:]
                    ta = ta_tb_tc.pop(0) if rats_res[1] & 0x10 else None
                    tb = ta_tb_tc.pop(0) if rats_res[1] & 0x20 else None
                    tc = ta_tb_tc.pop(0) if rats_res[1] & 0x40 else None
                    if ta is not None:
                        log.debug("TA(1) = {:08b}".format(ta))
                    if tb is not None:
                        log.debug("TB(1) = {:08b}".format(tb))
                    if tc is not None:
                        log.debug("TC(1) = {:08b}".format(tc))
                    if ta_tb_tc:
                        log.debug("T({}) = {}".format(
                            len(ta_tb_tc), hexlify(ta_tb_tc)))
                    did_supported = tc is None or bool(tc & 0x02)
                    cmd_with_did = bool(cmd[0] & 0x08)
                    # Accept the command only if the DID (device id)
                    # matches the one negotiated in RATS.
                    if (((cmd_with_did and did_supported and cmd[1] == did)
                         or (did == 0 and not cmd_with_did))):
                        if cmd[0] in (0xC2, 0xCA):
                            # S(DESELECT): echo it back and drop the
                            # ISO-DEP association.
                            log.debug("rcvd S(DESELECT) %s", hexlify(cmd))
                            tg_comm_rf_args['transmit_data'] = cmd
                            log.debug("send S(DESELECT) %s", hexlify(cmd))
                            rats_cmd = rats_res = None
                        else:
                            log.debug("rcvd TT4_CMD %s", hexlify(cmd))
                            self.chipset.tg_set_protocol(rf_off_error=True)
                            return nfc.clf.LocalTarget(
                                "106A", sens_res=nfca_params[0:2],
                                sdd_res=b'\x08'+nfca_params[2:5],
                                sel_res=nfca_params[5:6], tt4_cmd=cmd,
                                rats_cmd=rats_cmd, rats_res=rats_res)
                    else:
                        log.debug("skip TT4_CMD %s (DID)", hexlify(cmd))
                else:
                    log.debug("not a 106A TT4 command")
            finally:
                recv_timeout = int(1000 * (time_to_return - time.time()))
                tg_comm_rf_args['recv_timeout'] = recv_timeout

    # SEL_RES bits 5/6 announce the supported protocol: 00 -> Type 2 Tag,
    # bit 5 set -> ISO-DEP (Type 4A Tag).
    if target.sel_res[0] & 0x60 == 0x00:
        return listen_tta_tt2()
    if target.sel_res[0] & 0x20 == 0x20:
        return listen_tta_tt4()

    reason = "sel_res does not indicate any tag target support"
    raise nfc.clf.UnsupportedTargetError(reason)
Listen as Type A Target in 106 kbps. Restrictions: * It is not possible to send short frames that are required for ACK and NAK responses. This means that a Type 2 Tag emulation can only implement a single sector memory model. * It can not be avoided that the chipset responds to SENSF_REQ commands. The driver configures the SENSF_RES response to all zero and ignores all Type F communication but eventually it depends on the remote device whether Type A Target activation will still be attempted.
def list_absent(name, acl_type, acl_names=None, recurse=False):
    '''
    Ensure a Linux ACL list does not exist

    Takes a list of acl names and remove them from the given path

    name
        The acl path

    acl_type
        The type of the acl is used for, it can be 'user' or 'group'

    acl_names
        The list of users or groups

    recurse
        Set the permissions recursive in the path
    '''
    if acl_names is None:
        acl_names = []

    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}

    if not os.path.exists(name):
        ret['comment'] = '{0} does not exist'.format(name)
        ret['result'] = False
        return ret

    __current_perms = __salt__['acl.getfacl'](name)

    # A 'd:'/'default:' prefix selects the default (inherited) ACL table.
    if acl_type.startswith(('d:', 'default:')):
        _acl_type = ':'.join(acl_type.split(':')[1:])
        _current_perms = __current_perms[name].get('defaults', {})
        _default = True
    else:
        _acl_type = acl_type
        _current_perms = __current_perms[name]
        _default = False

    # The getfacl execution module lists default with empty names as being
    # applied to the user/group that owns the file, e.g.,
    # default:group::rwx would be listed as default:group:root:rwx
    # In this case, if acl_name is empty, we really want to search for root
    # but still uses '' for other

    # We search through the dictionary getfacl returns for the owner of the
    # file if acl_name is empty.
    if not acl_names:
        _search_names = set(__current_perms[name].get('comment').get(_acl_type, ''))
    else:
        _search_names = set(acl_names)

    if _current_perms.get(_acl_type, None) or _default:
        try:
            users = {}
            for i in _current_perms[_acl_type]:
                # Each entry is a {name: perms} mapping; collect matches.
                if i and next(six.iterkeys(i)) in _search_names:
                    users.update(i)
        except (AttributeError, KeyError):
            users = None

        if users:
            ret['comment'] = 'Removing permissions'

            # Dry-run mode: report what would happen without changing.
            if __opts__['test']:
                ret['result'] = None
                return ret

            for acl_name in acl_names:
                __salt__['acl.delfacl'](acl_type, acl_name, name, recursive=recurse)
        else:
            ret['comment'] = 'Permissions are in the desired state'
    else:
        ret['comment'] = 'ACL Type does not exist'
        ret['result'] = False

    return ret
Ensure a Linux ACL list does not exist Takes a list of acl names and remove them from the given path name The acl path acl_type The type of the acl is used for, it can be 'user' or 'group' acl_names The list of users or groups perms Remove the permissions eg.: rwx recurse Set the permissions recursive in the path
def set_version(request, response):
    """Set version and revision headers on *response*.

    Header names and values come from the registry settings; a configured
    value may be a concrete value, a callable, or a dotted name resolved
    via :class:`DottedNameResolver`.
    """
    settings = request.registry.settings
    resolver = DottedNameResolver()

    def _resolve(value):
        # A configured header value may be a callable, a dotted name, or
        # unset; normalize all three cases to a concrete value.
        if callable(value):
            return value()
        if value:
            return resolver.resolve(value)
        return value

    # version config
    version_header = settings.get(
        'api.version_header',
        'X-Version',
    )
    version_header_value = _resolve(settings.get('api.version_header_value'))

    # revision config
    revision_header = settings.get(
        'api.revision_header',
        'X-Revision',
    )
    revision_header_value = _resolve(settings.get('api.revision_header_value'))

    if version_header and version_header_value:
        response.headers[str(version_header)] = str(version_header_value)

    if revision_header and revision_header_value:
        response.headers[str(revision_header)] = str(revision_header_value)
Set version and revision to response
def p_expr_function(p):
    # NOTE: the string below is the PLY/yacc grammar rule for this
    # production -- it is read via __doc__ by the parser generator and
    # must not be edited as documentation.
    'expr : FUNCTION is_reference LPAREN parameter_list RPAREN lexical_vars LBRACE inner_statement_list RBRACE'
    # Build an anonymous function node: p[4] = parameters, p[6] = captured
    # (use) variables, p[8] = body statements, p[2] = returns-by-reference.
    p[0] = ast.Closure(p[4], p[6], p[8], p[2], lineno=p.lineno(1))
expr : FUNCTION is_reference LPAREN parameter_list RPAREN lexical_vars LBRACE inner_statement_list RBRACE
def _check_operators(self, operators):
    """Validate and normalize the input operator list.

    Parameters
    ----------
    operators : list, tuple or np.ndarray
        List of linear operator class instances

    Returns
    -------
    np.array
        The operators as a numpy array, with their ``op`` and ``cost``
        attributes wrapped by ``check_callable``.

    Raises
    ------
    TypeError
        For invalid input type
    ValueError
        For an empty list or an operator missing ``op``/``cost``
    """
    if not isinstance(operators, (list, tuple, np.ndarray)):
        raise TypeError('Invalid input type, operators must be a list, '
                        'tuple or numpy array.')

    operators = np.array(operators)

    if not operators.size:
        raise ValueError('Operator list is empty.')

    for operator in operators:
        for method in ('op', 'cost'):
            if not hasattr(operator, method):
                raise ValueError('Operators must contain "{0}" method.'.format(method))
        operator.op = check_callable(operator.op)
        operator.cost = check_callable(operator.cost)

    return operators
Check Inputs This method checks that the input operators and weights are correctly formatted Parameters ---------- operators : list, tuple or np.ndarray List of linear operator class instances Returns ------- np.array operators Raises ------ TypeError For invalid input type
def land_cover_analysis_summary_report(feature, parent):
    """Retrieve an HTML land cover analysis table report from a multi
    exposure analysis, or None if no analysis directory exists."""
    _ = feature, parent  # NOQA
    analysis_dir = get_analysis_dir(exposure_land_cover['key'])
    if not analysis_dir:
        return None
    return get_impact_report_as_string(analysis_dir)
Retrieve an HTML land cover analysis table report from a multi exposure analysis.
def to_workspace_value(self, result, assets):
    """
    Called with the result of a pipeline. This needs to return an object
    which can be put into the workspace to continue doing computations.

    This is the inverse of :func:`~zipline.pipeline.term.Term.postprocess`.
    """
    # Integral classifiers round-trip through the base implementation.
    if self.dtype == int64_dtype:
        return super(Classifier, self).to_workspace_value(result, assets)

    assert isinstance(result.values, pd.Categorical), (
        'Expected a Categorical, got %r.' % type(result.values)
    )
    # Make the missing value a known category so LabelArray can represent
    # rows that had no label.
    with_missing = pd.Series(
        data=pd.Categorical(
            result.values,
            result.values.categories.union([self.missing_value]),
        ),
        index=result.index,
    )
    return LabelArray(
        super(Classifier, self).to_workspace_value(
            with_missing,
            assets,
        ),
        self.missing_value,
    )
Called with the result of a pipeline. This needs to return an object which can be put into the workspace to continue doing computations. This is the inverse of :func:`~zipline.pipeline.term.Term.postprocess`.
def name(self, node, children):
    # NOTE: the string below is a PEG grammar rule consumed by the parser
    # framework via __doc__ -- do not edit it as prose.
    'name = ~"[a-z]+" _'
    # Look up the matched identifier in the environment; unknown names
    # evaluate to -1.
    return self.env.get(node.text.strip(), -1)
name = ~"[a-z]+" _
def upsert(self, body, raise_exc=True, headers=False, files=None):
    '''Performs an HTTP PUT to the server. This is an idempotent
    call that will create the resource this navigator is pointing
    to, or will update it if it already exists.

    `body` may either be a string or a dictionary representing json
    `headers` are additional headers to send in the request
    '''
    # Delegate to the shared request machinery with the PUT verb.
    return self._request(PUT, body, raise_exc, headers, files)
Performs an HTTP PUT to the server. This is an idempotent call that will create the resource this navigator is pointing to, or will update it if it already exists. `body` may either be a string or a dictionary representing json `headers` are additional headers to send in the request
def save_current(self):
    """
    Save current editor.

    If the ``editor.file.path`` is None, ``_save`` will show a save-as
    dialog.
    """
    # Fetch the widget once instead of calling current_widget() twice.
    editor = self.current_widget()
    if editor is not None:
        self._save(editor)
Save current editor. If the editor.file.path is None, a save as dialog will be shown.
def link_source_files(generator): """ Processes each article/page object and formulates copy from and copy to destinations, as well as adding a source file URL as an attribute. """ # Get all attributes from the generator that are articles or pages posts = [ getattr(generator, attr, None) for attr in PROCESS if getattr(generator, attr, None) is not None] # Work on each item for post in posts[0]: if not 'SHOW_SOURCE_ON_SIDEBAR' in generator.settings and \ not 'SHOW_SOURCE_IN_SECTION' in generator.settings: return # Only try this when specified in metadata or SHOW_SOURCE_ALL_POSTS # override is present in settings if 'SHOW_SOURCE_ALL_POSTS' in generator.settings or \ 'show_source' in post.metadata: # Source file name can be optionally set in config show_source_filename = generator.settings.get( 'SHOW_SOURCE_FILENAME', '{}.txt'.format(post.slug) ) try: # Get the full path to the original source file source_out = os.path.join( post.settings['OUTPUT_PATH'], post.save_as ) # Get the path to the original source file source_out_path = os.path.split(source_out)[0] # Create 'copy to' destination for writing later copy_to = os.path.join( source_out_path, show_source_filename ) # Add file to published path source_url = urljoin( post.save_as, show_source_filename ) except Exception: return # Format post source dict & populate out = dict() out['copy_raw_from'] = post.source_path out['copy_raw_to'] = copy_to logger.debug('Linked %s to %s', post.source_path, copy_to) source_files.append(out) # Also add the source path to the post as an attribute for tpls post.show_source_url = source_url
Processes each article/page object and formulates copy from and copy to destinations, as well as adding a source file URL as an attribute.
def get_end_offset( self, value, parent=None, index=None ):
    """Return the end offset of the Field's data (start offset plus size).
    Useful for chainloading.

    value
        Input Python object to process.

    parent
        Parent block object where this Field is defined. Used for e.g.
        evaluating Refs.

    index
        Index of the Python object to measure from. Used if the Field
        takes a list of objects.
    """
    start = self.get_start_offset( value, parent, index )
    size = self.get_size( value, parent, index )
    return start + size
Return the end offset of the Field's data. Useful for chainloading. value Input Python object to process. parent Parent block object where this Field is defined. Used for e.g. evaluating Refs. index Index of the Python object to measure from. Used if the Field takes a list of objects.
def _split(self, iterator, tmp_dir): """ Splits the file into several chunks. If the original file is too big to fit in the allocated space, the sorting will be split into several chunks, then merged. :param tmp_dir: Where to put the intermediate sorted results. :param orig_lines: The lines read before running out of space. :return: The names of the intermediate files. """ fnames = [] for i, lines in enumerate(iterator): lines = list(lines) out_fname = os.path.join(tmp_dir, self.TMP_FNAME.format(i + 1)) self._write(lines, out_fname) fnames.append(out_fname) if len(lines) < self.max_lines: break return fnames
Splits the file into several chunks. If the original file is too big to fit in the allocated space, the sorting will be split into several chunks, then merged. :param tmp_dir: Where to put the intermediate sorted results. :param orig_lines: The lines read before running out of space. :return: The names of the intermediate files.
def get(self, sid):
    """
    Constructs a UserContext

    :param sid: The unique string that identifies the resource

    :returns: twilio.rest.chat.v2.service.user.UserContext
    :rtype: twilio.rest.chat.v2.service.user.UserContext
    """
    return UserContext(
        self._version,
        service_sid=self._solution['service_sid'],
        sid=sid,
    )
Constructs a UserContext :param sid: The unique string that identifies the resource :returns: twilio.rest.chat.v2.service.user.UserContext :rtype: twilio.rest.chat.v2.service.user.UserContext
def unwrap(self):
    '''Return the red, green and blue channels as lists of floats
    scaled from 16-bit values into [0, 1].
    '''
    def scaled(channel):
        return [channel[i] / 65535.0 for i in range(self.size)]
    return scaled(self.red), scaled(self.green), scaled(self.blue)
Returns a nested python sequence.
def get_flash_crypt_config(self):
    """Return the flash_crypt_config efuse value.

    Bit 3 in efuse_rd_disable[3:0] is mapped to flash_crypt_config;
    this bit is at position 19 in EFUSE_BLK0_RDATA0_REG. When read is
    disabled the value cannot be obtained and is assumed to be set
    correctly (0xF).
    """
    word0 = self.read_efuse(0)
    read_disabled = (word0 >> 19) & 0x1
    if read_disabled:
        # Reading is blocked; assume the efuse is set correctly.
        return 0xF
    # Read flash_crypt_config from EFUSE_BLK0_RDATA5_REG[31:28].
    word5 = self.read_efuse(5)
    return (word5 >> 28) & 0xF
bit 3 in efuse_rd_disable[3:0] is mapped to flash_crypt_config this bit is at position 19 in EFUSE_BLK0_RDATA0_REG
def set_prewarp(self, prewarp):
    """
    Updates the prewarp configuration used to skew images in OpenALPR
    before processing.

    :param prewarp: A unicode/ascii string (Python 2/3) or bytes array (Python 3)
    :return: None
    """
    encoded = _convert_to_charp(prewarp)
    self._set_prewarp_func(self.alpr_pointer, encoded)
Updates the prewarp configuration used to skew images in OpenALPR before processing. :param prewarp: A unicode/ascii string (Python 2/3) or bytes array (Python 3) :return: None
def open(self, key):
    """Implementation of :meth:`~simplekv.KeyValueStore.open`.

    On a cache miss the value is pulled from the backing store, written
    into the cache, and then re-opened from the cache. If the cache
    itself raises an :exc:`~exceptions.IOError`, it is bypassed entirely
    and the backing store is read directly. A caching error while
    storing the value is likewise not handled here.
    """
    try:
        return self.cache.open(key)
    except KeyError:
        # Miss: populate the cache from the backing store, then serve
        # from the cache so callers always get cache-backed handles.
        self.cache.put_file(key, self._dstore.open(key))
        return self.cache.open(key)
    except IOError:
        # Broken cache: fall back to the backing store.
        return self._dstore.open(key)
Implementation of :meth:`~simplekv.KeyValueStore.open`. If a cache miss occurs, the value is retrieved, stored in the cache, then then another open is issued on the cache. If the cache raises an :exc:`~exceptions.IOError`, the cache is ignored, and the backing store is consulted directly. It is possible for a caching error to occur while attempting to store the value in the cache. It will not be handled as well.
def _acceptance_prob(self, position, position_bar, momentum, momentum_bar): """ Returns the acceptance probability for given new position(position) and momentum """ # Parameters to help in evaluating Joint distribution P(position, momentum) _, logp = self.grad_log_pdf(position, self.model).get_gradient_log_pdf() _, logp_bar = self.grad_log_pdf(position_bar, self.model).get_gradient_log_pdf() # acceptance_prob = P(position_bar, momentum_bar)/ P(position, momentum) potential_change = logp_bar - logp # Negative change kinetic_change = 0.5 * np.float(np.dot(momentum_bar.T, momentum_bar) - np.dot(momentum.T, momentum)) # acceptance probability return np.exp(potential_change - kinetic_change)
Returns the acceptance probability for given new position(position) and momentum
def select_specimen(self, specimen):
    """
    Goes through the calculations necessary to plot measurement data for
    specimen and sets specimen as current GUI specimen, also attempts to
    handle changing current fit.
    """
    try:
        # Remember which fit is currently selected so the selection can be
        # restored after switching (the fit list is per-specimen).
        fit_index = self.pmag_results_data['specimens'][self.s].index(
            self.current_fit)
    except KeyError:
        fit_index = None
    except ValueError:
        fit_index = None
    # sets self.s to specimen calculates params etc.
    self.initialize_CART_rot(specimen)
    self.list_bound_loc = 0
    if fit_index != None and self.s in self.pmag_results_data['specimens']:
        try:
            # Re-select the fit at the same index for the new specimen.
            self.current_fit = self.pmag_results_data['specimens'][self.s][fit_index]
        except IndexError:
            self.current_fit = None
    else:
        self.current_fit = None
    # Keep the specimen combo box in sync with the new selection.
    if self.s != self.specimens_box.GetValue():
        self.specimens_box.SetValue(self.s)
Goes through the calculations necessary to plot measurement data for specimen and sets specimen as current GUI specimen, also attempts to handle changing current fit.
def _get_inline_translations(self, request, language_code, obj=None):
    """
    Yield ``(inline, queryset)`` pairs of translation rows for every
    translatable inline model of this admin, filtered to *language_code*
    and to the rows whose master points at *obj*.
    """
    for inline_admin in self.get_inline_instances(request, obj=obj):
        if not issubclass(inline_admin.model, TranslatableModelMixin):
            continue
        # leverage inlineformset_factory() to find the ForeignKey;
        # this also resolves the fk_name when it is set.
        foreign_key = inline_admin.get_formset(request, obj).fk
        lookup = {
            'language_code': language_code,
            'master__{0}'.format(foreign_key.name): obj,
        }
        for trans_model in inline_admin.model._parler_meta.get_all_models():
            queryset = trans_model.objects.filter(**lookup)
            if obj is not None:
                # Query the same database the master object lives on
                queryset = queryset.using(obj._state.db)
            yield inline_admin, queryset
Fetch the inline translations
def insert_text(self, s, from_undo=False):
    """Natural numbers only: strip every non-digit before inserting."""
    digits_only = ''.join(filter('0123456789'.__contains__, s))
    return super().insert_text(digits_only, from_undo)
Natural numbers only.
def _inject_closure_values_fix_code(c, injected, **kwargs):
    """
    Fix a code object, recursively fixing any nested closures.
    """
    # Extend the closure with the newly injected cell variables
    c.freevars += injected
    # Rewrite LOAD_GLOBAL -> LOAD_DEREF for each injected name, so the
    # value is fetched from a closure cell instead of module globals
    for idx, (op, arg) in enumerate(c.code):
        if op == byteplay.LOAD_GLOBAL and arg in kwargs:
            c.code[idx] = byteplay.LOAD_DEREF, arg
    _inject_closure_values_fix_closures(c, injected, **kwargs)
    return c
Fix code objects, recursively fixing any closures
def _open_list(self, list_type):
    """
    Add an open list tag corresponding to the specification in the
    parser's LIST_TYPES, and record it as the current parent element.

    Parameters
    ----------
    list_type : str
        Key into the module-level ``LIST_TYPES`` mapping.

    Raises
    ------
    Exception
        If ``list_type`` is not a key in ``LIST_TYPES``.
    """
    # Guard clause; membership test on the dict itself (no .keys() needed)
    if list_type not in LIST_TYPES:
        raise Exception('CustomSlackdownHTMLParser:_open_list: Not a valid list type.')
    tag = LIST_TYPES[list_type]  # look up once, reuse below
    self.cleaned_html += '<{t} class="list-container-{c}">'.format(
        t=tag,
        c=list_type
    )
    self.current_parent_element['tag'] = tag
    self.current_parent_element['attrs'] = {'class': list_type}
Add an open list tag corresponding to the specification in the parser's LIST_TYPES.
def _drag_col(self, event):
    """Continue dragging a column.

    Moves the floating column preview to follow the cursor, swaps the
    dragged column with a neighbor once the cursor crosses the neighbor's
    midpoint, and auto-scrolls horizontally when the cursor reaches a
    side of the table.

    # NOTE(review): assumes _dx, _dragged_col_x, _dragged_col_width and
    # _dragged_col_neighbor_widths were initialized when the drag started
    # (e.g. on button press) — confirm in the press handler.
    """
    x = self._dx + event.x  # get dragged column new left x coordinate
    self._visual_drag.place_configure(x=x)  # update column preview position
    # if one border of the dragged column is beyond the middle of the
    # neighboring column, swap them
    if (self._dragged_col_neighbor_widths[0] is not None and
            x < self._dragged_col_x - self._dragged_col_neighbor_widths[0] / 2):
        self._swap_columns('left')
    elif (self._dragged_col_neighbor_widths[1] is not None and
          x > self._dragged_col_x + self._dragged_col_neighbor_widths[1] / 2):
        self._swap_columns('right')
    # horizontal scrolling if the cursor reaches the side of the table
    if x < 0 and self.xview()[0] > 0:
        # scroll left and update dragged column x coordinate
        # (offset by the same 10 units the view moved, to stay in sync)
        self.xview_scroll(-10, 'units')
        self._dragged_col_x += 10
    elif x + self._dragged_col_width / 2 > self.winfo_width() and self.xview()[1] < 1:
        # scroll right and update dragged column x coordinate
        self.xview_scroll(10, 'units')
        self._dragged_col_x -= 10
Continue dragging a column