code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def reply_regexp(self, user, regexp):
    """Prepares a trigger for the regular expression engine.

    :param str user: The user ID invoking a reply.
    :param str regexp: The original trigger text to be turned into a regexp.

    :return regexp: The final regexp object.
    """
    if regexp in self.master._regexc["trigger"]:
        # Already compiled this one!
        return self.master._regexc["trigger"][regexp]

    # If the trigger is simply '*' then the * there needs to become (.*?)
    # to match the blank string too.
    regexp = re.sub(RE.zero_star, r'<zerowidthstar>', regexp)

    # Filter in arrays.
    arrays = re.findall(RE.array, regexp)
    for array in arrays:
        rep = ''
        if array in self.master._array:
            # Expand the array into a non-capturing alternation group.
            rep = r'(?:' + '|'.join(self.expand_array(array)) + ')'
        regexp = re.sub(r'\@' + re.escape(array) + r'\b', rep, regexp)

    # Simple replacements.
    regexp = regexp.replace('*', '(.+?)')   # Convert * into (.+?)
    regexp = regexp.replace('#', '(\d+?)')  # Convert # into (\d+?)
    regexp = regexp.replace('_', '(\w+?)')  # Convert _ into (\w+?)
    regexp = re.sub(RE.weight, '', regexp)  # Remove {weight} tags, allow spaces before the bracket
    regexp = regexp.replace('<zerowidthstar>', r'(.*?)')

    # Optionals.
    optionals = re.findall(RE.optionals, regexp)
    for match in optionals:
        parts = match.split("|")
        new = []
        for p in parts:
            # Each alternative must be surrounded by whitespace/word-boundary.
            p = r'(?:\\s|\\b)+{}(?:\\s|\\b)+'.format(p.strip())
            new.append(p)

        # If this optional had a star or anything in it, make it
        # non-matching.
        pipes = '|'.join(new)
        pipes = pipes.replace(r'(.+?)', r'(?:.+?)')
        pipes = pipes.replace(r'(\d+?)', r'(?:\d+?)')
        pipes = pipes.replace(r'([A-Za-z]+?)', r'(?:[A-Za-z]+?)')

        regexp = re.sub(r'\s*\[' + re.escape(match) + '\]\s*',
                        '(?:' + pipes + r'|(?:\\s|\\b))', regexp)

    # _ wildcards can't match numbers!
    regexp = re.sub(RE.literal_w, r'[^\\s\\d]', regexp)

    # Filter in bot variables.
    bvars = re.findall(RE.bot_tag, regexp)
    for var in bvars:
        rep = ''
        if var in self.master._var:
            rep = self.format_message(self.master._var[var])
        regexp = regexp.replace('<bot {var}>'.format(var=var), rep)

    # Filter in user variables.
    uvars = re.findall(RE.get_tag, regexp)
    for var in uvars:
        rep = ''
        value = self.master.get_uservar(user, var)
        if value not in [None, "undefined"]:
            rep = utils.strip_nasties(value)
        regexp = regexp.replace('<get {var}>'.format(var=var), rep)

    # Filter in <input> and <reply> tags. This is a slow process, so only
    # do it if we have to!
    if '<input' in regexp or '<reply' in regexp:
        history = self.master.get_uservar(user, "__history__")
        for type in ['input', 'reply']:
            # Numbered tags like <input3> index into history (1-based).
            tags = re.findall(r'<' + type + r'([0-9])>', regexp)
            for index in tags:
                rep = self.format_message(history[type][int(index) - 1])
                regexp = regexp.replace('<{type}{index}>'.format(type=type, index=index), rep)
            # The bare tag refers to the most recent entry.
            regexp = regexp.replace('<{type}>'.format(type=type),
                                    self.format_message(history[type][0]))
            # TODO: the Perl version doesn't do just <input>/<reply> in trigs!

    if self.utf8:
        return re.compile(r'^' + regexp.lower() + r'$', re.UNICODE)
    else:
        return re.compile(r'^' + regexp.lower() + r'$')
Prepares a trigger for the regular expression engine. :param str user: The user ID invoking a reply. :param str regexp: The original trigger text to be turned into a regexp. :return regexp: The final regexp object.
def resize(self, shape):
    """ Resize all attached buffers with the given shape

    Parameters
    ----------
    shape : tuple of two integers
        New buffer shape (h, w), to be applied to all currently
        attached buffers. For buffers that are a texture, the number
        of color channels is preserved.
    """
    # Check
    if not (isinstance(shape, tuple) and len(shape) == 2):
        raise ValueError('RenderBuffer shape must be a 2-element tuple')
    # Resize our buffers
    for buf in (self.color_buffer, self.depth_buffer, self.stencil_buffer):
        if buf is None:
            continue
        shape_ = shape
        if isinstance(buf, Texture2D):
            # Preserve this buffer's own channel count. (Previously this
            # read self.color_buffer.shape[-1], which used the wrong channel
            # count for depth/stencil textures and crashed when
            # color_buffer was None.)
            shape_ = shape + (buf.shape[-1], )
        buf.resize(shape_, buf.format)
Resize all attached buffers with the given shape Parameters ---------- shape : tuple of two integers New buffer shape (h, w), to be applied to all currently attached buffers. For buffers that are a texture, the number of color channels is preserved.
def update(self, get_running_apps=True): """Get the state of the device, the current app, and the running apps. :param get_running_apps: whether or not to get the ``running_apps`` property :return state: the state of the device :return current_app: the current app :return running_apps: the running apps """ # The `screen_on`, `awake`, `wake_lock_size`, `current_app`, and `running_apps` properties. screen_on, awake, wake_lock_size, _current_app, running_apps = self.get_properties(get_running_apps=get_running_apps, lazy=True) # Check if device is off. if not screen_on: state = STATE_OFF current_app = None running_apps = None # Check if screen saver is on. elif not awake: state = STATE_IDLE current_app = None running_apps = None else: # Get the current app. if isinstance(_current_app, dict) and 'package' in _current_app: current_app = _current_app['package'] else: current_app = None # Get the running apps. if running_apps is None and current_app: running_apps = [current_app] # Get the state. # TODO: determine the state differently based on the `current_app`. if current_app in [PACKAGE_LAUNCHER, PACKAGE_SETTINGS]: state = STATE_STANDBY # Amazon Video elif current_app == AMAZON_VIDEO: if wake_lock_size == 5: state = STATE_PLAYING else: # wake_lock_size == 2 state = STATE_PAUSED # Netflix elif current_app == NETFLIX: if wake_lock_size > 3: state = STATE_PLAYING else: state = STATE_PAUSED # Check if `wake_lock_size` is 1 (device is playing). elif wake_lock_size == 1: state = STATE_PLAYING # Otherwise, device is paused. else: state = STATE_PAUSED return state, current_app, running_apps
Get the state of the device, the current app, and the running apps. :param get_running_apps: whether or not to get the ``running_apps`` property :return state: the state of the device :return current_app: the current app :return running_apps: the running apps
def ignore(name):
    '''
    Ignore a specific program update. When an update is ignored the '-' and
    version number at the end will be omitted, so "SecUpd2014-001-1.0"
    becomes "SecUpd2014-001". It will be removed automatically if present.
    An update is successfully ignored when it no longer shows up after
    list_updates.

    :param name: The name of the update to add to the ignore list.
    :ptype: str

    :return: True if successful, False if not
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' softwareupdate.ignore <update-name>
    '''
    # Strip everything from the last '-' onward (the version suffix).
    base_name = name.rsplit('-', 1)[0]

    salt.utils.mac_utils.execute_return_success(
        ['softwareupdate', '--ignore', base_name])

    # Success means the trimmed name now appears in the ignore list.
    return base_name in list_ignored()
Ignore a specific program update. When an update is ignored the '-' and version number at the end will be omitted, so "SecUpd2014-001-1.0" becomes "SecUpd2014-001". It will be removed automatically if present. An update is successfully ignored when it no longer shows up after list_updates. :param name: The name of the update to add to the ignore list. :ptype: str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' softwareupdate.ignore <update-name>
def _prep_subsampled_bams(data, work_dir):
    """Prepare a subsampled BAM file with discordants from samblaster and minimal correct pairs.

    This attempts to minimize run times by pre-extracting useful reads mixed
    with subsampled normal pairs to estimate paired end distributions:

    https://groups.google.com/d/msg/delly-users/xmia4lwOd1Q/uaajoBkahAIJ

    Subsamples correctly aligned reads to 100 million based on speedseq
    defaults and evaluations on NA12878 whole genome data:

    https://github.com/cc2qe/speedseq/blob/ca624ba9affb0bd0fb88834ca896e9122639ec94/bin/speedseq#L1102

    XXX Currently not used as new versions of delly do not get good
    sensitivity with downsampled BAMs.
    """
    # Split-read and discordant-pair BAMs produced by samblaster.
    sr_bam, disc_bam = sshared.get_split_discordants(data, work_dir)
    # Downsample properly paired primary alignments to ~1e8 reads.
    ds_bam = bam.downsample(dd.get_align_bam(data), data, 1e8,
                            read_filter="-F 'not secondary_alignment and proper_pair'",
                            always_run=True, work_dir=work_dir)
    out_bam = "%s-final%s" % utils.splitext_plus(ds_bam)
    if not utils.file_exists(out_bam):
        # Combine subsampled pairs with the split/discordant reads.
        bam.merge([ds_bam, sr_bam, disc_bam], out_bam, data["config"])
    bam.index(out_bam, data["config"])
    return [out_bam]
Prepare a subsampled BAM file with discordants from samblaster and minimal correct pairs. This attempts to minimize run times by pre-extracting useful reads mixed with subsampled normal pairs to estimate paired end distributions: https://groups.google.com/d/msg/delly-users/xmia4lwOd1Q/uaajoBkahAIJ Subsamples correctly aligned reads to 100 million based on speedseq defaults and evaluations on NA12878 whole genome data: https://github.com/cc2qe/speedseq/blob/ca624ba9affb0bd0fb88834ca896e9122639ec94/bin/speedseq#L1102 XXX Currently not used as new versions of delly do not get good sensitivity with downsampled BAMs.
def bool_value(self):
    """Infer the truth value for an Instance

    The truth value of an instance is determined by these conditions:

       * if it implements __bool__ on Python 3 or __nonzero__
         on Python 2, then its bool value will be determined by
         calling this special method and checking its result.
       * when this method is not defined, __len__() is called, if it
         is defined, and the object is considered true if its result is
         nonzero. If a class defines neither __len__() nor __bool__(),
         all its instances are considered true.
    """
    context = contextmod.InferenceContext()
    # The special method takes no arguments and is bound to this instance.
    context.callcontext = contextmod.CallContext(args=[])
    context.boundnode = self

    try:
        result = _infer_method_result_truth(self, BOOL_SPECIAL_METHOD, context)
    except (exceptions.InferenceError, exceptions.AttributeInferenceError):
        # Fallback to __len__.
        try:
            result = _infer_method_result_truth(self, "__len__", context)
        except (exceptions.AttributeInferenceError, exceptions.InferenceError):
            # Neither __bool__ nor __len__ defined: instances are true.
            return True
    return result
Infer the truth value for an Instance The truth value of an instance is determined by these conditions: * if it implements __bool__ on Python 3 or __nonzero__ on Python 2, then its bool value will be determined by calling this special method and checking its result. * when this method is not defined, __len__() is called, if it is defined, and the object is considered true if its result is nonzero. If a class defines neither __len__() nor __bool__(), all its instances are considered true.
def add_op(state, op_func, *args, **kwargs):
    '''
    Prepare & add an operation to ``pyinfra.state`` by executing it on all hosts.

    Args:
        state (``pyinfra.api.State`` obj): the deploy state to add the
            operation to
        op_func (function): the operation function from one of the modules,
            ie ``server.user``
        args/kwargs: passed to the operation function
    '''
    # Capture the caller's frame once so every host sees the same origin.
    kwargs['frameinfo'] = get_caller_frameinfo()

    for host in state.inventory:
        op_func(state, host, *args, **kwargs)
Prepare & add an operation to ``pyinfra.state`` by executing it on all hosts. Args: state (``pyinfra.api.State`` obj): the deploy state to add the operation to op_func (function): the operation function from one of the modules, ie ``server.user`` args/kwargs: passed to the operation function
def netloc_no_www(url):
    """
    For a given URL return the netloc with any www. stripped.
    """
    parts = tldextract.extract(url)
    sub = parts.subdomain
    # Keep a real subdomain; drop 'www' or an empty subdomain entirely.
    if sub and sub != 'www':
        return '%s.%s.%s' % (sub, parts.domain, parts.tld)
    return '%s.%s' % (parts.domain, parts.tld)
For a given URL return the netloc with any www. stripped.
def recipients(cls, bigchain):
    """Convert validator dictionary to a recipient list for `Transaction`"""
    # Each validator becomes a ([public_key], voting_power) tuple.
    validators = cls.get_validators(bigchain)
    return [([pub_key], power) for pub_key, power in validators.items()]
Convert validator dictionary to a recipient list for `Transaction`
def read_uint(data, start, length):
    """Extract a uint from a position in a sequence."""
    # Slice out `length` bytes and interpret them big-endian.
    chunk = data[start:start + length]
    return int.from_bytes(chunk, byteorder='big')
Extract a uint from a position in a sequence.
def resizeEvent(self, event):
    """
    Moves the widgets around the system.

    :param      event | <QtGui.QResizeEvent>
    """
    super(XWalkthroughWidget, self).resizeEvent(event)

    # Only re-layout while visible; hidden widgets are laid out on show.
    if self.isVisible():
        self.autoLayout()
Moves the widgets around the system. :param event | <QtGui.QResizeEvent>
def remove_root_vault(self, vault_id): """Removes a root vault from this hierarchy. arg: vault_id (osid.id.Id): the ``Id`` of a vault raise: NotFound - ``vault_id`` not a parent of ``child_id`` raise: NullArgument - ``vault_id`` or ``child_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchyDesignSession.remove_root_bin_template if self._catalog_session is not None: return self._catalog_session.remove_root_catalog(catalog_id=vault_id) return self._hierarchy_session.remove_root(id_=vault_id)
Removes a root vault from this hierarchy. arg: vault_id (osid.id.Id): the ``Id`` of a vault raise: NotFound - ``vault_id`` not a parent of ``child_id`` raise: NullArgument - ``vault_id`` or ``child_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def init_remote(self):
    '''
    Initialize/attach to a remote using GitPython. Return a boolean
    which will let the calling function know whether or not a new repo
    was initialized by this function.
    '''
    new = False
    if not os.listdir(self.cachedir):
        # Repo cachedir is empty, initialize a new repo there
        self.repo = git.Repo.init(self.cachedir)
        new = True
    else:
        # Repo cachedir exists, try to attach
        try:
            self.repo = git.Repo(self.cachedir)
        except git.exc.InvalidGitRepositoryError:
            # Directory exists but is not a git repo; log and bail out.
            log.error(_INVALID_REPO, self.cachedir, self.url, self.role)
            return new

    self.gitdir = salt.utils.path.join(self.repo.working_dir, '.git')
    self.enforce_git_config()

    return new
Initialize/attach to a remote using GitPython. Return a boolean which will let the calling function know whether or not a new repo was initialized by this function.
def _get_horoscope(self, day='today'):
    """gets a horoscope from site html

    :param day: day for which to get horoscope. Default is 'today'
    :returns: dictionary of horoscope details
    """
    if not is_valid_day(day):
        raise HoroscopeException(
            "Invalid day. Allowed days: [today|yesterday|tomorrow]")
    horoscope = ''.join(
        [str(s).strip()
         for s in self.tree.xpath('//*[@id="%s"]/p/text()' % day)])
    # Fix: the original compared strings with `is`, which tests identity
    # and only works by accident of CPython string interning; a runtime
    # built string (e.g. read from user input) would never match. Use `==`.
    if day == 'yesterday':
        date = self.date_today - timedelta(days=1)
    elif day == 'today':
        date = self.date_today
    elif day == 'tomorrow':
        date = self.date_today + timedelta(days=1)
    return {
        'date': date.strftime("%Y-%m-%d"),
        'sunsign': self.sunsign.capitalize(),
        'horoscope': horoscope + "(c) Kelli Fox, The Astrologer, http://new.theastrologer.com",
        'meta': self._get_horoscope_meta(day),
        'credit': '(c) Kelli Fox, The Astrologer, http://new.theastrologer.com'
    }
gets a horoscope from site html :param day: day for which to get horoscope. Default is 'today' :returns: dictionary of horoscope details
def select_all(self, serial_numbers):
    """Select rows for identification for a list of serial_number.

    Args:
        serial_numbers: list (or ndarray) of serial numbers

    Returns:
        pandas.DataFrame
    """
    frame = self.table
    id_column = self.db_sheet_cols.id
    # Boolean mask of the rows whose id is in the requested set.
    mask = frame.loc[:, id_column].isin(serial_numbers)
    return frame.loc[mask, :]
Select rows for identification for a list of serial_number. Args: serial_numbers: list (or ndarray) of serial numbers Returns: pandas.DataFrame
def filter_batch(self, batch):
    """ Receives the batch, filters it, and returns it. """
    for item in batch:
        if self.filter(item):
            # Item passes the predicate: hand it downstream.
            yield item
        else:
            # Item rejected: bump the 'filtered_out' counter metadata.
            self.set_metadata('filtered_out',
                              self.get_metadata('filtered_out') + 1)
        # Every item, kept or not, counts toward the processed total.
        self.total += 1
        self._log_progress()
Receives the batch, filters it, and returns it.
def _set_transmitted_stp_type(self, v, load=False):
    """
    Setter method for transmitted_stp_type, mapped from YANG variable
    /brocade_xstp_ext_rpc/get_stp_mst_detail/output/cist/port/transmitted_stp_type (stp-type)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_transmitted_stp_type is considered as a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_transmitted_stp_type() directly.

    YANG Description: Transmitted (tx) stp type
    """
    # Allow wrapped values to provide their own conversion to the
    # underlying YANG type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the generated restricted enum type; any value
        # outside {none, stp, rstp, mstp, pvstp, rpvstp} is rejected.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'none': {'value': 1}, u'rstp': {'value': 3}, u'mstp': {'value': 4}, u'rpvstp': {'value': 6}, u'pvstp': {'value': 5}, u'stp': {'value': 2}},), is_leaf=True, yang_name="transmitted-stp-type", rest_name="transmitted-stp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-xstp-ext', defining_module='brocade-xstp-ext', yang_type='stp-type', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected YANG type.
        raise ValueError({
            'error-string': """transmitted_stp_type must be of a type compatible with stp-type""",
            'defined-type': "brocade-xstp-ext:stp-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'none': {'value': 1}, u'rstp': {'value': 3}, u'mstp': {'value': 4}, u'rpvstp': {'value': 6}, u'pvstp': {'value': 5}, u'stp': {'value': 2}},), is_leaf=True, yang_name="transmitted-stp-type", rest_name="transmitted-stp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-xstp-ext', defining_module='brocade-xstp-ext', yang_type='stp-type', is_config=True)""",
        })

    self.__transmitted_stp_type = t
    # Notify the parent container that a child value changed, if supported.
    if hasattr(self, '_set'):
        self._set()
Setter method for transmitted_stp_type, mapped from YANG variable /brocade_xstp_ext_rpc/get_stp_mst_detail/output/cist/port/transmitted_stp_type (stp-type) If this variable is read-only (config: false) in the source YANG file, then _set_transmitted_stp_type is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_transmitted_stp_type() directly. YANG Description: Transmitted (tx) stp type
def rlmb_ppo_quick():
    """Base setting but quicker with only 2 epochs."""
    # Start from the full PPO base config and shrink the schedule.
    hp = rlmb_ppo_base()
    hp.epochs = 2
    hp.model_train_steps = 25000
    hp.ppo_epochs_num = 700
    hp.ppo_epoch_length = 50
    return hp
Base setting but quicker with only 2 epochs.
def expect(self, use_proportions=True):
    """ The Expectation step of the CEM algorithm """
    # Find assignments that moved since the previous iteration, compute
    # their likelihood table, then refresh the membership probabilities.
    moved = self.get_changed(self.partition, self.prev_partition)
    lk_table = self.generate_lktable(self.partition, moved, use_proportions)
    self.table = self.likelihood_table_to_probs(lk_table)
The Expectation step of the CEM algorithm
def get_identities(self, item):
    ''' Return the identities from an item '''
    data = item['data']

    # Creators: the first event host, when hosts are present.
    if 'event_hosts' in data:
        yield self.get_sh_identity(data['event_hosts'][0])

    # rsvps
    for rsvp in data.get('rsvps', []):
        yield self.get_sh_identity(rsvp['member'])

    # Comments
    for comment in data['comments']:
        yield self.get_sh_identity(comment['member'])
Return the identities from an item
def Chisholm_Armand(x, rhol, rhog):
    r'''Calculates void fraction in two-phase flow according to the model
    presented in [1]_ based on that of [2]_ as shown in [3]_, [4]_, and [5]_.

    .. math::
        \alpha = \frac{\alpha_h}{\alpha_h + (1-\alpha_h)^{0.5}}

    Parameters
    ----------
    x : float
        Quality at the specific tube interval []
    rhol : float
        Density of the liquid [kg/m^3]
    rhog : float
        Density of the gas [kg/m^3]

    Returns
    -------
    alpha : float
        Void fraction (area of gas / total area of channel), [-]

    Notes
    -----

    Examples
    --------
    >>> Chisholm_Armand(.4, 800, 2.5)
    0.9357814394262114

    References
    ----------
    .. [1] Chisholm, Duncan. Two-Phase Flow in Pipelines and Heat Exchangers.
       Institution of Chemical Engineers, 1983.
    .. [2] Armand, Aleksandr Aleksandrovich. The Resistance During the
       Movement of a Two-Phase System in Horizontal Pipes. Atomic Energy
       Research Establishment, 1959.
    .. [3] Xu, Yu, and Xiande Fang. "Correlations of Void Fraction for
       Two-Phase Refrigerant Flow in Pipes." Applied Thermal Engineering 64,
       no. 1-2 (March 2014): 242-51. doi:10.1016/j.applthermaleng.2013.12.032.
    .. [4] Dalkilic, A. S., S. Laohalertdecha, and S. Wongwises. "Effect of
       Void Fraction Models on the Two-Phase Friction Factor of R134a during
       Condensation in Vertical Downward Flow in a Smooth Tube."
       International Communications in Heat and Mass Transfer 35, no. 8
       (October 2008): 921-27. doi:10.1016/j.icheatmasstransfer.2008.04.001.
    .. [5] Woldesemayat, Melkamu A., and Afshin J. Ghajar. "Comparison of
       Void Fraction Correlations for Different Flow Patterns in Horizontal
       and Upward Inclined Pipes." International Journal of Multiphase Flow
       33, no. 4 (April 2007): 347-370.
       doi:10.1016/j.ijmultiphaseflow.2006.09.004.
    '''
    # Homogeneous void fraction, re-weighted per the Armand-type relation.
    alpha_homogeneous = homogeneous(x, rhol, rhog)
    denominator = alpha_homogeneous + (1.0 - alpha_homogeneous)**0.5
    return alpha_homogeneous/denominator
r'''Calculates void fraction in two-phase flow according to the model presented in [1]_ based on that of [2]_ as shown in [3]_, [4]_, and [5]_. .. math:: \alpha = \frac{\alpha_h}{\alpha_h + (1-\alpha_h)^{0.5}} Parameters ---------- x : float Quality at the specific tube interval [] rhol : float Density of the liquid [kg/m^3] rhog : float Density of the gas [kg/m^3] Returns ------- alpha : float Void fraction (area of gas / total area of channel), [-] Notes ----- Examples -------- >>> Chisholm_Armand(.4, 800, 2.5) 0.9357814394262114 References ---------- .. [1] Chisholm, Duncan. Two-Phase Flow in Pipelines and Heat Exchangers. Institution of Chemical Engineers, 1983. .. [2] Armand, Aleksandr Aleksandrovich. The Resistance During the Movement of a Two-Phase System in Horizontal Pipes. Atomic Energy Research Establishment, 1959. .. [3] Xu, Yu, and Xiande Fang. "Correlations of Void Fraction for Two- Phase Refrigerant Flow in Pipes." Applied Thermal Engineering 64, no. 1-2 (March 2014): 242–51. doi:10.1016/j.applthermaleng.2013.12.032. .. [4] Dalkilic, A. S., S. Laohalertdecha, and S. Wongwises. "Effect of Void Fraction Models on the Two-Phase Friction Factor of R134a during Condensation in Vertical Downward Flow in a Smooth Tube." International Communications in Heat and Mass Transfer 35, no. 8 (October 2008): 921-27. doi:10.1016/j.icheatmasstransfer.2008.04.001. .. [5] Woldesemayat, Melkamu A., and Afshin J. Ghajar. "Comparison of Void Fraction Correlations for Different Flow Patterns in Horizontal and Upward Inclined Pipes." International Journal of Multiphase Flow 33, no. 4 (April 2007): 347-370. doi:10.1016/j.ijmultiphaseflow.2006.09.004.
def analysis(self):
    """Get ANALYSIS segment of the FCS file."""
    # Parsed lazily on first access; read_analysis caches into _analysis.
    if self._analysis is None:
        with open(self.path, 'rb') as f:
            self.read_analysis(f)
    return self._analysis
Get ANALYSIS segment of the FCS file.
def gross_lev(positions):
    """
    Calculates the gross leverage of a strategy.

    Parameters
    ----------
    positions : pd.DataFrame
        Daily net position values.
         - See full explanation in tears.create_full_tear_sheet.

    Returns
    -------
    pd.Series
        Gross leverage.
    """
    # Gross exposure: total absolute value of all non-cash positions.
    gross_exposure = positions.drop('cash', axis=1).abs().sum(axis=1)
    # Divide by portfolio value (all positions including cash).
    portfolio_value = positions.sum(axis=1)
    return gross_exposure / portfolio_value
Calculates the gross leverage of a strategy. Parameters ---------- positions : pd.DataFrame Daily net position values. - See full explanation in tears.create_full_tear_sheet. Returns ------- pd.Series Gross leverage.
def _get_connection(self, uri, headers=None):
    """Opens a socket connection to the server to set up an HTTP request.

    Args:
      uri: The full URL for the request as a Uri object.
      headers: A dict of string pairs containing the HTTP headers for the
          request. (Accepted for interface compatibility; not used when
          building the connection object.)
    """
    # Pick the connection class by scheme, then apply the optional port.
    if uri.scheme == 'https':
        conn_class = httplib.HTTPSConnection
    else:
        conn_class = httplib.HTTPConnection
    if not uri.port:
        return conn_class(uri.host)
    return conn_class(uri.host, int(uri.port))
Opens a socket connection to the server to set up an HTTP request. Args: uri: The full URL for the request as a Uri object. headers: A dict of string pairs containing the HTTP headers for the request.
def zyz_circuit(t0: float, t1: float, t2: float, q0: Qubit) -> Circuit:
    """Circuit equivalent of 1-qubit ZYZ gate"""
    # Apply Z(t0), then Y(t1), then Z(t2) on the same qubit.
    circ = Circuit()
    for gate in (TZ(t0, q0), TY(t1, q0), TZ(t2, q0)):
        circ += gate
    return circ
Circuit equivalent of 1-qubit ZYZ gate
def set_hr_widths(result): """ We want the hrs indented by hirarchy... A bit 2 much effort to calc, maybe just fixed with 10 style seps would have been enough visually: β—ˆβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β—ˆ """ # set all hrs to max width of text: mw = 0 hrs = [] if not hr_marker in result: return result for line in result.splitlines(): if hr_marker in line: hrs.append(line) continue if len(line) < mw: continue l = len(clean_ansi(line)) if l > mw: mw = l for hr in hrs: # pos of hr marker is indent, derives full width: # (more indent = less '-'): hcl = clean_ansi(hr) ind = len(hcl) - len(hcl.split(hr_marker, 1)[1]) - 1 w = min(term_columns, mw) - 2 * ind hrf = hr.replace(hr_marker, hr_sep * w) result = result.replace(hr, hrf) return result
We want the hrs indented by hierarchy... A bit too much effort to calc, maybe just fixed with 10 style seps would have been enough visually: β—ˆβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β—ˆ
def _timestamp(when):
    """ Python 2 compatibility for `datetime.timestamp()`. """
    if sys.version_info < (3,):
        # Python 2: datetime has no .timestamp(); go via time.mktime.
        return time.mktime(when.timetuple())
    return when.timestamp()
Python 2 compatibility for `datetime.timestamp()`.
def convert(self, value, *args, **kwargs):  # pylint: disable=arguments-differ
    """Take a path with $HOME variables and resolve it to full path."""
    # Expand a leading '~' or '~user' before delegating to the parent
    # converter.
    value = os.path.expanduser(value)
    return super(ExpandPath, self).convert(value, *args, **kwargs)
Take a path with $HOME variables and resolve it to full path.
def description(self):
    """string or None if unknown"""
    try:
        name = self._TYPE_NAMES[self.audioObjectType]
    except IndexError:
        # Object type outside the known table.
        name = None
    if name is None:
        return
    # Append extension flags when present.
    if self.sbrPresentFlag == 1:
        name += "+SBR"
    if self.psPresentFlag == 1:
        name += "+PS"
    return text_type(name)
string or None if unknown
def valid_input(val): """ Ensure the input the user gave is of a valid format """ # looks for 3 nums followed by a dot 3 times and then ending with # 3 nums, can be proceeded by any number of spaces ip_value = re.compile(r'(\d{1,3}\.){3}\d{1,3}$') # looks for only numbers and commas (because priorities can have commas # between them), can be proceeded by any number of spaces all_num = re.compile(r'(\d,?\ *)+$') sections_comments = re.compile(r""" \ *\#.* # comments (any number of whitespace, then # # followed by anything) | \[[\w-]+\]$ # section headers (any combination of chars, nums, # underscores, and dashes between brackets) """, re.VERBOSE) # can't can be a comment on option side and value side can't have # [, ], {, or } otherwise it is turned over to literal_eval for # checkout options_values = re.compile(r'[^# ]+\ *=[^[\]{}]*$') line_num = 0 warning_str = '' error_str = '' trimmed_val = [] for entry in val.split('\n'): line_num += 1 # get rid of any extraneous commas at the end of a dict and remove # extra whitespace from input trimmed_val.append(re.sub(r',\ *}', '}', entry).strip()) # empty line if entry.strip() == '': continue # look at regular (non dictionary or list) option-value pairs if options_values.match(entry): value = entry.split('=', 1)[1] # deal with potentially more equals signs for val in value.split('='): val = val.strip() # empty val means malformed equals signs if val == '': error_str += '-You have a misplaced equals sign on' \ ' line ' + str(line_num) + '\n' # starts with a num; look for bad ip input or warn user # about having extraneous characters in number input if re.match('\ *\d', val): # bad ip syntax if val.find('.') >= 0 and not ip_value.match(val): error_str += '-You have an incorrectly' \ ' formatted ip address (bad syntax) at' \ ' line ' + str(line_num) + '\n' # possibly malformed numbers elif val.find('.') < 0 and not all_num.match(val): warning_str += '-Line starting with a number has' \ ' characters mixed in at line ' + \ 
str(line_num) + '\n' # bad ip values elif val.find('.') >= 0: for num in val.strip().split('.'): num = int(num) if num > 255 or num < 0: error_str += '-You have an incorrectly' \ ' formatted ip address (values' \ ' exceeding 255 or below 0) at' \ ' line ' + str(line_num) + '\n' # ensure no lines end with a comma (most likely extraneous # commas from groups or priorities) if re.search(',$', val): error_str += '-You have an incorrect comma at the' \ ' end of line ' + str(line_num) + '\n' # see if input is a header or comment, otherwise try to # literal_eval it to ensure correct structure elif not sections_comments.match(entry): lit_val = '' try: opt_val = entry.split('=', 1) if opt_val[0].strip() == '': error_str += '-You have nothing preceeding an' \ ' equals sign at line ' + str(line_num) + '\n' else: lit_val = opt_val[1].strip() except IndexError: lit_val = '' error_str += '-You have an incorrectly formatted' \ ' section header at line ' + str(line_num) + '\n' if lit_val: try: ast.literal_eval(lit_val) except SyntaxError: error_str += '-You have an incorrectly formatted' \ ' list/dictionary at line ' + str(line_num) + \ '\n' if error_str: npyscreen.notify_confirm('You have the following error(s) and' " can't proceed until they are fixed:" + '\n' + '-'*50 + '\n' + error_str, title='Error in input') return (False, '') elif warning_str: res = npyscreen.notify_yes_no('You have may have some error(s)' ' that you want to check before' ' proceeding:' + '\n' + '-'*50 + '\n' + warning_str + '\n' + '-'*50 + '\n' + 'Do you want to continue?', title='Double check') return (res, '\n'.join(trimmed_val)) return (True, '\n'.join(trimmed_val))
Ensure the input the user gave is of a valid format
def remove_allocated_node_name(self, name):
    """
    Removes an allocated node name

    :param name: allocated node name
    """
    names = self._allocated_node_names
    # Nothing to do when the name was never allocated.
    if name not in names:
        return
    names.remove(name)
Removes an allocated node name :param name: allocated node name
def retrieve(self, request, *args, **kwargs):
    """
    User fields can be updated by account owner or user with staff privilege (is_staff=True).
    Following user fields can be updated:

    - organization (deprecated, use `organization plugin <http://waldur_core-organization.readthedocs.org/en/stable/>`_ instead)
    - full_name
    - native_name
    - job_title
    - phone_number
    - email

    Can be done by **PUT**ing a new data to the user URI, i.e. */api/users/<UUID>/*
    by staff user or account owner.
    Valid request example (token is user specific):

    .. code-block:: http

        PUT /api/users/e0c058d06864441fb4f1c40dee5dd4fd/ HTTP/1.1
        Content-Type: application/json
        Accept: application/json
        Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4
        Host: example.com

        {
            "email": "example@example.com",
            "organization": "Bells organization",
        }
    """
    # Delegates entirely to the parent viewset; the docstring above exists
    # to be rendered by the browsable API documentation.
    return super(UserViewSet, self).retrieve(request, *args, **kwargs)
User fields can be updated by account owner or user with staff privilege (is_staff=True). Following user fields can be updated: - organization (deprecated, use `organization plugin <http://waldur_core-organization.readthedocs.org/en/stable/>`_ instead) - full_name - native_name - job_title - phone_number - email Can be done by **PUT**ing a new data to the user URI, i.e. */api/users/<UUID>/* by staff user or account owner. Valid request example (token is user specific): .. code-block:: http PUT /api/users/e0c058d06864441fb4f1c40dee5dd4fd/ HTTP/1.1 Content-Type: application/json Accept: application/json Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4 Host: example.com { "email": "example@example.com", "organization": "Bells organization", }
def check_qt():
    """Check Qt binding requirements and warn (without failing) on mismatch."""
    # Minimum supported binding versions, keyed by qtpy API name.
    qt_infos = dict(pyqt5=("PyQt5", "5.6"))
    try:
        import qtpy
        # NOTE(review): assumes qtpy.API is 'pyqt5'; any other binding would
        # raise KeyError here -- confirm the set of supported bindings.
        package_name, required_ver = qt_infos[qtpy.API]
        actual_ver = qtpy.PYQT_VERSION
        if LooseVersion(actual_ver) < LooseVersion(required_ver):
            show_warning("Please check Spyder installation requirements:\n"
                         "%s %s+ is required (found v%s)."
                         % (package_name, required_ver, actual_ver))
    except ImportError:
        # qtpy itself is missing: report the full requirement set.
        show_warning("Failed to import qtpy.\n"
                     "Please check Spyder installation requirements:\n\n"
                     "qtpy 1.2.0+ and\n"
                     "%s %s+\n\n"
                     "are required to run Spyder."
                     % (qt_infos['pyqt5']))
Check Qt binding requirements
def sign_extend(self, new_length):
    """
    Unary operation: SignExtend

    :param new_length: New length after sign-extension
    :return: A new StridedInterval
    """
    # eval(2) returns up to two candidate values for the MSB; a single-value
    # list means the sign bit is the same for the whole interval.
    msb = self.extract(self.bits - 1, self.bits - 1).eval(2)
    if msb == [ 0 ]:
        # All positive numbers: sign-extension degenerates to zero-extension.
        return self.zero_extend(new_length)
    if msb == [ 1 ]:
        # All negative numbers: widen and set all the newly-added high bits
        # on both bounds (mask covers bits [self.bits, new_length)).
        si = self.copy()
        si._bits = new_length
        mask = (2 ** new_length - 1) - (2 ** self.bits - 1)
        si._lower_bound |= mask
        si._upper_bound |= mask
    else:
        # Both positive numbers and negative numbers: split into
        # sign-homogeneous sub-intervals, extend each, and re-join.
        numbers = self._nsplit()
        # Since there are both positive and negative numbers, there must be two bounds after nsplit
        # assert len(numbers) == 2
        all_resulting_intervals = list()
        assert len(numbers) > 0
        for n in numbers:
            a, b = n.lower_bound, n.upper_bound
            mask_a = 0
            mask_b = 0
            # Mask for the newly-added high bits of this sub-interval.
            mask_n = ((1 << (new_length - n.bits)) - 1) << n.bits
            if StridedInterval._get_msb(a, n.bits) == 1:
                mask_a = mask_n
            if StridedInterval._get_msb(b, n.bits) == 1:
                mask_b = mask_n
            si_ = StridedInterval(bits=new_length, stride=n.stride,
                                  lower_bound=a | mask_a,
                                  upper_bound=b | mask_b)
            all_resulting_intervals.append(si_)
        si = StridedInterval.least_upper_bound(*all_resulting_intervals).normalize()
    # Propagate the uninitialized flag to the widened interval.
    si.uninitialized = self.uninitialized
    return si
Unary operation: SignExtend :param new_length: New length after sign-extension :return: A new StridedInterval
def keep_folder(raw_path):
    """
    Keep only folders that don't contain patterns in `DIR_EXCLUDE_PATTERNS`.

    :param raw_path: folder path to test
    :return: False as soon as any exclusion pattern matches, True otherwise
    """
    for pattern in DIR_EXCLUDE_PATTERNS:
        if pattern in raw_path:
            # Bug fix: logging arguments must be consumed by a %-style
            # placeholder; the old call `LOGGER.debug('rejecting', raw_path)`
            # passed raw_path as an unused format argument, which the logging
            # module reports as a formatting error instead of logging it.
            LOGGER.debug('rejecting %s', raw_path)
            # No need to keep scanning once one pattern rejected the path.
            return False
    return True
Keep only folders that don't contain patterns in `DIR_EXCLUDE_PATTERNS`.
def eval_permission(self, token, resource, scope, submit_request=False):
    """
    Evaluate whether the token's user has *scope* permission on *resource*.

    Thin convenience wrapper around :meth:`eval_permissions` for a single
    (resource, scope) pair.

    :param str token: client access token
    :param str resource: resource to access
    :param str scope: scope on resource
    :param boolean submit_request: submit request if not allowed to access?
    rtype: boolean
    """
    single_pair = [(resource, scope)]
    return self.eval_permissions(
        token=token,
        resource_scopes_tuples=single_pair,
        submit_request=submit_request,
    )
Evaluates if user has permission for scope on resource. :param str token: client access token :param str resource: resource to access :param str scope: scope on resource :param boolean submit_request: submit request if not allowed to access? :rtype: boolean
def paths(self):
    """Iterates through all files in the set.

    :return: an iterator over file paths; for directory formats this walks
        the fileset directory recursively, otherwise it yields the primary
        path followed by any auxiliary file paths.
    :raises ArcanaFileFormatError: if the fileset's format has not been set
    """
    if self.format is None:
        raise ArcanaFileFormatError(
            "Cannot get paths of fileset ({}) that hasn't had its format "
            "set".format(self))
    if self.format.directory:
        # Flatten os.walk's (root, dirs, files) triples into full file paths.
        return chain(*((op.join(root, f) for f in files)
                       for root, _, files in os.walk(self.path)))
    else:
        return chain([self.path], self.aux_files.values())
Iterates through all files in the set
def log(self, message, severity=INFO, tag=u""):
    """
    Add a given message to the log, and return its time.

    :param string message: the message to be added
    :param severity: the severity of the message
    :type  severity: :class:`~aeneas.logger.Logger`
    :param string tag: the tag associated with the message;
                       usually, the name of the class generating the entry
    :rtype: datetime
    """
    entry = _LogEntry(
        severity=severity,
        time=datetime.datetime.now(),
        tag=tag,
        indentation=self.indentation,
        message=self._sanitize(message)
    )
    self.entries.append(entry)
    # When tee-ing, echo the entry to stdout as it is logged.
    if self.tee:
        gf.safe_print(entry.pretty_print(show_datetime=self.tee_show_datetime))
    return entry.time
Add a given message to the log, and return its time. :param string message: the message to be added :param severity: the severity of the message :type severity: :class:`~aeneas.logger.Logger` :param string tag: the tag associated with the message; usually, the name of the class generating the entry :rtype: datetime
def parse(args):
    """\
    Parses the arguments and returns the result.

    :param args: sequence of command-line argument strings; an empty
        sequence prints the help text and exits with status 1.
    :return: parsed arguments wrapped in an ``_AttrDict``
    """
    parser = make_parser()
    if not len(args):
        parser.print_help()
        sys.exit(1)
    parsed_args = parser.parse_args(args)
    # '-' means "errors to stdout"; normalize to None for downstream code.
    if parsed_args.error == '-':
        parsed_args.error = None
    # 'micro' is False by default. If version is set to a Micro QR Code version,
    # encoder.encode raises a VersionError.
    # Small problem: --version=M4 --no-micro is allowed
    version = parsed_args.version
    if version is not None:
        version = str(version).upper()
    if not parsed_args.micro and version in ('M1', 'M2', 'M3', 'M4'):
        parsed_args.micro = None
    return _AttrDict(vars(parsed_args))
\ Parses the arguments and returns the result.
def filter_queryset(self, request, queryset, view):
    """
    Filter out any artifacts which the requesting user does not
    have permission to view; superusers see the full queryset.
    """
    user = request.user
    if not user.is_superuser:
        queryset = queryset.filter(status__user=user)
    return queryset
Filter out any artifacts which the requesting user does not have permission to view.
def remove_stale_javascripts(portal):
    """Removes stale javascripts.

    Unregisters every resource listed in ``JAVASCRIPTS_TO_REMOVE`` from the
    portal's JavaScript registry.

    :param portal: the Plone portal object providing ``portal_javascripts``
    """
    logger.info("Removing stale javascripts ...")
    for js in JAVASCRIPTS_TO_REMOVE:
        logger.info("Unregistering JS %s" % js)
        portal.portal_javascripts.unregisterResource(js)
Removes stale javascripts
def visit_NameConstant(self, node: AST, dfltChaining: bool = True) -> str:
    """Render the node's constant value (True / False / None) as source text."""
    value = node.value
    return str(value)
Return `node`s name as string.
def plot_magnitude_time_scatter(
        catalogue, plot_error=False, fmt_string='o', filename=None,
        figure_size=(8, 6), filetype='png', dpi=300, ax=None):
    """
    Creates a simple scatter plot of magnitude with time

    :param catalogue:
        Earthquake catalogue as instance of :class:
        openquake.hmtk.seismicity.catalogue.Catalogue
    :param bool plot_error:
        Choose to plot error bars (True) or not (False)
    :param str fmt_string:
        Symbology of plot
    :param filename:
        Optional path to save the figure to (passed to ``_save_image``)
    :param tuple figure_size:
        Figure size in inches, used only when a new figure is created
    :param str filetype:
        Image format for saving (e.g. 'png')
    :param int dpi:
        Resolution for the saved image
    :param ax:
        Optional existing matplotlib axes to draw into; a new figure is
        created when None
    """
    if ax is None:
        fig, ax = plt.subplots(figsize=figure_size)
    else:
        fig = ax.get_figure()

    dtime = catalogue.get_decimal_time()
    # pylint: disable=len-as-condition
    if len(catalogue.data['sigmaMagnitude']) == 0:
        # Cannot draw error bars without magnitude uncertainties.
        print('Magnitude Error is missing - neglecting error bars!')
        plot_error = False

    if plot_error:
        ax.errorbar(dtime,
                    catalogue.data['magnitude'],
                    xerr=None,
                    yerr=catalogue.data['sigmaMagnitude'],
                    fmt=fmt_string)
    else:
        ax.plot(dtime, catalogue.data['magnitude'], fmt_string)
    ax.set_xlabel('Year')
    ax.set_ylabel('Magnitude')
    ax.set_title('Magnitude-Time Plot')
    _save_image(fig, filename, filetype, dpi)
Creates a simple scatter plot of magnitude with time :param catalogue: Earthquake catalogue as instance of :class: openquake.hmtk.seismicity.catalogue.Catalogue :param bool plot_error: Choose to plot error bars (True) or not (False) :param str fmt_string: Symbology of plot
def createDbusProxyObject(bus_name, object_path, bus=None):
    '''
    Create dbus proxy object.

    :param bus_name: D-Bus name owning the remote object
    :param object_path: object path of the remote object on that name
    :param bus: optional bus connection; defaults to the session bus
    :return: the proxy object returned by ``bus.get_object``
    '''
    bus = bus or dbus.SessionBus.get_session()
    return bus.get_object(bus_name, object_path)
Create dbus proxy object
def data(self, index, role):
    """Use a zipped ``icon`` file inside a .pyz archive as the item's icon.

    Falls back to the base model's data for every other column/role, or
    when the archive does not contain an ``icon`` member.
    """
    if index.column() == 0 and role == QtCore.Qt.DecorationRole:
        if self.isPyz(index):
            with ZipFile(str(self.filePath(index)), 'r') as myzip:
                try:
                    # Extract the 'icon' member to the working temp dir and
                    # build a QIcon from it.
                    myzip.extract('icon', self._tmp_dir_work)
                    p = os.path.join(self._tmp_dir_work, 'icon')
                    return QtGui.QIcon(p)
                except KeyError:
                    # Archive has no 'icon' member: use the default icon.
                    pass
    return super(_FileSystemModel, self).data(index, role)
use zipped icon.png as icon
def std_ratio(sim=None, obs=None, node=None, skip_nan=False):
    """Calculate the ratio between the standard deviation of the simulated
    and the observed values.

    >>> from hydpy import round_
    >>> from hydpy import std_ratio
    >>> round_(std_ratio(sim=[1.0, 2.0, 3.0], obs=[1.0, 2.0, 3.0]))
    0.0
    >>> round_(std_ratio(sim=[1.0, 1.0, 1.0], obs=[1.0, 2.0, 3.0]))
    -1.0
    >>> round_(std_ratio(sim=[0.0, 3.0, 6.0], obs=[1.0, 2.0, 3.0]))
    2.0

    See the documentation on function |prepare_arrays| for some
    additional instructions for use of function |std_ratio|.
    """
    sim, obs = prepare_arrays(sim, obs, node, skip_nan)
    # Note: the result is std(sim)/std(obs) minus 1, so 0.0 means the
    # simulated and observed series have identical spread.
    return numpy.std(sim)/numpy.std(obs)-1.
Calculate the ratio between the standard deviation of the simulated and the observed values. >>> from hydpy import round_ >>> from hydpy import std_ratio >>> round_(std_ratio(sim=[1.0, 2.0, 3.0], obs=[1.0, 2.0, 3.0])) 0.0 >>> round_(std_ratio(sim=[1.0, 1.0, 1.0], obs=[1.0, 2.0, 3.0])) -1.0 >>> round_(std_ratio(sim=[0.0, 3.0, 6.0], obs=[1.0, 2.0, 3.0])) 2.0 See the documentation on function |prepare_arrays| for some additional instructions for use of function |std_ratio|.
def remove_file(path, conn=None):
    '''
    Remove a single file from the file system.

    :param path: path of the package file to delete
    :param conn: optional pre-initialized connection; a new one is created
        when None.
    '''
    if conn is None:
        # NOTE(review): conn is initialized but never used below --
        # presumably kept for API symmetry with sibling functions; confirm.
        conn = init()

    log.debug('Removing package file %s', path)
    os.remove(path)
Remove a single file from the file system
def zero_extend(self, duration_s=None, num_samples=None):
    """
    Adds a number of zeros (digital silence) to the AudioSegment (returning a new one).

    :param duration_s: The number of seconds of zeros to add. If this is specified, `num_samples` must be None.
    :param num_samples: The number of zeros to add. If this is specified, `duration_s` must be None.
    :returns: A new AudioSegment object that has been zero extended.
    :raises: ValueError if `duration_s` and `num_samples` are both specified,
             or if neither is specified.
    """
    if duration_s is not None and num_samples is not None:
        raise ValueError("`duration_s` and `num_samples` cannot both be specified.")
    if duration_s is None and num_samples is None:
        # Bug fix: previously this fell through and crashed with an opaque
        # TypeError (None / frame_rate); raise a clear error instead.
        raise ValueError("Either `duration_s` or `num_samples` must be specified.")
    if duration_s is not None:
        num_samples = self.frame_rate * duration_s
    seg = AudioSegment(self.seg, self.name)
    # NOTE(review): pydub's silent() interprets `duration` in milliseconds;
    # num_samples / frame_rate yields seconds -- confirm intended units.
    zeros = silent(duration=num_samples / self.frame_rate, frame_rate=self.frame_rate)
    return zeros.overlay(seg)
Adds a number of zeros (digital silence) to the AudioSegment (returning a new one). :param duration_s: The number of seconds of zeros to add. If this is specified, `num_samples` must be None. :param num_samples: The number of zeros to add. If this is specified, `duration_s` must be None. :returns: A new AudioSegment object that has been zero extended. :raises: ValueError if duration_s and num_samples are both specified.
def _parse(data: str) -> list:
    """
    Parse *data* and return the list of rule objects it defines.

    Bytes input is decoded as UTF-8 first; blank lines and ``#`` comment
    lines are ignored.
    """
    if isinstance(data, bytes):
        data = data.decode('utf-8')

    stripped = (raw.strip() for raw in data.split('\n'))
    meaningful = (line for line in stripped
                  if len(line) and not line.startswith('#'))

    return [Rule.parse(line) for line in meaningful]
Parses the given data string and returns a list of rule objects.
def log_verbose(self, message):
    """
    Logs a message only when logging level is verbose.

    :param str|list[str] message: The message.
    """
    # Suppress output entirely below the verbose threshold.
    if self.get_verbosity() >= Output.VERBOSITY_VERBOSE:
        self.writeln(message)
Logs a message only when logging level is verbose. :param str|list[str] message: The message.
def update_conversation(self, conversation): """Update the internal state of the conversation. This method is used by :class:`.ConversationList` to maintain this instance. Args: conversation: ``Conversation`` message. """ # StateUpdate.conversation is actually a delta; fields that aren't # specified are assumed to be unchanged. Until this class is # refactored, hide this by saving and restoring previous values where # necessary. new_state = conversation.self_conversation_state old_state = self._conversation.self_conversation_state self._conversation = conversation # delivery_medium_option if not new_state.delivery_medium_option: new_state.delivery_medium_option.extend( old_state.delivery_medium_option ) # latest_read_timestamp old_timestamp = old_state.self_read_state.latest_read_timestamp new_timestamp = new_state.self_read_state.latest_read_timestamp if new_timestamp == 0: new_state.self_read_state.latest_read_timestamp = old_timestamp # user_read_state(s) for new_entry in conversation.read_state: tstamp = parsers.from_timestamp(new_entry.latest_read_timestamp) if tstamp == 0: continue uid = parsers.from_participantid(new_entry.participant_id) if uid not in self._watermarks or self._watermarks[uid] < tstamp: self._watermarks[uid] = tstamp
Update the internal state of the conversation. This method is used by :class:`.ConversationList` to maintain this instance. Args: conversation: ``Conversation`` message.
def load_stylesheet(self, id, path):
    """
    Proper way to dynamically inject a stylesheet in a page.

    :param id: Identifier assigned to the injected ``<link>`` element.
    :param path: Path of the stylesheet to inject.
    """
    self.add_child(HeadLink(id=id, link_type="stylesheet", path=path))
Proper way to dynamically inject a stylesheet in a page. :param path: Path of the stylesheet to inject.
def send_file_external(self, url_json, chunk):
    """
    Send chunk to external store specified in url_json.
    Raises ValueError on upload failure.

    :param url_json: dict contains where/how to upload chunk
        (keys: http_verb, host, url, http_headers)
    :param chunk: data to be uploaded
    """
    http_verb = url_json['http_verb']
    host = url_json['host']
    url = url_json['url']
    http_headers = url_json['http_headers']
    resp = self._send_file_external_with_retry(http_verb, host, url, http_headers, chunk)
    # Both 200 (OK) and 201 (Created) count as a successful upload.
    if resp.status_code != 200 and resp.status_code != 201:
        raise ValueError("Failed to send file to external store. Error:" + str(resp.status_code) + host + url)
Send chunk to external store specified in url_json. Raises ValueError on upload failure. :param data_service: data service to use for sending chunk :param url_json: dict contains where/how to upload chunk :param chunk: data to be uploaded
def normalize(l):
    """
    Rescale the input list so its elements sum to 1.

    Parameters
    ----------
    l: list
        The list to be normalized

    Returns
    -------
    The normalized list

    Raises
    ------
    ValueError, if the list sums to zero
    """
    total = float(sum(l))
    if total == 0:
        raise ValueError("Cannot normalize list with sum 0")
    return [value / total for value in l]
Normalizes input list. Parameters ---------- l: list The list to be normalized Returns ------- The normalized list or numpy array Raises ------ ValueError, if the list sums to zero
def load_raw(args):
    """
    Read the actual file *as is* without parsing/modifying it so that
    it can be written maintaining its same properties.

    :param args: Used to infer the configuration path: ``args.ceph_conf``
        when set, otherwise ``{cluster}.conf`` built from ``args.cluster``.
    :raises exc.ConfigError: when the configuration file cannot be read
    """
    path = args.ceph_conf or '{cluster}.conf'.format(cluster=args.cluster)
    try:
        with open(path) as ceph_conf:
            return ceph_conf.read()
    except (IOError, OSError) as e:
        raise exc.ConfigError(
            "%s; has `ceph-deploy new` been run in this directory?" % e
        )
Read the actual file *as is* without parsing/modifying it so that it can be written maintaining its same properties. :param args: Will be used to infer the proper configuration name :param path: alternatively, use a path for any configuration file loading
def _maybe_download_corpus(tmp_dir, vocab_type):
    """Download and unpack the corpus.

    Args:
      tmp_dir: directory containing dataset.
      vocab_type: which vocabulary are we using.

    Returns:
      The list of names of files.
    """
    filename = os.path.basename(PTB_URL)
    compressed_filepath = generator_utils.maybe_download(
        tmp_dir, filename, PTB_URL)
    ptb_files = []
    ptb_char_files = []

    with tarfile.open(compressed_filepath, "r:gz") as tgz:
        files = []
        # Selecting only relevant files: word-level and char-level PTB text.
        for m in tgz.getmembers():
            if "ptb" in m.name and ".txt" in m.name:
                if "char" in m.name:
                    ptb_char_files += [m.name]
                else:
                    ptb_files += [m.name]
                files += [m]
        tgz.extractall(tmp_dir, members=files)

    # Character vocabularies use the char-level variant of the corpus.
    if vocab_type == text_problems.VocabType.CHARACTER:
        return ptb_char_files
    else:
        return ptb_files
Download and unpack the corpus. Args: tmp_dir: directory containing dataset. vocab_type: which vocabulary are we using. Returns: The list of names of files.
def p_expr_LT_expr(p):
    """ expr : expr LT expr
    """
    # NOTE: the docstring above is the PLY/yacc grammar production for this
    # handler -- it is parsed by the generator and must not be edited as prose.
    # Build a '<' binary node, with constant folding via the lambda.
    p[0] = make_binary(p.lineno(2), 'LT', p[1], p[3], lambda x, y: x < y)
expr : expr LT expr
def get_html(grafs):
    """
    Renders the grafs provided in HTML by wrapping them in <p> tags.
    Linebreaks are replaced with <br> tags.
    """
    # Escape each paragraph and wrap it in <p>...</p>.
    html = [format_html('<p>{}</p>', p) for p in grafs]
    html = [p.replace("\n", "<br>") for p in html]
    # NOTE(review): the joined content is passed to format_html as the
    # *format string*; literal '{' or '}' in user text would be interpreted
    # as placeholders -- mark_safe may be the intended call here; confirm.
    return format_html(six.text_type('\n\n'.join(html)))
Renders the grafs provided in HTML by wrapping them in <p> tags. Linebreaks are replaced with <br> tags.
def insertDatasetWOannex(self, dataset, blockcontent, otptIdList, conn, insertDataset = True, migration = False):
    """
    _insertDatasetOnly_

    Insert the dataset and only the dataset
    Meant to be called after everything else is put into place.
    The insertDataset flag is set to false if the dataset already exists

    :param dataset: dict describing the dataset row (mutated in place:
        ``dataset_id`` and audit fields are filled in)
    :param blockcontent: full block payload
        (NOTE(review): not referenced in this method -- confirm it is
        intentionally unused here)
    :param otptIdList: output-module-configuration ids to map to the dataset
    :param conn: database connection (may be re-acquired if found closed)
    :param insertDataset: False when the dataset row already exists
    :param migration: True to preserve original audit fields on migration
    :return: the dataset's id
    """
    tran = conn.begin()
    try:
        #8 Finally, we have everything to insert a dataset
        if insertDataset:
            # Then we have to get a new dataset ID
            dataset['dataset_id'] = self.datasetid.execute(conn, dataset['dataset'])
            if dataset['dataset_id'] <= 0:
                # Not found: allocate a fresh id from the sequence.
                dataset['dataset_id'] = self.sm.increment(conn, "SEQ_DS")
            if not migration:
                dataset['last_modified_by'] = dbsUtils().getCreateBy()
                dataset['create_by'] = dbsUtils().getCreateBy()
            dataset['creation_date'] = dataset.get('creation_date', dbsUtils().getTime())
            dataset['last_modification_date'] = dataset.get('last_modification_date', dbsUtils().getTime())
            dataset['xtcrosssection'] = dataset.get('xtcrosssection', None)
            dataset['prep_id'] = dataset.get('prep_id', None)
            try:
                self.datasetin.execute(conn, dataset, tran)
            except exceptions.IntegrityError as ei:
                # ORA-00001 / "duplicate": unique-constraint violation, i.e.
                # a concurrent insert won the race -- re-read the id.
                if str(ei).find("ORA-00001") != -1 or str(ei).lower().find("duplicate") !=-1:
                    if conn.closed:
                        conn = self.dbi.connection()
                    dataset['dataset_id'] = self.datasetid.execute(conn, dataset['dataset'])
                    if dataset['dataset_id'] <= 0:
                        if tran:tran.rollback()
                        if conn:conn.close()
                        dbsExceptionHandler('dbsException-conflict-data', 'Dataset/[processed DS]/[dataset access type] not yet inserted by concurrent insert. ', self.logger.exception, 'Dataset/[processed DS]/[dataset access type] not yet inserted by concurrent insert. '+ str(ei))
                # ORA-01400: cannot insert NULL -- required column missing.
                elif str(ei).find("ORA-01400") > -1:
                    if tran:tran.rollback()
                    if conn:conn.close()
                    dbsExceptionHandler('dbsException-missing-data', 'Missing data when insert Datasets. ', self.logger.exception, 'Missing data when insert Datasets. '+ str(ei))
                else:
                    if tran: tran.rollback()
                    if conn: conn.close()
                    dbsExceptionHandler('dbsException-invalid-input2', 'Invalid data when insert Datasets. ', self.logger.exception, 'Invalid data when insert Datasets. '+ str(ei))
            except Exception:
                #should catch all above exception to rollback. YG Jan 17, 2013
                if tran:tran.rollback()
                if conn:conn.close()
                raise
        #9 Fill Dataset Parentage
        #All parentage are deduced from file parentage.

        #10 Before we commit, make dataset and output module configuration
        #mapping. We have to try to fill the map even if dataset is
        #already in dest db
        for c in otptIdList:
            try:
                dcObj = {
                    'dataset_id' : dataset['dataset_id'],
                    'output_mod_config_id' : c }
                self.dcin.execute(conn, dcObj, tran)
            except exceptions.IntegrityError as ei:
                #FIXME YG 01/17/2013
                if (str(ei).find("ORA-00001") != -1 and str(ei).find("TUC_DC_1") != -1) or \
                    str(ei).lower().find("duplicate")!=-1:
                    #ok, already in db
                    #FIXME: What happens when there are partially in db?
                    #YG 11/17/2010
                    pass
                else:
                    if tran:tran.rollback()
                    if conn:conn.close()
                    dbsExceptionHandler('dbsException-invalid-input2', 'Invalid data when insert dataset_configs. ', self.logger.exception, 'Invalid data when insert dataset_configs. '+ str(ei))
            except Exception as ex:
                if tran:tran.rollback()
                if conn:conn.close()
                raise
        #Now commit everything.
        tran.commit()
    except exceptions.IntegrityError as ei:
        # Then is it already in the database?
        # Not really. We have to check it again. YG Jan 17, 2013
        # we don't check the unique key here, since there are more than one unique key might
        # be violated: such as data_tier, processed_dataset, dataset_access_types.
        if str(ei).find("ORA-00001") != -1 or str(ei).lower().find("duplicate")!=-1:
            # For now, we assume most cases are the same dataset was instered by different thread. If not,
            # one has to call the insert dataset again. But we think this is a rare case and let the second
            # DBSBlockInsert call fix it if it happens.
            if conn.closed:
                conn = self.dbi.connection()
            dataset_id = self.datasetid.execute(conn, dataset['dataset'])
            if dataset_id <= 0:
                dbsExceptionHandler('dbsException-conflict-data', 'Dataset not yet inserted by concurrent insert', self.logger.exception, 'Dataset not yet inserted by concurrent insert')
            else:
                dataset['dataset_id'] = dataset_id
        else:
            if tran:tran.rollback()
            if conn:conn.close()
            dbsExceptionHandler('dbsException-invalid-input2', 'Invalid data when insert Datasets. ', self.logger.exception, 'Invalid data when insert Datasets. '+ str(ei))
    except Exception as ex:
        if tran:tran.rollback()
        if conn:conn.close()
        raise
    finally:
        # NOTE(review): this rollback runs even after a successful commit
        # (a no-op on committed transactions in SQLAlchemy), and the
        # connection is always closed here -- confirm callers do not expect
        # `conn` to remain usable.
        if tran:tran.rollback()
        if conn:conn.close()
    return dataset['dataset_id']
_insertDatasetOnly_ Insert the dataset and only the dataset Meant to be called after everything else is put into place. The insertDataset flag is set to false if the dataset already exists
def serialize(exc):
    """
    Serialize *exc* into a data dictionary representing it.

    :param exc: the exception instance to serialize
    :return: dict with the exception's type name, module path, constructor
        args and a serializable representation of its value
    """
    return {
        'exc_type': type(exc).__name__,
        'exc_path': get_module_path(type(exc)),
        'exc_args': list(map(safe_for_serialization, exc.args)),
        'value': safe_for_serialization(exc),
    }
Serialize `self.exc` into a data dictionary representing it.
def delete_record(self, instance):
    """Delete *instance*'s record via the adapter resolved for it."""
    self.get_adapter_from_instance(instance).delete_record(instance)
Deletes the record.
def perpendicular_vector(n):
    """
    Get a vector perpendicular to the given vector.

    :param n: input vector with 2 or 3 components
    :return: a nonzero vector orthogonal to ``n``
    :raises ValueError: if no perpendicular vector can be found
    """
    dim = len(n)
    if dim == 2:
        # Bug fix: the old code returned n[::-1] (components merely swapped),
        # whose dot product with n is 2*x*y -- not perpendicular in general.
        # Rotating by 90 degrees gives a true perpendicular vector.
        return N.array([-n[1], n[0]])
    # More complex in 3d: cross with a basis vector that is not parallel.
    for ix in range(dim):
        basis = N.zeros(dim)
        # Try to keep axes near the global projection
        # by finding vectors perpendicular to higher-
        # index axes first. This may or may not be worth doing.
        basis[dim - ix - 1] = 1
        v1 = N.cross(n, basis)
        if N.linalg.norm(v1) != 0:
            return v1
    raise ValueError("Cannot find perpendicular vector")
Get a random vector perpendicular to the given vector
def get_utm_zone(longitude):
    """Return the UTM zone number (1-60) for a longitude in degrees."""
    shifted = (longitude + 180.0) / 6.0
    zone = int((math.floor(shifted) + 1) % 60)
    # Zone 0 wraps around to zone 60.
    return zone if zone else 60
Return utm zone.
def delete(self):
    """
    Delete the cloud from the list of added clouds in mist.io service,
    then refresh the client's cached cloud list.

    :returns: None; the updated clouds are available on the client after
        ``update_clouds`` completes.
    """
    req = self.request(self.mist_client.uri + '/clouds/' + self.id)
    req.delete()
    # Refresh the client-side cache so it no longer lists this cloud.
    self.mist_client.update_clouds()
Delete the cloud from the list of added clouds in mist.io service. :returns: A list of mist.clients' updated clouds.
def _grid_distance(self, index): """ Calculate the distance grid for a single index position. This is pre-calculated for fast neighborhood calculations later on (see _calc_influence). """ # Take every dimension but the first in reverse # then reverse that list again. dimensions = np.cumprod(self.map_dimensions[1::][::-1])[::-1] coord = [] for idx, dim in enumerate(dimensions): if idx != 0: value = (index % dimensions[idx-1]) // dim else: value = index // dim coord.append(value) coord.append(index % self.map_dimensions[-1]) for idx, (width, row) in enumerate(zip(self.map_dimensions, coord)): x = np.abs(np.arange(width) - row) ** 2 dims = self.map_dimensions[::-1] if idx: dims = dims[:-idx] x = np.broadcast_to(x, dims).T if idx == 0: distance = np.copy(x) else: distance += x.T return distance
Calculate the distance grid for a single index position. This is pre-calculated for fast neighborhood calculations later on (see _calc_influence).
def save(self, path=None, filter_name=None):
    """
    Saves this document to a local file system.

    The optional first argument defaults to the document's path.

    Accept optional second argument which defines type of the saved file.
    Use one of FILTER_* constants or see list of available filters at
    http://wakka.net/archives/7 or
    http://www.oooforum.org/forum/viewtopic.phtml?t=71294.

    :raises IOError: if the underlying UNO store operation fails
    """
    if path is None:
        # No path given: store in place at the document's current location.
        try:
            self._target.store()
        except _IOException as e:
            raise IOError(e.Message)
        return

    # UNO requires absolute paths
    url = uno.systemPathToFileUrl(os.path.abspath(path))

    if filter_name:
        format_filter = uno.createUnoStruct('com.sun.star.beans.PropertyValue')
        format_filter.Name = 'FilterName'
        format_filter.Value = filter_name
        filters = (format_filter,)
    else:
        filters = ()

    # http://www.openoffice.org/api/docs/common/ref/com/sun/star/frame/XStorable.html#storeToURL
    try:
        self._target.storeToURL(url, filters)
    except _IOException as e:
        raise IOError(e.Message)
Saves this document to a local file system. The optional first argument defaults to the document's path. Accept optional second argument which defines type of the saved file. Use one of FILTER_* constants or see list of available filters at http://wakka.net/archives/7 or http://www.oooforum.org/forum/viewtopic.phtml?t=71294.
def isMember(userid, password, group):
    """Test to see if the given userid/password combo is an authenticated
    member of group.

    userid: CADC Username (str)
    password: CADC Password (str)
    group: CADC GMS group (str)

    :return: True on a 200 response from the GMS service, False otherwise
        (including on any error, which is logged).
    """
    try:
        certfile = getCert(userid, password)
        group_url = getGroupsURL(certfile, group)
        logging.debug("group url: %s" % (group_url))
        con = httplib.HTTPSConnection(_SERVER,
                                      443,
                                      key_file=certfile.name,
                                      cert_file=certfile.name,
                                      timeout=600)
        # Bug fix: the connection was previously never closed, leaking a
        # socket per membership check.
        try:
            con.connect()
            con.request("GET", group_url)
            resp = con.getresponse()
            if resp.status == 200:
                return True
        finally:
            con.close()
    except Exception as e:
        logging.error(str(e))
    return False
Test to see if the given userid/password combo is an authenticated member of group. userid: CADC Username (str) password: CADC Password (str) group: CADC GMS group (str)
def sample_stats_prior_to_xarray(self):
    """Extract sample_stats_prior from prior.

    :return: an xarray Dataset built from the prior's sampler statistics,
        carrying this converter's coords and dims.
    """
    prior = self.prior
    data = get_sample_stats(prior)
    return dict_to_dataset(data, library=self.pystan, coords=self.coords, dims=self.dims)
Extract sample_stats_prior from prior.
def parse_favorites(self, favorites_page):
    """Parses the DOM and returns character favorites attributes.

    :type favorites_page: :class:`bs4.BeautifulSoup`
    :param favorites_page: MAL character favorites page's DOM

    :rtype: dict
    :return: Character favorites attributes.
    """
    character_info = self.parse_sidebar(favorites_page)
    second_col = favorites_page.find(u'div', {'id': 'content'}).find(u'table').find(u'tr').find_all(u'td', recursive=False)[1]

    try:
        character_info[u'favorites'] = []
        favorite_links = second_col.find_all('a', recursive=False)
        for link in favorite_links:
            # of the form /profile/shaldengeki
            character_info[u'favorites'].append(self.session.user(username=link.text))
    # Bug fix: was a bare ``except:``, which also swallowed
    # KeyboardInterrupt/SystemExit whenever parse-exception suppression
    # was enabled. Parse failures are still re-raised unless suppressed.
    except Exception:
        if not self.session.suppress_parse_exceptions:
            raise

    return character_info
Parses the DOM and returns character favorites attributes. :type favorites_page: :class:`bs4.BeautifulSoup` :param favorites_page: MAL character favorites page's DOM :rtype: dict :return: Character favorites attributes.
def send(self, sender: PytgbotApiBot):
    """
    Send the message via pytgbot.

    :param sender: The bot instance to send with.
    :type  sender: pytgbot.bot.Bot

    :rtype: PytgbotApiMessage
    """
    return sender.send_game(
        game_short_name=self.game_short_name,
        chat_id=self.receiver,
        reply_to_message_id=self.reply_id,
        disable_notification=self.disable_notification,
        reply_markup=self.reply_markup,
    )
Send the message via pytgbot. :param sender: The bot instance to send with. :type sender: pytgbot.bot.Bot :rtype: PytgbotApiMessage
def literal(node):
    """
    Inline code: build a docutils ``literal`` node, with per-token
    highlighting when the node carries language info.
    """
    rendered = []
    try:
        if node.info is not None:
            l = Lexer(node.literal, node.info, tokennames="long")
            for _ in l:
                rendered.append(node.inline(classes=_[0], text=_[1]))
    # Bug fix: was a bare ``except: pass``, which also swallowed
    # KeyboardInterrupt/SystemExit. Highlighting failures are still
    # deliberately ignored -- we fall back to a plain literal below.
    except Exception:
        pass

    classes = ['code']
    if node.info is not None:
        classes.append(node.info)

    if len(rendered) > 0:
        o = nodes.literal(classes=classes)
        for element in rendered:
            o += element
    else:
        o = nodes.literal(text=node.literal, classes=classes)

    for n in MarkDown(node):
        o += n
    return o
Inline code
def create_dampening(self, trigger_id, dampening):
    """
    Create a new dampening.

    :param trigger_id: TriggerId definition attached to the dampening
    :param dampening: Dampening definition to be created.
    :type dampening: Dampening
    :return: Created dampening
    """
    data = self._serialize_object(dampening)
    # POST to /triggers/{trigger_id}/dampenings and wrap the response.
    url = self._service_url(['triggers', trigger_id, 'dampenings'])
    return Dampening(self._post(url, data))
Create a new dampening. :param trigger_id: TriggerId definition attached to the dampening :param dampening: Dampening definition to be created. :type dampening: Dampening :return: Created dampening
def draw_path_collection(self, paths, path_coordinates, path_transforms,
                         offsets, offset_coordinates, offset_order,
                         styles, mplobj=None):
    """
    Draw a collection of paths. The paths, offsets, and styles are all
    iterables, and the number of paths is max(len(paths), len(offsets)).

    By default, this is implemented via multiple calls to the draw_path()
    function. For efficiency, Renderers may choose to customize this
    implementation.

    Examples of path collections created by matplotlib are scatter plots,
    histograms, contour plots, and many others.

    Parameters
    ----------
    paths : list
        list of tuples, where each tuple has two elements:
        (data, pathcodes). See draw_path() for a description of these.
    path_coordinates: string
        the coordinates code for the paths, which should be either
        'data' for data coordinates, or 'figure' for figure (pixel)
        coordinates.
    path_transforms: array_like
        an array of shape (*, 3, 3), giving a series of 2D Affine
        transforms for the paths. These encode translations, rotations,
        and scalings in the standard way.
    offsets: array_like
        An array of offsets of shape (N, 2)
    offset_coordinates : string
        the coordinates code for the offsets, which should be either
        'data' for data coordinates, or 'figure' for figure (pixel)
        coordinates.
    offset_order : string
        either "before" or "after". This specifies whether the offset
        is applied before the path transform, or after. The matplotlib
        backend equivalent is "before"->"data", "after"->"screen".
    styles: dictionary
        A dictionary in which each value is a list of length N, containing
        the style(s) for the paths.
    mplobj : matplotlib object
        the matplotlib plot element which generated this collection
    """
    if offset_order == "before":
        raise NotImplementedError("offset before transform")

    # Fall back to one draw_path() call per (path, transform, offset, style).
    for tup in self._iter_path_collection(paths, path_transforms,
                                          offsets, styles):
        (path, path_transform, offset, ec, lw, fc) = tup
        vertices, pathcodes = path
        # Apply the per-path affine transform to the vertices up front.
        path_transform = transforms.Affine2D(path_transform)
        vertices = path_transform.transform(vertices)
        # This is a hack:
        if path_coordinates == "figure":
            path_coordinates = "points"
        style = {"edgecolor": utils.color_to_hex(ec),
                 "facecolor": utils.color_to_hex(fc),
                 "edgewidth": lw,
                 "dasharray": "10,0",
                 "alpha": styles['alpha'],
                 "zorder": styles['zorder']}
        self.draw_path(data=vertices, coordinates=path_coordinates,
                       pathcodes=pathcodes, style=style, offset=offset,
                       offset_coordinates=offset_coordinates,
                       mplobj=mplobj)
Draw a collection of paths. The paths, offsets, and styles are all iterables, and the number of paths is max(len(paths), len(offsets)). By default, this is implemented via multiple calls to the draw_path() function. For efficiency, Renderers may choose to customize this implementation. Examples of path collections created by matplotlib are scatter plots, histograms, contour plots, and many others. Parameters ---------- paths : list list of tuples, where each tuple has two elements: (data, pathcodes). See draw_path() for a description of these. path_coordinates: string the coordinates code for the paths, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. path_transforms: array_like an array of shape (*, 3, 3), giving a series of 2D Affine transforms for the paths. These encode translations, rotations, and scalings in the standard way. offsets: array_like An array of offsets of shape (N, 2) offset_coordinates : string the coordinates code for the offsets, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. offset_order : string either "before" or "after". This specifies whether the offset is applied before the path transform, or after. The matplotlib backend equivalent is "before"->"data", "after"->"screen". styles: dictionary A dictionary in which each value is a list of length N, containing the style(s) for the paths. mplobj : matplotlib object the matplotlib plot element which generated this collection
def boolean(value):
    """Parse the string ``"true"`` or ``"false"`` as a boolean (case
    insensitive).

    Also accepts ``"1"`` and ``"0"`` as ``True``/``False`` (respectively).
    If the input is from the request JSON body, the type is already a native
    python boolean, and will be passed through without further parsing.

    :raises ValueError: if the value is empty/None or not a recognised literal.
    """
    # Native booleans (e.g. decoded from a JSON body) pass straight through.
    if isinstance(value, bool):
        return value
    if not value:
        raise ValueError("boolean type must be non-null")
    lowered = value.lower()
    if lowered in ('true', '1'):
        return True
    if lowered in ('false', '0'):
        return False
    raise ValueError("Invalid literal for boolean(): {0}".format(lowered))
Parse the string ``"true"`` or ``"false"`` as a boolean (case insensitive). Also accepts ``"1"`` and ``"0"`` as ``True``/``False`` (respectively). If the input is from the request JSON body, the type is already a native python boolean, and will be passed through without further parsing.
def parse(str_, lsep=",", avsep=":", vssep=",", avssep=";"):
    """Generic parser.

    Dispatches on which separators appear in *str_*: an attribute/value
    separator routes to an attribute-list parse, a list separator to a list
    parse, and anything else is treated as a single value.

    :param str_: the string to parse
    :param lsep: list separator
    :param avsep: attribute/value separator
    :param vssep: value-set separator
    :param avssep: attribute/value-set separator
    """
    if avsep in str_:
        return parse_attrlist(str_, avsep, vssep, avssep)
    elif lsep in str_:
        return parse_list(str_, lsep)
    else:
        return parse_single(str_)
Generic parser
def extension_context(extension_name='cpu', **kw):
    """Get the context of the specified extension.

    All extension's module must provide `context(**kw)` function.

    Args:
        extension_name (str) : Module path relative to `nnabla_ext`.
        kw (dict) : Additional keyword arguments for context function in a
            extension module.

    Returns:
        :class:`nnabla.Context`: The current extension context.

    Note:
        Deprecated. Use :function:`nnabla.ext_utils.get_extension_context`
        instead.

    Example:

        .. code-block:: python

            ctx = extension_context('cuda.cudnn', device_id=0)
            nn.set_default_context(ctx)

    """
    # Imported lazily so that merely importing this module stays cheap.
    from nnabla import logger
    # Warn first, then delegate to the replacement API.
    logger.warn(
        'Deprecated API. Use `nnabla.ext_util.get_extension_context(ext_name, **kw)`.')
    from nnabla.ext_utils import get_extension_context
    return get_extension_context(extension_name, **kw)
Get the context of the specified extension. All extension's module must provide `context(**kw)` function. Args: extension_name (str) : Module path relative to `nnabla_ext`. kw (dict) : Additional keyword arguments for context function in a extension module. Returns: :class:`nnabla.Context`: The current extension context. Note: Deprecated. Use :function:`nnabla.ext_utils.get_extension_context` instead. Example: .. code-block:: python ctx = extension_context('cuda.cudnn', device_id=0) nn.set_default_context(ctx)
def qc_to_rec(samples):
    """CWL: Convert a set of input samples into records for parallelization.
    """
    # Flatten the CWL-nested inputs, then re-attach complex objects.
    flat = [utils.to_single_data(s) for s in samples]
    flat = cwlutils.assign_complex_to_samples(flat)
    to_analyze, extras = _split_samples_by_qc(flat)
    combined = [utils.to_single_data(s) for s in to_analyze + extras]
    # Each record is wrapped in its own list for downstream parallelization.
    return [[rec] for rec in cwlutils.samples_to_records(combined)]
CWL: Convert a set of input samples into records for parallelization.
def url(self, host):
    """Generate url for coap client.

    Joins the components of ``self._path`` with '/' and embeds them in a
    ``coaps://`` URL on the standard DTLS CoAP port 5684.
    """
    resource = '/'.join(str(segment) for segment in self._path)
    return 'coaps://{}:5684/{}'.format(host, resource)
Generate url for coap client.
def activate_output(self, universe: int) -> None:
    """
    Activates a universe that's then starting to sending every second.
    See http://tsp.esta.org/tsp/documents/docs/E1-31-2016.pdf for more information

    :param universe: the universe to activate
    """
    check_universe(universe)
    # Nothing to do if this universe is already being sent.
    if universe in self._outputs:
        return
    # Register a new output packet for this universe.
    packet = DataPacket(cid=self.__CID, sourceName=self.source_name,
                        universe=universe)
    self._outputs[universe] = Output(packet)
Activates a universe that then starts sending every second. See http://tsp.esta.org/tsp/documents/docs/E1-31-2016.pdf for more information :param universe: the universe to activate
def clear(self, database, callback=None):
    """
    Wipe the given database. This only affects items inserted remotely;
    items inserted on the watch (e.g. alarm clock timeline pins) are not
    removed.

    :param database: The database to wipe.
    :type database: .BlobDatabaseID
    :param callback: A callback to be called on success or failure.
    """
    token = self._get_token()
    # A ClearCommand payload wipes the whole database on the watch side.
    command = BlobCommand(token=token, database=database,
                          content=ClearCommand())
    self._enqueue(self._PendingItem(token, command, callback))
Wipe the given database. This only affects items inserted remotely; items inserted on the watch (e.g. alarm clock timeline pins) are not removed. :param database: The database to wipe. :type database: .BlobDatabaseID :param callback: A callback to be called on success or failure.
def _download_article(self, article_number, max_retries=10):
    """Download a given article.

    :type article_number: str
    :param article_number: the article number to download.
    :type group: str
    :param group: the group that contains the article to be downloaded.
    :returns: nntplib article response object if successful, else False.
    """
    log.debug('downloading article {0} from {1}'.format(article_number,
                                                        self.name))
    # Borrow a pooled NNTP connection; it is always returned in the
    # finally block below.
    _connection = self.session.connections.get()
    try:
        # i counts non-transient failures; EOFError retries do not count.
        i = 0
        while True:
            if i >= max_retries:
                return False
            try:
                # Select the group, then fetch the article.
                _connection.group(self.name)
                resp = _connection.article(article_number)
                log.debug('downloaded article {0} from {1}'.format(
                    article_number, self.name))
                return resp
            # Connection closed, transient error, retry forever.
            except EOFError:
                log.warning('EOFError, refreshing connection retrying -- '
                            'article={0}, group={1}'.format(article_number,
                                                            self.name))
                self.session.refresh_connection(_connection)
                time.sleep(2)
                _connection = self.session.connections.get()
            # NNTP Error.
            except nntplib.NNTPError as exc:
                log.warning('NNTPError: {0} -- article={1}, '
                            'group={2}'.format(exc, article_number,
                                               self.name))
                # 430/423: article not available on this server.
                if any(s in exc.response for s in ['430', '423']):
                    # Don't retry, article probably doesn't exist.
                    i = max_retries
                else:
                    i += 1
            # NOTE(review): bare except also swallows KeyboardInterrupt /
            # SystemExit — consider narrowing to Exception.
            except:
                self.session.refresh_connection(_connection)
                time.sleep(2)
                _connection = self.session.connections.get()
    # Always return connection back to the pool!
    finally:
        self.session.connections.put(_connection)
Download a given article. :type article_number: str :param article_number: the article number to download. :type group: str :param group: the group that contains the article to be downloaded. :returns: nntplib article response object if successful, else False.
def get_signing_key(self, key_type="", owner="", kid=None, **kwargs):
    """
    Shortcut to use for signing keys only.

    :param key_type: Type of key (rsa, ec, oct, ..)
    :param owner: Who is the owner of the keys, "" == me (default)
    :param kid: A Key Identifier
    :param kwargs: Extra key word arguments
    :return: A possibly empty list of keys
    """
    # Delegate to the generic lookup with the key usage fixed to "sig".
    usage = "sig"
    return self.get(usage, key_type, owner, kid, **kwargs)
Shortcut to use for signing keys only. :param key_type: Type of key (rsa, ec, oct, ..) :param owner: Who is the owner of the keys, "" == me (default) :param kid: A Key Identifier :param kwargs: Extra key word arguments :return: A possibly empty list of keys
def running_covar(xx=True, xy=False, yy=False, remove_mean=False,
                  symmetrize=False, sparse_mode='auto', modify_data=False,
                  column_selection=None, diag_only=False, nsave=5):
    """ Returns a running covariance estimator

    Returns an estimator object that can be fed chunks of X and Y data, and
    that can generate on-the-fly estimates of mean, covariance, running sum
    and second moment matrix.

    Parameters
    ----------
    xx : bool
        Estimate the covariance of X
    xy : bool
        Estimate the cross-covariance of X and Y
    yy : bool
        Estimate the covariance of Y
    remove_mean : bool
        Remove the data mean in the covariance estimation
    symmetrize : bool
        Use symmetric estimates with sum defined by sum_t x_t + y_t and
        second moment matrices defined by X'X + Y'Y and Y'X + X'Y.
    modify_data : bool
        If remove_mean=True, the mean will be removed in the input data,
        without creating an independent copy. This option is faster but
        should only be selected if the input data is not used elsewhere.
    sparse_mode : str
        one of:
            * 'dense' : always use dense mode
            * 'sparse' : always use sparse mode if possible
            * 'auto' : automatic
    column_selection: ndarray(k, dtype=int) or None
        Indices of those columns that are to be computed. If None, all
        columns are computed.
    diag_only: bool
        If True, the computation is restricted to the diagonal entries
        (autocorrelations) only.
    nsave : int
        Depth of Moment storage. Moments computed from each chunk will be
        combined with Moments of similar statistical weight using the
        pairwise combination algorithm described in [1]_.

    References
    ----------
    .. [1] http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf

    """
    # Map the short public keyword names onto the estimator's constructor
    # arguments and build it in one shot.
    config = dict(compute_XX=xx, compute_XY=xy, compute_YY=yy,
                  sparse_mode=sparse_mode, modify_data=modify_data,
                  remove_mean=remove_mean, symmetrize=symmetrize,
                  column_selection=column_selection,
                  diag_only=diag_only, nsave=nsave)
    return RunningCovar(**config)
Returns a running covariance estimator Returns an estimator object that can be fed chunks of X and Y data, and that can generate on-the-fly estimates of mean, covariance, running sum and second moment matrix. Parameters ---------- xx : bool Estimate the covariance of X xy : bool Estimate the cross-covariance of X and Y yy : bool Estimate the covariance of Y remove_mean : bool Remove the data mean in the covariance estimation symmetrize : bool Use symmetric estimates with sum defined by sum_t x_t + y_t and second moment matrices defined by X'X + Y'Y and Y'X + X'Y. modify_data : bool If remove_mean=True, the mean will be removed in the input data, without creating an independent copy. This option is faster but should only be selected if the input data is not used elsewhere. sparse_mode : str one of: * 'dense' : always use dense mode * 'sparse' : always use sparse mode if possible * 'auto' : automatic column_selection: ndarray(k, dtype=int) or None Indices of those columns that are to be computed. If None, all columns are computed. diag_only: bool If True, the computation is restricted to the diagonal entries (autocorrelations) only. nsave : int Depth of Moment storage. Moments computed from each chunk will be combined with Moments of similar statistical weight using the pairwise combination algorithm described in [1]_. References ---------- .. [1] http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
def stop_notifications(self):
    """Stop the notifications thread.

    :returns:
    """
    with self._notifications_lock:
        # No-op when no notification thread is currently active.
        if not self.has_active_notification_thread:
            return
        # Detach the thread reference under the lock so concurrent callers
        # cannot stop the same thread twice.
        thread = self._notifications_thread
        self._notifications_thread = None
    stopping = thread.stop()
    api = self._get_api(mds.NotificationsApi)
    api.delete_long_poll_channel()
    return stopping.wait()
Stop the notifications thread. :returns:
def __is_current(filepath):
    '''Checks whether file is current'''
    # With auto-update disabled, any existing file counts as current.
    if not __DOWNLOAD_PARAMS['auto_update']:
        return True
    if not os.path.isfile(filepath):
        return False
    # Current means: modified after the most recent upstream update.
    mtime = datetime.datetime.utcfromtimestamp(os.path.getmtime(filepath))
    return mtime > __get_last_update_time()
Checks whether file is current
def convert_json_node(self, json_input):
    """
    Dispatch JSON input according to the outermost type and process it
    to generate the super awesome HTML format.
    We try to adhere to duck typing such that users can just pass all kinds
    of funky objects to json2html that *behave* like dicts and lists and
    other basic JSON types.
    """
    # Text leaves: optionally HTML-escape before rendering.
    # NOTE(review): cgi.escape is deprecated (removed in Python 3.13);
    # html.escape is the modern replacement, but has different quote
    # defaults, so it is not swapped in here.
    if type(json_input) in text_types:
        if self.escape:
            return cgi.escape(text(json_input))
        return text(json_input)
    # Mapping-like objects (anything with .items()) render as objects.
    if hasattr(json_input, 'items'):
        return self.convert_object(json_input)
    # Sequence-like, non-string objects render as lists.
    if hasattr(json_input, '__iter__') and hasattr(json_input, '__getitem__'):
        return self.convert_list(json_input)
    # Fall back to plain text conversion for anything else.
    return text(json_input)
Dispatch JSON input according to the outermost type and process it to generate the super awesome HTML format. We try to adhere to duck typing such that users can just pass all kinds of funky objects to json2html that *behave* like dicts and lists and other basic JSON types.
def initialize_connection(self):  # noqa: E501 pylint:disable=too-many-statements, too-many-branches
    """Initialize a socket to a Chromecast, retrying as necessary."""
    tries = self.tries

    # Drop any previous socket before reconnecting.
    if self.socket is not None:
        self.socket.close()
        self.socket = None

    # Make sure nobody is blocking.
    for callback in self._request_callbacks.values():
        callback['event'].set()

    # Reset all per-connection state.
    self.app_namespaces = []
    self.destination_id = None
    self.session_id = None
    self._request_id = 0
    self._request_callbacks = {}
    self._open_channels = []

    self.connecting = True
    # First failure is logged at error level, later ones at debug.
    retry_log_fun = self.logger.error

    # Dict keeping track of individual retry delay for each named service
    retries = {}

    def mdns_backoff(service, retry):
        """Exponential backoff for service name mdns lookups."""
        now = time.time()
        retry['next_retry'] = now + retry['delay']
        # Doubles each attempt, capped at 300 seconds.
        retry['delay'] = min(retry['delay']*2, 300)
        retries[service] = retry

    while not self.stop.is_set() and (tries is None or tries > 0):  # noqa: E501 pylint:disable=too-many-nested-blocks
        # Prune retries dict
        retries = {key: retries[key] for key in self.services if (
            key is not None and key in retries)}

        for service in self.services.copy():
            now = time.time()
            retry = retries.get(
                service, {'delay': self.retry_wait, 'next_retry': now})
            # If we're connecting to a named service, check if it's time
            if service and now < retry['next_retry']:
                continue
            try:
                self.socket = new_socket()
                self.socket.settimeout(self.timeout)
                self._report_connection_status(
                    ConnectionStatus(CONNECTION_STATUS_CONNECTING,
                                     NetworkAddress(self.host, self.port)))
                # Resolve the service name. If service is None, we're
                # connecting directly to a host name or IP-address
                if service:
                    host = None
                    port = None
                    service_info = get_info_from_service(service, self.zconf)
                    host, port = get_host_from_service_info(service_info)
                    if host and port:
                        # Friendly name is optional in the mDNS record.
                        try:
                            self.fn = service_info.properties[b'fn']\
                                .decode('utf-8')
                        except (AttributeError, KeyError, UnicodeError):
                            pass
                        self.logger.debug(
                            "[%s:%s] Resolved service %s to %s:%s",
                            self.fn or self.host, self.port, service,
                            host, port)
                        self.host = host
                        self.port = port
                    else:
                        self.logger.debug(
                            "[%s:%s] Failed to resolve service %s",
                            self.fn or self.host, self.port, service)
                        self._report_connection_status(
                            ConnectionStatus(
                                CONNECTION_STATUS_FAILED_RESOLVE,
                                NetworkAddress(service, None)))
                        mdns_backoff(service, retry)
                        # If zeroconf fails to receive the necessary data,
                        # try next service
                        continue

                self.logger.debug("[%s:%s] Connecting to %s:%s",
                                  self.fn or self.host, self.port,
                                  self.host, self.port)
                self.socket.connect((self.host, self.port))
                # Chromecast speaks TLS on the control channel.
                self.socket = ssl.wrap_socket(self.socket)
                self.connecting = False
                self._force_recon = False
                self._report_connection_status(
                    ConnectionStatus(CONNECTION_STATUS_CONNECTED,
                                     NetworkAddress(self.host, self.port)))
                # Kick off status refresh and heartbeat on the new link.
                self.receiver_controller.update_status()
                self.heartbeat_controller.ping()
                self.heartbeat_controller.reset()
                self.logger.debug("[%s:%s] Connected!",
                                  self.fn or self.host, self.port)
                return
            except OSError as err:
                self.connecting = True
                if self.stop.is_set():
                    self.logger.error(
                        "[%s:%s] Failed to connect: %s. "
                        "aborting due to stop signal.",
                        self.fn or self.host, self.port, err)
                    raise ChromecastConnectionError("Failed to connect")

                self._report_connection_status(
                    ConnectionStatus(CONNECTION_STATUS_FAILED,
                                     NetworkAddress(self.host, self.port)))
                if service is not None:
                    retry_log_fun(
                        "[%s:%s] Failed to connect to service %s"
                        ", retrying in %.1fs",
                        self.fn or self.host, self.port, service,
                        retry['delay'])
                    mdns_backoff(service, retry)
                else:
                    retry_log_fun(
                        "[%s:%s] Failed to connect, retrying in %.1fs",
                        self.fn or self.host, self.port, self.retry_wait)
                # Demote subsequent retry messages to debug level.
                retry_log_fun = self.logger.debug

        # Only sleep if we have another retry remaining
        if tries is None or tries > 1:
            self.logger.debug(
                "[%s:%s] Not connected, sleeping for %.1fs. Services: %s",
                self.fn or self.host, self.port, self.retry_wait,
                self.services)
            time.sleep(self.retry_wait)

        if tries:
            tries -= 1

    # Out of retries (or stop requested): give up for good.
    self.stop.set()
    self.logger.error("[%s:%s] Failed to connect. No retries.",
                      self.fn or self.host, self.port)
    raise ChromecastConnectionError("Failed to connect")
Initialize a socket to a Chromecast, retrying as necessary.
def to_json(self):
    """Return a header as a dictionary."""
    # analysis_period is optional; serialize it only when present.
    if self.analysis_period:
        a_per = self.analysis_period.to_json()
    else:
        a_per = None
    return {
        'data_type': self.data_type.to_json(),
        'unit': self.unit,
        'analysis_period': a_per,
        'metadata': self.metadata,
    }
Return a header as a dictionary.
async def run(*cmd):
    """Run the given subprocess command in a coroutine.

    Args:
        *cmd: the command to run and its arguments.

    Returns:
        The output that the command wrote to stdout as a list of strings,
        one line per element (stderr output is piped to stdout).

    Raises:
        RuntimeError: if the command returns a non-zero result.
    """
    stdout = await checked_run(*cmd)

    # Append the expanded command line and its output to a per-command log.
    log_path = os.path.join(FLAGS.base_dir, get_cmd_name(cmd) + '.log')
    with gfile.Open(log_path, 'a') as f:
        f.write('{}\n{}\n'.format(expand_cmd_str(cmd), stdout))

    # Split stdout into lines.
    return stdout.split('\n')
Run the given subprocess command in a coroutine. Args: *cmd: the command to run and its arguments. Returns: The output that the command wrote to stdout as a list of strings, one line per element (stderr output is piped to stdout). Raises: RuntimeError: if the command returns a non-zero result.
def _get_color(self):
    """Return the color of the button, depending on its state"""
    if self.clicked and self.hovered:
        # Pressed while the pointer is over the button: darkest shade.
        shade = mix(self.color, BLACK, 0.8)
    elif self.hovered and not self.flags & self.NO_HOVER:
        # Hover highlight, unless hovering is disabled via flags.
        shade = mix(self.color, BLACK, 0.93)
    else:
        shade = self.color
    # Keep the label background in sync with the computed shade.
    self.text.bg_color = shade
    return shade
Return the color of the button, depending on its state
def do_add_signature(input_file, output_file, signature_file):
    """Add a signature to the MAR file.

    The hash algorithm is inferred from the raw signature size:
    256 bytes -> sha1, 512 bytes -> sha384.

    Args:
        input_file: path of the unsigned MAR file to read.
        output_file: path of the signed MAR file to write.
        signature_file: path of the raw signature blob.

    Raises:
        ValueError: if the signature length matches no known algorithm.
    """
    # Fix: close the signature file deterministically instead of leaking
    # the handle until garbage collection.
    with open(signature_file, 'rb') as sig_f:
        signature = sig_f.read()

    if len(signature) == 256:
        hash_algo = 'sha1'
    elif len(signature) == 512:
        hash_algo = 'sha384'
    else:
        # Fix: previously raised a bare ValueError() with no explanation.
        raise ValueError(
            'Unsupported signature length: {} bytes (expected 256 or 512)'
            .format(len(signature)))

    with open(output_file, 'w+b') as dst:
        with open(input_file, 'rb') as src:
            add_signature_block(src, dst, hash_algo, signature)
Add a signature to the MAR file.
def generate_by_deltas(cls, options, width, put_inner_lte_delta, call_inner_lte_delta):
    """
    totally just playing around ideas for the API.

    this IC sells
    - credit put spread
    - credit call spread

    the approach
    - set width for the wing spread (eg, 1, ie, 1 unit width spread)
    - set delta for inner leg of the put credit spread (eg, -0.2)
    - set delta for inner leg of the call credit spread (eg, 0.1)
    """
    raise Exception("Not Implemented starting at the 0.3.0 release")
    # NOTE(review): everything below this raise is unreachable dead code,
    # kept as a sketch of the intended iron-condor construction.

    #
    # put credit spread
    #
    put_options_unsorted = list(
        filter(lambda x: x['type'] == 'put', options))
    put_options = cls.sort_by_strike_price(put_options_unsorted)
    deltas_as_strings = [x['delta'] for x in put_options]
    deltas = cls.strings_to_np_array(deltas_as_strings)
    # Inner put leg: last strike whose delta is below the threshold.
    put_inner_index = np.argmin(deltas >= put_inner_lte_delta) - 1
    put_outer_index = put_inner_index - width
    put_inner_leg = cls.gen_leg(
        put_options[put_inner_index]["instrument"], "sell")
    put_outer_leg = cls.gen_leg(
        put_options[put_outer_index]["instrument"], "buy")

    #
    # call credit spread
    #
    call_options_unsorted = list(
        filter(lambda x: x['type'] == 'call', options))
    call_options = cls.sort_by_strike_price(call_options_unsorted)
    deltas_as_strings = [x['delta'] for x in call_options]
    x = np.array(deltas_as_strings)
    # NOTE(review): np.float is removed in modern numpy; would need float.
    deltas = x.astype(np.float)

    # because deep ITM call options have a delta that comes up as NaN,
    # but are approximately 0.99 or 1.0, I'm replacing Nan with 1.0
    # so np.argmax is able to walk up the index until it finds
    # "call_inner_lte_delta"
    # @TODO change this so (put credit / call credit) spreads work the same
    where_are_NaNs = np.isnan(deltas)
    deltas[where_are_NaNs] = 1.0

    call_inner_index = np.argmax(deltas <= call_inner_lte_delta)
    call_outer_index = call_inner_index + width
    call_inner_leg = cls.gen_leg(
        call_options[call_inner_index]["instrument"], "sell")
    call_outer_leg = cls.gen_leg(
        call_options[call_outer_index]["instrument"], "buy")

    legs = [put_outer_leg, put_inner_leg, call_inner_leg, call_outer_leg]

    #
    # price calcs
    #
    # Net credit: sold legs add, bought legs subtract.
    price = (
        - Decimal(put_options[put_outer_index]['adjusted_mark_price'])
        + Decimal(put_options[put_inner_index]['adjusted_mark_price'])
        + Decimal(call_options[call_inner_index]['adjusted_mark_price'])
        - Decimal(call_options[call_outer_index]['adjusted_mark_price'])
    )

    #
    # provide max bid ask spread diff
    #
    ic_options = [
        put_options[put_outer_index],
        put_options[put_inner_index],
        call_options[call_inner_index],
        call_options[call_outer_index]
    ]
    max_bid_ask_spread = cls.max_bid_ask_spread(ic_options)

    return {"legs": legs,
            "price": price,
            "max_bid_ask_spread": max_bid_ask_spread}
totally just playing around ideas for the API. this IC sells - credit put spread - credit call spread the approach - set width for the wing spread (eg, 1, ie, 1 unit width spread) - set delta for inner leg of the put credit spread (eg, -0.2) - set delta for inner leg of the call credit spread (eg, 0.1)
def init_app(self, app):
    """
    This callback can be used to initialize an application for the
    use with this prometheus reporter setup.
    This is usually used with a flask "app factory" configuration. Please
    see: http://flask.pocoo.org/docs/1.0/patterns/appfactories/

    Note, that you need to use `PrometheusMetrics(app=None, ...)` for
    this mode, otherwise it is called automatically.

    :param app: the Flask application
    """
    # Expose the metrics endpoint when a path was configured.
    endpoint_path = self.path
    if endpoint_path:
        self.register_endpoint(endpoint_path, app)
    # Register the default per-request metrics if enabled.
    if self._export_defaults:
        self.export_defaults(self.buckets, self.group_by,
                             self._defaults_prefix, app)
This callback can be used to initialize an application for the use with this prometheus reporter setup. This is usually used with a flask "app factory" configuration. Please see: http://flask.pocoo.org/docs/1.0/patterns/appfactories/ Note, that you need to use `PrometheusMetrics(app=None, ...)` for this mode, otherwise it is called automatically. :param app: the Flask application
def load_plugins():
    """
    Load all available plugins.

    Returns
    -------
    plugin_cls : dict
        mapping from plugin names to plugin classes
    """
    registry = {}
    for entry_point in pkg_resources.iter_entry_points('docker_interface.plugins'):
        cls = entry_point.load()
        # Fail fast on plugins that do not declare the required attributes.
        assert cls.COMMANDS is not None, \
            "plugin '%s' does not define its commands" % entry_point.name
        assert cls.ORDER is not None, \
            "plugin '%s' does not define its priority" % entry_point.name
        registry[entry_point.name] = cls
    return registry
Load all available plugins. Returns ------- plugin_cls : dict mapping from plugin names to plugin classes
def save_raw_pickle(hwr_objects):
    """
    Dump handwriting objects to 'crohme.pickle' in the raw dataset format.

    Parameters
    ----------
    hwr_objects : list of hwr objects
    """
    converted_hwr = []

    # Build the latex <-> writemath-id translation tables from the
    # packaged CSV file.
    translate = {}
    translate_id = {}
    model_path = pkg_resources.resource_filename('hwrt', 'misc/')
    translation_csv = os.path.join(model_path, 'latex2writemathindex.csv')
    arguments = {'newline': '', 'encoding': 'utf8'}
    with open(translation_csv, 'rt', **arguments) as csvfile:
        contents = csvfile.read()
        lines = contents.split("\n")
        for csvrow in lines:
            csvrow = csvrow.split(',')
            if len(csvrow) == 1:
                # Row without latex: id only, empty formula.
                writemathid = csvrow[0]
                latex = ""
            else:
                # The latex itself may contain commas; re-join the tail.
                writemathid, latex = int(csvrow[0]), csvrow[1:]
                latex = ','.join(latex)
            translate[latex] = writemathid
            translate_id[writemathid] = latex

    # Attach the latex formula to every object (needed below when
    # building formula_id2latex).
    for hwr in hwr_objects:
        hwr.formula_in_latex = translate_id[hwr.formula_id]

    formula_id2latex = {}
    for el in hwr_objects:
        if el.formula_id not in formula_id2latex:
            formula_id2latex[el.formula_id] = el.formula_in_latex

    for hwr in hwr_objects:
        # NOTE(review): this re-assignment repeats the loop above and is
        # redundant, but harmless.
        hwr.formula_in_latex = translate_id[hwr.formula_id]
        hwr.raw_data_id = 42
        converted_hwr.append({'is_in_testset': 0,
                              'formula_id': hwr.formula_id,
                              'handwriting': hwr,
                              'id': 42,
                              'formula_in_latex': hwr.formula_in_latex})
    with open('crohme.pickle', 'wb') as f:
        pickle.dump({'formula_id2latex': formula_id2latex,
                     'handwriting_datasets': converted_hwr},
                    f,
                    protocol=pickle.HIGHEST_PROTOCOL)
Parameters ---------- hwr_objects : list of hwr objects
def convert(self):
    """Initiate one-shot conversion.

    The current settings are used, with the exception of continuous mode.
    """
    cfg = self.config
    # Clear the continuous-mode bit (keeping the remaining 7 config bits)
    # to force a single-shot conversion.
    cfg &= (~MCP342x._continuous_mode_mask & 0x7f)
    # Setting the not-ready bit triggers the conversion.
    cfg |= MCP342x._not_ready_mask
    logger.debug('Convert ' + hex(self.address) + ' config: ' + bin(cfg))
    self.bus.write_byte(self.address, cfg)
Initiate one-shot conversion. The current settings are used, with the exception of continuous mode.
def has_no_checked_field(self, locator, **kwargs):
    """
    Checks if the page or current node has no radio button or checkbox with
    the given label, value, or id that is currently checked.

    Args:
        locator (str): The label, name, or id of a checked field.
        **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.

    Returns:
        bool: Whether it doesn't exist.
    """
    # Force the query to match only fields that are currently checked.
    query_filters = dict(kwargs, checked=True)
    return self.has_no_selector("field", locator, **query_filters)
Checks if the page or current node has no radio button or checkbox with the given label, value, or id that is currently checked. Args: locator (str): The label, name, or id of a checked field. **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`. Returns: bool: Whether it doesn't exist.
def set_value_all(self, twig=None, value=None, check_default=False, **kwargs):
    """
    Set the value of all returned :class:`Parameter`s in this
    ParameterSet.

    Any :class:`Parameter` that would be included in the resulting
    ParameterSet from a :func:`filter` call with the same arguments will
    have their value set.

    Note: setting the value of a Parameter in a ParameterSet WILL change
    that Parameter across any parent ParameterSets (including the
    :class:`phoebe.frontend.bundle.Bundle`)

    :parameter str twig: the twig to search for the parameter
    :parameter value: the value to set. Provide units, if necessary, by
        sending a Quantity object (ie 2.4*u.rad)
    :parameter bool check_default: whether to exclude any default values.
        Defaults to False (unlike all filtering). Note that this acts on
        the current ParameterSet so any filtering done before this call
        will EXCLUDE defaults by default.
    :parameter **kwargs: meta-tags to search
    :raises ValueError: if no parameters are found and ignore_none is not set
    """
    if twig is not None and value is None:
        # then try to support value as the first argument if no matches with twigs
        if not isinstance(twig, str):
            # Non-string first argument cannot be a twig: treat it as the value.
            value = twig
            twig = None
        elif not len(self.filter(twig=twig, check_default=check_default, **kwargs)):
            # A string that matches nothing is assumed to be the value itself.
            value = twig
            twig = None

    params = self.filter(twig=twig,
                         check_default=check_default,
                         **kwargs).to_list()

    # 'ignore_none' is consumed here so it is not forwarded to set_value.
    if not kwargs.pop('ignore_none', False) and not len(params):
        raise ValueError("no parameters found")

    for param in params:
        # NOTE(review): when 'index' is passed this returns on the first
        # iteration and only sets a single indexed value — presumably
        # intentional, but worth confirming.
        if "index" in kwargs.keys():
            return self.get_parameter(twig=twig,
                                      **kwargs).set_index_value(value=value,
                                                                **kwargs)

        param.set_value(value=value, **kwargs)
Set the value of all returned :class:`Parameter`s in this ParameterSet. Any :class:`Parameter` that would be included in the resulting ParameterSet from a :func:`filter` call with the same arguments will have their value set. Note: setting the value of a Parameter in a ParameterSet WILL change that Parameter across any parent ParameterSets (including the :class:`phoebe.frontend.bundle.Bundle`) :parameter str twig: the twig to search for the parameter :parameter value: the value to set. Provide units, if necessary, by sending a Quantity object (ie 2.4*u.rad) :parameter bool check_default: whether to exclude any default values. Defaults to False (unlike all filtering). Note that this acts on the current ParameterSet so any filtering done before this call will EXCLUDE defaults by default. :parameter **kwargs: meta-tags to search
def get_event_attendee(self, id, attendee_id, **data):
    """
    GET /events/:id/attendees/:attendee_id/
    Returns a single :format:`attendee` by ID, as the key ``attendee``.
    """
    # Bug fix: the URL previously used "{0}" for both placeholders, so the
    # event id was substituted in place of the attendee id and attendee_id
    # was silently ignored.
    return self.get("/events/{0}/attendees/{1}/".format(id, attendee_id),
                    data=data)
GET /events/:id/attendees/:attendee_id/ Returns a single :format:`attendee` by ID, as the key ``attendee``.
def require_parents(packages):
    """
    Exclude any apparent package that apparently doesn't include its
    parent.

    For example, exclude 'foo.bar' if 'foo' is not present.
    """
    kept = []
    for name in packages:
        parent, _, _ = name.rpartition('.')
        # Drop sub-packages whose parent has not already been kept;
        # top-level names (empty parent) are always kept.
        if parent and parent not in kept:
            continue
        kept.append(name)
        yield name
Exclude any apparent package that apparently doesn't include its parent. For example, exclude 'foo.bar' if 'foo' is not present.