code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def template_statemgr_yaml(cl_args, zookeepers):
  """Render statemgr.yaml from its template, filling in the ZooKeeper hosts.

  :param cl_args: command-line argument dict; only ``config_path`` is read
  :param zookeepers: iterable of ZooKeeper host strings, with or without port
  """
  config_path = cl_args["config_path"]
  template_path = "%s/standalone/templates/statemgr.template.yaml" % config_path
  output_path = "%s/standalone/statemgr.yaml" % config_path
  quoted_hosts = []
  for zk in zookeepers:
    # Append the default ZooKeeper client port when none was given.
    if ":" in zk:
      quoted_hosts.append('"%s"' % zk)
    else:
      quoted_hosts.append('"%s:2181"' % zk)
  template_file(template_path, output_path,
                {"<zookeeper_host:zookeeper_port>": ",".join(quoted_hosts)})
Template statemgr.yaml
def get_file_size(fileobj):
    """Return the total size in bytes of a file-like object.

    The current position is remembered, the cursor is moved to the end of
    the stream to read the size, and the position is restored afterwards.
    """
    seek_end = 2  # whence value meaning "relative to end" (os.SEEK_END)
    saved_position = fileobj.tell()
    fileobj.seek(0, seek_end)
    size = fileobj.tell()
    fileobj.seek(saved_position)
    return size
Returns the size of a file-like object.
def pool_context(*args, **kwargs):
    """Context manager for multiprocessing.Pool (Python 2.7 compatibility).

    All arguments are forwarded to :class:`multiprocessing.Pool`. The pool is
    terminated when the context exits, whether normally or via an exception.
    """
    pool = Pool(*args, **kwargs)
    try:
        yield pool
    finally:
        # The original had `except Exception as e: raise e`, which is a no-op
        # re-raise that only obscured tracebacks; `finally` alone suffices.
        pool.terminate()
Context manager for multiprocessing.Pool class (for compatibility with Python 2.7.x)
def dist_hamming(src, tar, diff_lens=True):
    """Return the normalized Hamming distance between two strings.

    This is a wrapper for :py:meth:`Hamming.dist`.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    diff_lens : bool
        If True (default), the length difference between the strings is
        counted as that many mismatches (as if the shorter string were
        padded with non-matching characters). If False, an exception is
        raised for strings of unequal lengths.

    Returns
    -------
    float
        The normalized Hamming distance

    Examples
    --------
    >>> round(dist_hamming('cat', 'hat'), 12)
    0.333333333333
    >>> dist_hamming('Niall', 'Neil')
    0.6
    >>> dist_hamming('aluminum', 'Catalan')
    1.0
    >>> dist_hamming('ATCG', 'TAGC')
    1.0
    """
    hamming = Hamming()
    return hamming.dist(src, tar, diff_lens)
Return the normalized Hamming distance between two strings. This is a wrapper for :py:meth:`Hamming.dist`. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison diff_lens : bool If True (default), this returns the Hamming distance for those characters that have a matching character in both strings plus the difference in the strings' lengths. This is equivalent to extending the shorter string with obligatorily non-matching characters. If False, an exception is raised in the case of strings of unequal lengths. Returns ------- float The normalized Hamming distance Examples -------- >>> round(dist_hamming('cat', 'hat'), 12) 0.333333333333 >>> dist_hamming('Niall', 'Neil') 0.6 >>> dist_hamming('aluminum', 'Catalan') 1.0 >>> dist_hamming('ATCG', 'TAGC') 1.0
def set_target(self, target: EventDispatcherBase) -> None:
    """Record the dispatcher that will dispatch this event.

    Intended to be called exactly once, by the dispatcher itself.

    Args:
        target (EventDispatcherBase): The event dispatcher that will
            dispatch this event.

    Raises:
        PermissionError: If the target property has already been set.
        TypeError: If `target` is not an `EventDispatcherBase` instance.
    """
    already_assigned = self._target is not None
    if already_assigned:
        raise PermissionError("The target property already has a valid value.")
    if isinstance(target, EventDispatcherBase):
        self._target = target
    else:
        raise TypeError("Invalid target type: {}".format(target))
This method should be called by the event dispatcher that dispatches this event to set its target property. Args: target (EventDispatcherBase): The event dispatcher that will dispatch this event. Raises: PermissionError: If the target property of the event has already been set. TypeError: If `target` is not an `EventDispatcherBase` instance.
def modify_conf(cfgfile, service_name, outfn):
    """Modify config file neutron and keystone to include enabler options.

    :param cfgfile: path of the input configuration file
    :param service_name: key into the module-level ``service_options`` table
    :param outfn: path of the output file to write
    """
    if not cfgfile or not outfn:
        print('ERROR: There is no config file.')
        # NOTE(review): exits with status 0 even on error; preserved because
        # callers may rely on it -- confirm before changing.
        sys.exit(0)
    options = service_options[service_name]
    with open(cfgfile, 'r') as cf:
        lines = cf.readlines()
    for opt in options:
        op = opt.get('option')
        res = [line for line in lines if line.startswith(op)]
        if len(res) > 1:
            print('ERROR: There are more than one %s option.' % res)
            sys.exit(0)
        if res:
            (op, sep, val) = (res[0].strip('\n').replace(' ', '').
                              partition('='))
            new_val = None
            if opt.get('is_list'):
                # Value for this option can contain list of values.
                # Append the value if it does not exist.
                if not any(opt.get('value') == value
                           for value in val.split(',')):
                    new_val = ','.join((val, opt.get('value')))
            else:
                if val != opt.get('value'):
                    new_val = opt.get('value')
            if new_val:
                # The setting is different, replace it with the new one.
                opt_idx = lines.index(res[0])
                lines.pop(opt_idx)
                lines.insert(opt_idx,
                             '='.join((opt.get('option'), new_val + '\n')))
        else:
            # Option does not exist. Add it right after its section header.
            try:
                sec_idx = lines.index('[' + opt.get('section') + ']\n')
                lines.insert(sec_idx + 1, '='.join(
                    (opt.get('option'), opt.get('value') + '\n')))
            except ValueError:
                print('Invalid %s section name.' % opt.get('section'))
                sys.exit(0)
    # writelines() replaces the original quadratic string accumulation.
    with open(outfn, 'w') as fwp:
        fwp.writelines(lines)
Modify config file neutron and keystone to include enabler options.
def get_sample_times(self):
    """Return an Array containing the sample times.

    Times are the sample indices scaled by the sample interval; when an
    epoch is set it is added as an offset.
    """
    times = Array(range(len(self))) * self._delta_t
    if self._epoch is not None:
        times = times + float(self._epoch)
    return times
Return an Array containing the sample times.
def run(self, data_dir=None):
    """
    Note: this function will check the experiments directory for a special
    file, scheduler.info, that details how often each experiment should be
    run and the last time the experiment was run. If the time since the
    experiment was run is shorter than the scheduled interval in seconds,
    then the experiment will not be run.

    :param data_dir: optional override of the centinel home directory;
        results are then written under ``<data_dir>/results``
    :return: None
    """
    # XXX: android build needs this. refactor
    if data_dir:
        centinel_home = data_dir
        self.config['dirs']['results_dir'] = os.path.join(centinel_home,
                                                          'results')
    logging.info('Centinel started.')
    if not os.path.exists(self.config['dirs']['results_dir']):
        logging.warn("Creating results directory in "
                     "%s" % (self.config['dirs']['results_dir']))
        os.makedirs(self.config['dirs']['results_dir'])
    logging.debug("Results directory: %s" % (self.config['dirs']['results_dir']))
    # load scheduler information
    sched_filename = os.path.join(self.config['dirs']['experiments_dir'],
                                  'scheduler.info')
    logging.debug("Loading scheduler file.")
    sched_info = {}
    if os.path.exists(sched_filename):
        with open(sched_filename, 'r') as file_p:
            try:
                sched_info = json.load(file_p)
            except Exception as exp:
                # A corrupt scheduler file aborts the whole run.
                logging.error("Failed to load the "
                              "scheduler: %s" % str(exp))
                return
        logging.debug("Scheduler file loaded.")
    logging.debug("Processing the experiment schedule.")
    for name in sched_info:
        # check if we should preempt on the experiment (if the
        # time to run next is greater than the current time) and
        # store the last run time as now
        #
        # Note: if the experiment is not in the scheduler, then it
        # will not be run at all.
        run_next = sched_info[name]['last_run']
        run_next += sched_info[name]['frequency']
        if run_next > time.time():
            # NOTE(review): `long` is Python 2 only.
            run_next_str = datetime.fromtimestamp(long(run_next))
            logging.debug("Skipping %s, it will "
                          "be run on or after %s." % (name, run_next_str))
            continue
        # backward compatibility with older-style scheduler
        if 'python_exps' not in sched_info[name]:
            self.run_exp(name=name)
        else:
            exps = sched_info[name]['python_exps'].items()
            for python_exp, exp_config in exps:
                logging.debug("Running %s." % python_exp)
                self.run_exp(name=python_exp, exp_config=exp_config,
                             schedule_name=name)
                logging.debug("Finished running %s." % python_exp)
        # Record the run time so the next invocation can skip this entry
        # until its interval has elapsed again.
        sched_info[name]['last_run'] = time.time()
    logging.debug("Updating timeout values in scheduler.")
    # write out the updated last run times
    with open(sched_filename, 'w') as file_p:
        json.dump(sched_info, file_p, indent=2, separators=(',', ': '))
    self.consolidate_results()
    logging.info("Finished running experiments. "
                 "Look in %s for results." % (self.config['dirs']['results_dir']))
Note: this function will check the experiments directory for a special file, scheduler.info, that details how often each experiment should be run and the last time the experiment was run. If the time since the experiment was run is shorter than the scheduled interval in seconds, then the experiment will not be run. :param data_dir: :return:
def from_path(path: str, encoding: str = 'utf-8', **kwargs) -> BELGraph:
    """Load a BEL graph from a file resource.

    This function is a thin wrapper around :func:`from_lines`.

    :param path: A file path
    :param encoding: the encoding to use when reading this file. Is passed to
        :code:`codecs.open`. See the python `docs
        <https://docs.python.org/3/library/codecs.html#standard-encodings>`_
        for a list of standard encodings. For example, files starting with a
        UTF-8 BOM should use :code:`utf_8_sig`.

    Remaining keyword arguments are forwarded to
    :func:`pybel.io.line_utils.parse_lines`.
    """
    log.info('Loading from path: %s', path)
    graph = BELGraph(path=path)
    expanded_path = os.path.expanduser(path)
    with codecs.open(expanded_path, encoding=encoding) as lines:
        parse_lines(graph=graph, lines=lines, **kwargs)
    return graph
Load a BEL graph from a file resource. This function is a thin wrapper around :func:`from_lines`. :param path: A file path :param encoding: the encoding to use when reading this file. Is passed to :code:`codecs.open`. See the python `docs <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ for a list of standard encodings. For example, files starting with a UTF-8 BOM should use :code:`utf_8_sig`. The remaining keyword arguments are passed to :func:`pybel.io.line_utils.parse_lines`.
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
                 smooth=False):
    """Computes BLEU score of translated segments against one or more references.

    Args:
      reference_corpus: list of lists of references for each translation. Each
          reference should be tokenized into a list of tokens.
      translation_corpus: list of translations to score. Each translation
          should be tokenized into a list of tokens.
      max_order: Maximum n-gram order to use when computing BLEU score.
      smooth: Whether or not to apply Lin et al. 2004 smoothing.

    Returns:
      6-tuple of (bleu score, n-gram precisions, brevity penalty,
      length ratio, translation length, reference length).
    """
    matches_by_order = [0] * max_order
    possible_matches_by_order = [0] * max_order
    reference_length = 0
    translation_length = 0
    for (references, translation) in zip(reference_corpus,
                                         translation_corpus):
        # Effective reference length: the shortest reference per segment.
        reference_length += min(len(r) for r in references)
        translation_length += len(translation)
        # Union of reference n-gram counts (element-wise max across refs).
        merged_ref_ngram_counts = collections.Counter()
        for reference in references:
            merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
        translation_ngram_counts = _get_ngrams(translation, max_order)
        # Clipped matches: intersection keeps min(trans count, ref count).
        overlap = translation_ngram_counts & merged_ref_ngram_counts
        for ngram in overlap:
            matches_by_order[len(ngram) - 1] += overlap[ngram]
        for order in range(1, max_order + 1):
            possible_matches = len(translation) - order + 1
            if possible_matches > 0:
                possible_matches_by_order[order - 1] += possible_matches
    precisions = [0] * max_order
    for i in range(0, max_order):
        if smooth:
            # Add-one smoothing (Lin & Och 2004).
            precisions[i] = ((matches_by_order[i] + 1.) /
                             (possible_matches_by_order[i] + 1.))
        else:
            if possible_matches_by_order[i] > 0:
                precisions[i] = (float(matches_by_order[i]) /
                                 possible_matches_by_order[i])
            else:
                precisions[i] = 0.0
    # Geometric mean of the precisions; zero if any precision is zero.
    if min(precisions) > 0:
        p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
        geo_mean = math.exp(p_log_sum)
    else:
        geo_mean = 0
    # Brevity penalty: penalize translations shorter than the references.
    ratio = float(translation_length) / reference_length
    if ratio > 1.0:
        bp = 1.
    else:
        bp = math.exp(1 - 1. / ratio)
    bleu = geo_mean * bp
    return (bleu, precisions, bp, ratio, translation_length,
            reference_length)
Computes BLEU score of translated segments against one or more references. Args: reference_corpus: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. translation_corpus: list of translations to score. Each translation should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: 6-tuple of (BLEU score, n-gram precisions, brevity penalty, length ratio, translation length, reference length).
def batch_means(x, f=lambda y: y, theta=.5, q=.95, burn=0):
    """
    TODO: Use Bayesian CI.

    Returns the half-width of the frequentist confidence interval
    (q'th quantile) of the Monte Carlo estimate of E[f(x)].

    :Parameters:
        x : sequence
            Sampled series. Must be a one-dimensional array.
        f : function
            The MCSE of E[f(x)] will be computed.
        theta : float between 0 and 1
            The batch length will be set to len(x) ** theta.
        q : float between 0 and 1
            The desired quantile.
        burn : int
            Number of leading samples to discard.

    :Example:
        >>> batch_means(x, f=lambda x: x**2, theta=.5, q=.95)

    :Reference: Flegal, James M. and Haran, Murali and Jones, Galin L. (2007).
        Markov chain Monte Carlo: Can we trust the third significant figure?

    :Note: Requires SciPy
    """
    try:
        from scipy import stats
    except ImportError:
        raise ImportError('SciPy must be installed to use batch_means.')
    x = x[burn:]
    n = len(x)
    # Batch length b = n ** theta; a = number of whole batches.
    b = int(n ** theta)   # was np.int, which was removed in NumPy 1.24
    a = n // b            # integer division (plain `/` is a float on Py3)
    t_quant = stats.t.isf(1 - q, a - 1)
    # Per-batch means of f(x); `range` replaces the Python-2-only `xrange`.
    Y = np.array([np.mean(f(x[i * b:(i + 1) * b])) for i in range(a)])
    sig = b / (a - 1.) * sum((Y - np.mean(f(x))) ** 2)
    return t_quant * sig / np.sqrt(n)
TODO: Use Bayesian CI. Returns the half-width of the frequentist confidence interval (q'th quantile) of the Monte Carlo estimate of E[f(x)]. :Parameters: x : sequence Sampled series. Must be a one-dimensional array. f : function The MCSE of E[f(x)] will be computed. theta : float between 0 and 1 The batch length will be set to len(x) ** theta. q : float between 0 and 1 The desired quantile. :Example: >>>batch_means(x, f=lambda x: x**2, theta=.5, q=.95) :Reference: Flegal, James M. and Haran, Murali and Jones, Galin L. (2007). Markov chain Monte Carlo: Can we trust the third significant figure? <Publication> :Note: Requires SciPy
def execute_api_request(self):
    """
    Execute the request and return the response.

    :return: Response of the request
    :raises Exception: when authentication is unavailable or the server
        answers with a non-200 status code
    """
    if not self.auth.check_auth():
        raise Exception('Authentification needed or API not available with your type of connection')
    # Build the keyword arguments for the call instead of branching on
    # every cookie/data combination.
    request_kwargs = {}
    if self.auth.is_authentified():
        request_kwargs['cookies'] = {
            BboxConstant.COOKIE_BBOX_ID: self.auth.get_cookie_id()}
    if self.parameters is not None:
        request_kwargs['data'] = self.parameters
    resp = self.call_method(self.api_url.get_url(), **request_kwargs)
    if resp.status_code != 200:
        # This means something went wrong.
        raise Exception('Error {} with request {}'.format(
            resp.status_code, self.api_url.get_url()))
    return resp
Execute the request and return json data as a dict :return: data dict
def reset_can(self, channel=Channel.CHANNEL_CH0, flags=ResetFlags.RESET_ALL):
    """
    Resets a CAN channel of a device (hardware reset, empty buffer, and so on).

    :param int channel: CAN channel to be reset
        (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
    :param int flags: Flags defining what should be reset
        (see enum :class:`ResetFlags`).
    """
    # Delegates directly to the underlying USB-CAN library wrapper.
    UcanResetCanEx(self._handle, channel, flags)
Resets a CAN channel of a device (hardware reset, empty buffer, and so on). :param int channel: CAN channel, to be reset (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`). :param int flags: Flags defines what should be reset (see enum :class:`ResetFlags`).
def GET_getitemvalues(self) -> None:
    """Collect the values of all |Variable| objects observed by the current
    |GetItem| objects into the output buffer.

    For |GetItem| objects observing time series,
    |HydPyServer.GET_getitemvalues| returns only the values within the
    current simulation period.
    """
    for current_item in state.getitems:
        pairs = current_item.yield_name2value(state.idx1, state.idx2)
        for name, value in pairs:
            self._outputs[name] = value
Get the values of all |Variable| objects observed by the current |GetItem| objects. For |GetItem| objects observing time series, |HydPyServer.GET_getitemvalues| returns only the values within the current simulation period.
def get_extrapolated_conductivity(temps, diffusivities, new_temp, structure,
                                  species):
    """Returns extrapolated mS/cm conductivity.

    Args:
        temps ([float]): A sequence of temperatures. units: K
        diffusivities ([float]): A sequence of diffusivities (e.g., from
            DiffusionAnalyzer.diffusivity). units: cm^2/s
        new_temp (float): desired temperature. units: K
        structure (structure): Structure used for the diffusivity calculation
        species (string/Specie): conducting species

    Returns:
        (float) Conductivity at extrapolated temp in mS/cm.
    """
    diffusivity = get_extrapolated_diffusivity(temps, diffusivities, new_temp)
    conversion = get_conversion_factor(structure, species, new_temp)
    return diffusivity * conversion
Returns extrapolated mS/cm conductivity. Args: temps ([float]): A sequence of temperatures. units: K diffusivities ([float]): A sequence of diffusivities (e.g., from DiffusionAnalyzer.diffusivity). units: cm^2/s new_temp (float): desired temperature. units: K structure (structure): Structure used for the diffusivity calculation species (string/Specie): conducting species Returns: (float) Conductivity at extrapolated temp in mS/cm.
def update_meta_data_for_state_view(graphical_editor_view, state_v, affects_children=False, publish=True):
    """This method updates the meta data of a state view

    :param graphical_editor_view: Graphical Editor view the change occurred in
    :param state_v: The state view which has been changed/moved
    :param affects_children: Whether the children of the state view have been resized or not
    :param publish: Whether to publish the changes of the meta data
    """
    from gaphas.item import NW
    # Update all port meta data to match with new position and size of parent
    update_meta_data_for_port(graphical_editor_view, state_v, None)
    if affects_children:
        update_meta_data_for_name_view(graphical_editor_view, state_v.name_view, publish=False)
        for transition_v in state_v.get_transitions():
            update_meta_data_for_transition_waypoints(graphical_editor_view, transition_v, None, publish=False)
        for child_state_v in state_v.child_state_views():
            # Recurse into children with publish=False: only the outermost
            # call emits a single 'meta_data_changed' signal at the end.
            update_meta_data_for_state_view(graphical_editor_view, child_state_v, True, publish=False)
    # Position relative to the parent, anchored at the north-west handle.
    rel_pos = calc_rel_pos_to_parent(graphical_editor_view.editor.canvas, state_v, state_v.handles()[NW])
    state_v.model.set_meta_data_editor('size', (state_v.width, state_v.height))
    state_v.model.set_meta_data_editor('rel_pos', rel_pos)
    if publish:
        graphical_editor_view.emit('meta_data_changed', state_v.model, "size", affects_children)
This method updates the meta data of a state view :param graphical_editor_view: Graphical Editor view the change occurred in :param state_v: The state view which has been changed/moved :param affects_children: Whether the children of the state view have been resized or not :param publish: Whether to publish the changes of the meta data
def read_pandas (self, format='table', **kwargs):
    """Read this path using :mod:`pandas`.

    The function ``pandas.read_FORMAT`` is called where ``FORMAT`` is set
    from the argument *format*; *kwargs* are forwarded to that function.
    Supported formats likely include ``clipboard``, ``csv``, ``excel``,
    ``fwf``, ``gbq``, ``html``, ``json``, ``msgpack``, ``pickle``, ``sql``,
    ``sql_query``, ``sql_table``, ``stata``, ``table``. Note that ``hdf``
    is not supported because it requires a non-keyword argument; see
    :meth:`Path.read_hdf`.
    """
    import pandas

    func_name = 'read_' + format
    reader = getattr (pandas, func_name, None)
    if not callable (reader):
        raise PKError ('unrecognized Pandas format %r: no function pandas.read_%s',
                       format, format)
    with self.open ('rb') as f:
        return reader (f, **kwargs)
Read using :mod:`pandas`. The function ``pandas.read_FORMAT`` is called where ``FORMAT`` is set from the argument *format*. *kwargs* are passed to this function. Supported formats likely include ``clipboard``, ``csv``, ``excel``, ``fwf``, ``gbq``, ``html``, ``json``, ``msgpack``, ``pickle``, ``sql``, ``sql_query``, ``sql_table``, ``stata``, ``table``. Note that ``hdf`` is not supported because it requires a non-keyword argument; see :meth:`Path.read_hdf`.
def patch_wheel(in_wheel, patch_fname, out_wheel=None):
    """ Apply ``-p1`` style patch in `patch_fname` to contents of `in_wheel`

    If `out_wheel` is None (the default), overwrite the wheel `in_wheel`
    in-place.

    Parameters
    ----------
    in_wheel : str
        Filename of wheel to process
    patch_fname : str
        Filename of patch file.  Will be applied with ``patch -p1 <
        patch_fname``
    out_wheel : None or str
        Filename of patched wheel to write.  If None, overwrite `in_wheel`

    Raises
    ------
    ValueError
        If `patch_fname` does not exist.
    RuntimeError
        If the ``patch`` subprocess exits with a non-zero status.
    """
    in_wheel = abspath(in_wheel)
    patch_fname = abspath(patch_fname)
    if out_wheel is None:
        out_wheel = in_wheel
    else:
        out_wheel = abspath(out_wheel)
    if not exists(patch_fname):
        raise ValueError("patch file {0} does not exist".format(patch_fname))
    with InWheel(in_wheel, out_wheel):
        with open(patch_fname, 'rb') as fobj:
            patch_proc = Popen(['patch', '-p1'],
                               stdin=fobj,
                               stdout=PIPE,
                               stderr=PIPE)
            stdout, stderr = patch_proc.communicate()
            if patch_proc.returncode != 0:
                # `patch` reports its failures on stderr; the original
                # message showed only stdout and usually hid the real cause.
                raise RuntimeError(
                    "Patch failed with stdout:\n" + stdout.decode('latin1') +
                    "\nstderr:\n" + stderr.decode('latin1'))
Apply ``-p1`` style patch in `patch_fname` to contents of `in_wheel` If `out_wheel` is None (the default), overwrite the wheel `in_wheel` in-place. Parameters ---------- in_wheel : str Filename of wheel to process patch_fname : str Filename of patch file. Will be applied with ``patch -p1 < patch_fname`` out_wheel : None or str Filename of patched wheel to write. If None, overwrite `in_wheel`
def show_system_info_output_show_system_info_stack_mac(self, **kwargs):
    """Auto Generated Code.

    Builds the XML payload
    ``show_system_info/output/show-system-info/stack-mac`` (text taken from
    ``kwargs['stack_mac']``) and passes it to the callback
    (``kwargs['callback']``, defaulting to ``self._callback``).
    """
    root = ET.Element("show_system_info")
    output_el = ET.SubElement(root, "output")
    info_el = ET.SubElement(output_el, "show-system-info")
    mac_el = ET.SubElement(info_el, "stack-mac")
    mac_el.text = kwargs.pop('stack_mac')
    callback = kwargs.pop('callback', self._callback)
    return callback(root)
Auto Generated Code
def time_stops(self):
    """ Valid time steps for this service as a list of datetime objects. """
    if not self.supports_time:
        return []
    if self.service.calendar == 'standard':
        units = self.service.time_interval_units
        interval = self.service.time_interval
        steps = [self.time_start]
        if units in ('years', 'decades', 'centuries'):
            # Calendar-aware stepping: shift the year field directly.
            if units == 'years':
                years = interval
            elif units == 'decades':
                years = 10 * interval
            else:
                years = 100 * interval
            next_value = lambda x: x.replace(year=x.year + years)
        elif units == 'months':
            def _fn(x):
                # Advance by whole months, clamping the day to the last
                # valid day of the target month (e.g. Jan 31 -> Feb 28).
                year = x.year + (x.month+interval-1) // 12
                month = (x.month+interval) % 12 or 12
                day = min(x.day, calendar.monthrange(year, month)[1])
                return x.replace(year=year, month=month, day=day)
            next_value = _fn
        else:
            # Fixed-length units map directly onto a timedelta.
            if units == 'milliseconds':
                delta = timedelta(milliseconds=interval)
            elif units == 'seconds':
                delta = timedelta(seconds=interval)
            elif units == 'minutes':
                delta = timedelta(minutes=interval)
            elif units == 'hours':
                delta = timedelta(hours=interval)
            elif units == 'days':
                delta = timedelta(days=interval)
            elif units == 'weeks':
                delta = timedelta(weeks=interval)
            else:
                raise ValidationError(
                    "Service has an invalid time_interval_units: {}".format(self.service.time_interval_units)
                )
            next_value = lambda x: x + delta
        # Generate steps until the next one would pass the end of the range.
        while steps[-1] < self.time_end:
            value = next_value(steps[-1])
            if value > self.time_end:
                break
            steps.append(value)
        return steps
    else:
        # TODO
        raise NotImplementedError
Valid time steps for this service as a list of datetime objects.
def gene_id_of_associated_transcript(effect):
    """Return the Ensembl gene ID of the transcript associated with `effect`,
    or None when the effect has no transcript.
    """
    def _gene_id(transcript):
        return transcript.gene_id

    return apply_to_transcript_if_exists(
        effect=effect,
        fn=_gene_id,
        default=None)
Ensembl gene ID of transcript associated with effect, returns None if effect does not have transcript.
def create_trace(
    turn_activity: Activity,
    name: str,
    value: object = None,
    value_type: str = None,
    label: str = None,
) -> Activity:
    """Creates a trace activity based on this activity.

    :param turn_activity: The activity the trace replies to.
    :type turn_activity: Activity
    :param name: The value to assign to the trace activity's <see cref="Activity.name"/> property.
    :type name: str
    :param value: The value to assign to the trace activity's <see cref="Activity.value"/> property, defaults to None
    :type value: object, optional
    :param value_type: The value to assign to the trace activity's <see cref="Activity.value_type"/> property, defaults to None
    :type value_type: str, optional
    :param label: The value to assign to the trace activity's <see cref="Activity.label"/> property, defaults to None
    :type label: str, optional
    :return: The created trace activity.
    :rtype: Activity
    """
    # The trace is a reply: its sender is the original activity's recipient
    # (falling back to an empty account when no recipient is set).
    from_property = (
        ChannelAccount(
            id=turn_activity.recipient.id, name=turn_activity.recipient.name
        )
        if turn_activity.recipient is not None
        else ChannelAccount()
    )
    if value_type is None and value is not None:
        # Default the declared value type to the Python type name.
        value_type = type(value).__name__
    reply = Activity(
        type=ActivityTypes.trace,
        timestamp=datetime.utcnow(),
        from_property=from_property,
        recipient=ChannelAccount(
            id=turn_activity.from_property.id,
            name=turn_activity.from_property.name
        ),
        reply_to_id=turn_activity.id,
        service_url=turn_activity.service_url,
        channel_id=turn_activity.channel_id,
        conversation=ConversationAccount(
            is_group=turn_activity.conversation.is_group,
            id=turn_activity.conversation.id,
            name=turn_activity.conversation.name,
        ),
        name=name,
        label=label,
        value_type=value_type,
        value=value,
    )
    return reply
Creates a trace activity based on this activity. :param turn_activity: :type turn_activity: Activity :param name: The value to assign to the trace activity's <see cref="Activity.name"/> property. :type name: str :param value: The value to assign to the trace activity's <see cref="Activity.value"/> property., defaults to None :param value: object, optional :param value_type: The value to assign to the trace activity's <see cref="Activity.value_type"/> property, defaults to None :param value_type: str, optional :param label: The value to assign to the trace activity's <see cref="Activity.label"/> property, defaults to None :param label: str, optional :return: The created trace activity. :rtype: Activity
def or_(self, first_qe, *qes):
    ''' Add a $or expression to the query, matching documents that satisfy
        any of the given query expressions. The ``| operator`` on query
        expressions does the same thing.

        **Examples**: ``query.or_(SomeDocClass.age == 18, SomeDocClass.age == 17)`` becomes ``{'$or' : [{ 'age' : 18 }, { 'age' : 17 }]}``

        :param first_qe: the first :class:`ommongo.query_expression.QueryExpression`
        :param qes: further query expressions to OR with the first
    '''
    # Fold all expressions together with `|`, then register the combined
    # filter on the query.
    res = first_qe
    for qe in qes:
        res = (res | qe)
    self.filter(res)
    return self
Add a $or expression to the query, matching documents that satisfy any of the given query expressions. The ``| operator`` on query expressions does the same thing **Examples**: ``query.or_(SomeDocClass.age == 18, SomeDocClass.age == 17)`` becomes ``{'$or' : [{ 'age' : 18 }, { 'age' : 17 }]}`` :param query_expressions: Instances of :class:`ommongo.query_expression.QueryExpression`
def claim_pep_node(self, node_namespace, *, register_feature=True, notify=False):
    """
    Claim node `node_namespace`.

    :param node_namespace: the pubsub node whose events shall be handled.
    :param register_feature: Whether to publish the `node_namespace` as feature.
    :param notify: Whether to register the ``+notify`` feature to receive
        notification without explicit subscription.
    :raises RuntimeError: if a handler for `node_namespace` is already set.
    :returns: a :class:`~aioxmpp.pep.service.RegisteredPEPNode` instance
        representing the claim.

    .. seealso::

        :class:`aioxmpp.pep.register_pep_node`
            a descriptor which can be used with :class:`~aioxmpp.service.Service`
            subclasses to claim a PEP node automatically.

    This registers `node_namespace` as feature for service discovery unless
    ``register_feature=False`` is passed.

    .. note::

        For `notify` to work, it is required that
        :class:`aioxmpp.EntityCapsService` is loaded and that presence is
        re-sent soon after :meth:`~aioxmpp.EntityCapsService.on_ver_changed`
        fires. See the documentation of the class and the signal for details.
    """
    if node_namespace in self._pep_node_claims:
        raise RuntimeError(
            "claiming already claimed node"
        )
    registered_node = RegisteredPEPNode(
        self,
        node_namespace,
        register_feature=register_feature,
        notify=notify,
    )
    # Unregister the claim automatically when the RegisteredPEPNode is
    # garbage collected; WeakMethod avoids the finalizer keeping the node
    # alive through its own bound method.
    finalizer = weakref.finalize(
        registered_node,
        weakref.WeakMethod(registered_node._unregister)
    )
    # we cannot guarantee that disco is not cleared up already,
    # so we do not unclaim the feature on exit
    finalizer.atexit = False
    self._pep_node_claims[node_namespace] = registered_node
    return registered_node
Claim node `node_namespace`. :param node_namespace: the pubsub node whose events shall be handled. :param register_feature: Whether to publish the `node_namespace` as feature. :param notify: Whether to register the ``+notify`` feature to receive notification without explicit subscription. :raises RuntimeError: if a handler for `node_namespace` is already set. :returns: a :class:`~aioxmpp.pep.service.RegisteredPEPNode` instance representing the claim. .. seealso:: :class:`aioxmpp.pep.register_pep_node` a descriptor which can be used with :class:`~aioxmpp.service.Service` subclasses to claim a PEP node automatically. This registers `node_namespace` as feature for service discovery unless ``register_feature=False`` is passed. .. note:: For `notify` to work, it is required that :class:`aioxmpp.EntityCapsService` is loaded and that presence is re-sent soon after :meth:`~aioxmpp.EntityCapsService.on_ver_changed` fires. See the documentation of the class and the signal for details.
def enable(profile='allprofiles'):
    '''
    .. versionadded:: 2015.5.0

    Enable firewall profile

    Args:
        profile (Optional[str]): The name of the profile to enable. Default
            is ``allprofiles``. Valid options are:

            - allprofiles
            - domainprofile
            - privateprofile
            - publicprofile

    Returns:
        bool: True if successful

    Raises:
        CommandExecutionError: If the command fails

    CLI Example:

    .. code-block:: bash

        salt '*' firewall.enable
    '''
    command = ['netsh', 'advfirewall', 'set', profile, 'state', 'on']
    result = __salt__['cmd.run_all'](command,
                                     python_shell=False,
                                     ignore_retcode=True)
    if result['retcode'] != 0:
        raise CommandExecutionError(result['stdout'])
    return True
.. versionadded:: 2015.5.0 Enable firewall profile Args: profile (Optional[str]): The name of the profile to enable. Default is ``allprofiles``. Valid options are: - allprofiles - domainprofile - privateprofile - publicprofile Returns: bool: True if successful Raises: CommandExecutionError: If the command fails CLI Example: .. code-block:: bash salt '*' firewall.enable
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_message(self, **kwargs):
    """Auto Generated Code.

    Builds the XML payload ``logical_chassis_fwdl_status/output/
    cluster-fwdl-entries/fwdl-entries/message`` (text taken from
    ``kwargs['message']``) and passes it to the callback
    (``kwargs['callback']``, defaulting to ``self._callback``).
    """
    root = ET.Element("logical_chassis_fwdl_status")
    output_el = ET.SubElement(root, "output")
    cluster_el = ET.SubElement(output_el, "cluster-fwdl-entries")
    entries_el = ET.SubElement(cluster_el, "fwdl-entries")
    message_el = ET.SubElement(entries_el, "message")
    message_el.text = kwargs.pop('message')
    callback = kwargs.pop('callback', self._callback)
    return callback(root)
Auto Generated Code
def issue_funds(ctx, amount='uint256', rtgs_hash='bytes32', returns=STATUS):
    "In the IOU fungible the supply is set by Issuer, who issue funds."
    # Credit the issuer's balance and record the newly issued amount.
    sender = ctx.msg_sender
    ctx.accounts[sender] += amount
    ctx.issued_amounts[sender] += amount
    # Log the issuance event together with the RTGS hash.
    ctx.Issuance(sender, rtgs_hash, amount)
    return OK
In the IOU fungible the supply is set by Issuer, who issue funds.
def select_point(action, action_space, select_point_act, screen):
    """Select a unit at a point on the screen."""
    selection = spatial(action, action_space).unit_selection_point
    selection.type = select_point_act
    screen.assign_to(selection.selection_screen_coord)
Select a unit at a point.
def patch(self, url, data=None, **kwargs):
    """Issue a PATCH request through ``self.oauth_request``.

    :param str url: url to send the patch oauth request to
    :param dict data: patch data to update the service
    :param kwargs: extra params forwarded to the request api
    :return: Response of the request
    :rtype: requests.Response
    """
    method = 'patch'
    return self.oauth_request(url, method, data=data, **kwargs)
Shorthand for self.oauth_request(url, 'patch') :param str url: url to send patch oauth request to :param dict data: patch data to update the service :param kwargs: extra params to send to request api :return: Response of the request :rtype: requests.Response
def dump_registers(cls, registers, arch = None):
    """
    Dump the x86/x64 processor register values in WinDBG style.

    @type  registers: dict( str S{->} int )
    @param registers: Dictionary mapping register names to their values.

    @type  arch: str
    @param arch: Architecture of the machine whose registers were dumped.
        Defaults to autodetection from the register names; only
        L{win32.ARCH_I386} and L{win32.ARCH_AMD64} are supported.

    @rtype:  str
    @return: Text suitable for logging.
    """
    if registers is None:
        return ''
    if arch is None:
        # Autodetect from the register naming convention.
        if 'Eax' in registers:
            arch = win32.ARCH_I386
        elif 'Rax' in registers:
            arch = win32.ARCH_AMD64
        else:
            arch = 'Unknown'
    if arch not in cls.reg_template:
        raise NotImplementedError(
            "Don't know how to dump the registers for architecture: %s" % arch)
    # Work on a copy so the caller's dict is untouched, and add the
    # decoded EFlags text that the template expects.
    regs = dict(registers)
    regs['efl_dump'] = cls.dump_flags(regs['EFlags'])
    return cls.reg_template[arch] % regs
Dump the x86/x64 processor register values. The output mimics that of the WinDBG debugger. @type registers: dict( str S{->} int ) @param registers: Dictionary mapping register names to their values. @type arch: str @param arch: Architecture of the machine whose registers were dumped. Defaults to the current architecture. Currently only the following architectures are supported: - L{win32.ARCH_I386} - L{win32.ARCH_AMD64} @rtype: str @return: Text suitable for logging.
def read_midc_raw_data_from_nrel(site, start, end):
    """Request and read MIDC data directly from the raw data api.

    Parameters
    ----------
    site: string
        The MIDC station id.
    start: datetime
        Start date for requested data.
    end: datetime
        End date for requested data.

    Returns
    -------
    data:
        Dataframe with DatetimeIndex localized to the station location.

    Notes
    -----
    Requests spanning an instrumentation change will yield an error. See
    the MIDC raw data api page
    `here <https://midcdmz.nrel.gov/apps/data_api_doc.pl?_idtextlist>`_
    for more details and considerations.
    """
    # Dates are passed to the API as YYYYMMDD.
    query = '&'.join([
        'site={}'.format(site),
        'begin={}'.format(start.strftime('%Y%m%d')),
        'end={}'.format(end.strftime('%Y%m%d')),
    ])
    url = 'https://midcdmz.nrel.gov/apps/data_api.pl?' + query
    return read_midc(url, raw_data=True)
Request and read MIDC data directly from the raw data api. Parameters ---------- site: string The MIDC station id. start: datetime Start date for requested data. end: datetime End date for requested data. Returns ------- data: Dataframe with DatetimeIndex localized to the station location. Notes ----- Requests spanning an instrumentation change will yield an error. See the MIDC raw data api page `here <https://midcdmz.nrel.gov/apps/data_api_doc.pl?_idtextlist>`_ for more details and considerations.
def is_github_repo_owner_the_official_one(context, repo_owner):
    """Check whether ``repo_owner`` matches the configured official owner.

    Args:
        context (scriptworker.context.Context): the scriptworker context.
        repo_owner (str): the repo_owner to verify

    Raises:
        scriptworker.exceptions.ConfigError: when no official owner was defined

    Returns:
        bool: True when ``repo_owner`` matches the configured official owner
    """
    official = context.config['official_github_repos_owner']
    # An unset/empty owner is a worker misconfiguration, not a mismatch.
    if not official:
        raise ConfigError(
            'This worker does not have a defined owner for official GitHub repositories. '
            'Given "official_github_repos_owner": {}'.format(official)
        )
    return official == repo_owner
Given a repo_owner, check if it matches the one configured to be the official one. Args: context (scriptworker.context.Context): the scriptworker context. repo_owner (str): the repo_owner to verify Raises: scriptworker.exceptions.ConfigError: when no official owner was defined Returns: bool: True when ``repo_owner`` matches the one configured to be the official one
def Scale(self, factor):
    """Return a copy whose xs are multiplied by factor.

    factor: what to multiply by
    """
    scaled = self.Copy()
    scaled.xs = [factor * x for x in self.xs]
    return scaled
Multiplies the xs by a factor. factor: what to multiply by
def login(self, email=None, password=None, user=None):
    """Log in and set up headers with the returned private token.

    :param email: Gitlab user Email
    :param user: Gitlab username
    :param password: Gitlab user password
    :return: the session response (its 'private_token' is stored on self)
    :raise: HttpError
    :raise: ValueError when neither user nor email is given
    """
    # Username takes precedence over email when both are supplied.
    if user is not None:
        credentials = {'login': user, 'password': password}
    elif email is not None:
        credentials = {'email': email, 'password': password}
    else:
        raise ValueError('Neither username nor email provided to login')

    self.headers = {'connection': 'close'}
    response = self.post('/session', **credentials)
    self.token = response['private_token']
    # All subsequent requests authenticate with the private token.
    self.headers = {'PRIVATE-TOKEN': self.token, 'connection': 'close'}
    return response
Logs the user in and sets up the header with the private token

:param email: Gitlab user Email
:param user: Gitlab username
:param password: Gitlab user password
:return: the session response containing the private token
:raise: HttpError
:raise: ValueError
def get_copyright_metadata(self):
    """Gets the metadata for the copyright.

    return: (osid.Metadata) - metadata for the copyright
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceForm.get_group_metadata_template
    metadata = dict(self._mdata['copyright'])
    metadata['existing_string_values'] = self._my_map['copyright']
    return Metadata(**metadata)
Gets the metadata for the copyright. return: (osid.Metadata) - metadata for the copyright *compliance: mandatory -- This method must be implemented.*
def download_file_by_name(url, target_folder, file_name, mkdir=False):
    """Download a file to a directory.

    Args:
        url: A string to a valid URL.
        target_folder: Target folder for download (e.g. c:/ladybug)
        file_name: File name (e.g. testPts.zip).
        mkdir: Set to True to create the directory if doesn't exist (Default: False)
    """
    # Browser-like headers to "spoof" the download (needed for the E+ site).
    request_headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 '
                       '(KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
                       'Accept': 'text/html,application/xhtml+xml,'
                       'application/xml;q=0.9,*/*;q=0.8',
                       'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
                       'Accept-Encoding': 'none',
                       'Accept-Language': 'en-US,en;q=0.8',
                       'Connection': 'keep-alive'}

    # Ensure the target directory exists (optionally creating it).
    if not os.path.isdir(target_folder):
        if mkdir:
            preparedir(target_folder)
        elif not preparedir(target_folder, False):
            raise ValueError("Failed to find %s." % target_folder)

    file_path = os.path.join(target_folder, file_name)
    if sys.version_info < (3, 0):
        _download_py2(url, file_path, request_headers)
    else:
        _download_py3(url, file_path, request_headers)
Download a file to a directory. Args: url: A string to a valid URL. target_folder: Target folder for download (e.g. c:/ladybug) file_name: File name (e.g. testPts.zip). mkdir: Set to True to create the directory if doesn't exist (Default: False)
def validate_properties_exist(self, classname, property_names):
    """Validate that the given property names are defined on the class."""
    schema_element = self.get_element_by_class_name(classname)
    # Any requested property not declared on the schema element is an error.
    missing = set(property_names) - set(schema_element.properties.keys())
    if missing:
        raise InvalidPropertyError(
            u'Class "{}" does not have definitions for properties "{}": '
            u'{}'.format(classname, missing, property_names))
Validate that the specified property names are indeed defined on the given class.
def _set_platform_specific_keyboard_shortcuts(self):
    """
    Assign the default keyboard shortcuts in code.

    QtDesigner does not support QKeySequence::StandardKey enum based
    default keyboard shortcuts, so every standard combination ("Save",
    "Quit", etc.) has to be bound here.
    """
    standard_shortcuts = (
        (self.action_new_phrase, QKeySequence.New),
        (self.action_save, QKeySequence.Save),
        (self.action_close_window, QKeySequence.Close),
        (self.action_quit, QKeySequence.Quit),
        (self.action_undo, QKeySequence.Undo),
        (self.action_redo, QKeySequence.Redo),
        (self.action_cut_item, QKeySequence.Cut),
        (self.action_copy_item, QKeySequence.Copy),
        (self.action_paste_item, QKeySequence.Paste),
        (self.action_delete_item, QKeySequence.Delete),
        (self.action_configure_autokey, QKeySequence.Preferences),
    )
    for action, key_sequence in standard_shortcuts:
        action.setShortcuts(key_sequence)
QtDesigner does not support QKeySequence::StandardKey enum based default keyboard shortcuts. This means that all default key combinations ("Save", "Quit", etc) have to be defined in code.
def hgetall(self, name):
    """
    Returns all the fields and values in the Hash.

    The work is queued on the shared pipeline; the returned Future is
    populated only after the pipeline executes, at which point the raw
    redis reply is decoded key-by-key.

    :param name: str     the name of the redis key
    :return: Future()
    """
    with self.pipe as pipe:
        f = Future()
        # Queue the HGETALL; `res` is a deferred result that is filled in
        # when the pipeline runs.
        res = pipe.hgetall(self.redis_key(name))

        def cb():
            # Runs after pipeline execution: decode member names first,
            # then decode each value in the context of its decoded key.
            data = {}
            m_decode = self.memberparse.decode
            v_decode = self._value_decode
            for k, v in res.result.items():
                k = m_decode(k)
                v = v_decode(k, v)
                data[k] = v
            f.set(data)

        pipe.on_execute(cb)
        return f
Returns all the fields and values in the Hash. :param name: str the name of the redis key :return: Future()
def _manual_lookup(self, facebook_id, facebook_id_string):
    """
    Resolve a profile name by fetching the user's profile page directly.

    People who we have not communicated with in a long time will not
    appear in the look-ahead cache that Facebook keeps, so they must be
    resolved manually. Falls back to ``facebook_id_string`` when the name
    cannot be extracted.

    :param facebook_id: Profile ID of the user to lookup.
    :return: the resolved (and now cached) display name
    """
    response = self._session.get(
        'https://www.facebook.com/%s' % facebook_id,
        allow_redirects=True,
        timeout=10
    )

    # No point in trying to get this using BeautifulSoup. The HTML here
    # is the very epitome of what it is to be invalid...
    match = _MANUAL_NAME_MATCHER.search(response.text)
    name = match.group(1) if match else facebook_id_string

    self._cached_profiles[facebook_id] = name
    return name
People who we have not communicated with in a long time will not appear in the look-ahead cache that Facebook keeps. We must manually resolve them. :param facebook_id: Profile ID of the user to lookup. :return:
def _cleanup(self): """Cleanup after extraction & analysis.""" self._expkg = None self._extmp = None self._flag_e = True self._ifile.close()
Cleanup after extraction & analysis.
def caldata(self, time):
    '''Return whether the market is open on the given date.

    :param datetime time: the date/time to check
    :rtype: bool
    :returns: True when the market is open, False when it is closed
    '''
    day = time.date()
    # Statutory closures take precedence over everything else.
    if day in self.__ocdate['close']:
        return False
    # Explicitly declared extra trading days.
    if day in self.__ocdate['open']:
        return True
    # Otherwise: weekdays (Mon-Fri) are open, weekends are closed.
    return time.weekday() <= 4
Market open or not. :param datetime time: 欲判斷的日期 :rtype: bool :returns: True 為開市、False 為休市
def collect(self):
    """Collect the elements of the PRDD by concatenating the partitions."""
    # Our reduce implementation calls f(valueToBeAdded, accumulator), so
    # appending in that argument order preserves the partition order.
    def append_frames(left, right):
        return left.append(right)

    return self._custom_rdd_reduce(append_frames)
Collect the elements in an PRDD and concatenate the partition.
def OnMouseUp(self, event):
    """Finish a drag: compute the drop index and move the dragged row.

    The mouse can end up in 5 different places:
        Outside the Control
        On itself
        Above its starting point and on another item
        Below its starting point and on another item
        Below its starting point and not on another item

    Only the two "on another item" cases actually move the row; the
    others leave the list untouched.
    """
    if not self.IsInControl:
        # 1. Outside the control: do nothing.
        self.IsDrag = False
    elif self.IsDrag:
        # In control and a drag is in progress: determine the drop location.
        # BUG FIX: the original code wrapped this body in
        # `if not self.IsDrag:` immediately inside `elif self.IsDrag:`,
        # which made the entire drop-handling block unreachable.
        self.hitIndex = self.HitTest(event.GetPosition())
        self.dropIndex = self.hitIndex[0]
        # Drop index indicates where the drop location is; what index number
        # 2. On itself or below the control: do nothing.
        if not (self.dropIndex == self.startIndex or self.dropIndex == -1):
            # Now that dropIndex has been established do 3 things:
            # 1. gather item data
            # 2. delete item in list
            # 3. insert item & its data into the list at the new index
            dropList = []  # field values from the dragged row
            thisItem = self.GetItem(self.startIndex)
            for x in range(self.GetColumnCount()):
                dropList.append(self.GetItem(self.startIndex, x).GetText())
            thisItem.SetId(self.dropIndex)
            self.DeleteItem(self.startIndex)
            self.InsertItem(thisItem)
            for x in range(self.GetColumnCount()):
                self.SetStringItem(self.dropIndex, x, dropList[x])
        # Drag is finished either way.
        self.IsDrag = False
    event.Skip()
Generate a dropIndex. Process: check self.IsInControl, check self.IsDrag, HitTest, compare HitTest value The mouse can end up in 5 different places: Outside the Control On itself Above its starting point and on another item Below its starting point and on another item Below its starting point and not on another item
def do_shell(self, args):
    """Pass command to a system shell when line begins with '!'"""
    if _debug:
        ConsoleCmd._debug("do_shell %r", args)
    # Hand the raw command line to the system shell.
    os.system(args)
Pass command to a system shell when line begins with '!'
def fanpower_watts(ddtt):
    """Return fan power in watts for the given fan IDF object.

    Returns the string ``'autosize'`` when the maximum flow rate is
    autosized instead of a numeric value.
    """
    from eppy.bunch_subclass import BadEPFieldError  # here to prevent circular dependency
    try:
        fan_tot_eff = ddtt.Fan_Total_Efficiency  # from V+ V8.7.0 onwards
    except BadEPFieldError:
        # Older IDD versions use the Fan_Efficiency field name.
        fan_tot_eff = ddtt.Fan_Efficiency
    pascal = float(ddtt.Pressure_Rise)
    # str can fail with unicode chars :-(
    if str(ddtt.Maximum_Flow_Rate).lower() == 'autosize':
        return 'autosize'
    m3s = float(ddtt.Maximum_Flow_Rate)
    return fan_watts(fan_tot_eff, pascal, m3s)
return fan power in watts given the fan IDF object
def addattachments(message, template_path):
    """Attach every file named in the message's 'attachment' headers.

    Relative paths are resolved against the template's parent directory.
    Exits the program when an attachment file cannot be found.

    Returns (message, number_of_attachment_headers).
    """
    if 'attachment' not in message:
        return message, 0

    message = make_message_multipart(message)
    attachment_filepaths = message.get_all('attachment', failobj=[])
    template_parent_dir = os.path.dirname(template_path)

    for raw_filepath in attachment_filepaths:
        filepath = os.path.expanduser(raw_filepath.strip())
        if not filepath:
            continue
        if not os.path.isabs(filepath):
            # Relative paths are relative to the template's parent directory
            filepath = os.path.join(template_parent_dir, filepath)
        normalized_path = os.path.abspath(filepath)

        # Check that the attachment exists
        if not os.path.exists(normalized_path):
            print("Error: can't find attachment " + normalized_path)
            sys.exit(1)

        filename = os.path.basename(normalized_path)
        with open(normalized_path, "rb") as attachment:
            part = email.mime.application.MIMEApplication(attachment.read(),
                                                          Name=filename)
        part.add_header('Content-Disposition',
                        'attachment; filename="{}"'.format(filename))
        message.attach(part)
        print(">>> attached {}".format(normalized_path))

    del message['attachment']
    return message, len(attachment_filepaths)
Add the attachments from the message from the commandline options.
def protein_subsequences_around_mutations(effects, padding_around_mutation): """ From each effect get a mutant protein sequence and pull out a subsequence around the mutation (based on the given padding). Returns a dictionary of subsequences and a dictionary of subsequence start offsets. """ protein_subsequences = {} protein_subsequence_start_offsets = {} for effect in effects: protein_sequence = effect.mutant_protein_sequence # some effects will lack a mutant protein sequence since # they are either silent or unpredictable if protein_sequence: mutation_start = effect.aa_mutation_start_offset mutation_end = effect.aa_mutation_end_offset seq_start_offset = max( 0, mutation_start - padding_around_mutation) # some pseudogenes have stop codons in the reference sequence, # if we try to use them for epitope prediction we should trim # the sequence to not include the stop character '*' first_stop_codon_index = protein_sequence.find("*") if first_stop_codon_index < 0: first_stop_codon_index = len(protein_sequence) seq_end_offset = min( first_stop_codon_index, mutation_end + padding_around_mutation) subsequence = protein_sequence[seq_start_offset:seq_end_offset] protein_subsequences[effect] = subsequence protein_subsequence_start_offsets[effect] = seq_start_offset return protein_subsequences, protein_subsequence_start_offsets
From each effect get a mutant protein sequence and pull out a subsequence around the mutation (based on the given padding). Returns a dictionary of subsequences and a dictionary of subsequence start offsets.
def enterEvent(self, event):
    """
    Reimplements the :meth:`QLabel.enterEvent` method.

    :param event: QEvent.
    :type event: QEvent
    """
    # A checked checkable label keeps its pixmap; everything else shows
    # the hover pixmap on mouse-enter.
    if self.__checkable and self.__checked:
        return
    self.setPixmap(self.__hover_pixmap)
Reimplements the :meth:`QLabel.enterEvent` method. :param event: QEvent. :type event: QEvent
def flags(self, index):
    """Qt override: start/end columns are enabled-only, the rest are
    also selectable; invalid indexes are enabled-only."""
    if not index.isValid():
        return Qt.ItemFlags(Qt.ItemIsEnabled)
    if index.column() in (C.COL_START, C.COL_END):
        return Qt.ItemFlags(Qt.ItemIsEnabled)
    return Qt.ItemFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
Override Qt method
def resolve_file_path_list(pathlist, workdir, prefix='', randomize=False):
    """Resolve each file name listed in ``pathlist`` and write the
    resolved paths to a new list file, returning the new file's path.

    After environment-variable expansion, names that do not point at an
    existing file are interpreted as relative to ``workdir``.  The output
    is ``<workdir>/<prefix>.txt``, or a unique ``prefix``-named temp file
    in ``workdir`` when ``randomize`` is True.
    """
    with open(pathlist, 'r') as src:
        names = [line.strip() for line in src]

    resolved = []
    for name in names:
        expanded = os.path.expandvars(name)
        if os.path.isfile(expanded):
            resolved.append(expanded)
        else:
            resolved.append(os.path.join(workdir, expanded))

    if randomize:
        _, outpath = tempfile.mkstemp(prefix=prefix, dir=workdir)
    else:
        outpath = os.path.join(workdir, prefix)
    outpath += '.txt'

    with open(outpath, 'w') as dst:
        dst.write("\n".join(resolved))
    return outpath
Resolve the path of each file name in the file ``pathlist`` and write the updated paths to a new file.
def fire_metric(metric_name, metric_value):
    """Fire a single metric through the module-level MetricsApiClient."""
    value = float(metric_value)
    metric_client.fire_metrics(**{metric_name: value})
    return "Fired metric <{}> with value <{}>".format(metric_name, value)
Fires a metric using the MetricsApiClient
def EXP_gas(self, base, exponent):
    """Calculate extra gas fee

    Charges 10 gas per byte of the exponent. ``exponent`` may be a
    symbolic bitvector, so the byte count is built as a chain of
    if-then-else bitvector terms rather than a concrete loop result.
    """
    EXP_SUPPLEMENTAL_GAS = 10  # cost of EXP exponent per byte

    def nbytes(e):
        # Index of the highest non-zero byte, plus one; ITEBV keeps the
        # computation valid for symbolic values of `e`.
        result = 0
        for i in range(32):
            result = Operators.ITEBV(512, Operators.EXTRACT(e, i * 8, 8) != 0, i + 1, result)
        return result
    return EXP_SUPPLEMENTAL_GAS * nbytes(exponent)
Calculate extra gas fee
def _parse_alt_title(html_chunk):
    """
    Parse the book's title from an image's ``alt`` attribute when it is
    not found where it should be.

    Args:
        html_chunk (obj): HTMLElement containing slice of the page with details.

    Returns:
        str: Book's title.

    Raises:
        UserWarning: If no ``<img>`` with an ``alt`` attribute is found.
    """
    images = html_chunk.find("img", fn=has_param("alt"))
    if not images:
        raise UserWarning("Can't find alternative title source!")
    return images[0].params["alt"].strip()
Parse title from alternative location if not found where it should be. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: str: Book's title.
def nice_number(number, thousands_separator=',', max_ndigits_after_dot=None):
    """Return ``number`` as a nicely formatted string.

    Thousands are grouped with ``thousands_separator``.  When the value
    is a float and ``max_ndigits_after_dot`` is given, the number is
    rounded to that many digits first; the integer part is grouped while
    the fractional part is appended unchanged.  Locale is deliberately
    ignored (see tmpl_nice_number_via_locale()).
    """
    if isinstance(number, float):
        if max_ndigits_after_dot is not None:
            number = round(number, max_ndigits_after_dot)
        int_part, frac_part = str(number).split('.')
        grouped = nice_number(int(int_part), thousands_separator)
        return '%s.%s' % (grouped, frac_part)

    digits = str(number)
    total = len(digits)
    # Walk the digits right-to-left, inserting a separator every 3 chars.
    pieces = []
    for offset in range(total):
        if offset % 3 == 0 and offset != 0:
            pieces.append(thousands_separator)
        pieces.append(digits[total - offset - 1])
    return ''.join(reversed(pieces))
Return nicely printed number NUMBER in language LN. Return nicely printed number NUMBER in language LN using given THOUSANDS_SEPARATOR character. If max_ndigits_after_dot is specified and the number is float, the number is rounded by taking in consideration up to max_ndigits_after_dot digit after the dot. This version does not pay attention to locale. See tmpl_nice_number_via_locale().
def data_properties(data, mask=None, background=None):
    """
    Calculate the morphological properties (and centroid) of a 2D array
    (e.g. an image cutout of an object) using image moments.

    Parameters
    ----------
    data : array_like or `~astropy.units.Quantity`
        The 2D array of the image.

    mask : array_like (bool), optional
        A boolean mask, with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.
        Masked data are excluded from all calculations.

    background : float, array_like, or `~astropy.units.Quantity`, optional
        The background level that was previously present in the input
        ``data``.  ``background`` may either be a scalar value or a 2D
        image with the same shape as the input ``data``.  Inputting the
        ``background`` merely allows for its properties to be measured
        within each source segment.  The input ``background`` does *not*
        get subtracted from the input ``data``, which should already be
        background-subtracted.

    Returns
    -------
    result : `~photutils.segmentation.SourceProperties` instance
        A `~photutils.segmentation.SourceProperties` object.
    """
    from ..segmentation import SourceProperties    # prevent circular imports

    # BUG FIX: the `np.int` alias was deprecated in NumPy 1.20 and removed
    # in 1.24; the builtin `int` produces the identical dtype here.  The
    # whole array is one all-ones segment labelled 1.
    segment_image = np.ones(data.shape, dtype=int)
    return SourceProperties(data, segment_image, label=1, mask=mask,
                            background=background)
Calculate the morphological properties (and centroid) of a 2D array (e.g. an image cutout of an object) using image moments. Parameters ---------- data : array_like or `~astropy.units.Quantity` The 2D array of the image. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked data are excluded from all calculations. background : float, array_like, or `~astropy.units.Quantity`, optional The background level that was previously present in the input ``data``. ``background`` may either be a scalar value or a 2D image with the same shape as the input ``data``. Inputting the ``background`` merely allows for its properties to be measured within each source segment. The input ``background`` does *not* get subtracted from the input ``data``, which should already be background-subtracted. Returns ------- result : `~photutils.segmentation.SourceProperties` instance A `~photutils.segmentation.SourceProperties` object.
def set_video_crop(self, x1, y1, x2, y2):
    """
    Crop the rendered video to the given rectangle.

    Args:
        x1 (int): Top left x coordinate (px)
        y1 (int): Top left y coordinate (px)
        x2 (int): Bottom right x coordinate (px)
        y2 (int): Bottom right y coordinate (px)
    """
    # The player expects the four coordinates as one space-separated string.
    crop = ' '.join(str(coord) for coord in (x1, y1, x2, y2))
    self._player_interface.SetVideoCropPos(ObjectPath('/not/used'), String(crop))
Args: x1 (int): Top left x coordinate (px) y1 (int): Top left y coordinate (px) x2 (int): Bottom right x coordinate (px) y2 (int): Bottom right y coordinate (px)
def gaussian_gradient_magnitude(image, sigma = 5, voxelspacing = None, mask = slice(None)):
    r"""
    Compute the gradient magnitude (edge-detection) of the supplied image
    using gaussian derivatives and return the intensity values.

    Optionally a binary mask can be supplied to select the voxels for
    which the feature should be extracted.

    Parameters
    ----------
    image : array_like or list/tuple of array_like
        A single image or a list/tuple of images (for multi-spectral case).
    sigma : number or sequence of numbers
        Standard deviation for Gaussian kernel, per axis or shared; the
        voxel spacing of the image is taken into account, so the values
        are treated as mm.
    voxelspacing : sequence of floats
        The side-length of each voxel.
    mask : array_like
        A binary mask for the image.

    Returns
    -------
    gaussian_gradient_magnitude : ndarray
        The gaussian gradient magnitude of the supplied image.
    """
    # Delegates the (multi-spectral aware) heavy lifting to the shared
    # feature-extraction dispatcher.
    return _extract_feature(_extract_gaussian_gradient_magnitude, image, mask,
                            sigma=sigma, voxelspacing=voxelspacing)
r""" Computes the gradient magnitude (edge-detection) of the supplied image using gaussian derivates and returns the intensity values. Optionally a binary mask can be supplied to select the voxels for which the feature should be extracted. Parameters ---------- image : array_like or list/tuple of array_like A single image or a list/tuple of images (for multi-spectral case). sigma : number or sequence of numbers Standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. Note that the voxel spacing of the image is taken into account, the given values are treated as mm. voxelspacing : sequence of floats The side-length of each voxel. mask : array_like A binary mask for the image. Returns ------- gaussian_gradient_magnitude : ndarray The gaussian gradient magnitude of the supplied image.
def param_map_rc_encode(self, target_system, target_component, param_id, param_index, parameter_rc_channel_index, param_value0, scale, param_value_min, param_value_max):
    '''
    Bind a RC channel to a parameter so the parameter changes according
    to the RC channel value.

    target_system               : System ID (uint8_t)
    target_component            : Component ID (uint8_t)
    param_id                    : Onboard parameter id; NULL-terminated unless exactly 16 chars (char)
    param_index                 : Parameter index; -1 to use param_id, -2 to clear any existing map for this rc_channel_index (int16_t)
    parameter_rc_channel_index  : Index of parameter RC channel (not the RC channel id) (uint8_t)
    param_value0                : Initial parameter value (float)
    scale                       : Scale mapping the RC range [-1, 1] to a parameter value (float)
    param_value_min             : Minimum param value; may or may not override the onboard minimum (float)
    param_value_max             : Maximum param value; may or may not override the onboard maximum (float)
    '''
    return MAVLink_param_map_rc_message(
        target_system, target_component, param_id, param_index,
        parameter_rc_channel_index, param_value0, scale,
        param_value_min, param_value_max)
Bind a RC channel to a parameter. The parameter should change accoding to the RC channel value. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) param_id : Onboard parameter id, terminated by NULL if the length is less than 16 human-readable chars and WITHOUT null termination (NULL) byte if the length is exactly 16 chars - applications have to provide 16+1 bytes storage if the ID is stored as string (char) param_index : Parameter index. Send -1 to use the param ID field as identifier (else the param id will be ignored), send -2 to disable any existing map for this rc_channel_index. (int16_t) parameter_rc_channel_index : Index of parameter RC channel. Not equal to the RC channel id. Typically correpsonds to a potentiometer-knob on the RC. (uint8_t) param_value0 : Initial parameter value (float) scale : Scale, maps the RC range [-1, 1] to a parameter value (float) param_value_min : Minimum param value. The protocol does not define if this overwrites an onboard minimum value. (Depends on implementation) (float) param_value_max : Maximum param value. The protocol does not define if this overwrites an onboard maximum value. (Depends on implementation) (float)
def transmute_sites(self, old_site_label, new_site_label, n_sites_to_change):
    """
    Relabel a random subset of sites.

    Args:
        old_site_label (String or List(String)): Site label(s) of the sites to be modified.
        new_site_label (String): Site label to be applied to the modified sites.
        n_sites_to_change (Int): Number of sites to modify.

    Returns:
        None
    """
    candidates = self.select_sites(old_site_label)
    for chosen in random.sample(candidates, n_sites_to_change):
        chosen.label = new_site_label
    # Refresh the cached set of labels present in the lattice.
    self.site_labels = set(site.label for site in self.sites)
Selects a random subset of sites with a specific label and gives them a different label. Args: old_site_label (String or List(String)): Site label(s) of the sites to be modified.. new_site_label (String): Site label to be applied to the modified sites. n_sites_to_change (Int): Number of sites to modify. Returns: None
def predict(self, x, distributed=True):
    """
    Use a model to do prediction.

    # Arguments
    x: Input data. A Numpy array or RDD of Sample.
    distributed: Boolean. Whether to do prediction in distributed mode or local mode.
                 Default is True. In local mode, x must be a Numpy array.
    """
    # BUG FIX: the original tested the undefined name `is_distributed`
    # instead of the `distributed` parameter, raising NameError.
    if distributed:
        if isinstance(x, np.ndarray):
            # Wrap the array into an RDD of Samples with dummy labels.
            features = to_sample_rdd(x, np.zeros([x.shape[0]]))
        elif isinstance(x, RDD):
            features = x
        else:
            raise TypeError("Unsupported prediction data type: %s" % type(x))
        return self.predict_distributed(features)
    else:
        if isinstance(x, np.ndarray):
            return self.predict_local(x)
        else:
            raise TypeError("Unsupported prediction data type: %s" % type(x))
Use a model to do prediction. # Arguments x: Input data. A Numpy array or RDD of Sample. distributed: Boolean. Whether to do prediction in distributed mode or local mode. Default is True. In local mode, x must be a Numpy array.
def make_graph(pkg):
    """Returns a dictionary of information about pkg & its recursive deps.

    Given a string, which can be parsed as a requirement specifier, return a
    dictionary where each key is the name of pkg or one of its recursive
    dependencies, and each value is a dictionary returned by research_package.

    (No, it's not really a graph.)
    """
    # Packages that are deliberately excluded from the report.
    ignore = ['argparse', 'pip', 'setuptools', 'wsgiref']
    pkg_deps = recursive_dependencies(pkg_resources.Requirement.parse(pkg))
    # One (initially empty) info dict per package of interest.
    dependencies = {key: {} for key in pkg_deps if key not in ignore}
    installed_packages = pkg_resources.working_set
    # Map of installed package key -> installed version string.
    versions = {package.key: package.version for package in installed_packages}
    for package in dependencies:
        try:
            dependencies[package]['version'] = versions[package]
        except KeyError:
            # Dependency not installed locally: warn and record no version.
            warnings.warn("{} is not installed so we cannot compute "
                          "resources for its dependencies.".format(package),
                          PackageNotInstalledWarning)
            dependencies[package]['version'] = None
    for package in dependencies:
        package_data = research_package(package, dependencies[package]['version'])
        dependencies[package].update(package_data)
    # Sort by package name for stable, readable output.
    return OrderedDict(
        [(package, dependencies[package])
         for package in sorted(dependencies.keys())]
    )
Returns a dictionary of information about pkg & its recursive deps. Given a string, which can be parsed as a requirement specifier, return a dictionary where each key is the name of pkg or one of its recursive dependencies, and each value is a dictionary returned by research_package. (No, it's not really a graph.)
def start(self, ccallbacks=None):
    """Establish and maintain connections.

    Spawns a greenlet running the connection manager, then blocks the caller
    until the ready event is set.

    :param ccallbacks: Optional connection callbacks forwarded to the
        connection manager (presumably connect/disconnect hooks — confirm
        against ``__manage_connections``).
    """
    self.__manage_g = gevent.spawn(self.__manage_connections, ccallbacks)
    # Block until the manager signals that connections are established.
    self.__ready_ev.wait()
Establish and maintain connections.
def add_resolved_requirements(self, reqs, platforms=None):
    """Multi-platform dependency resolution for PEX files.

    Resolves the given requirements for every target platform and adds each
    resulting distribution to this builder exactly once (deduplicated by
    location on disk).

    NOTE(review): the previous docstring documented ``builder``,
    ``interpreter`` and ``log`` parameters that are not part of this
    method's signature; they have been removed here.

    :param reqs: A list of :class:`PythonRequirement` to resolve.
    :param platforms: A list of :class:`Platform`s to resolve requirements for.
      Defaults to the platforms specified by PythonSetup.
    """
    distributions = self._resolve_distributions_by_platform(reqs, platforms=platforms)
    locations = set()
    for platform, dists in distributions.items():
        for dist in dists:
            # The same distribution may satisfy several platforms; add it once.
            if dist.location not in locations:
                self._log.debug('  Dumping distribution: .../{}'.format(os.path.basename(dist.location)))
                self.add_distribution(dist)
                locations.add(dist.location)
Multi-platform dependency resolution for PEX files. :param builder: Dump the requirements into this builder. :param interpreter: The :class:`PythonInterpreter` to resolve requirements for. :param reqs: A list of :class:`PythonRequirement` to resolve. :param log: Use this logger. :param platforms: A list of :class:`Platform`s to resolve requirements for. Defaults to the platforms specified by PythonSetup.
def get_code(self, *args, **kwargs):
    """Return the Python source code of the callback registered under ``args[0]``.

    :param args: ``args[0]`` is the command name used to look up the callback.
    :param kwargs: Unused; accepted for interface compatibility.
    :return: The callback's source, prefixed with a newline.
    :raises KeyError: If no callback is registered under ``args[0]``.
    """
    # FIXME: should honestly allow looking up multiple commands at once.
    callback = self._commands[args[0]]
    # TODO: syntax highlighting (e.g. pygments) would be nice here.
    source = _inspect.getsourcelines(callback)[0]
    # NOTE: removed a dead, commented-out pygments experiment that was left
    # in the body as a bare string literal (a silent no-op statement).
    # FIXME: formatting of the returned text is still crude.
    return "\n" + "".join(source)
get the python source code from callback
def _check_table(self):
    """Ensure that an incorrect table doesn't exist

    If a bad (old) table does exist, return False
    """
    cursor = self._db.execute("PRAGMA table_info(%s)"%self.table)
    rows = cursor.fetchall()
    if not rows:
        # No table at all: nothing can conflict.
        return True
    existing_keys = [row[1] for row in rows]
    existing_types = {row[1]: row[2] for row in rows}
    # Column names (and their order) must match exactly.
    if existing_keys != self._keys:
        self.log.warn('keys mismatch')
        return False
    # Every column must also carry the expected SQL type.
    for key in self._keys:
        actual = existing_types[key]
        expected = self._types[key]
        if actual != expected:
            self.log.warn(
                'type mismatch: %s: %s != %s'%(key,actual,expected)
            )
            return False
    return True
Ensure that an incorrect table doesn't exist If a bad (old) table does exist, return False
def db_type(self, connection):
    """
    The type of the field to insert into the database.
    """
    # Pick the backend-specific auto-incrementing 64-bit integer type based
    # on the module that defines the connection's class.
    backend_module = type(connection).__module__
    if "mysql" in backend_module:
        return "bigint AUTO_INCREMENT"
    if "postgres" in backend_module:
        return "bigserial"
    # Fall back to the parent field's type for other backends.
    return super(BigAutoField, self).db_type(connection)
The type of the field to insert into the database.
def build(self):
    """Only build and create Slackware package
    """
    # pkg_security presumably screens the package name against a blocked
    # list — TODO confirm against its definition.
    pkg_security([self.name])
    self.error_uns()
    if self.FAULT:
        # Package flagged as unsupported: report the fault and do nothing.
        print("")
        self.msg.template(78)
        print("| Package {0} {1} {2} {3}".format(self.prgnam, self.red,
                                                 self.FAULT, self.endc))
        self.msg.template(78)
    else:
        sources = []
        # Make sure the build and source directories exist.
        if not os.path.exists(self.meta.build_path):
            os.makedirs(self.meta.build_path)
        if not os.path.exists(self._SOURCES):
            os.makedirs(self._SOURCES)
        os.chdir(self.meta.build_path)
        # Download the SlackBuild script and the upstream source archives.
        Download(self.meta.build_path, self.sbo_dwn.split(),
                 repo="sbo").start()
        Download(self._SOURCES, self.source_dwn,
                 repo="sbo").start()
        script = self.sbo_dwn.split("/")[-1]
        for src in self.source_dwn:
            # Keep only the file-name component of each source URL.
            sources.append(src.split("/")[-1])
        # Run the SlackBuild and create the final Slackware package.
        BuildPackage(script, sources, self.meta.build_path,
                     auto=False).build()
        slack_package(self.prgnam)
Only build and create Slackware package
def detect_voice(self, prob_detect_voice=0.5):
    """
    Returns self as a list of tuples:
    [('v', voiced segment), ('u', unvoiced segment), (etc.)]

    The overall order of the AudioSegment is preserved.

    :param prob_detect_voice: The raw probability that any random 20ms window
                              of the audio file contains voice.
    :returns: The described list.
    """
    # webrtcvad only supports these sample rates, 16-bit samples, and mono.
    assert self.frame_rate in (48000, 32000, 16000, 8000), "Try resampling to one of the allowed frame rates."
    assert self.sample_width == 2, "Try resampling to 16 bit."
    assert self.channels == 1, "Try resampling to one channel."
    class model_class:
        # Thin adapter giving webrtcvad the predict() interface that
        # detect_event() expects.
        def __init__(self, aggressiveness):
            self.v = webrtcvad.Vad(int(aggressiveness))

        def predict(self, vector):
            if self.v.is_speech(vector.raw_data, vector.frame_rate):
                return 1
            else:
                return 0
    model = model_class(aggressiveness=2)
    # Transition/emission probabilities fed to detect_event's filter.
    pyesno = 0.3  # Probability of the next 20 ms being unvoiced given that this 20 ms was voiced
    pnoyes = 0.2  # Probability of the next 20 ms being voiced given that this 20 ms was unvoiced
    p_realyes_outputyes = 0.4  # WebRTCVAD has a very high FP rate - just because it says yes, doesn't mean much
    p_realyes_outputno = 0.05  # If it says no, we can be very certain that it really is a no
    p_yes_raw = prob_detect_voice
    filtered = self.detect_event(model=model, ms_per_input=20,
                                 transition_matrix=(pyesno, pnoyes),
                                 model_stats=(p_realyes_outputyes, p_realyes_outputno),
                                 event_length_s=0.25, prob_raw_yes=p_yes_raw)
    ret = []
    for tup in filtered:
        # Map detect_event's 'y'/'n' labels to 'v' (voiced) / 'u' (unvoiced).
        t = ('v', tup[1]) if tup[0] == 'y' else ('u', tup[1])
        ret.append(t)
    return ret
Returns self as a list of tuples: [('v', voiced segment), ('u', unvoiced segment), (etc.)] The overall order of the AudioSegment is preserved. :param prob_detect_voice: The raw probability that any random 20ms window of the audio file contains voice. :returns: The described list.
def redis_key(cls, key):
    """
    Get the key we pass to redis.
    If no namespace is declared, it will use the class name.
    :param key: str     the name of the redis key
    :return: str
    """
    namespace = cls.keyspace
    if namespace is None:
        # No namespace: use the key as given.
        full_key = "%s" % key
    else:
        # Prefix the key with the namespace via the class template.
        full_key = cls.keyspace_template % (namespace, key)
    return cls.keyparse.encode(full_key)
Get the key we pass to redis. If no namespace is declared, it will use the class name. :param key: str the name of the redis key :return: str
def previous(self, day_of_week=None):
    """
    Modify to the previous occurrence of a given day of the week.
    If no day_of_week is provided, modify to the previous occurrence
    of the current day of the week.  Use the supplied consts
    to indicate the desired day_of_week, ex. pendulum.MONDAY.

    :param day_of_week: The previous day of week to reset to.
    :type day_of_week: int or None

    :rtype: Date
    """
    target = self.day_of_week if day_of_week is None else day_of_week

    if not SUNDAY <= target <= SATURDAY:
        raise ValueError("Invalid day of week")

    # Step back one day at a time until we land on the target weekday.
    dt = self.subtract(days=1)
    while dt.day_of_week != target:
        dt = dt.subtract(days=1)

    return dt
Modify to the previous occurrence of a given day of the week. If no day_of_week is provided, modify to the previous occurrence of the current day of the week. Use the supplied consts to indicate the desired day_of_week, ex. pendulum.MONDAY. :param day_of_week: The previous day of week to reset to. :type day_of_week: int or None :rtype: Date
def cropped(self, t0, t1):
    """returns a cropped copy of this segment which starts at self.point(t0)
    and ends at self.point(t1)."""
    # The cropped arc is "large" iff it sweeps through more than 180 degrees.
    swept_angle = abs(self.delta*(t1 - t0))
    new_large_arc = 0 if swept_angle <= 180 else 1
    return Arc(self.point(t0), radius=self.radius, rotation=self.rotation,
               large_arc=new_large_arc, sweep=self.sweep,
               end=self.point(t1), autoscale_radius=self.autoscale_radius)
returns a cropped copy of this segment which starts at self.point(t0) and ends at self.point(t1).
def add_name(self, tax_id, tax_name, source_name=None, source_id=None,
             name_class='synonym', is_primary=False, is_classified=None,
             execute=True, **ignored):
    """Add a record to the names table corresponding to ``tax_id``.

    Arguments are as follows:

    - tax_id (string, required)
    - tax_name (string, required)

    *one* of the following are required:

    - source_id (int or string coercible to int)
    - source_name (string)

    ``source_id`` or ``source_name`` must identify an existing record in
    table "source". The following are optional:

    - name_class (string, default 'synonym')
    - is_primary (bool, see below)
    - is_classified (bool or None, default None)

    ``is_primary`` is optional and defaults to True if only one name is
    provided; otherwise is_primary must be True for exactly one name (and
    is optional in others).
    """
    assert isinstance(is_primary, bool)
    assert is_classified in {None, True, False}

    if ignored:
        log.info('some arguments were ignored: {} '.format(str(ignored)))

    # Resolve source_name/source_id to the canonical source table row id.
    source_id = self.get_source(source_id, source_name)['id']

    statements = []
    if is_primary:
        # Demote any existing primary name for this tax_id first, so at most
        # one primary name exists after the insert below.
        statements.append(self.names.update(
            whereclause=self.names.c.tax_id == tax_id,
            values={'is_primary': False}))

    statements.append(self.names.insert().values(
        tax_id=tax_id,
        tax_name=tax_name,
        source_id=source_id,
        is_primary=is_primary,
        name_class=name_class,
        is_classified=is_classified))

    if execute:
        self.execute(statements)
    else:
        # Caller wants to batch/execute the statements itself.
        return statements
Add a record to the names table corresponding to ``tax_id``. Arguments are as follows: - tax_id (string, required) - tax_name (string, required) *one* of the following are required: - source_id (int or string coercible to int) - source_name (string) ``source_id`` or ``source_name`` must identify an existing record in table "source". The following are optional: - name_class (string, default 'synonym') - is_primary (bool, see below) - is_classified (bool or None, default None) ``is_primary`` is optional and defaults to True if only one name is provided; otherwise is_primary must be True for exactly one name (and is optional in others).
def play(self, sgffile):
    """Play a game between the two engines.

    Sets board size/komi on both players, applies handicap stones or loads
    an endgame position, then alternates genmove/play until two consecutive
    passes or a resignation.  Stores results in self.resultb/self.resultw
    and writes an SGF record if ``sgffile`` is non-empty.
    """
    global verbose
    if verbose >= 1:
        print "Setting boardsize and komi for black\n"
    self.blackplayer.boardsize(self.size)
    self.blackplayer.komi(self.komi)
    if verbose >= 1:
        print "Setting boardsize and komi for white\n"
    self.whiteplayer.boardsize(self.size)
    self.whiteplayer.komi(self.komi)
    self.handicap_stones = []
    if self.endgamefile == "":
        # Fresh game: place handicap stones if requested.
        if self.handicap < 2:
            self.first_to_play = "B"
        else:
            self.handicap_stones = self.blackplayer.handicap(self.handicap, self.handicap_type)
            for stone in self.handicap_stones:
                # Mirror black's handicap stones onto white's board.
                self.whiteplayer.black(stone)
            self.first_to_play = "W"
    else:
        # Endgame mode: both engines load the same saved position.
        self.blackplayer.loadsgf(self.endgamefile, self.endgame_start)
        self.blackplayer.set_random_seed("0")
        self.whiteplayer.loadsgf(self.endgamefile, self.endgame_start)
        self.whiteplayer.set_random_seed("0")
        if self.blackplayer.is_known_command("list_stones"):
            self.get_position_from_engine(self.blackplayer)
        elif self.whiteplayer.is_known_command("list_stones"):
            self.get_position_from_engine(self.whiteplayer)
    to_play = self.first_to_play
    self.moves = []
    passes = 0
    won_by_resignation = ""
    # Main game loop: two consecutive passes end the game.
    while passes < 2:
        if to_play == "B":
            move = self.blackplayer.genmove("black")
            if move[:5] == "ERROR":
                # FIXME: write_sgf
                sys.exit(1)
            if move[:6] == "resign":
                if verbose >= 1:
                    print "Black resigns"
                won_by_resignation = "W+Resign"
                break
            else:
                self.moves.append(move)
                if string.lower(move[:4]) == "pass":
                    passes = passes + 1
                    if verbose >= 1:
                        print "Black passes"
                else:
                    passes = 0
                # Relay black's move to the white engine.
                self.whiteplayer.black(move)
                if verbose >= 1:
                    print "Black plays " + move
            to_play = "W"
        else:
            move = self.whiteplayer.genmove("white")
            if move[:5] == "ERROR":
                # FIXME: write_sgf
                sys.exit(1)
            if move[:6] == "resign":
                if verbose >= 1:
                    print "White resigns"
                won_by_resignation = "B+Resign"
                break
            else:
                self.moves.append(move)
                if string.lower(move[:4]) == "pass":
                    passes = passes + 1
                    if verbose >= 1:
                        print "White passes"
                else:
                    passes = 0
                # Relay white's move to the black engine.
                self.blackplayer.white(move)
                if verbose >= 1:
                    print "White plays " + move
            to_play = "B"
        if verbose >= 2:
            print self.whiteplayer.showboard() + "\n"
    if won_by_resignation == "":
        # Normal end: let each engine score the final position.
        self.resultw = self.whiteplayer.final_score()
        self.resultb = self.blackplayer.final_score()
    else:
        self.resultw = won_by_resignation;
        self.resultb = won_by_resignation;
    # if self.resultb == self.resultw:
    #     print "Result: ", self.resultw
    # else:
    #     print "Result according to W: ", self.resultw
    #     print "Result according to B: ", self.resultb
    # FIXME: $self->writesgf($sgffile) if defined $sgffile;
    if sgffile != "":
        self.writesgf(sgffile)
Play a game
def apply_range_set(self, hist: Hist) -> None:
    """ Apply the associated range set to the axis of a given hist.

    Note:
        The min and max values should be bins, not user ranges! For more, see
        the binning explanation in ``apply_func_to_find_bin(...)``.

    Args:
        hist: Histogram to which the axis range restriction should be applied.
    Returns:
        None. The range is set on the axis.
    """
    axis = self.axis(hist)
    #logger.debug(f"axis: {axis}, axis(): {axis.GetName()}")

    # Narrow the types for mypy: the stored bounds must be callables here,
    # not plain floats.
    assert not isinstance(self.min_val, float)
    assert not isinstance(self.max_val, float)

    # Evaluate the stored functions against the axis to get bin numbers.
    lower_bin = self.min_val(axis)
    upper_bin = self.max_val(axis)
    # NOTE: SetRange (bin based) is deliberate — SetRangeUser was a bug here,
    # since the values above are bin numbers, not user coordinates. Callers
    # of ``apply_func_to_find_bin()`` shift by a small epsilon to land on the
    # desired bin.
    self.axis(hist).SetRange(lower_bin, upper_bin)
Apply the associated range set to the axis of a given hist. Note: The min and max values should be bins, not user ranges! For more, see the binning explanation in ``apply_func_to_find_bin(...)``. Args: hist: Histogram to which the axis range restriction should be applied. Returns: None. The range is set on the axis.
def _get_support_sound_mode(self):
    """
    Get if sound mode is supported from device.

    Method executes the method for the current receiver type.
    """
    # Dispatch on receiver generation: 2016+ AVR-X models use a different API.
    if self._receiver_type == AVR_X_2016.type:
        return self._get_support_sound_mode_avr_2016()
    return self._get_support_sound_mode_avr()
Get if sound mode is supported from device. Method executes the method for the current receiver type.
def delete_lambda_deprecated(awsclient, function_name, s3_event_sources=None,
                             time_event_sources=None,
                             delete_logs=False):
    """Deprecated: please use delete_lambda!

    :param awsclient:
    :param function_name: name of the lambda function to delete
    :param s3_event_sources: list of s3 event sources to unwire (default: none)
    :param time_event_sources: list of time event sources to unwire (default: none)
    :param delete_logs: also delete the function's CloudWatch log group
    :return: exit_code
    """
    # FIX: the defaults used to be mutable lists ([]), which Python shares
    # across calls; use None sentinels instead (behavior is unchanged).
    if s3_event_sources is None:
        s3_event_sources = []
    if time_event_sources is None:
        time_event_sources = []
    unwire_deprecated(awsclient, function_name,
                      s3_event_sources=s3_event_sources,
                      time_event_sources=time_event_sources,
                      alias_name=ALIAS_NAME)
    client_lambda = awsclient.get_client('lambda')
    response = client_lambda.delete_function(FunctionName=function_name)
    if delete_logs:
        log_group_name = '/aws/lambda/%s' % function_name
        delete_log_group(awsclient, log_group_name)

    # TODO remove event source first and maybe also needed for permissions
    log.info(json2table(response))
    return 0
Deprecated: please use delete_lambda! :param awsclient: :param function_name: :param s3_event_sources: :param time_event_sources: :param delete_logs: :return: exit_code
def gblocks(self, new_path=None, seq_type='nucl'):
    """Apply the gblocks filtering algorithm to the alignment.
    See http://molevol.cmima.csic.es/castresana/Gblocks/Gblocks_documentation.html
    Need to rename all sequences, because it will complain with long names.

    :param new_path: Destination path for the filtered alignment; a temporary
                     path is used when omitted.
    :param seq_type: Either 'nucl' (nucleotides) or 'prot' (proteins).
                     FIX: the original default was the expression
                     ``'nucl' or 'prot'``, which always evaluates to 'nucl';
                     the default is now written explicitly, and an invalid
                     value raises ValueError instead of a NameError later.
    """
    # Validate early instead of running gblocks with an undefined -t option.
    if seq_type not in ('nucl', 'prot'):
        raise ValueError("seq_type must be 'nucl' or 'prot', got %r" % (seq_type,))
    # Temporary path #
    if new_path is None: final = self.__class__(new_temp_path())
    else:                final = self.__class__(new_path)
    # Mapping every sequence name with a random name #
    orig_name_to_temp = {seq.description: 'name' + str(i) for i,seq in enumerate(self)}
    temp_name_to_orig = {v: k for k, v in orig_name_to_temp.items()}
    # Rename every sequence with a short placeholder name #
    temp_fasta = self.rename_sequences(orig_name_to_temp)
    # Options #
    t_option = "-t=d" if seq_type == 'nucl' else "-t=p"
    # Run it #
    result = sh.gblocks91(temp_fasta.path, t_option, '-p=n',
                          "-b4=3", "-b3=20", "-b5=a", _ok_code=[0,1])
    created_file = temp_fasta.path + '-gb'
    assert os.path.exists(created_file)
    # Check errors #
    if "Execution terminated" in result.stdout: raise Exception("gblocks crashed again.")
    # Back #
    temp_fasta.rename_sequences(temp_name_to_orig, final)
    # Return #
    return final
Apply the gblocks filtering algorithm to the alignment. See http://molevol.cmima.csic.es/castresana/Gblocks/Gblocks_documentation.html Need to rename all sequences, because it will complain with long names.
def next(self):
    """
    Handles the iteration by pulling the next line out of the stream,
    attempting to convert the response to JSON if necessary.

    :returns: Data representing what was seen in the feed
    """
    while True:
        # Lazily (re)establish the underlying stream on first use.
        if not self._resp:
            self._start()
        if self._stop:
            raise StopIteration
        skip, data = self._process_data(next_(self._lines))
        if skip:
            # Heartbeats/empty lines etc. are skipped; keep pulling.
            continue
        return data
Handles the iteration by pulling the next line out of the stream, attempting to convert the response to JSON if necessary. :returns: Data representing what was seen in the feed
def getobjectsize(self, window_name, object_name=None):
    """
    Get object size

    @param window_name: Window name to look for, either full name,
    LDTP's name convention, or a Unix glob.
    @type window_name: string
    @param object_name: Object name to look for, either full name,
    LDTP's name convention, or a Unix glob. Or menu heirarchy
    @type object_name: string

    @return: x, y, width, height on success.
    @rtype: list
    """
    # With an object name, resolve the object; otherwise fall back to the
    # window itself (ignoring the extra name/app return values).
    if object_name:
        handle = self._get_object_handle(window_name, object_name)
    else:
        handle, name, app = self._get_window_handle(window_name)
    return self._getobjectsize(handle)
Get object size @param window_name: Window name to look for, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to look for, either full name, LDTP's name convention, or a Unix glob. Or menu heirarchy @type object_name: string @return: x, y, width, height on success. @rtype: list
def revert(self, revision_id):
    """Revert the record to a specific revision.

    #. Send a signal :data:`invenio_records.signals.before_record_revert`
       with the current record as parameter.

    #. Revert the record to the revision id passed as parameter.

    #. Send a signal :data:`invenio_records.signals.after_record_revert`
       with the reverted record as parameter.

    :param revision_id: Specify the record revision id
    :returns: The :class:`Record` instance corresponding to the revision id
    """
    if self.model is None:
        raise MissingModelError()

    revision = self.revisions[revision_id]

    # Nested transaction so the signals and the merge are applied atomically.
    with db.session.begin_nested():
        before_record_revert.send(
            current_app._get_current_object(),
            record=self
        )

        self.model.json = dict(revision)

        db.session.merge(self.model)

        after_record_revert.send(
            current_app._get_current_object(),
            record=self
        )
    # Return a fresh Record wrapping the reverted model state.
    return self.__class__(self.model.json, model=self.model)
Revert the record to a specific revision. #. Send a signal :data:`invenio_records.signals.before_record_revert` with the current record as parameter. #. Revert the record to the revision id passed as parameter. #. Send a signal :data:`invenio_records.signals.after_record_revert` with the reverted record as parameter. :param revision_id: Specify the record revision id :returns: The :class:`Record` instance corresponding to the revision id
def potential_from_grid(self, grid):
    """
    Calculate the potential at a given set of arc-second gridded coordinates.

    Parameters
    ----------
    grid : grids.RegularGrid
        The grid of (y,x) arc-second coordinates the potential is computed
        on. (The previous docstring said "deflection angles", but this
        method integrates ``potential_func``.)
    """
    # quad_grid integrates potential_func over t in [0, 1] at every grid
    # point; it returns (values, errors), so take element [0].
    potential_grid = quad_grid(self.potential_func, 0.0, 1.0, grid, args=(self.axis_ratio, self.kappa_s, self.scale_radius),
                               epsrel=1.49e-5)[0]

    return potential_grid
Calculate the potential at a given set of arc-second gridded coordinates. Parameters ---------- grid : grids.RegularGrid The grid of (y,x) arc-second coordinates the deflection angles are computed on.
def add(self, labels, value):
    """Add adds a single observation to the summary."""
    # Exact type check (not isinstance) so bool is rejected as well.
    if type(value) not in (float, int):
        raise TypeError("Summary only works with digits (int, float)")

    # The shared data lock does not cover the estimator, hence `mutex`.
    with mutex:
        try:
            estimator = self.get_value(labels)
        except KeyError:
            # First observation for this label set: create an estimator.
            estimator = quantile.Estimator(*self.__class__.DEFAULT_INVARIANTS)
            self.set_value(labels, estimator)

        estimator.observe(float(value))
Add adds a single observation to the summary.
def solution_to_array(solution, events, slots):
    """Convert a schedule from solution to array form

    Parameters
    ----------
    solution : list or tuple
        of tuples of event index and slot index for each scheduled item
    events : list or tuple
        of :py:class:`resources.Event` instances
    slots : list or tuple
        of :py:class:`resources.Slot` instances

    Returns
    -------
    np.array
        An E by S array (X) where E is the number of events and S the
        number of slots. Xij is 1 if event i is scheduled in slot j and
        zero otherwise

    Example
    -------
    For 3 events, 7 slots and the solution::

        [(0, 1), (1, 4), (2, 5)]

    The resulting array would be::

        [[0, 1, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 1, 0, 0],
         [0, 0, 0, 0, 0, 1, 0]]
    """
    num_events = len(events)
    num_slots = len(slots)
    # Start all-zero and mark each scheduled (event, slot) pair with a 1.
    array = np.zeros((num_events, num_slots), dtype=np.int8)
    for event_idx, slot_idx in solution:
        array[event_idx, slot_idx] = 1
    return array
Convert a schedule from solution to array form Parameters ---------- solution : list or tuple of tuples of event index and slot index for each scheduled item events : list or tuple of :py:class:`resources.Event` instances slots : list or tuple of :py:class:`resources.Slot` instances Returns ------- np.array An E by S array (X) where E is the number of events and S the number of slots. Xij is 1 if event i is scheduled in slot j and zero otherwise Example ------- For 3 events, 7 slots and the solution:: [(0, 1), (1, 4), (2, 5)] The resulting array would be:: [[0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0]]
def amplification_type(self, channels=None): """ Get the amplification type used for the specified channel(s). Each channel uses one of two amplification types: linear or logarithmic. This function returns, for each channel, a tuple of two numbers, in which the first number indicates the number of decades covered by the logarithmic amplifier, and the second indicates the linear value corresponding to the channel value zero. If the first value is zero, the amplifier used is linear The amplification type for channel "n" is extracted from the required $PnE parameter. Parameters ---------- channels : int, str, list of int, list of str Channel(s) for which to get the amplification type. If None, return a list with the amplification type of all channels, in the order of ``FCSData.channels``. Return ------ tuple, or list of tuples The amplification type of the specified channel(s). This is reported as a tuple, in which the first element indicates how many decades the logarithmic amplifier covers, and the second indicates the linear value that corresponds to a channel value of zero. If the first element is zero, the amplification type is linear. """ # Check default if channels is None: channels = self._channels # Get numerical indices of channels channels = self._name_to_index(channels) # Get detector type of the specified channels if hasattr(channels, '__iter__') \ and not isinstance(channels, six.string_types): return [self._amplification_type[ch] for ch in channels] else: return self._amplification_type[channels]
Get the amplification type used for the specified channel(s). Each channel uses one of two amplification types: linear or logarithmic. This function returns, for each channel, a tuple of two numbers, in which the first number indicates the number of decades covered by the logarithmic amplifier, and the second indicates the linear value corresponding to the channel value zero. If the first value is zero, the amplifier used is linear The amplification type for channel "n" is extracted from the required $PnE parameter. Parameters ---------- channels : int, str, list of int, list of str Channel(s) for which to get the amplification type. If None, return a list with the amplification type of all channels, in the order of ``FCSData.channels``. Return ------ tuple, or list of tuples The amplification type of the specified channel(s). This is reported as a tuple, in which the first element indicates how many decades the logarithmic amplifier covers, and the second indicates the linear value that corresponds to a channel value of zero. If the first element is zero, the amplification type is linear.
def get_stats_item(self, item):
    """Return the stats object for a specific item in JSON format.

    Stats should be a list of dict (processlist, network...)
    """
    if isinstance(self.stats, dict):
        try:
            return self._json_dumps({item: self.stats[item]})
        except KeyError as e:
            logger.error("Cannot get item {} ({})".format(item, e))
            return None
    if isinstance(self.stats, list):
        try:
            # Collect the value of `item` from every dict in the list.
            # Source:
            # http://stackoverflow.com/questions/4573875/python-get-index-of-dictionary-item-in-list
            # But https://github.com/nicolargo/glances/issues/1401
            values = list(map(itemgetter(item), self.stats))
            return self._json_dumps({item: values})
        except (KeyError, ValueError) as e:
            logger.error("Cannot get item {} ({})".format(item, e))
            return None
    # Unsupported stats container type.
    return None
Return the stats object for a specific item in JSON format. Stats should be a list of dict (processlist, network...)
def print_table(self, stream=sys.stdout, filter_function=None):
    """
    A pretty ASCII printer for the periodic table, based on some
    filter_function.

    Args:
        stream: file-like object
        filter_function:
            A filtering function that take a Pseudo as input and returns a
            boolean. For example, setting filter_function = lambda p:
            p.Z_val > 2 will print a periodic table containing only pseudos
            with Z_val > 2.
    """
    # Build the table once, then write it (with trailing newline) to stream.
    table = self.to_table(filter_function=filter_function)
    print(table, file=stream)
A pretty ASCII printer for the periodic table, based on some filter_function. Args: stream: file-like object filter_function: A filtering function that take a Pseudo as input and returns a boolean. For example, setting filter_function = lambda p: p.Z_val > 2 will print a periodic table containing only pseudos with Z_val > 2.
def bfs(self, root = None, display = None):
    '''
    API: bfs(self, root = None, display = None)
    Description:
        Searches tree starting from node named root using breadth-first
        strategy if root argument is provided. Starts search from root node
        of the tree otherwise.
    Pre:
        Node indicated by root argument should exist.
    Input:
        root: Starting node name.
        display: Display argument.
    '''
    start = self.root if root == None else root
    mode = self.attr['display'] if display == None else display
    # A FIFO queue makes traverse() visit nodes breadth-first.
    self.traverse(start, mode, Queue())
API: bfs(self, root = None, display = None) Description: Searches tree starting from node named root using breadth-first strategy if root argument is provided. Starts search from root node of the tree otherwise. Pre: Node indicated by root argument should exist. Input: root: Starting node name. display: Display argument.
def _mb_model(self, beta, mini_batch):
    """ Creates the structure of the model (model matrices etc) for
    mini batch model

    Parameters
    ----------
    beta : np.ndarray
        Contains untransformed starting values for the latent variables

    mini_batch : int
        Mini batch size for the data sampling

    Returns
    ----------
    mu : np.ndarray
        Contains the predicted values (location) for the time series

    Y : np.ndarray
        Contains the length-adjusted time series (accounting for lags)
    """
    # Drop the first max_lag observations so each row has a full lag window.
    Y = np.array(self.data[self.max_lag:])
    # Sample a mini batch of observations without replacement, and take the
    # matching columns of the design matrix X.
    sample = np.random.choice(len(Y), mini_batch, replace=False)
    Y = Y[sample]
    X = self.X[:, sample]

    # Transform latent variables from the unconstrained optimization space
    # to their natural parameter space.
    z = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])

    return neural_network_tanh_mb(Y, X, z, self.units, self.layers, self.ar+len(self.X_names)), Y
Creates the structure of the model (model matrices etc) for mini batch model Parameters ---------- beta : np.ndarray Contains untransformed starting values for the latent variables mini_batch : int Mini batch size for the data sampling Returns ---------- mu : np.ndarray Contains the predicted values (location) for the time series Y : np.ndarray Contains the length-adjusted time series (accounting for lags)
def iter_node(node, name='', unknown=None,
              # Runtime optimization
              list=list, getattr=getattr, isinstance=isinstance,
              enumerate=enumerate, missing=NonExistent):
    """Iterates over an object:

       - If the object has a _fields attribute,
         it gets attributes in the order of this
         and returns name, value pairs.

       - Otherwise, if the object is a list instance,
         it returns name, value pairs for each item in the list,
         where the name is passed into this function (defaults to blank).

       - Can update an unknown set with information about attributes
         that do not exist in fields.
    """
    fields = getattr(node, '_fields', None)
    if fields is None:
        # No _fields: only lists are iterated, item by item, under the
        # caller-supplied name.
        if isinstance(node, list):
            for item in node:
                yield item, name
        return
    # Walk the declared fields in order, skipping absent attributes.
    for field_name in fields:
        value = getattr(node, field_name, missing)
        if value is not missing:
            yield value, field_name
    if unknown is not None:
        # Record attributes present on the node but not declared in _fields.
        unknown.update(set(vars(node)) - set(fields))
Iterates over an object: - If the object has a _fields attribute, it gets attributes in the order of this and returns name, value pairs. - Otherwise, if the object is a list instance, it returns name, value pairs for each item in the list, where the name is passed into this function (defaults to blank). - Can update an unknown set with information about attributes that do not exist in fields.
def name(self):
    """The process name."""
    name = self._platform_impl.get_process_name()
    if os.name == 'posix':
        # On UNIX the kernel truncates the name to the first 15 characters.
        # When the truncated name is a prefix of the first cmdline token's
        # basename, prefer that longer, more descriptive name
        # (e.g. "gnome-keyring-d" -> "gnome-keyring-daemon").
        try:
            cmdline = self.cmdline
        except AccessDenied:
            pass
        else:
            if cmdline:
                candidate = os.path.basename(cmdline[0])
                if candidate.startswith(name):
                    name = candidate
    # XXX - perhaps needs refactoring
    self._platform_impl._process_name = name
    return name
The process name.
def create_api_handler(self):
    """ Creates an api handler and sets it on self.

    Logs in to GitHub with credentials from the config, sets a user agent
    when the client supports it, and looks up the configured organization.
    """
    try:
        self.github = github3.login(username=config.data['gh_user'],
                                    password=config.data['gh_password'])
    except KeyError as e:
        # Credentials missing from the config file.
        raise config.NotConfigured(e)
    logger.info("ratelimit remaining: {}".format(self.github.ratelimit_remaining))
    # Guarded with hasattr — presumably some github3 versions lack
    # set_user_agent; confirm against the pinned github3 version.
    if hasattr(self.github, 'set_user_agent'):
        self.github.set_user_agent('{}: {}'.format(self.org_name, self.org_homepage))
    try:
        self.org = self.github.organization(self.org_name)
    except github3.GitHubError:
        # Best-effort diagnosis: a failed lookup is often rate limiting.
        logger.error("Possibly the github ratelimit has been exceeded")
        logger.info("ratelimit: " + str(self.github.ratelimit_remaining))
Creates an api handler and sets it on self
def _find_free_location(self, free_locations, required_sectors=1, preferred=None): """ Given a list of booleans, find a list of <required_sectors> consecutive True values. If no such list is found, return length(free_locations). Assumes first two values are always False. """ # check preferred (current) location if preferred and all(free_locations[preferred:preferred+required_sectors]): return preferred # check other locations # Note: the slicing may exceed the free_location boundary. # This implementation relies on the fact that slicing will work anyway, # and the any() function returns True for an empty list. This ensures # that blocks outside the file are considered Free as well. i = 2 # First two sectors are in use by the header while i < len(free_locations): if all(free_locations[i:i+required_sectors]): break i += 1 return i
Given a list of booleans, find a list of <required_sectors> consecutive True values. If no such list is found, return length(free_locations). Assumes first two values are always False.
def on_path(self, new):
    """React to the file path changing: refresh the display name and graph."""
    file_name = basename(new)
    self.name = file_name
    self.graph = self.editor_input.load()
Handle the file path changing.
def rpc_call(self, request, method=None, **payload):
    """ Call REST API with RPC force.

    Rewrites *request* in place so it looks like a request against the
    named resource, then dispatches it through ``self.api``.

    :param request: the incoming Django request (mutated in place).
    :param method: "<resource><separator><http-method>" naming the target
        resource and the HTTP verb to use.
    :param payload: optional ``data``, ``headers``, ``callback`` and
        ``params`` for the underlying call.

    return object: a result

    :raises AssertionError: on a malformed/unknown method name or a
        non-OK serialized response.
    """
    if not method or self.separator not in method:
        raise AssertionError("Wrong method name: {0}".format(method))

    resource_name, method = method.split(self.separator, 1)
    if resource_name not in self.api.resources:
        raise AssertionError("Unknown method " + method)

    # Build a mutable QueryDict carrying the payload plus a JSONP callback.
    data = QueryDict('', mutable=True)
    data.update(payload.get('data', dict()))
    data['callback'] = payload.get('callback') or request.GET.get(
        'callback') or request.GET.get('jsonp') or 'callback'

    # Inject extra headers the way Django exposes them (HTTP_-prefixed).
    # NB: .items() instead of the Python-2-only .iteritems() — behaves
    # identically here and keeps the code portable to Python 3.
    for h, v in payload.get('headers', dict()).items():
        request.META["HTTP_%s" % h.upper().replace('-', '_')] = v

    # Make the request RPC-shaped: same data for every verb, forced method.
    request.POST = request.PUT = request.GET = data
    delattr(request, '_request')
    request.method = method.upper()
    request.META['CONTENT_TYPE'] = 'application/x-www-form-urlencoded'

    params = payload.pop('params', dict())
    response = self.api.call(resource_name, request, **params)

    if not isinstance(response, SerializedHttpResponse):
        return response

    if response['Content-type'] in self._meta.emitters_dict:
        return HttpResponse(response.content, status=response.status_code)

    if response.status_code == 200:
        return response.response

    raise AssertionError(response.response)
Call REST API with RPC force. return object: a result
def merge_dicts(*dict_list):
    """Merge all dict arguments into one dict.

    Non-dict arguments are silently ignored.  When the same key appears
    in more than one dict, the value from the later argument wins.

    :return: the merged dict (empty when there is nothing to merge).
    """
    # dict.update preserves the original later-wins semantics without the
    # old throwaway intermediate list or the blanket try/except that
    # silently returned {} on any error.
    merged = {}
    for candidate in dict_list:
        if isinstance(candidate, dict):
            merged.update(candidate)
    return merged
Extract all of the dictionaries from this list, then merge them together
def copy_files(self):
    """Copy the LICENSE and CONTRIBUTING files into the book's repo folder.

    Also copies the metadata rdf file when present, tags the metadata
    with the GITenberg subject, and dumps the metadata.
    """
    template_dir = dirname(abspath(__file__))
    destination = '{0}/'.format(self.book.local_path)
    for template_name in (u'LICENSE', u'CONTRIBUTING.rst'):
        sh.cp('{0}/templates/{1}'.format(template_dir, template_name), destination)
    # copy metadata rdf file (rdf_path is None when meta came from a yaml file)
    if self.book.meta.rdf_path:
        sh.cp(self.book.meta.rdf_path, destination)
    if 'GITenberg' not in self.book.meta.subjects:
        if not self.book.meta.subjects:
            self.book.meta.metadata['subjects'] = []
        self.book.meta.metadata['subjects'].append('GITenberg')
    self.save_meta()
Copy the LICENSE and CONTRIBUTING files to each folder repo Generate covers if needed. Dump the metadata.
def decode_nibbles(value):
    """The inverse of the Hex Prefix function."""
    nibbles_with_flag = bytes_to_nibbles(value)
    flag = nibbles_with_flag[0]

    needs_terminator = flag in (HP_FLAG_2, HP_FLAG_2 + 1)
    is_odd_length = flag in (HP_FLAG_0 + 1, HP_FLAG_2 + 1)

    # An odd-length payload carries a single flag nibble of padding, an
    # even-length one carries two.
    raw_nibbles = nibbles_with_flag[1:] if is_odd_length else nibbles_with_flag[2:]

    if needs_terminator:
        return add_nibbles_terminator(raw_nibbles)
    return raw_nibbles
The inverse of the Hex Prefix function
def notify_created(room, event, user):
    """Notifies about the creation of a chatroom.

    :param room: the chatroom
    :param event: the event
    :param user: the user performing the action
    """
    template = get_plugin_template_module(
        'emails/created.txt', chatroom=room, event=event, user=user)
    _send(event, template)
Notifies about the creation of a chatroom. :param room: the chatroom :param event: the event :param user: the user performing the action
def validate_deprecation_semver(version_string, version_description):
    """Validates that version_string is a valid semver.

    If so, returns that semver.  Raises an error otherwise.

    :param str version_string: A pantsbuild.pants version which affects some deprecated entity.
    :param str version_description: A string used in exception messages to describe what the
      `version_string` represents.
    :rtype: `packaging.version.Version`
    :raises DeprecationApplicationError: if the version_string parameter is invalid.
    """
    if version_string is None:
        raise MissingSemanticVersionError('The {} must be provided.'.format(version_description))
    if not isinstance(version_string, six.string_types):
        raise BadSemanticVersionError('The {} must be a version string.'.format(version_description))

    # Only the parse itself can raise InvalidVersion; keep the try body minimal.
    try:
        parsed = Version(version_string)
    except InvalidVersion as e:
        raise BadSemanticVersionError('The given {} {} is not a valid version: '
                                      '{}'.format(version_description, version_string, e))

    # NB: packaging will see versions like 1.a.0 as 1a0, and are "valid"
    # We explicitly want our versions to be of the form x.y.z.
    if len(parsed.base_version.split('.')) != 3:
        raise BadSemanticVersionError('The given {} is not a valid version: '
                                      '{}'.format(version_description, version_string))
    if not parsed.is_prerelease:
        raise NonDevSemanticVersionError('The given {} is not a dev version: {}\n'
                                         'Features should generally be removed in the first `dev` release '
                                         'of a release cycle.'.format(version_description, version_string))
    return parsed
Validates that version_string is a valid semver. If so, returns that semver. Raises an error otherwise. :param str version_string: A pantsbuild.pants version which affects some deprecated entity. :param str version_description: A string used in exception messages to describe what the `version_string` represents. :rtype: `packaging.version.Version` :raises DeprecationApplicationError: if the version_string parameter is invalid.