_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q38300
User.wrap_json
train
def wrap_json(cls, json):
    """Build a :class:`User` from a Twitch user json payload.

    :param json: the dict with the information of the user
    :type json: :class:`dict` | None
    :returns: the new user instance
    :rtype: :class:`User`
    :raises: None
    """
    # Map each constructor keyword to the json key it is read from.
    return User(usertype=json['type'],
                name=json['name'],
                logo=json['logo'],
                twitchid=json['_id'],
                displayname=json['display_name'],
                bio=json['bio'])
python
{ "resource": "" }
q38301
_wrap_execute_after
train
def _wrap_execute_after(funcname): """Warp the given method, so it gets executed by the reactor Wrap a method of :data:`IRCCLient.out_connection`. The returned function should be assigned to a :class:`irc.client.SimpleIRCClient` class. :param funcname: the name of a :class:`irc.client.ServerConnection` method :type funcname: :class:`str` :returns: a new function, that executes the given one via :class:`irc.schedule.IScheduler.execute_after` :raises: None """ def method(self, *args, **kwargs): f = getattr(self.out_connection, funcname) p = functools.partial(f, *args, **kwargs) self.reactor.scheduler.execute_after(0, p) method.__name__ = funcname return method
python
{ "resource": "" }
q38302
Reactor.shutdown
train
def shutdown(self):
    """Disconnect all connections and end the loop

    :returns: None
    :rtype: None
    :raises: None
    """
    log.debug('Shutting down %s' % self)
    # Drop every connection first, then clear the flag that keeps the
    # reactor loop alive so it exits on its next iteration.
    self.disconnect_all()
    self._looping.clear()
python
{ "resource": "" }
q38303
Reactor3.server
train
def server(self):
    """Creates and returns a ServerConnection

    :returns: a server connection
    :rtype: :class:`connection.ServerConnection3`
    :raises: None
    """
    conn = connection.ServerConnection3(self)
    # Registration must be serialized against other threads touching
    # the connection list.
    with self.mutex:
        self.connections.append(conn)
    return conn
python
{ "resource": "" }
q38304
init_run
train
def init_run(shell, no_daemon, daemon_options, daemon_outfile):
    """
    Configure your shell.

    Add the following line in your shell RC file and then
    you are ready to go::

        eval $(%(prog)s)

    To check if your shell is supported, simply run::

        %(prog)s --no-daemon

    If you want to specify shell other than $SHELL, you can give
    --shell option::

        eval $(%(prog)s --shell zsh)

    By default, this command also starts daemon in background to
    automatically index shell history records.
    To not start daemon, use --no-daemon option like this::

        eval $(%(prog)s --no-daemon)

    To see the other methods to launch the daemon process,
    see ``rash daemon --help``.

    """
    import sys
    from .__init__ import __version__
    init_file = find_init(shell)
    # Guard clause: bail out early for unsupported shells.
    if not os.path.exists(init_file):
        raise RuntimeError(
            "Shell '{0}' is not supported.".format(shell_name(shell)))
    sys.stdout.write(INIT_TEMPLATE.format(
        file=init_file, version=__version__))
    if not no_daemon:
        from .daemon import start_daemon_in_subprocess
        start_daemon_in_subprocess(daemon_options, daemon_outfile)
python
{ "resource": "" }
q38305
update_data
train
def update_data(func):
    """
    Decorator to save data more easily. Use parquet as data format

    Args:
        func: function to load data from data source

    Returns:
        wrapped function
    """
    # Snapshot of the wrapped function's keyword defaults, captured once
    # at decoration time.  It must never be mutated afterwards: the
    # previous implementation called ``default.update(kwargs)``, which
    # leaked one call's keyword arguments into every subsequent call.
    default = {
        param.name: param.default
        for param in inspect.signature(func).parameters.values()
        if param.default is not inspect.Parameter.empty
    }

    @wraps(func)
    def wrapper(*args, **kwargs):
        # Merge per call; explicit kwargs win and ``default`` stays intact.
        kwargs = {**default, **kwargs}
        cur_mod = sys.modules[func.__module__]
        logger = logs.get_logger(
            name_or_func=f'{cur_mod.__name__}.{func.__name__}', types='stream')
        root_path = cur_mod.DATA_PATH
        date_type = kwargs.pop('date_type', 'date')
        save_static = kwargs.pop('save_static', True)
        save_dynamic = kwargs.pop('save_dynamic', True)
        symbol = kwargs.get('symbol')
        file_kw = dict(func=func, symbol=symbol, root=root_path,
                       date_type=date_type)
        d_file = cache_file(has_date=True, **file_kw)
        s_file = cache_file(has_date=False, **file_kw)
        cached = kwargs.pop('cached', False)
        # Short-circuit: serve the static cache when allowed and present.
        if cached and save_static and files.exists(s_file):
            logger.info(f'Reading data from {s_file} ...')
            return pd.read_parquet(s_file)
        data = func(*args, **kwargs)
        if save_static:
            files.create_folder(s_file, is_file=True)
            save_data(data=data, file_fmt=s_file, append=False)
            logger.info(f'Saved data file to {s_file} ...')
        if save_dynamic:
            drop_dups = kwargs.pop('drop_dups', None)
            files.create_folder(d_file, is_file=True)
            save_data(data=data, file_fmt=d_file, append=True,
                      drop_dups=drop_dups)
            logger.info(f'Saved data file to {d_file} ...')
        return data
    return wrapper
python
{ "resource": "" }
q38306
save_data
train
def save_data(data, file_fmt, append=False, drop_dups=None, info=None, **kwargs):
    """
    Persist a DataFrame as parquet, optionally appending to existing data

    Args:
        data: pd.DataFrame
        file_fmt: data file format in terms of f-strings
        append: if append data to existing data
        drop_dups: list, drop duplicates in columns
        info: dict, infomation to be hashed and passed to f-strings
        **kwargs: additional parameters for f-strings

    Examples:
        >>> data = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
        >>> # save_data(
        >>> #     data, '{ROOT}/daily/{typ}.parq',
        >>> #     ROOT='tests/data', typ='earnings'
        >>> # )
    """
    target = data_file(file_fmt=file_fmt, info=info, **kwargs)
    if append and files.exists(target):
        # Prepend whatever is already on disk before saving.
        existing = pd.read_parquet(target)
        data = pd.DataFrame(pd.concat([existing, data], sort=False))
    if drop_dups is not None:
        data.drop_duplicates(subset=utils.tolist(drop_dups), inplace=True)
    if not data.empty:
        data.to_parquet(target)
    return data
python
{ "resource": "" }
q38307
data_file
train
def data_file(file_fmt, info=None, **kwargs):
    """
    Data file name for given infomation

    Args:
        file_fmt: file format in terms of f-strings
        info: dict, to be hashed and then pass to f-string using 'hash_key'
              these info will also be passed to f-strings
        **kwargs: arguments for f-strings

    Returns:
        str: data file name
    """
    if isinstance(info, dict):
        # NOTE(review): json.dumps without sort_keys=True makes hash_key
        # depend on dict insertion order — confirm callers build ``info``
        # in a stable order before relying on it for cache hits.
        digest = hashlib.sha256(json.dumps(info).encode('utf-8'))
        kwargs['hash_key'] = digest.hexdigest()
        kwargs.update(info)
    return utils.fstr(fmt=file_fmt, **kwargs)
python
{ "resource": "" }
q38308
index_run
train
def index_run(record_path, keep_json, check_duplicate):
    """
    Convert raw JSON records into sqlite3 DB.

    Normally RASH launches a daemon that takes care of indexing.
    See ``rash daemon --help``.

    """
    # Imported lazily so the CLI stays fast when this subcommand is unused.
    from .config import ConfigStore
    from .indexer import Indexer
    indexer = Indexer(ConfigStore(), check_duplicate, keep_json, record_path)
    indexer.index_all()
python
{ "resource": "" }
q38309
DataLoggingService.list
train
def list(self):
    """
    List all available data logging sessions
    """
    # Open the endpoint queue before sending the request so the response
    # cannot slip past us.
    queue = self._pebble.get_endpoint_queue(DataLogging)
    self._pebble.send_packet(
        DataLogging(data=DataLoggingReportOpenSessions(sessions=[])))
    sessions = []
    while True:
        try:
            packet = queue.get(timeout=2).data
        except TimeoutError:
            # No more session reports arriving; we are done.
            break
        if not isinstance(packet, DataLoggingDespoolOpenSession):
            continue
        self._pebble.send_packet(
            DataLogging(data=DataLoggingACK(session_id=packet.session_id)))
        sessions.append(packet.__dict__)
    queue.close()
    return sessions
python
{ "resource": "" }
q38310
DataLoggingService.get_send_enable
train
def get_send_enable(self):
    """
    Return true if sending of sessions is enabled on the watch
    """
    # Open the endpoint queue before sending the request so the response
    # cannot slip past us.
    queue = self._pebble.get_endpoint_queue(DataLogging)
    self._pebble.send_packet(DataLogging(data=DataLoggingGetSendEnableRequest()))
    enabled = False
    while True:
        response = queue.get().data
        if isinstance(response, DataLoggingGetSendEnableResponse):
            enabled = response.enabled
            break
    queue.close()
    return enabled
python
{ "resource": "" }
q38311
DataLoggingService.set_send_enable
train
def set_send_enable(self, setting):
    """
    Set the send enable setting on the watch
    """
    packet = DataLogging(data=DataLoggingSetSendEnable(enabled=setting))
    self._pebble.send_packet(packet)
python
{ "resource": "" }
q38312
loglevel
train
def loglevel(level):
    """
    Convert any representation of `level` to an int appropriately.

    :type level: int or str
    :rtype: int

    >>> loglevel('DEBUG') == logging.DEBUG
    True
    >>> loglevel(10)
    10
    >>> loglevel(None)
    Traceback (most recent call last):
        ...
    ValueError: None is not a proper log level.
    """
    if isinstance(level, str):
        # ``getattr(logging, name)`` would raise AttributeError for an
        # unknown name (or return a non-level attribute such as a
        # function); validate against the canonical name->level mapping
        # instead so bad strings raise the documented ValueError.
        resolved = logging.getLevelName(level.upper())
        if not isinstance(resolved, int):
            raise ValueError('{0!r} is not a proper log level.'.format(level))
        return resolved
    if isinstance(level, int):
        return level
    raise ValueError('{0!r} is not a proper log level.'.format(level))
python
{ "resource": "" }
q38313
setup_daemon_log_file
train
def setup_daemon_log_file(cfstore):
    """
    Attach file handler to RASH logger.

    :type cfstore: rash.config.ConfigStore
    """
    handler = logging.FileHandler(filename=cfstore.daemon_log_path)
    level = loglevel(cfstore.daemon_log_level)
    # Both the handler and the module logger honour the configured level.
    for target in (handler, logger):
        target.setLevel(level)
    logger.addHandler(handler)
python
{ "resource": "" }
q38314
gzip_encode
train
def gzip_encode(data):
    """data -> gzip encoded data

    Encode data using the gzip content encoding as described in RFC 1952
    """
    # ``gzip`` is None when the module could not be imported.
    if not gzip:
        raise NotImplementedError
    buf = StringIO.StringIO()
    compressor = gzip.GzipFile(mode="wb", fileobj=buf, compresslevel=1)
    compressor.write(data)
    # Closing flushes the gzip trailer into ``buf``.
    compressor.close()
    encoded = buf.getvalue()
    buf.close()
    return encoded
python
{ "resource": "" }
q38315
gzip_decode
train
def gzip_decode(data, max_decode=20971520):
    """gzip encoded data -> unencoded data

    Decode data using the gzip content encoding as described in RFC 1952
    """
    # ``gzip`` is None when the module could not be imported.
    if not gzip:
        raise NotImplementedError
    buf = StringIO.StringIO(data)
    decompressor = gzip.GzipFile(mode="rb", fileobj=buf)
    try:
        if max_decode < 0:
            # negative cap means "no limit"
            decoded = decompressor.read()
        else:
            # read one byte past the cap so overruns can be detected below
            decoded = decompressor.read(max_decode + 1)
    except IOError:
        raise ValueError("invalid data")
    buf.close()
    decompressor.close()
    if max_decode >= 0 and len(decoded) > max_decode:
        raise ValueError("max gzipped payload length exceeded")
    return decoded
python
{ "resource": "" }
q38316
result_headers
train
def result_headers(context, cl):
    """
    Generates the list column headers.

    :param context: template context; may carry ``current_sort_field``,
        ``request`` and the cached ``getsortvars`` query-string suffix.
    :param cl: change-list object exposing ``list_display`` and ``model``.
    :yields: one dict per column with ``text``, sorting state and, for
        sortable columns, the ``url`` that toggles the sort order.
    """
    for i, field_name in enumerate(cl.list_display):
        text, attr = label_for_field(field_name, cl.model, return_attr=True)
        if attr:
            # Potentially not sortable
            # if the field is the action checkbox: no sorting and special class
            if field_name == 'action_checkbox':
                yield {
                    "text": text,
                    "class_attrib": mark_safe(' class="action-checkbox-column"'),
                    "sortable": False,
                }
                continue
        # OK, it is sortable if we got this far
        th_classes = ['sortable', 'column-{0}'.format(field_name)]
        ascending = None
        is_sorted = False
        # Is it currently being sorted on?  Sort fields are 1-based and a
        # leading '-' marks descending order.
        if context.get('current_sort_field') == str(i + 1):
            is_sorted = True
            ascending = False
            th_classes.append('sorted descending')
        elif context.get('current_sort_field') == '-' + str(i + 1):
            is_sorted = True
            ascending = True
            th_classes.append('sorted ascending')
        if 'request' in context:
            url = context['request'].path
        else:
            url = "./"
        # TODO: when start using action_checkbox use i instead of i + 1.
        # This +1 is to correct enumerate index
        # builds url
        url += "?sort_by="
        if ascending is False:
            url += "-"
        url += str(i + 1)
        if 'getsortvars' in context:
            extra_vars = context['getsortvars']
        else:
            # BUGFIX: previously ``extra_vars`` was left unbound (NameError)
            # when neither 'getsortvars' nor 'request' was in the context.
            extra_vars = ''
            if 'request' in context:
                request = context['request']
                getvars = request.GET.copy()
                if 'sort_by' in getvars:
                    del getvars['sort_by']
                if len(getvars.keys()) > 0:
                    context['getsortvars'] = "&%s" % getvars.urlencode()
                else:
                    context['getsortvars'] = ''
                extra_vars = context['getsortvars']
        # append other vars to url
        url += extra_vars
        yield {
            "text": text,
            "url": url,
            "sortable": True,
            "sorted": is_sorted,
            "ascending": ascending,
            "class_attrib": format_html(' class="{0}"', ' '.join(th_classes)) if th_classes else '',
        }
python
{ "resource": "" }
q38317
Tag.from_str
train
def from_str(cls, tagstring):
    """Create a tag by parsing the tag of a message

    :param tagstring: A tag string described in the irc protocol
    :type tagstring: :class:`str`
    :returns: A tag
    :rtype: :class:`Tag`
    :raises: None
    """
    match = cls._parse_regexp.match(tagstring)
    return cls(name=match.group('name'),
               value=match.group('value'),
               vendor=match.group('vendor'))
python
{ "resource": "" }
q38318
Emote.from_str
train
def from_str(cls, emotestr):
    """Create an emote from the emote tag key

    :param emotestr: the tag key, e.g. ``'123:0-4'``
    :type emotestr: :class:`str`
    :returns: an emote
    :rtype: :class:`Emote`
    :raises: None
    """
    emoteid, occstr = emotestr.split(':')

    def _parse_occurence(occ):
        # each occurence is a 'start-end' character range
        first, last = occ.split('-')
        return (int(first), int(last))

    occurences = [_parse_occurence(occ) for occ in occstr.split(',')]
    return cls(int(emoteid), occurences)
python
{ "resource": "" }
q38319
Message3.from_event
train
def from_event(cls, event):
    """Create a message from an event

    :param event: the event that was received of type ``pubmsg`` or ``privmsg``
    :type event: :class:`Event3`
    :returns: a message that resembles the event
    :rtype: :class:`Message3`
    :raises: None
    """
    chatter = Chatter(event.source)
    return cls(chatter, event.target, event.arguments[0], event.tags)
python
{ "resource": "" }
q38320
Message3.set_tags
train
def set_tags(self, tags): """For every known tag, set the appropriate attribute. Known tags are: :color: The user color :emotes: A list of emotes :subscriber: True, if subscriber :turbo: True, if turbo user :user_type: None, mod, staff, global_mod, admin :param tags: a list of tags :type tags: :class:`list` of :class:`Tag` | None :returns: None :rtype: None :raises: None """ if tags is None: return attrmap = {'color': 'color', 'emotes': 'emotes', 'subscriber': 'subscriber', 'turbo': 'turbo', 'user-type': 'user_type'} for t in tags: attr = attrmap.get(t.name) if not attr: continue else: setattr(self, attr, t.value)
python
{ "resource": "" }
q38321
Message3.emotes
train
def emotes(self, emotes):
    """Set the emotes

    :param emotes: the key of the emotes tag
    :type emotes: :class:`str`
    :returns: None
    :rtype: None
    :raises: None
    """
    if emotes is None:
        self._emotes = []
        return
    # multiple emotes are separated by '/'
    self._emotes = [Emote.from_str(estr) for estr in emotes.split('/')]
python
{ "resource": "" }
q38322
coerce_types
train
def coerce_types(T1, T2): """Coerce types T1 and T2 to a common type. Coercion is performed according to this table, where "N/A" means that a TypeError exception is raised. +----------+-----------+-----------+-----------+----------+ | | int | Fraction | Decimal | float | +----------+-----------+-----------+-----------+----------+ | int | int | Fraction | Decimal | float | | Fraction | Fraction | Fraction | N/A | float | | Decimal | Decimal | N/A | Decimal | float | | float | float | float | float | float | +----------+-----------+-----------+-----------+----------+ Subclasses trump their parent class; two subclasses of the same base class will be coerced to the second of the two. """ # Get the common/fast cases out of the way first. if T1 is T2: return T1 if T1 is int: return T2 if T2 is int: return T1 # Subclasses trump their parent class. if issubclass(T2, T1): return T2 if issubclass(T1, T2): return T1 # Floats trump everything else. if issubclass(T2, float): return T2 if issubclass(T1, float): return T1 # Subclasses of the same base class give priority to the second. if T1.__base__ is T2.__base__: return T2 # Otherwise, just give up. raise TypeError('cannot coerce types %r and %r' % (T1, T2))
python
{ "resource": "" }
q38323
median_low
train
def median_low(data):
    """Return the low median of numeric data.

    When the number of data points is odd, the middle value is returned.
    When it is even, the smaller of the two middle values is returned.
    """
    ordered = sorted(data)
    count = len(ordered)
    if count == 0:
        raise StatisticsError("no median for empty data")
    mid, odd = divmod(count, 2)
    # odd length -> exact middle; even length -> smaller of the two middles
    return ordered[mid] if odd else ordered[mid - 1]
python
{ "resource": "" }
q38324
median_high
train
def median_high(data):
    """Return the high median of data.

    When the number of data points is odd, the middle value is returned.
    When it is even, the larger of the two middle values is returned.
    """
    ordered = sorted(data)
    if not ordered:
        raise StatisticsError("no median for empty data")
    # floor division lands on the middle (odd) or upper middle (even)
    return ordered[len(ordered) // 2]
python
{ "resource": "" }
q38325
variance
train
def variance(data, xbar=None):
    """Return the sample variance of data.

    data should be an iterable of Real-valued numbers, with at least two
    values. The optional argument xbar, if given, should be the mean of
    the data. If it is missing or None, the mean is automatically calculated.

    Use this function when your data is a sample from a population. To
    calculate the variance from the entire population, see ``pvariance``.

    This function does not check that ``xbar`` is actually the mean of
    ``data``. Giving arbitrary values for ``xbar`` may lead to invalid or
    impossible results.

    Decimals and Fractions are supported
    """
    # Materialize one-shot iterators so len() works and data can be
    # traversed more than once.
    if iter(data) is data:
        data = list(data)
    n = len(data)
    if n < 2:
        raise StatisticsError('variance requires at least two data points')
    # Bessel's correction: divide by n - 1 for a sample.
    return _ss(data, xbar) / (n - 1)
python
{ "resource": "" }
q38326
pvariance
train
def pvariance(data, mu=None):
    """Return the population variance of ``data``.

    data should be an iterable of Real-valued numbers, with at least one
    value. The optional argument mu, if given, should be the mean of
    the data. If it is missing or None, the mean is automatically calculated.

    Use this function to calculate the variance from the entire population.
    To estimate the variance from a sample, the ``variance`` function is
    usually a better choice.

    This function does not check that ``mu`` is actually the mean of
    ``data``. Giving arbitrary values for ``mu`` may lead to invalid or
    impossible results.

    Decimals and Fractions are supported:
    """
    # Materialize one-shot iterators so len() works and data can be
    # traversed more than once.
    if iter(data) is data:
        data = list(data)
    n = len(data)
    if n < 1:
        raise StatisticsError('pvariance requires at least one data point')
    # Population variance divides by n (no Bessel correction).
    return _ss(data, mu) / n
python
{ "resource": "" }
q38327
stdev
train
def stdev(data, xbar=None):
    """Return the square root of the sample variance.

    See ``variance`` for arguments and other details.
    """
    var = variance(data, xbar)
    # Exact types (Decimal, Fraction) expose their own sqrt; fall back to
    # math.sqrt for plain floats.
    sqrt_method = getattr(var, 'sqrt', None)
    if sqrt_method is not None:
        return sqrt_method()
    return math.sqrt(var)
python
{ "resource": "" }
q38328
pstdev
train
def pstdev(data, mu=None):
    """Return the square root of the population variance.

    See ``pvariance`` for arguments and other details.
    """
    var = pvariance(data, mu)
    # Exact types (Decimal, Fraction) expose their own sqrt; fall back to
    # math.sqrt for plain floats.
    sqrt_method = getattr(var, 'sqrt', None)
    if sqrt_method is not None:
        return sqrt_method()
    return math.sqrt(var)
python
{ "resource": "" }
q38329
geometric_mean
train
def geometric_mean(data):
    """Return the geometric mean of data
    """
    if not data:
        raise StatisticsError('geometric_mean requires at least one data point')
    # in order to support negative or null values:
    # zeros are replaced by math.e and negatives by 1.0 (i.e. ignored)
    adjusted = [value if value > 0 else math.e if value == 0 else 1.0
                for value in data]
    product = functools.reduce(operator.mul, adjusted)
    return math.pow(math.fabs(product), 1.0 / len(adjusted))
python
{ "resource": "" }
q38330
harmonic_mean
train
def harmonic_mean(data):
    """Return the harmonic mean of data
    """
    if not data:
        raise StatisticsError('harmonic_mean requires at least one data point')
    # zero values contribute nothing to the divisor (instead of raising)
    divisor = sum((1.0 / value if value else 0.0) for value in data)
    return len(data) / divisor if divisor else 0.0
python
{ "resource": "" }
q38331
kurtosis
train
def kurtosis(data):
    """Return the (excess) kurtosis of the data's distribution
    """
    if not data:
        raise StatisticsError('kurtosis requires at least one data point')
    size = len(data)
    # fourth power of the sample standard deviation
    denom = stdev(data) ** 4
    if not denom:
        return 0.0
    center = mean(data)
    fourth_moments = sum((value - center) ** 4 / denom for value in data)
    # subtracting 3 yields excess kurtosis (0 for a normal distribution)
    return fourth_moments / size - 3
python
{ "resource": "" }
q38332
percentile
train
def percentile(data, n):
    """Return the n-th percentile of the given data

    Assume that the data are already sorted
    """
    size = len(data)
    # nearest-rank style position, shifted by half an element
    position = (n / 100.0) * size - 0.5
    if position < 0 or position > size:
        raise StatisticsError(
            "Too few data points ({}) for {}th percentile".format(size, n))
    return data[int(position)]
python
{ "resource": "" }
q38333
get_histogram
train
def get_histogram(data):
    """Return the histogram relative to the given data

    Assume that the data are already sorted
    """
    count = len(data)
    if count < 2:
        raise StatisticsError('Too few data points ({}) for get_histogram'.format(count))
    # data is sorted, so min/max are the ends
    bins = get_histogram_bins(data[0], data[-1], stdev(data), count)
    histogram = dict.fromkeys(bins, 0)
    for value in data:
        # bins are ascending: count the value in the first bin holding it
        for upper in bins:
            if value <= upper:
                histogram[upper] += 1
                break
    return sorted(iteritems(histogram))
python
{ "resource": "" }
q38334
get_histogram_bins
train
def get_histogram_bins(min_, max_, std, count):
    """ Return optimal bins given the input parameters
    """
    width = _get_bin_width(std, count)
    bin_count = int(round((max_ - min_) / width) + 1)
    if not bin_count:
        # degenerate range: a single bin at the minimum
        return [min_]
    return [index * width + min_ for index in xrange(1, bin_count + 1)]
python
{ "resource": "" }
q38335
_get_bin_width
train
def _get_bin_width(stdev, count): """Return the histogram's optimal bin width based on Sturges http://www.jstor.org/pss/2965501 """ w = int(round((3.5 * stdev) / (count ** (1.0 / 3)))) if w: return w else: return 1
python
{ "resource": "" }
q38336
_longest_common_subsequence
train
def _longest_common_subsequence(x, y): """ Return the longest common subsequence between two sequences. Parameters ---------- x, y : sequence Returns ------- sequence Longest common subsequence of x and y. Examples -------- >>> _longest_common_subsequence("AGGTAB", "GXTXAYB") ['G', 'T', 'A', 'B'] >>> _longest_common_subsequence(["A", "GA", "G", "T", "A", "B"], ... ["GA", "X", "T", "X", "A", "Y", "B"]) ['GA', 'T', 'A', 'B'] """ m = len(x) n = len(y) # L[i, j] will contain the length of the longest common subsequence of # x[0..i - 1] and y[0..j - 1]. L = _np.zeros((m + 1, n + 1), dtype=int) for i in range(m + 1): for j in range(n + 1): if i == 0 or j == 0: continue elif x[i - 1] == y[j - 1]: L[i, j] = L[i - 1, j - 1] + 1 else: L[i, j] = max(L[i - 1, j], L[i, j - 1]) ret = [] i, j = m, n while i > 0 and j > 0: # If current character in x and y are same, then current character is # part of the longest common subsequence. if x[i - 1] == y[j - 1]: ret.append(x[i - 1]) i, j = i - 1, j - 1 # If not same, then find the larger of two and go in the direction of # larger value. elif L[i - 1, j] > L[i, j - 1]: i -= 1 else: j -= 1 return ret[::-1]
python
{ "resource": "" }
q38337
tower_layout
train
def tower_layout(graph, height='freeenergy', scale=None, center=None, dim=2):
    """
    Position all nodes of graph stacked on top of each other.

    Parameters
    ----------
    graph : `networkx.Graph` or `list` of nodes
        A position will be assigned to every node in graph.
    height : `str` or `None`, optional
        The node attribute that holds the numerical value used for the node
        height. This defaults to ``'freeenergy'``. If `None`, all node
        heights are set to zero.
    scale : number, optional
        Scale factor for positions.
    center : array-like, optional
        Coordinate pair around which to center the layout. Default is the
        origin.
    dim : `int`
        Dimension of layout. If `dim` > 2, the remaining dimensions are set
        to zero in the returned positions.

    Returns
    -------
    pos : mapping
        A mapping of positions keyed by node.

    Examples
    --------
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.atoms import create_data, read_cclib
    >>> from pyrrole.drawing import tower_layout
    >>> data = create_data(
    ...     read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"),
    ...     read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)"))
    >>> digraph = (ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
    ...            .to_digraph())
    >>> layout = tower_layout(digraph)
    >>> layout['AcOH(g)']
    array([   0.        , -228.56450866])

    Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:

    >>> layout = tower_layout(digraph, scale=1)
    >>> layout['AcOH(g)'][1] <= 1.
    True
    """
    # TODO: private function of packages should not be used.
    graph, center = _nx.drawing.layout._process_params(graph, center, dim)
    num_nodes = len(graph)
    # Trivial layouts: nothing to place, or a single node at the center.
    if num_nodes == 0:
        return {}
    elif num_nodes == 1:
        return {_nx.utils.arbitrary_element(graph): center}
    # Extra dimensions beyond the first two are zero-filled.
    paddims = max(0, (dim - 2))
    if height is None:
        y = _np.zeros(len(graph))
    else:
        # NOTE(review): nodes lacking the `height` attribute yield None
        # here, which would break the numeric array — presumably every
        # node carries the attribute; confirm against callers.
        y = _np.array([data for node, data in graph.nodes(data=height)])
    # x = 0 for every node: all nodes are stacked on one vertical line.
    pos_arr = _np.column_stack([_np.zeros((num_nodes, 1)), y,
                                _np.zeros((num_nodes, paddims))])
    if scale is not None:
        pos_arr = _nx.drawing.layout.rescale_layout(pos_arr,
                                                    scale=scale) + center
    pos = dict(zip(graph, pos_arr))
    # TODO: make test
    return pos
python
{ "resource": "" }
q38338
diagram_layout
train
def diagram_layout(graph, height='freeenergy', sources=None, targets=None, pos=None, scale=None, center=None, dim=2): """ Position nodes such that paths are highlighted, from left to right. Parameters ---------- graph : `networkx.Graph` or `list` of nodes A position will be assigned to every node in graph. height : `str` or `None`, optional The node attribute that holds the numerical value used for the node height. This defaults to ``'freeenergy'``. If `None`, all node heights are set to zero. sources : `list` of `str` All simple paths starting at members of `sources` are considered. Defaults to all nodes of graph. targets : `list` of `str` All simple paths ending at members of `targets` are considered. Defaults to all nodes of graph. pos : mapping, optional Initial positions for nodes as a mapping with node as keys and values as a coordinate `list` or `tuple`. If not specified (default), initial positions are computed with `tower_layout`. scale : number, optional Scale factor for positions. center : array-like, optional Coordinate pair around which to center the layout. Default is the origin. dim : `int` Dimension of layout. If `dim` > 2, the remaining dimensions are set to zero in the returned positions. Returns ------- pos : mapping A mapping of positions keyed by node. Examples -------- >>> import pandas as pd >>> from pyrrole import ChemicalSystem >>> from pyrrole.drawing import diagram_layout >>> data = pd.DataFrame( ... [{"name": "Separated_Reactants", "freeenergy": 0.}, ... {"name": "mlC1", "freeenergy": -5.4}, ... {"name": "mlC2", "freeenergy": -15.6}, ... {"name": "mTS1", "freeenergy": 28.5, "color": "g"}, ... {"name": "mCARB1", "freeenergy": -9.7}, ... {"name": "mCARB2", "freeenergy": -19.8}, ... {"name": "mCARBX", "freeenergy": 20}]).set_index("name") >>> system = ChemicalSystem( ... ["Separated_Reactants -> mlC1 -> mTS1", ... "Separated_Reactants -> mlC2 -> mTS1", ... "mCARB2 <- mTS1 -> mCARB1", ... 
"Separated_Reactants -> mCARBX"], data) >>> digraph = system.to_digraph() >>> layout = diagram_layout(digraph) >>> layout['mCARB2'] array([ 3. , -19.8]) Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes: >>> layout = diagram_layout(digraph, scale=1) >>> layout['mTS1'][1] <= 1. True """ # TODO: private function of packages should not be used. graph, center = _nx.drawing.layout._process_params(graph, center, dim) num_nodes = len(graph) if num_nodes == 0: return {} elif num_nodes == 1: return {_nx.utils.arbitrary_element(graph): center} if sources is None: sources = graph.nodes() if targets is None: targets = graph.nodes() simple_paths = [path for source in set(sources) for target in set(targets) for path in _nx.all_simple_paths(graph, source, target)] if pos is None: pos = tower_layout(graph, height=height, scale=None, center=center, dim=dim) for path in simple_paths: for n, step in enumerate(path): if pos[step][0] < n: pos[step][0] = n if scale is not None: pos_arr = _np.array([pos[node] for node in graph]) pos_arr = _nx.drawing.layout.rescale_layout(pos_arr, scale=scale) + center pos = dict(zip(graph, pos_arr)) # TODO: make test return pos
python
{ "resource": "" }
q38339
draw_diagram_nodes
train
def draw_diagram_nodes(graph, pos=None, nodelist=None, node_size=.7,
                       node_color='k', style='solid', alpha=1.0, cmap=None,
                       vmin=None, vmax=None, ax=None, label=None):
    """
    Draw nodes of graph.

    This draws only the nodes of graph as horizontal lines at each
    ``y = pos[1]`` from ``x - node_size/2`` to ``x + node_size/2``, where
    ``x = pos[0]``.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        A mapping with nodes as keys and positions as values. Positions
        should be sequences of length 2. If not specified (default), a
        diagram layout positioning will be computed. See `networkx.layout`
        and `pyrrole.drawing` for functions that compute node positions.
    nodelist : `list`, optional
        Draw only specified nodes (default is ``graph.nodes()``).
    node_size : scalar or array
        Size of nodes (default is ``.7``). If an array is specified it must
        be the same length as nodelist.
    node_color : color `str`, or array of `float`
        Node color. Can be a single color format `str` (default is ``'k'``),
        or a sequence of colors with the same length as nodelist. If numeric
        values are specified they will be mapped to colors using the `cmap`
        and `vmin`, `vmax` parameters. See `matplotlib.hlines` for more
        details.
    style : `str` (``'solid'``, ``'dashed'``, ``'dotted'``, ``'dashdot'``)
        Edge line style (default is ``'solid'``). See `matplotlib.hlines`
        for more details.
    alpha : `float` or array of `float`, optional
        The node transparency. This can be a single alpha value (default is
        ``'1.0'``), in which case it will be applied to all the nodes of
        color. Otherwise, if it is an array, the elements of alpha will be
        applied to the colors in order (cycling through alpha multiple times
        if necessary).
    cmap : Matplotlib colormap, optional
        Colormap name or Colormap instance for mapping intensities of nodes.
    vmin : `float`, optional
        Minimum for node colormap scaling.
    vmax : `float`, optional
        Maximum for node colormap scaling.
    ax : `matplotlib.axes.Axes`, optional
        Draw the graph in the specified Matplotlib axes.
    label : `str`, optional
        Label for legend.

    Returns
    -------
    `matplotlib.collections.LineCollection`
        `LineCollection` of the nodes.

    Raises
    ------
    networkx.NetworkXError
        Raised if a node has no position or one with bad value.

    Examples
    --------
    >>> import pandas as pd
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.drawing import draw_diagram_nodes
    >>> data = pd.DataFrame(
    ...     [{"name": "Separated_Reactants", "freeenergy": 0.},
    ...      {"name": "mlC1", "freeenergy": -5.4},
    ...      {"name": "mlC2", "freeenergy": -15.6},
    ...      {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
    ...      {"name": "mCARB1", "freeenergy": -9.7},
    ...      {"name": "mCARB2", "freeenergy": -19.8},
    ...      {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
    >>> system = ChemicalSystem(
    ...     ["Separated_Reactants -> mlC1 -> mTS1",
    ...      "Separated_Reactants -> mlC2 -> mTS1",
    ...      "mCARB2 <- mTS1 -> mCARB1",
    ...      "Separated_Reactants -> mCARBX"], data)
    >>> digraph = system.to_digraph()
    >>> nodes = draw_diagram_nodes(digraph)
    """
    if ax is None:
        ax = _plt.gca()
    if nodelist is None:
        nodelist = list(graph.nodes())
    if not nodelist or len(nodelist) == 0:
        # empty nodelist, no drawing
        return None
    if pos is None:
        pos = diagram_layout(graph)
    try:
        xy = _np.asarray([pos[v] for v in nodelist])
    except KeyError as e:
        raise _nx.NetworkXError('Node {} has no position.'.format(e))
    except ValueError:
        raise _nx.NetworkXError('Bad value in node positions.')
    # Per-node alpha: fold alpha into the colors and drop the scalar.
    # NOTE(review): ``_collections.Iterable`` was removed from the
    # top-level ``collections`` namespace in Python 3.10 — this presumably
    # targets ``collections.abc.Iterable``; confirm the module alias.
    if isinstance(alpha, _collections.Iterable):
        node_color = _nx.drawing.apply_alpha(node_color, alpha, nodelist,
                                             cmap, vmin, vmax)
        alpha = None
    # Each node is a horizontal segment of length node_size centered on x.
    node_collection = ax.hlines(xy[:, 1],
                                xy[:, 0] - node_size/2.,
                                xy[:, 0] + node_size/2.,
                                colors=node_color,
                                linestyles=style,
                                label=label,
                                cmap=cmap)
    # Draw nodes above edges (zorder 2).
    node_collection.set_zorder(2)
    return node_collection
python
{ "resource": "" }
q38340
draw_diagram_labels
train
def draw_diagram_labels(graph, pos=None, labels=None, font_size=12,
                        font_color='k', font_family='sans-serif',
                        font_weight='normal', alpha=1.0, bbox=None, ax=None,
                        offset=None, **kwds):
    """
    Draw only the node labels of *graph*.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        Mapping of nodes to length-2 positions; computed with
        `diagram_layout` when not given.
    labels : mapping, optional
        Node-to-text mapping; defaults to labeling each node with itself.
    font_size : `int`, optional
        Font size for text labels (default ``12``).
    font_color : `str`, optional
        Font color (default ``'k'``).
    font_family : `str`, optional
        Font family (default ``'sans-serif'``).
    font_weight : `str`, optional
        Font weight (default ``'normal'``).
    alpha : `float`, optional
        Text transparency (default ``1.0``).
    bbox : optional
        Matplotlib bbox properties forwarded to `Axes.text`.
    ax : `matplotlib.axes.Axes`, optional
        Axes to draw into (default is the current axes).
    offset : array-like or `str`, optional
        Added to each label position before drawing; ``'above'`` maps to
        ``(0, 1.5)`` and ``'below'`` to ``(0, -1.5)``.  Defaults to the
        zero vector.

    Returns
    -------
    mapping
        Text artists keyed on the nodes.
    """
    if ax is None:
        ax = _plt.gca()
    if labels is None:
        labels = {node: node for node in graph.nodes()}
    if pos is None:
        pos = diagram_layout(graph)
    # Resolve the symbolic offsets; `==` (not `in`) keeps array-like offsets
    # working, since arrays are unhashable.
    if offset is None:
        offset = _np.array([0., 0.])
    elif offset == "above":
        offset = _np.array([0., 1.5])
    elif offset == "below":
        offset = _np.array([0., -1.5])
    # Optional alignment overrides.
    halign = kwds.get('horizontalalignment', 'center')
    valign = kwds.get('verticalalignment', 'center')
    rendered = {}  # no text collection exists, so collect artists ourselves
    for node, text in labels.items():
        x, y = _np.asanyarray(pos[node]) + _np.asanyarray(offset)
        # str() makes "1" and 1 labeled the same.
        artist = ax.text(x, y, str(text),
                         size=font_size,
                         color=font_color,
                         family=font_family,
                         weight=font_weight,
                         alpha=alpha,
                         horizontalalignment=halign,
                         verticalalignment=valign,
                         transform=ax.transData,
                         bbox=bbox,
                         clip_on=True)
        rendered[node] = artist
    return rendered
python
{ "resource": "" }
q38341
draw_diagram
train
def draw_diagram(graph, pos=None, with_labels=True, offset=None, **kwds):
    """
    Draw a simple energy diagram for *graph* using Matplotlib.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        Mapping of nodes to length-2 positions; computed with
        `diagram_layout` when not given.
    with_labels : `bool`, optional
        Set to `True` (default) to draw labels on the nodes.
    offset : array-like or `str`, optional
        Label offset, see `draw_diagram_labels` (default ``'below'``).

    Notes
    -----
    Further keywords are forwarded to `diagram_layout`,
    `draw_diagram_nodes`, `draw_diagram_edges` and (when *with_labels*)
    `draw_diagram_labels` — but only those each callee actually accepts.
    The original forwarded everything, which raised ``TypeError`` as soon
    as e.g. a label-only keyword reached `draw_diagram_nodes`.
    """
    import inspect

    def _accepted(func, kwargs):
        # Forward only keyword arguments that *func* accepts; a callee
        # taking **kwargs accepts everything.
        params = inspect.signature(func).parameters
        if any(p.kind is inspect.Parameter.VAR_KEYWORD
               for p in params.values()):
            return dict(kwargs)
        return {k: v for k, v in kwargs.items() if k in params}

    if pos is None:
        # Default to the diagram layout.
        pos = diagram_layout(graph, **_accepted(diagram_layout, kwds))
    node_collection = draw_diagram_nodes(
        graph, pos, **_accepted(draw_diagram_nodes, kwds))  # noqa
    edge_collection = draw_diagram_edges(
        graph, pos, **_accepted(draw_diagram_edges, kwds))  # noqa
    if with_labels:
        if offset is None:
            # TODO: This changes the default behaviour of
            # draw_diagram_labels (whose own default is the zero vector).
            offset = "below"
        draw_diagram_labels(graph, pos, offset=offset,
                            **_accepted(draw_diagram_labels, kwds))
    _plt.draw_if_interactive()
python
{ "resource": "" }
q38342
PebblePacket.serialise
train
def serialise(self, default_endianness=None): """ Serialise a message, without including any framing. :param default_endianness: The default endianness, unless overridden by the fields or class metadata. Should usually be left at ``None``. Otherwise, use ``'<'`` for little endian and ``'>'`` for big endian. :type default_endianness: str :return: The serialised message. :rtype: bytes """ # Figure out an endianness. endianness = (default_endianness or DEFAULT_ENDIANNESS) if hasattr(self, '_Meta'): endianness = self._Meta.get('endianness', endianness) inferred_fields = set() for k, v in iteritems(self._type_mapping): inferred_fields |= {x._name for x in v.dependent_fields()} for field in inferred_fields: setattr(self, field, None) # Some fields want to manipulate other fields that appear before them (e.g. Unions) for k, v in iteritems(self._type_mapping): v.prepare(self, getattr(self, k)) message = b'' for k, v in iteritems(self._type_mapping): message += v.value_to_bytes(self, getattr(self, k), default_endianness=endianness) return message
python
{ "resource": "" }
q38343
PebblePacket.serialise_packet
train
def serialise_packet(self):
    """
    Serialise a message including its framing, which is inferred from the
    packet's ``Meta`` inner class; ``self.Meta.endpoint`` must be defined.

    :return: A serialised message, ready to be sent to the Pebble.
    """
    if not hasattr(self, '_Meta'):
        raise ReferenceError("Can't serialise a packet that doesn't have an endpoint ID.")
    payload = self.serialise()
    # Frame: big-endian length + endpoint id, then the payload.
    header = struct.pack('!HH', len(payload), self._Meta['endpoint'])
    return header + payload
python
{ "resource": "" }
q38344
expand_query
train
def expand_query(config, kwds):
    """
    Expand `kwds` based on `config.search.query_expander`.

    :type config: .config.Configuration
    :type kwds: dict

    :rtype: dict
    :return: Return `kwds`, modified in place.
    """
    remaining = []
    for query in kwds.pop('pattern', []):
        expansion = config.search.alias.get(query)
        if expansion is None:
            # Not an alias: keep as a plain pattern.
            remaining.append(query)
            continue
        # Parse the alias expansion as if it were command-line arguments
        # and merge the result into kwds.
        parser = SafeArgumentParser()
        search_add_arguments(parser)
        parsed = vars(parser.parse_args(expansion))
        for key, value in parsed.items():
            if isinstance(value, (list, tuple)):
                if kwds.get(key):
                    kwds[key].extend(value)
                else:
                    kwds[key] = value
            else:
                kwds[key] = value
    kwds['pattern'] = remaining
    return config.search.kwds_adapter(kwds)
python
{ "resource": "" }
q38345
preprocess_kwds
train
def preprocess_kwds(kwds):
    """
    Preprocess keyword arguments for `DataBase.search_command_record`.
    """
    from .utils.timeutils import parse_datetime, parse_duration
    # Drop presentation-only options the database layer does not know.
    for key in ('output', 'format', 'format_level',
                'with_command_id', 'with_session_id'):
        kwds.pop(key, None)
    # Parse time/duration strings in place, keeping the raw string when
    # parsing fails.
    for key, parse in (('time_after', parse_datetime),
                       ('time_before', parse_datetime),
                       ('duration_longer_than', parse_duration),
                       ('duration_less_than', parse_duration)):
        raw = kwds[key]
        if raw:
            parsed = parse(raw)
            if parsed:
                kwds[key] = parsed
    # Interpret "pattern" (currently just copied to --include-pattern,
    # loosened with surrounding globs).
    loose_patterns = ["*{0}*".format(p) for p in kwds.pop('pattern', [])]
    kwds['match_pattern'] = kwds['match_pattern'] + loose_patterns
    if not kwds['sort_by']:
        kwds['sort_by'] = ['count']
    kwds['sort_by'] = [SORT_KEY_SYNONYMS[k] for k in kwds['sort_by']]
    return kwds
python
{ "resource": "" }
q38346
__fix_args
train
def __fix_args(kwargs):
    """
    Fill in long-form argument names from their single-letter shortcuts.

    Existing long-form values are kept; missing ones default to the value
    of the corresponding shortcut (or ``None`` when absent).
    """
    shortcuts = (
        ('fixed_strings', 'F'),
        ('basic_regexp', 'G'),
        ('extended_regexp', 'E'),
        ('ignore_case', 'i'),
        ('invert', 'v'),
        ('words', 'w'),
        ('line', 'x'),
        ('count', 'c'),
        ('max_count', 'm'),
        ('after_context', 'A'),
        ('before_context', 'B'),
        ('quiet', 'q'),
        ('byte_offset', 'b'),
        ('only_matching', 'o'),
        ('line_number', 'n'),
        ('regex_flags', 'r'),
        ('keep_eol', 'k'),
        ('trim', 't'),
    )
    for long_name, short_name in shortcuts:
        kwargs.setdefault(long_name, kwargs.get(short_name))
python
{ "resource": "" }
q38347
__process_line
train
def __process_line(line, strip_eol, strip):
    """
    Post-process a single line value.

    ``strip`` removes surrounding whitespace entirely; otherwise
    ``strip_eol`` drops a single trailing newline character.
    """
    if strip:
        return line.strip()
    if strip_eol and line.endswith('\n'):
        return line[:len(line) - 1]
    return line
python
{ "resource": "" }
q38348
get_parser
train
def get_parser(commands):
    """
    Generate argument parser given a list of subcommand specifications.

    :type commands: list of (str, function, function)
    :arg  commands:
        Each element is a tuple ``(name, adder, runner)`` where ``name`` is
        the subcommand, ``adder`` receives an `argparse.ArgumentParser` and
        adds arguments to it, and ``runner`` is called with the parsed
        arguments as keyword arguments.  The runner's docstring becomes the
        subcommand description.
    """
    parser = argparse.ArgumentParser(
        formatter_class=Formatter,
        description=__doc__,
        epilog=EPILOG,
    )
    subparsers = parser.add_subparsers()
    for name, adder, runner in commands:
        doc = runner.__doc__ and textwrap.dedent(runner.__doc__)
        subparser = subparsers.add_parser(
            name, formatter_class=Formatter, description=doc)
        adder(subparser)
        subparser.set_defaults(func=runner)
    return parser
python
{ "resource": "" }
q38349
locate_run
train
def locate_run(output, target, no_newline):
    """
    Print location of RASH related file.
    """
    from .config import ConfigStore
    # ConfigStore exposes one "<target>_path" attribute per known file.
    path = getattr(ConfigStore(), "{0}_path".format(target))
    output.write(path)
    if not no_newline:
        output.write("\n")
python
{ "resource": "" }
q38350
UserInteractionFragment.userBrowser
train
def userBrowser(self, request, tag):
    """
    Render the table browser listing local users.
    """
    fragment = LocalUserBrowserFragment(self.browser)
    fragment.docFactory = webtheme.getLoader(fragment.fragmentName)
    fragment.setFragmentParent(self)
    return fragment
python
{ "resource": "" }
q38351
UserInteractionFragment.userCreate
train
def userCreate(self, request, tag):
    """
    Render a form for creating new users.
    """
    fields = [
        liveform.Parameter("localpart", liveform.TEXT_INPUT, unicode,
                           "localpart"),
        liveform.Parameter("domain", liveform.TEXT_INPUT, unicode,
                           "domain"),
        liveform.Parameter("password", liveform.PASSWORD_INPUT, unicode,
                           "password"),
    ]
    userCreator = liveform.LiveForm(self.createUser, fields)
    userCreator.setFragmentParent(self)
    return userCreator
python
{ "resource": "" }
q38352
UserInteractionFragment.createUser
train
def createUser(self, localpart, domain, password=None):
    """
    Create a new, blank user account with the given name and domain
    and, if specified, with the given password.

    @type localpart: C{unicode}
    @param localpart: The local portion of the username.  ie, the
    C{'alice'} in C{'alice@example.com'}.

    @type domain: C{unicode}
    @param domain: The domain portion of the username.  ie, the
    C{'example.com'} in C{'alice@example.com'}.

    @type password: C{unicode} or C{None}
    @param password: The password to associate with the new account.
    If C{None}, generate a new password automatically.
    """
    loginSystem = self.browser.store.parent.findUnique(userbase.LoginSystem)
    if password is None:
        # SECURITY: passwords must come from a CSPRNG.  The module-level
        # ``random`` functions use the predictable Mersenne Twister;
        # SystemRandom draws from the OS entropy pool instead (available
        # on Python 2 as well).
        rng = random.SystemRandom()
        alphabet = string.ascii_letters + string.digits
        password = u''.join(rng.choice(alphabet) for _ in range(8))
    loginSystem.addAccount(localpart, domain, password)
python
{ "resource": "" }
q38353
LocalUserBrowserFragment.doAction
train
def doAction(self, loginMethod, actionClass):
    """
    Show the form for the requested action.
    """
    username = loginMethod.localpart + u'@' + loginMethod.domain
    return actionClass(self, username, loginMethod.account)
python
{ "resource": "" }
q38354
REPL.getStore
train
def getStore(self, name, domain):
    """Convenience method for the REPL. I got tired of typing this
    string every time I logged in."""
    realm = IRealm(self.original.store.parent)
    account = realm.accountByAddress(name, domain)
    return account.avatars.open()
python
{ "resource": "" }
q38355
PortConfiguration.createPort
train
def createPort(self, portNumber, ssl, certPath, factory, interface=u''):
    """
    Create a new listening port.

    @type portNumber: C{int}
    @param portNumber: Port number on which to listen.

    @type ssl: C{bool}
    @param ssl: Indicates whether this should be an SSL port or not.

    @type certPath: C{str}
    @param certPath: If C{ssl} is true, a path to a certificate file
    somewhere within the site store's files directory.  Ignored
    otherwise.

    @param factory: L{Item} which provides L{IProtocolFactoryFactory}
    which will be used to get a protocol factory to associate with this
    port.

    @type interface: C{unicode}
    @param interface: The local interface to bind to (all interfaces
    when empty, the default).

    @return: C{None}
    """
    # Ports are created in the site store (the parent of this store).
    store = self.store.parent
    if ssl:
        port = SSLPort(store=store, portNumber=portNumber, certificatePath=FilePath(certPath), factory=factory, interface=interface)
    else:
        port = TCPPort(store=store, portNumber=portNumber, factory=factory, interface=interface)
    # installOn activates the port item so it starts listening.
    installOn(port, store)
python
{ "resource": "" }
q38356
FactoryColumn.extractValue
train
def extractValue(self, model, item):
    """
    Get the class name of the factory referenced by a port.

    @param model: Either a TabularDataModel or a ScrollableView,
    depending on what this column is part of.

    @param item: A port item instance (as defined by L{xmantissa.port}).

    @rtype: C{unicode}
    @return: The name of the class of the item to which this column's
    attribute refers.
    """
    factory = super(FactoryColumn, self).extractValue(model, item)
    className = factory.__class__.__name__
    return className.decode('ascii')
python
{ "resource": "" }
q38357
CertificateColumn.extractValue
train
def extractValue(self, model, item):
    """
    Get the path referenced by this column's attribute.

    @param model: Either a TabularDataModel or a ScrollableView,
    depending on what this column is part of.

    @param item: A port item instance (as defined by L{xmantissa.port}).

    @rtype: C{unicode}
    """
    certPath = super(CertificateColumn, self).extractValue(model, item)
    # Undecodable bytes in the path are substituted rather than raising.
    return certPath.path.decode('utf-8', 'replace')
python
{ "resource": "" }
q38358
connectRoute
train
def connectRoute(amp, router, receiver, protocol):
    """
    Connect the given receiver to a new box receiver for the given
    protocol on the connected AMP peer, similarly to how
    C{reactor.connectTCP} establishes a new connection.

    @param amp: The AMP connection over which to negotiate the route.
    @param router: The router used to bind a local route for *receiver*.
    @param receiver: An L{IBoxReceiver} which will be started when a
        route to a receiver for the given protocol is found.
    @param protocol: The name of a protocol for which the AMP peer has
        an L{IBoxReceiverFactory}.

    @return: A L{Deferred} which fires with C{receiver} when the route
        is established.
    """
    route = router.bindRoute(receiver)
    d = amp.callRemote(Connect, origin=route.localRouteName,
                       protocol=protocol)

    def _routeEstablished(result):
        route.connectTo(result['route'])
        return receiver

    d.addCallback(_routeEstablished)
    return d
python
{ "resource": "" }
q38359
AMPConfiguration.getFactory
train
def getFactory(self):
    """
    Return a server factory which creates AMP protocol instances.
    """
    def buildProtocol():
        # Each connection gets its own CredReceiver wired to a portal
        # that accepts both regular logins and one-time pads.
        receiver = CredReceiver()
        checkers = [self.loginSystem, OneTimePadChecker(self._oneTimePads)]
        receiver.portal = Portal(self.loginSystem, checkers)
        return receiver

    factory = ServerFactory()
    factory.protocol = buildProtocol
    return factory
python
{ "resource": "" }
q38360
getfirstline
train
def getfirstline(file, default):
    """
    Return the stripped content of *file* when it consists of exactly one
    line; otherwise return *default*.

    Note: despite the name, a file with two or more lines yields *default*,
    not its first line.
    """
    with open(file, 'rb') as fh:
        lines = fh.readlines()
    if len(lines) == 1:
        return lines[0].decode('utf-8').strip('\n')
    return default
python
{ "resource": "" }
q38361
JsonRpcResponseBatch.add_item
train
def add_item(self, item):
    """Append *item* to the batch after validating its type."""
    if isinstance(item, JsonRpcResponse):
        self.items.append(item)
    else:
        raise TypeError(
            "Expected JsonRpcResponse but got {} instead".format(type(item).__name__))
python
{ "resource": "" }
q38362
LocalPool.manifest
train
def manifest(self, subvol):
    """
    Generator for manifest, yields 7-tuples.

    Prefers a manifest baked into the (read-only) template, then one
    cached under ``MANIFEST_DIR``; otherwise generates the manifest,
    streaming it to the caller while also persisting it for next time.
    """
    subvol_path = os.path.join(self.path, str(subvol))
    # MANIFEST_DIR is an absolute path; the leading separator is dropped
    # so it can be joined *under* the subvolume path.
    builtin_path = os.path.join(subvol_path, MANIFEST_DIR[1:], str(subvol))
    manifest_path = os.path.join(MANIFEST_DIR, str(subvol))
    if os.path.exists(builtin_path):
        # Stream the manifest written into the (read-only) template,
        # note that this has not been done up to now
        return open(builtin_path, "rb")
    elif os.path.exists(manifest_path):
        # Stream the manifest written into /var/lib/butterknife/manifests
        return open(manifest_path, "rb")
    else:
        # No manifest yet: generate one, yielding each line while also
        # saving it under /var/lib/butterknife/manifests for reuse.
        def generator():
            with tempfile.NamedTemporaryFile(prefix=str(subvol),
                                             dir=MANIFEST_DIR,
                                             delete=False) as fh:
                print("Temporarily writing to", fh.name)
                for entry in generate_manifest(os.path.join(self.path, str(subvol))):
                    # BUGFIX (idiom): compare against None with ``is``.
                    line = ("\t".join(["-" if j is None else str(j)
                                       for j in entry])).encode("utf-8") + b"\n"
                    fh.write(line)
                    yield line
            # File is complete and closed; move it into its final name
            # atomically (same filesystem).
            print("Renaming to", manifest_path)
            os.rename(fh.name, manifest_path)
        return generator()
python
{ "resource": "" }
q38363
event_handler
train
def event_handler(event_name):
    """
    Decorator designating a function as the handler for an event type.

    The decorated function must accept one parameter: the body of the
    received event, a JSON-encodable Python object (dict, list, str,
    int, bool, float or None).

    :param event_name: The name of the event that will be handled. Only
        one handler per event name is supported by the same microservice.
    """
    def decorator(func):
        # Flags read when the microservice collects its event handlers.
        func._event_handler = True
        func._handled_event = event_name
        return func
    return decorator
python
{ "resource": "" }
q38364
exposed_method
train
def exposed_method(name=None, private=False, is_coroutine=True, requires_handler_reference=False):
    """
    Marks a method as exposed via JSON RPC.

    :param name: the name of the exposed method. Must contain only letters,
                 digits, dots and underscores. If not present or set
                 explicitly to ``None``, defaults to the name of the exposed
                 method.
    :type name: str
    :param private: Flag that specifies if the exposed method is private.
    :type private: bool
    :param is_coroutine: Flag that specifies if the method is a Tornado
                         coroutine. If True, it will be wrapped with the
                         :py:func:`tornado.gen.coroutine` decorator.
    :type is_coroutine: bool
    :param requires_handler_reference: If ``True``, the handler method will
                                       receive as the first parameter a
                                       ``handler`` argument with the Tornado
                                       request handler for the current
                                       request (headers, cookies, etc.).
    :type requires_handler_reference: bool
    :raises ValueError: if the resolved method name does not match
                        ``METHOD_NAME_REGEX``.

    .. versionadded:: 0.9.0
    """
    def wrapper(func):
        # Validate the exposed name (explicit or derived from the function).
        if name:
            method_name = name
        else:
            method_name = func.__name__

        if not METHOD_NAME_REGEX.match(method_name):
            raise ValueError("Invalid method name: '{}'".format(method_name))

        @functools.wraps(func)
        def real_wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        # Set visibility flags consumed by the registration machinery.
        if private:
            setattr(real_wrapper, "_exposed_private", True)
        else:
            setattr(real_wrapper, "_exposed_public", True)

        if is_coroutine:
            # Marked *before* wrapping so the inner function carries the
            # gemstone flag; the coroutine wrapper then gets _is_coroutine.
            real_wrapper.__gemstone_is_coroutine = True
            real_wrapper = tornado.gen.coroutine(real_wrapper)
            setattr(real_wrapper, "_is_coroutine", True)

        if requires_handler_reference:
            setattr(real_wrapper, "_req_h_ref", True)

        setattr(real_wrapper, "_exposed_name", method_name)
        return real_wrapper

    return wrapper
python
{ "resource": "" }
q38365
Metrics.available
train
def available(self):
    '''
    Check if a related database exists, i.e. whether ``db_name`` appears
    among the names reported by the database server.
    '''
    names = (entry['name'] for entry in self._db.get_database_list())
    return self.db_name in names
python
{ "resource": "" }
q38366
make_geohash_tables
train
def make_geohash_tables(table,listofprecisions,**kwargs):
    '''
    Aggregate a geohash table to several coarser precisions.

    table - dataframe with a GEOHASH column, a COUNT column, and data columns
    listofprecisions - geohash precisions (number of characters) to produce
    sort_by - field to sort by for each group (default 'COUNT')
    return_squares - boolean arg if true returns a list of squares instead of writing out to table

    NOTE(review): this is Python 2 code (print statement, iteritems) and
    uses the long-removed pandas ``DataFrame.sort`` API.
    '''
    return_squares = False
    sort_by = 'COUNT'

    # logic for accepting kwarg inputs
    for key,value in kwargs.iteritems():
        if key == 'sort_by':
            sort_by = value
        if key == 'return_squares':
            return_squares = value

    # getting header
    header = df2list(table)[0]

    # getting data columns; assumes the first 10 columns are fixed
    # positional fields -- TODO confirm against df2list's layout
    columns = header[10:]

    # keep a handle on the full-precision table; every iteration below
    # restarts from it
    originaltable = table
    if not sort_by == 'COUNT':
        originaltable = originaltable.sort([sort_by],ascending=[0])

    # coarsest-last ordering so precision 8 (if requested) is the original
    listofprecisions = sorted(listofprecisions,reverse=True)

    # making total table to hold a list of dfs
    if return_squares == True and listofprecisions[-1] == 8:
        total_list = [table]
    elif return_squares == True:
        total_list = []

    for row in listofprecisions:
        precision = int(row)
        table = originaltable

        # truncate geohashes to the target precision and aggregate
        table['GEOHASH'] = table.GEOHASH.str[:precision]
        table = table[['GEOHASH','COUNT']+columns].groupby(['GEOHASH'],sort=True).sum()
        table = table.sort([sort_by],ascending=[0])
        table = table.reset_index()

        newsquares = [header]
        # iterating through each square here
        # NOTE(review): this inner loop reuses the name ``row`` from the
        # outer loop; precision was already captured above so it is safe.
        for row in df2list(table)[1:]:
            # getting the corner points for this geohash cell
            points = get_points_geohash(row[0])

            # making new row: geohash, corner points, then aggregated values
            newrow = [row[0]] + points + row[1:]

            # appending to newsquares
            newsquares.append(newrow)

        # taking newsquares to dataframe
        table = list2df(newsquares)

        if return_squares == True:
            total_list.append(table)
        else:
            table.to_csv('squares' + str(precision) + '.csv',index=False)

    if return_squares == True:
        return total_list
    else:
        print 'Wrote output squares tables to csv files.'
python
{ "resource": "" }
q38367
expand_includes
train
def expand_includes(text, path='.'):
    """Recursively expand ``.. include:: <file>`` directives in *text*."""
    include_re = re.compile(r'^\.\. include:: (?P<filename>.*)$',
                            flags=re.MULTILINE)

    def replace(match):
        target = join(path, match.group('filename'))
        included = read(target)
        # Nested includes are resolved relative to the included file.
        return expand_includes(included, path=join(path, dirname(target)))

    return include_re.sub(replace, text)
python
{ "resource": "" }
q38368
angSepVincenty
train
def angSepVincenty(ra1, dec1, ra2, dec2):
    """
    Angular separation on a sphere via the Vincenty formula.

    Inputs and output are in degrees; the arctan2 form is numerically
    stable for both very small and near-antipodal separations.
    """
    lam1, phi1, lam2, phi2 = (np.radians(v) for v in (ra1, dec1, ra2, dec2))
    sin_phi1, cos_phi1 = np.sin(phi1), np.cos(phi1)
    sin_phi2, cos_phi2 = np.sin(phi2), np.cos(phi2)
    dlam = lam2 - lam1
    cos_dlam, sin_dlam = np.cos(dlam), np.sin(dlam)
    numerator = np.sqrt(
        (cos_phi2 * sin_dlam) ** 2 +
        (cos_phi1 * sin_phi2 - sin_phi1 * cos_phi2 * cos_dlam) ** 2)
    denominator = sin_phi1 * sin_phi2 + cos_phi1 * cos_phi2 * cos_dlam
    return np.degrees(np.arctan2(numerator, denominator))
python
{ "resource": "" }
q38369
parse_file
train
def parse_file(infile, exit_on_error=True):
    """Parse a comma-separated file with columns "ra,dec,magnitude".

    Returns the three columns as arrays ``(ra, dec, mag)``.  On a read
    failure the error is logged and the process exits, unless
    ``exit_on_error`` is false, in which case the exception is re-raised.
    """
    try:
        raw = np.genfromtxt(infile, usecols=[0, 1, 2], delimiter=',')
        # atleast_2d keeps single-row files unpackable into three columns.
        a, b, mag = np.atleast_2d(raw).T
    except IOError as e:
        if not exit_on_error:
            raise e
        logger.error("There seems to be a problem with the input file, "
                     "the format should be: RA_degrees (J2000), Dec_degrees (J2000), "
                     "Magnitude. There should be no header, columns should be "
                     "separated by a comma")
        sys.exit(1)
    return a, b, mag
python
{ "resource": "" }
q38370
onSiliconCheck
train
def onSiliconCheck(ra_deg, dec_deg, FovObj, padding_pix=DEFAULT_PADDING):
    """Check a single position against the field of view."""
    separation = angSepVincenty(FovObj.ra0_deg, FovObj.dec0_deg,
                                ra_deg, dec_deg)
    # Anything 90 degrees or more from the boresight cannot be on silicon.
    if separation >= 90.:
        return False
    # padding_pix=3 means that objects less than 3 pixels off the edge of
    # a channel are counted inside, to account for inaccuracies in K2fov.
    return FovObj.isOnSilicon(ra_deg, dec_deg, padding_pix=padding_pix)
python
{ "resource": "" }
q38371
onSiliconCheckList
train
def onSiliconCheckList(ra_deg, dec_deg, FovObj, padding_pix=DEFAULT_PADDING):
    """Check a list of positions against the field of view."""
    separation = angSepVincenty(FovObj.ra0_deg, FovObj.dec0_deg,
                                ra_deg, dec_deg)
    # Only positions within 90 degrees of the boresight can be on silicon;
    # everything else stays False.
    near = separation < 90.
    result = np.zeros(len(separation), dtype=bool)
    result[near] = FovObj.isOnSiliconList(ra_deg[near], dec_deg[near],
                                          padding_pix=padding_pix)
    return result
python
{ "resource": "" }
q38372
K2onSilicon
train
def K2onSilicon(infile, fieldnum, do_nearSiliconCheck=False):
    """Checks whether targets are on silicon during a given campaign.

    This function will write a csv table called targets_siliconFlag.csv,
    which details the silicon status for each target listed in `infile`
    (0 = not on silicon, 2 = on silicon; 1 = near silicon when
    `do_nearSiliconCheck` is enabled).  When Matplotlib is available it
    also writes targets_fov.png.  Both files go to the current working
    directory.

    Parameters
    ----------
    infile : str
        Path to a csv table with columns ra_deg,dec_deg,magnitude (no header).
    fieldnum : int
        K2 Campaign number.
    do_nearSiliconCheck : bool
        If `True`, targets near (but not on) silicon are flagged with a "1".
    """
    ra_sources_deg, dec_sources_deg, mag = parse_file(infile)
    n_sources = np.shape(ra_sources_deg)[0]
    if n_sources > 500:
        logger.warning("Warning: there are {0} sources in your target list, "
                       "this could take some time".format(n_sources))
    k = fields.getKeplerFov(fieldnum)
    # NOTE(review): raDec is assigned but never used below -- presumably
    # kept for side effects of getCoordsOfChannelCorners(); TODO confirm.
    raDec = k.getCoordsOfChannelCorners()
    # One on-silicon check per source, all against the same FOV object.
    onSilicon = list(
        map(
            onSiliconCheck,
            ra_sources_deg, dec_sources_deg, np.repeat(k, len(ra_sources_deg))
        )
    )
    onSilicon = np.array(onSilicon, dtype=bool)
    if do_nearSiliconCheck:
        nearSilicon = list(
            map(
                nearSiliconCheck,
                ra_sources_deg, dec_sources_deg, np.repeat(k, len(ra_sources_deg))
            )
        )
        nearSilicon = np.array(nearSilicon, dtype=bool)
    if got_mpl:
        # Plot the field of view with targets color-coded by silicon status.
        almost_black = '#262626'
        light_grey = np.array([float(248)/float(255)]*3)
        ph = proj.PlateCaree()
        k.plotPointing(ph, showOuts=False)
        targets = ph.skyToPix(ra_sources_deg, dec_sources_deg)
        targets = np.array(targets)
        fig = pl.gcf()
        # NOTE(review): this gca() result is immediately overwritten by
        # add_subplot on the next line.
        ax = fig.gca()
        ax = fig.add_subplot(111)
        ax.scatter(*targets, color='#fc8d62', s=7, label='not on silicon')
        ax.scatter(targets[0][onSilicon], targets[1][onSilicon],
                   color='#66c2a5', s=8, label='on silicon')
        ax.set_xlabel('R.A. [degrees]', fontsize=16)
        ax.set_ylabel('Declination [degrees]', fontsize=16)
        ax.invert_xaxis()
        ax.minorticks_on()
        legend = ax.legend(loc=0, frameon=True, scatterpoints=1)
        rect = legend.get_frame()
        rect.set_alpha(0.3)
        rect.set_facecolor(light_grey)
        rect.set_linewidth(0.0)
        texts = legend.texts
        for t in texts:
            t.set_color(almost_black)
        fig.savefig('targets_fov.png', dpi=300)
        pl.close('all')
    # prints zero if target is not on silicon
    siliconFlag = np.zeros_like(ra_sources_deg)
    # print a 1 if target is near but not on silicon
    if do_nearSiliconCheck:
        siliconFlag = np.where(nearSilicon, 1, siliconFlag)
    # prints a 2 if target is on silicon (overrides any "near" flag)
    siliconFlag = np.where(onSilicon, 2, siliconFlag)
    outarr = np.array([ra_sources_deg, dec_sources_deg, mag, siliconFlag])
    np.savetxt('targets_siliconFlag.csv', outarr.T, delimiter=', ',
               fmt=['%10.10f', '%10.10f', '%10.2f', '%i'])
    if got_mpl:
        print('I made two files: targets_siliconFlag.csv and targets_fov.png')
    else:
        print('I made one file: targets_siliconFlag.csv')
python
{ "resource": "" }
q38373
K2onSilicon_main
train
def K2onSilicon_main(args=None):
    """Function called when `K2onSilicon` is executed on the command line."""
    import argparse
    parser = argparse.ArgumentParser(
        description="Run K2onSilicon to find which targets in a "
                    "list call on active silicon for a given K2 campaign.")
    parser.add_argument('csv_file', type=str,
                        help="Name of input csv file with targets, column are "
                             "Ra_degrees, Dec_degrees, Kepmag")
    parser.add_argument('campaign', type=int, help='K2 Campaign number')
    namespace = parser.parse_args(args)
    K2onSilicon(namespace.csv_file, namespace.campaign)
python
{ "resource": "" }
q38374
Template._get_template
train
def _get_template(self, template_name):
    """
    Return the compiled template, loading and caching it on first use.
    """
    # NOTE(review): 'chached_templates' looks like a typo for
    # 'cached_templates', but renaming would change the object's
    # attribute surface, so it is kept.
    cache = self.chached_templates
    if template_name not in cache:
        cache[template_name] = self.env.get_template(template_name)
    return cache[template_name]
python
{ "resource": "" }
q38375
Template._render_context
train
def _render_context(self, template, block, **context):
    """
    Render a single template block with *context* and join the pieces
    into one unicode string.
    """
    ctx = template.new_context(context)
    return u''.join(block(ctx))
python
{ "resource": "" }
q38376
Mail.init_app
train
def init_app(self, app):
    """
    For Flask: (re)initialise the mailer from the app config.
    """
    config = app.config
    self.__init__(
        aws_access_key_id=config.get("SES_AWS_ACCESS_KEY"),
        aws_secret_access_key=config.get("SES_AWS_SECRET_KEY"),
        region=config.get("SES_REGION", "us-east-1"),
        sender=config.get("SES_SENDER", None),
        reply_to=config.get("SES_REPLY_TO", None),
        template=config.get("SES_TEMPLATE", None),
        template_context=config.get("SES_TEMPLATE_CONTEXT", {}),
    )
python
{ "resource": "" }
q38377
Mail.send_template
train
def send_template(self, template, to, reply_to=None, **context):
    """
    Render *template* with *context* and send the resulting email.
    """
    mail_data = self.parse_template(template, **context)
    # subject/body are passed explicitly; the remaining blocks travel
    # through as extra keyword arguments.
    subject = mail_data.pop("subject")
    body = mail_data.pop("body")
    return self.send(to=to, subject=subject, body=body,
                     reply_to=reply_to, **mail_data)
python
{ "resource": "" }
q38378
Mail.parse_template
train
def parse_template(self, template, **context):
    """
    Parse a template and return all of its blocks as a dict.

    Required blocks: ``subject`` and ``body`` (AttributeError when
    missing).  Optional blocks (``text_body``, ``html_body``,
    ``return_path``, ``format``) are copied through; a ``format`` value
    other than html/text is dropped.
    """
    required_blocks = ["subject", "body"]
    optional_blocks = ["text_body", "html_body", "return_path", "format"]

    if self.template_context:
        # NOTE: dict-items concatenation is Python 2 only; on Python 3
        # this would need ``dict(self.template_context, **context)``.
        context = dict(self.template_context.items() + context.items())

    blocks = self.template.render_blocks(template, **context)
    for rb in required_blocks:
        if rb not in blocks:
            raise AttributeError("Template error: block '%s' is missing from '%s'" % (rb, template))

    mail_params = {
        "subject": blocks["subject"].strip(),
        "body": blocks["body"]
    }

    for ob in optional_blocks:
        if ob in blocks:
            # BUGFIX: validate the *incoming* block value.  The original
            # read ``mail_params[ob]`` here, which raised KeyError because
            # the optional key had not been copied into mail_params yet.
            if ob == "format" and blocks[ob].lower() not in ["html", "text"]:
                continue
            mail_params[ob] = blocks[ob]
    return mail_params
python
{ "resource": "" }
q38379
_typelist
train
def _typelist(x): """Helper function converting all items of x to instances.""" if isinstance(x, collections.Sequence): return list(map(_to_instance, x)) elif isinstance(x, collections.Iterable): return x return None if x is None else [_to_instance(x)]
python
{ "resource": "" }
q38380
Command.write
train
def write(self, transport, protocol, *data):
    """Generate and send a command message unit.

    :param transport: An object implementing the `.Transport` interface,
        used by the protocol to send the message.
    :param protocol: An object implementing the `.Protocol` interface;
        overridden by ``self.protocol`` when the command carries its own.
    :param data: The program data.
    :raises AttributeError: if the command is not writable.
    """
    if not self._write:
        raise AttributeError('Command is not writeable')
    if self.protocol:
        protocol = self.protocol
    # Serialise the program data, or silently drop it for data-less commands.
    payload = _dump(self._write.data_type, data) if self._write.data_type else ()
    if isinstance(transport, SimulatedTransport):
        self.simulate_write(payload)
    else:
        protocol.write(transport, self._write.header, *payload)
python
{ "resource": "" }
q38381
Command.query
train
def query(self, transport, protocol, *data):
    """Generate and send a query message unit, returning the parsed reply.

    :param transport: An object implementing the `.Transport` interface,
        used by the protocol to send the message and receive the response.
    :param protocol: An object implementing the `.Protocol` interface;
        overridden by ``self.protocol`` when the command carries its own.
    :param data: The program data.
    :raises AttributeError: if the command is not queryable.
    """
    if not self._query:
        raise AttributeError('Command is not queryable')
    if self.protocol:
        protocol = self.protocol
    # Serialise the program data, or silently drop it for data-less commands.
    payload = _dump(self._query.data_type, data) if self._query.data_type else ()
    if isinstance(transport, SimulatedTransport):
        raw = self.simulate_query(payload)
    else:
        raw = protocol.query(transport, self._query.header, *payload)
    parsed = _load(self._query.response_type, raw)
    # Unwrap single-value responses so callers get a scalar, not a 1-tuple.
    if len(parsed) == 1:
        return parsed[0]
    return parsed
python
{ "resource": "" }
q38382
Driver._write
train
def _write(self, cmd, *datas):
    """Construct a write-only `Command` for *cmd* and execute it."""
    Command(write=cmd).write(self._transport, self._protocol, *datas)
python
{ "resource": "" }
q38383
Driver._query
train
def _query(self, cmd, *datas):
    """Construct a query-only `Command` for *cmd*, execute it and return
    the parsed response."""
    return Command(query=cmd).query(self._transport, self._protocol, *datas)
python
{ "resource": "" }
q38384
YahooFinance.getAll
train
def getAll(self, symbol):
    """
    Get all available quote data for the given ticker symbol.
    Returns a dictionary keyed by metric name.
    """
    # Field order mirrors the Yahoo format string passed to __request below.
    fields = ('price', 'change', 'volume', 'avg_daily_volume',
              'stock_exchange', 'market_cap', 'book_value', 'ebitda',
              'dividend_per_share', 'dividend_yield', 'earnings_per_share',
              '52_week_high', '52_week_low', '50day_moving_avg',
              '200day_moving_avg', 'price_earnings_ratio',
              'price_earnings_growth_ratio', 'price_sales_ratio',
              'price_book_ratio', 'short_ratio')
    values = self.__request(symbol, 'l1c1va2xj1b4j4dyekjm3m4rr5p5p6s7').split(',')
    # Explicit indexing (not zip) keeps the original IndexError behaviour
    # when the response contains fewer fields than expected.
    return {name: values[idx] for idx, name in enumerate(fields)}
python
{ "resource": "" }
q38385
YahooFinance.getQuotes
train
def getQuotes(self, symbol, start, end):
    """
    Get historical daily prices for the given ticker symbol from Yahoo's
    ichart CSV endpoint.

    :param symbol: ticker symbol, e.g. ``'AAPL'``
    :param start: start date, format ``'YYYY-MM-DD'``
    :param end: end date, format ``'YYYY-MM-DD'``
    :returns: list of ``Quote`` objects sorted ascending by time
    :raises UfException: NETWORK_ERROR on connection failure, UNKNOWN_ERROR
        for anything else raised while fetching/parsing
    """
    try:
        # Strip dashes so the date can be sliced positionally below:
        # YYYYMMDD -> [0:4]=year, [4:6]=month, [6:8]=day.
        start = str(start).replace('-', '')
        end = str(end).replace('-', '')
        # Yahoo ichart query parameters: d/e/f = end month(0-based)/day/year,
        # a/b/c = start month(0-based)/day/year, g=d requests daily bars.
        url = 'http://ichart.yahoo.com/table.csv?s=%s&' % symbol + \
              'd=%s&' % str(int(end[4:6]) - 1) + \
              'e=%s&' % str(int(end[6:8])) + \
              'f=%s&' % str(int(end[0:4])) + \
              'g=d&' + \
              'a=%s&' % str(int(start[4:6]) - 1) + \
              'b=%s&' % str(int(start[6:8])) + \
              'c=%s&' % str(int(start[0:4])) + \
              'ignore=.csv'
        days = urllib.urlopen(url).readlines()
        # NOTE(review): day[:-2] assumes every line ends in exactly two
        # trailing characters (presumably CRLF) -- verify against the feed.
        values = [day[:-2].split(',') for day in days]
        # sample values:[['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Adj Clos'], \
        #               ['2009-12-31', '112.77', '112.80', '111.39', '111.44', '90637900', '109.7']...]
        data = []
        # values[0] is the CSV header row, so it is skipped here.
        for value in values[1:]:
            data.append(Quote(value[0], value[1], value[2], value[3], value[4], value[5], value[6]))
        dateValues = sorted(data, key = lambda q: q.time)
        return dateValues
    except IOError:
        raise UfException(Errors.NETWORK_ERROR, "Can't connect to Yahoo server")
    except BaseException:
        # Broad catch-all: every other failure is re-wrapped as UNKNOWN_ERROR
        # with the original traceback text attached.
        raise UfException(Errors.UNKNOWN_ERROR, "Unknown Error in YahooFinance.getHistoricalPrices %s" % traceback.format_exc())
python
{ "resource": "" }
q38386
Product.installProductOn
train
def installProductOn(self, userstore):
    """
    Create an Installation in *userstore* for this product's collection
    of powerup types, and install those powerups on the user's store.
    The whole operation runs inside a single store transaction.
    """
    def _doInstall():
        installation = Installation(store=userstore)
        installation.types = self.types
        installation.install()
    userstore.transact(_doInstall)
python
{ "resource": "" }
q38387
Product.installOrResume
train
def installOrResume(self, userstore):
    """
    Install this product on a user store, or resume a suspended
    installation of it.

    If a matching installation exists and is suspended, it is resumed;
    if it exists and is not suspended, RuntimeError is raised.  When no
    matching installation exists, a fresh one is created.
    """
    for installation in userstore.query(
            Installation, Installation.types == self.types):
        if not installation.suspended:
            raise RuntimeError("installOrResume called for an"
                               " installation that isn't suspended")
        unsuspendTabProviders(installation)
        return
    # Every loop iteration above returns or raises, so reaching this point
    # means the query matched nothing: perform a fresh install.
    self.installProductOn(userstore)
python
{ "resource": "" }
q38388
Installation.items
train
def items(self):
    """
    Lazily yield the live item objects this Installation refers to,
    resolving each stored (string) store ID through the store.
    """
    for itemID in self._items:
        yield self.store.getItemByID(int(itemID))
python
{ "resource": "" }
q38389
Installation.install
train
def install(self):
    """
    Called when installed on the user store: create (or find) and install
    a powerup for each type name, recording their store IDs.
    """
    itemIDs = []
    for typeName in self.types:
        powerup = self.store.findOrCreate(namedAny(typeName))
        installOn(powerup, self.store)
        itemIDs.append(str(powerup.storeID).decode('ascii'))
    self._items = itemIDs
python
{ "resource": "" }
q38390
Installation.uninstall
train
def uninstall(self):
    """
    Called when uninstalled from the user store: uninstall every powerup
    this installation created, then clear the recorded item IDs.
    """
    for powerup in self.items:
        uninstallFrom(powerup, self.store)
    self._items = []
python
{ "resource": "" }
q38391
ProductFragment.coerceProduct
train
def coerceProduct(self, **kw):
    """
    Create a product from the given powerup type names and return a
    status string suitable for a template.

    @param **kw: Fully qualified Python names for powerup types to
        associate with the created product; falsy values are dropped.
    """
    typeNames = filter(None, kw.values())
    self.original.createProduct(typeNames)
    return u'Created.'
python
{ "resource": "" }
q38392
Action.fetch
train
def fetch(self):
    """
    Fetch & return a new `Action` object representing the action's
    current state

    :rtype: Action
    :raises DOAPIError: if the API endpoint replies with an error
    """
    manager = self.doapi_manager
    payload = manager.request(self.url)
    return manager._action(payload["action"])
python
{ "resource": "" }
q38393
Action.wait
train
def wait(self, wait_interval=None, wait_time=None):
    """
    Poll the server periodically until the action has either completed or
    errored out and return its final state.

    Delegates to the manager's ``wait_actions`` with this single action
    and returns the first (only) result it yields.

    :param number wait_interval: seconds to sleep between requests;
        defaults to the `doapi` object's :attr:`~doapi.wait_interval`
    :param number wait_time: total seconds before raising on timeout, or
        a negative number to wait indefinitely; defaults to the `doapi`
        object's :attr:`~doapi.wait_time`
    :return: the action's final state
    :rtype: Action
    :raises DOAPIError: if the API endpoint replies with an error
    :raises WaitTimeoutError: if ``wait_time`` is exceeded
    """
    waiter = self.doapi_manager.wait_actions([self], wait_interval, wait_time)
    return next(waiter)
python
{ "resource": "" }
q38394
CachedJSModule.wasModified
train
def wasModified(self):
    """
    Check whether this module's file has been modified on disk since the
    last time it was cached.

    @return: True if modified (mtime at or after the cached timestamp),
        False otherwise.
    """
    # Refresh stat info before comparing, so a stale cached mtime is not used.
    self.filePath.restat()
    return self.filePath.getmtime() >= self.lastModified
python
{ "resource": "" }
q38395
CachedJSModule.maybeUpdate
train
def maybeUpdate(self):
    """
    Refresh this cache entry (timestamp, contents, SHA-1 digest) if the
    underlying file has changed; otherwise leave it untouched.
    """
    if not self.wasModified():
        return
    self.lastModified = self.filePath.getmtime()
    self.fileContents = self.filePath.getContent()
    self.hashValue = hashlib.sha1(self.fileContents).hexdigest()
python
{ "resource": "" }
q38396
HashedJSModuleProvider.getModule
train
def getModule(self, moduleName):
    """
    Retrieve a JavaScript module cache entry, creating and memoising it
    on first request.

    @returns: Module cache for the named module.
    @rtype: L{CachedJSModule}
    """
    try:
        return self.moduleCache[moduleName]
    except KeyError:
        modulePath = FilePath(
            athena.jsDeps.getModuleForName(moduleName)._cache.path)
        cachedModule = self.moduleCache[moduleName] = CachedJSModule(
            moduleName, modulePath)
        return cachedModule
python
{ "resource": "" }
q38397
check_result
train
def check_result(data, key=''):
    """Check the result of an API response.

    Ideally, this should be done by checking that the value of the
    ``resultCode`` attribute is 0, but there are endpoints that simply do
    not follow this rule.

    Args:
        data (dict): Response obtained from the API endpoint.
        key (string): Key to check for existence in the dict.

    Returns:
        bool: True if result was correct, False otherwise.
    """
    if not isinstance(data, dict):
        return False
    # When a key is given, success just means the key is present.
    if key:
        return key in data
    if 'resultCode' in data:
        # OpenBus
        return data['resultCode'] == 0
    if 'code' in data:
        # Parking
        return data['code'] == 0
    return False
python
{ "resource": "" }
q38398
datetime_string
train
def datetime_string(day, month, year, hour, minute):
    """Build a date string using the provided day, month, year numbers.

    Automatically adds a leading zero to ``day`` and ``month`` if they
    only have one digit.

    Args:
        day (int): Day number.
        month(int): Month number.
        year(int): Year number.
        hour (int): Hour of the day in 24h format.
        minute (int): Minute of the hour.

    Returns:
        str: Date in the format *YYYY-MM-DDThh:mm:ss*.
    """
    # Overflow: out-of-range time components are clamped to zero.
    if hour < 0 or hour > 23:
        hour = 0
    # Bug fix: the bound was '> 60', which let minute == 60 through and
    # produced an invalid ':60:00' timestamp.
    if minute < 0 or minute > 59:
        minute = 0
    return '%d-%02d-%02dT%02d:%02d:00' % (year, month, day, hour, minute)
python
{ "resource": "" }
q38399
response_list
train
def response_list(data, key):
    """Obtain the relevant response data in a list.

    If the response does not already contain the result in a list, a new
    one will be created to ease iteration in the parser methods.

    Args:
        data (dict): API response.
        key (str): Attribute of the response that contains the result
            values.

    Returns:
        List of response items (usually dict) or None if the key is not
        present.
    """
    if key not in data:
        return None
    value = data[key]
    # Wrap scalar results so callers can always iterate uniformly.
    if isinstance(value, list):
        return value
    return [value]
python
{ "resource": "" }