| signature (string, lengths 8–3.44k) | body (string, lengths 0–1.41M) | docstring (string, lengths 1–122k) | id (string, lengths 5–17) |
|---|---|---|---|
def apply_addons(widget, *addon_types, **named_addon_types):
|
for addon_type in addon_types:<EOL><INDENT>addon_type(widget)<EOL><DEDENT>for name, addon_type in named_addon_types.items():<EOL><INDENT>addon_type(widget, addon_name=name)<EOL><DEDENT>
|
Apply some addons to a widget.
:param widget: The widget to apply addons to.
:param addon_types: A list of addon types, which will be instantiated
and applied to the widget with the default name of
the addon.
:param named_addon_types: A named list of addons, the keywords will be
the name of the addon when loaded and will
                         override the default addon name. This allows
                         loading the same addon multiple times for the
                         same widget under different names.
Plugins should conform to the GObjectPlugin interface or be a subclass of
it. Once loaded, addons will be available as widget.addons.<addon_name>
with standard attribute access.
|
f5944:m0
|
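For context, a minimal sketch of how `apply_addons` is called. `FakeWidget` and `EchoAddon` are stand-ins invented here; real addons would subclass `GObjectPlugin`, which handles registration under `widget.addons`:

```python
class FakeWidget:
    """Stand-in for a gtk widget (illustration only)."""

class EchoAddon:
    """Stand-in addon: anything callable as addon_type(widget, addon_name=...)."""
    def __init__(self, widget, addon_name='echo'):
        self.widget = widget
        self.addon_name = addon_name

w = FakeWidget()
# Positional types get their default name; keyword types get the keyword
# as their name, so the same type can be loaded twice under two names.
apply_addons(w, EchoAddon, shout=EchoAddon, whisper=EchoAddon)
```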
def apply_addon(widget, addon_type, **kw):
|
return addon_type(widget, **kw)<EOL>
|
Apply a single addon to a widget
:param widget: The widget to apply the addon to.
:param kw: A dict of keyword arguments to be passed to the addon
|
f5944:m1
|
def configure(self, **kw):
|
Configure and initialise the addon
For overriding in implementations.
|
f5944:c0:m1
|
|
def create_ui(self):
|
self.entry = gtk.Entry()<EOL>self.widget.add(self.entry)<EOL>
|
Create the user interface
create_ui is a method called during the Delegate's
initialisation process, to create, add to, or modify any UI
created by GtkBuilder files.
|
f5979:c0:m0
|
def none_missing(df, columns=None):
|
if columns is None:<EOL><INDENT>columns = df.columns<EOL><DEDENT>assert not df[columns].isnull().any().any()<EOL>return df<EOL>
|
Asserts that there are no missing values (NaNs) in the DataFrame.
|
f5986:m0
|
def is_monotonic(df, items=None, increasing=None, strict=False):
|
if items is None:<EOL><INDENT>items = {k: (increasing, strict) for k in df}<EOL><DEDENT>for col, (increasing, strict) in items.items():<EOL><INDENT>s = pd.Index(df[col])<EOL>if increasing:<EOL><INDENT>good = getattr(s, '<STR_LIT>')<EOL><DEDENT>elif increasing is None:<EOL><INDENT>good = getattr(s, '<STR_LIT>') | getattr(s, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>good = getattr(s, '<STR_LIT>')<EOL><DEDENT>if strict:<EOL><INDENT>if increasing:<EOL><INDENT>good = good & (s.to_series().diff().dropna() > <NUM_LIT:0>).all()<EOL><DEDENT>elif increasing is None:<EOL><INDENT>good = good & ((s.to_series().diff().dropna() > <NUM_LIT:0>).all() |<EOL>(s.to_series().diff().dropna() < <NUM_LIT:0>).all())<EOL><DEDENT>else:<EOL><INDENT>good = good & (s.to_series().diff().dropna() < <NUM_LIT:0>).all()<EOL><DEDENT><DEDENT>if not good:<EOL><INDENT>raise AssertionError<EOL><DEDENT><DEDENT>return df<EOL>
|
Asserts that the DataFrame is monotonic
Parameters
==========
df : Series or DataFrame
items : dict
mapping columns to conditions (increasing, strict)
increasing : None or bool
None means either increasing or decreasing is acceptable.
strict: whether the comparison should be strict
|
f5986:m1
|
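A usage sketch for `is_monotonic`, assuming the masked attribute lookups resolve to pandas' `is_monotonic_increasing` / `is_monotonic_decreasing`, which is what the branch structure suggests:

```python
import pandas as pd

df = pd.DataFrame({'a': [1, 2, 2, 3], 'b': [4, 3, 2, 1]})

is_monotonic(df, items={'a': (True, False)})   # passes: 'a' is non-decreasing
is_monotonic(df, items={'b': (False, True)})   # passes: 'b' is strictly decreasing
# is_monotonic(df, items={'a': (True, True)})  # raises AssertionError: the 2 repeats
```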
def is_shape(df, shape):
|
assert df.shape == shape<EOL>return df<EOL>
|
Asserts that the DataFrame is of a known shape.
Parameters
==========
df: DataFrame
shape : tuple (n_rows, n_columns)
|
f5986:m2
|
def unique_index(df):
|
assert df.index.is_unique<EOL>return df<EOL>
|
Assert that the index is unique
|
f5986:m3
|
def within_set(df, items=None):
|
for k, v in items.items():<EOL><INDENT>if not df[k].isin(v).all():<EOL><INDENT>raise AssertionError<EOL><DEDENT><DEDENT>return df<EOL>
|
Assert that each specified column of df only contains values from the corresponding set in items
Parameters
==========
df : DataFrame
items : dict
mapping of columns (k) to array-like of values (v) that
``df[k]`` is expected to be a subset of
|
f5986:m4
|
def within_range(df, items=None):
|
for k, (lower, upper) in items.items():<EOL><INDENT>if (lower > df[k]).any() or (upper < df[k]).any():<EOL><INDENT>raise AssertionError<EOL><DEDENT><DEDENT>return df<EOL>
|
Assert that a DataFrame is within a range.
Parameters
==========
df : DataFrame
items : dict
mapping of columns (k) to a (low, high) tuple (v)
that ``df[k]`` is expected to be between.
|
f5986:m5
|
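Both column checks return the frame unchanged on success, so they chain naturally inside a `.pipe()` pipeline; a small sketch:

```python
import pandas as pd

df = pd.DataFrame({'A': [1, 3, 1], 'B': [0.5, 0.9, 0.1]})

(df.pipe(within_set, items={'A': {1, 3}})         # every A value is in {1, 3}
   .pipe(within_range, items={'B': (0.0, 1.0)}))  # 0.0 <= B <= 1.0 everywhere
# within_range(df, items={'B': (0.0, 0.4)})       # would raise AssertionError
```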
def none_missing():
|
def decorate(func):<EOL><INDENT>@wraps(func)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>result = func(*args, **kwargs)<EOL>ck.none_missing(result)<EOL>return result<EOL><DEDENT>return wrapper<EOL><DEDENT>return decorate<EOL>
|
Asserts that no missing values (NaN) are found
|
f5987:m0
|
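The decorator variants wrap a function and run the corresponding check (`ck.*`) on its return value; a toy example of the `none_missing` form:

```python
import numpy as np
import pandas as pd

@none_missing()
def load_clean():
    return pd.DataFrame({'x': [1.0, 2.0, 3.0]})

df = load_clean()  # check passes, frame is returned untouched

@none_missing()
def load_dirty():
    return pd.DataFrame({'x': [1.0, np.nan]})

# load_dirty()  # would raise AssertionError from ck.none_missing
```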
def within_set(items):
|
def decorate(func):<EOL><INDENT>@wraps(func)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>result = func(*args, **kwargs)<EOL>ck.within_set(result, items)<EOL>return result<EOL><DEDENT>return wrapper<EOL><DEDENT>return decorate<EOL>
|
Check that DataFrame values are within set.
@within_set({'A': {1, 3}})
def f(df):
    return df
|
f5987:m4
|
def within_range(items):
|
def decorate(func):<EOL><INDENT>@wraps(func)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>result = func(*args, **kwargs)<EOL>ck.within_range(result, items)<EOL>return result<EOL><DEDENT>return wrapper<EOL><DEDENT>return decorate<EOL>
|
Check that a DataFrame's values are within a range.
Parameters
==========
items : dict or array-like
dict maps columns to (lower, upper)
array-like checks the same (lower, upper) for each column
Raises
======
ValueError
|
f5987:m5
|
def within_n_std(n=<NUM_LIT:3>):
|
def decorate(func):<EOL><INDENT>@wraps(func)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>result = func(*args, **kwargs)<EOL>ck.within_n_std(result, n=n)<EOL>return result<EOL><DEDENT>return wrapper<EOL><DEDENT>return decorate<EOL>
|
Tests that all values are within n standard deviations
(default: 3) of their mean.
|
f5987:m6
|
def __init__(self, nodes=[], **kwargs):
|
hash_fn = kwargs.get('<STR_LIT>', None)<EOL>vnodes = kwargs.get('<STR_LIT>', None)<EOL>weight_fn = kwargs.get('<STR_LIT>', None)<EOL>if hash_fn == '<STR_LIT>':<EOL><INDENT>if vnodes is None:<EOL><INDENT>vnodes = <NUM_LIT><EOL><DEDENT>self.runtime = KetamaRing()<EOL><DEDENT>else:<EOL><INDENT>if vnodes is None:<EOL><INDENT>vnodes = <NUM_LIT><EOL><DEDENT>self.runtime = MetaRing(hash_fn)<EOL><DEDENT>self._default_vnodes = vnodes<EOL>self.hashi = self.runtime.hashi<EOL>if weight_fn and not hasattr(weight_fn, '<STR_LIT>'):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>self._weight_fn = weight_fn<EOL>if self._configure_nodes(nodes):<EOL><INDENT>self.runtime._create_ring(self.runtime._nodes.items())<EOL><DEDENT>
|
Create a new HashRing given the implementation.
:param nodes: nodes used to create the continuum (see doc for format).
:param hash_fn: use this callable function to hash keys, can be set to
'ketama' to use the ketama compatible implementation.
:param vnodes: default number of vnodes per node.
:param weight_fn: use this function to calculate the node's weight.
|
f5995:c0:m0
|
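This matches the `uhashring` HashRing API; a construction sketch (the import path and the exact conf keys are inferred from that library, since the dump masks the literals):

```python
from uhashring import HashRing

# Plain list of node names: defaults (vnodes, weight=1) apply per node.
ring = HashRing(nodes=['node1', 'node2', 'node3'])

# Dict form: an int value is shorthand for the node's weight,
# a dict value supplies a fuller per-node configuration.
ring = HashRing(nodes={
    'node1': 1,
    'node2': {'hostname': 'node2.example.com', 'port': 11211, 'weight': 2},
})

print(ring.get_node('my-key'))  # name of the node owning 'my-key'
```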
def _configure_nodes(self, nodes):
|
if isinstance(nodes, str):<EOL><INDENT>nodes = [nodes]<EOL><DEDENT>elif not isinstance(nodes, (dict, list)):<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(type(nodes)))<EOL><DEDENT>conf_changed = False<EOL>for node in nodes:<EOL><INDENT>conf = {<EOL>'<STR_LIT>': node,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': node,<EOL>'<STR_LIT:port>': None,<EOL>'<STR_LIT>': self._default_vnodes,<EOL>'<STR_LIT>': <NUM_LIT:1><EOL>}<EOL>current_conf = self.runtime._nodes.get(node, {})<EOL>nodename = node<EOL>if not current_conf:<EOL><INDENT>conf_changed = True<EOL><DEDENT>if isinstance(nodes, dict):<EOL><INDENT>node_conf = nodes[node]<EOL>if isinstance(node_conf, int):<EOL><INDENT>conf['<STR_LIT>'] = node_conf<EOL><DEDENT>elif isinstance(node_conf, dict):<EOL><INDENT>for k, v in node_conf.items():<EOL><INDENT>if k in conf:<EOL><INDENT>conf[k] = v<EOL>if k in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>if current_conf.get(k) != v:<EOL><INDENT>conf_changed = True<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(type(node_conf)))<EOL><DEDENT><DEDENT>if self._weight_fn:<EOL><INDENT>conf['<STR_LIT>'] = self._weight_fn(**conf)<EOL><DEDENT>if current_conf.get('<STR_LIT>') != conf['<STR_LIT>']:<EOL><INDENT>conf_changed = True<EOL><DEDENT>self.runtime._nodes[nodename] = conf<EOL><DEDENT>return conf_changed<EOL>
|
Parse and set up the given nodes.
:param nodes: nodes used to create the continuum (see doc for format).
|
f5995:c0:m1
|
def __delitem__(self, nodename):
|
self.runtime._remove_node(nodename)<EOL>
|
Remove the given node.
:param nodename: the node name.
|
f5995:c0:m2
|
def __getitem__(self, key):
|
return self._get(key, '<STR_LIT>')<EOL>
|
Returns the instance of the node matching the hashed key.
:param key: the key to look for.
|
f5995:c0:m3
|
def __setitem__(self, nodename, conf={'<STR_LIT>': <NUM_LIT:1>}):
|
if self._configure_nodes({nodename: conf}):<EOL><INDENT>self.runtime._create_ring([(nodename, self._nodes[nodename])])<EOL><DEDENT>
|
Add the given node with its associated configuration.
:param nodename: the node name.
:param conf: the node configuration.
|
f5995:c0:m4
|
def _get_pos(self, key):
|
p = bisect(self.runtime._keys, self.hashi(key))<EOL>if p == len(self.runtime._keys):<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>return p<EOL><DEDENT>
|
Get the index of the given key in the sorted key list.
We return the position of the nearest hash at or after the
provided key's hash, unless we reach the end of the continuum/ring,
in which case we wrap around to the 0 (beginning) index position.
:param key: the key to hash and look for.
|
f5995:c0:m5
|
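A standalone sketch of the wrap-around lookup `_get_pos` performs, with plain integers in place of real hashes:

```python
from bisect import bisect

def ring_lookup_pos(sorted_keys, key_hash):
    # bisect() gives the insertion point, i.e. the index of the first ring
    # key >= the hash; falling off the end wraps to index 0, closing the ring.
    p = bisect(sorted_keys, key_hash)
    return 0 if p == len(sorted_keys) else p

keys = [10, 50, 90]
assert ring_lookup_pos(keys, 5) == 0    # before the first key -> key 10
assert ring_lookup_pos(keys, 60) == 2   # between 50 and 90    -> key 90
assert ring_lookup_pos(keys, 95) == 0   # past the last key    -> wraps to 10
```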
def _get(self, key, what):
|
if not self.runtime._ring:<EOL><INDENT>return None<EOL><DEDENT>pos = self._get_pos(key)<EOL>if what == '<STR_LIT>':<EOL><INDENT>return pos<EOL><DEDENT>nodename = self.runtime._ring[self.runtime._keys[pos]]<EOL>if what in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT:port>', '<STR_LIT>']:<EOL><INDENT>return self.runtime._nodes[nodename][what]<EOL><DEDENT>elif what == '<STR_LIT>':<EOL><INDENT>return self.runtime._nodes[nodename]<EOL><DEDENT>elif what == '<STR_LIT>':<EOL><INDENT>return nodename<EOL><DEDENT>elif what == '<STR_LIT>':<EOL><INDENT>return (self.runtime._keys[pos], nodename)<EOL><DEDENT>
|
Generic getter magic method.
The node with the nearest but not less hash value is returned.
:param key: the key to look for.
:param what: the information to look for in, allowed values:
- instance (default): associated node instance
- nodename: node name
- pos: index of the given key in the ring
- tuple: ketama compatible (pos, name) tuple
- weight: node weight
|
f5995:c0:m6
|
def get(self, key):
|
return self._get(key, '<STR_LIT>')<EOL>
|
Returns the node object dict matching the hashed key.
:param key: the key to look for.
|
f5995:c0:m7
|
def get_instances(self):
|
return [c.get('<STR_LIT>') for c in self.runtime._nodes.values()<EOL>if c.get('<STR_LIT>')]<EOL>
|
Returns a list of the instances of all the configured nodes.
|
f5995:c0:m8
|
def get_key(self, key):
|
return self.hashi(key)<EOL>
|
Alias of ketama hashi method, returns the hash of the given key.
This method is present for hash_ring compatibility.
:param key: the key to look for.
|
f5995:c0:m9
|
def get_node(self, key):
|
return self._get(key, '<STR_LIT>')<EOL>
|
Returns the node name of the node matching the hashed key.
:param key: the key to look for.
|
f5995:c0:m10
|
def get_node_hostname(self, key):
|
return self._get(key, '<STR_LIT>')<EOL>
|
Returns the hostname of the node matching the hashed key.
:param key: the key to look for.
|
f5995:c0:m11
|
def get_node_port(self, key):
|
return self._get(key, '<STR_LIT:port>')<EOL>
|
Returns the port of the node matching the hashed key.
:param key: the key to look for.
|
f5995:c0:m12
|
def get_node_pos(self, key):
|
return self._get(key, '<STR_LIT>')<EOL>
|
Returns the index position of the node matching the hashed key.
:param key: the key to look for.
|
f5995:c0:m13
|
def get_node_weight(self, key):
|
return self._get(key, '<STR_LIT>')<EOL>
|
Returns the weight of the node matching the hashed key.
:param key: the key to look for.
|
f5995:c0:m14
|
def get_nodes(self):
|
return self.runtime._nodes.keys()<EOL>
|
Returns a list of the names of all the configured nodes.
|
f5995:c0:m15
|
def get_points(self):
|
return [(k, self.runtime._ring[k]) for k in self.runtime._keys]<EOL>
|
Returns a ketama compatible list of (position, nodename) tuples.
|
f5995:c0:m16
|
def get_server(self, key):
|
return self._get(key, '<STR_LIT>')<EOL>
|
Returns a ketama compatible (position, nodename) tuple.
:param key: the key to look for.
|
f5995:c0:m17
|
def iterate_nodes(self, key, distinct=True):
|
if not self.runtime._ring:<EOL><INDENT>yield None<EOL><DEDENT>else:<EOL><INDENT>for node in self.range(key, unique=distinct):<EOL><INDENT>yield node['<STR_LIT>']<EOL><DEDENT><DEDENT>
|
hash_ring compatibility implementation.
Given a string key, returns a generator over the nodes
that can hold the key.
The generator iterates once through the ring,
starting at the correct position.
If `distinct` is set, the nodes returned will be unique,
i.e. no virtual copies will be returned.
|
f5995:c0:m18
|
def print_continuum(self):
|
numpoints = len(self.runtime._keys)<EOL>if numpoints:<EOL><INDENT>print('<STR_LIT>'.format(numpoints))<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>for p in self.get_points():<EOL><INDENT>point, node = p<EOL>print('<STR_LIT>'.format(node, point))<EOL><DEDENT>
|
Prints a ketama compatible continuum report.
|
f5995:c0:m19
|
def range(self, key, size=None, unique=True):
|
all_nodes = set()<EOL>if unique:<EOL><INDENT>size = size or len(self.runtime._nodes)<EOL><DEDENT>else:<EOL><INDENT>all_nodes = []<EOL><DEDENT>pos = self._get_pos(key)<EOL>for key in self.runtime._keys[pos:]:<EOL><INDENT>nodename = self.runtime._ring[key]<EOL>if unique:<EOL><INDENT>if nodename in all_nodes:<EOL><INDENT>continue<EOL><DEDENT>all_nodes.add(nodename)<EOL><DEDENT>else:<EOL><INDENT>all_nodes.append(nodename)<EOL><DEDENT>yield self.runtime._nodes[nodename]<EOL>if len(all_nodes) == size:<EOL><INDENT>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for i, key in enumerate(self.runtime._keys):<EOL><INDENT>if i < pos:<EOL><INDENT>nodename = self.runtime._ring[key]<EOL>if unique:<EOL><INDENT>if nodename in all_nodes:<EOL><INDENT>continue<EOL><DEDENT>all_nodes.add(nodename)<EOL><DEDENT>else:<EOL><INDENT>all_nodes.append(nodename)<EOL><DEDENT>yield self.runtime._nodes[nodename]<EOL>if len(all_nodes) == size:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT>
|
Returns a generator of nodes' configuration available
in the continuum/ring.
:param key: the key to look for.
:param size: limit the list to at most this number of nodes.
:param unique: a node may only appear once in the list (default True).
|
f5995:c0:m20
|
def patch_memcache():
|
def _init(self, servers, *k, **kw):<EOL><INDENT>self._old_init(servers, *k, **kw)<EOL>nodes = {}<EOL>for server in self.servers:<EOL><INDENT>conf = {<EOL>'<STR_LIT>': server.ip,<EOL>'<STR_LIT>': server,<EOL>'<STR_LIT:port>': server.port,<EOL>'<STR_LIT>': server.weight<EOL>}<EOL>nodes[server.ip] = conf<EOL><DEDENT>self.uhashring = HashRing(nodes)<EOL><DEDENT>def _get_server(self, key):<EOL><INDENT>if isinstance(key, tuple):<EOL><INDENT>return self._old_get_server(key)<EOL><DEDENT>for i in range(self._SERVER_RETRIES):<EOL><INDENT>for node in self.uhashring.range(key):<EOL><INDENT>if node['<STR_LIT>'].connect():<EOL><INDENT>return node['<STR_LIT>'], key<EOL><DEDENT><DEDENT><DEDENT>return None, None<EOL><DEDENT>memcache = __import__('<STR_LIT>')<EOL>memcache.Client._old_get_server = memcache.Client._get_server<EOL>memcache.Client._old_init = memcache.Client.__init__<EOL>memcache.Client.__init__ = _init<EOL>memcache.Client._get_server = _get_server<EOL>
|
Monkey patch python-memcached to implement our consistent hashring
in its node selection and operations.
|
f5997:m0
|
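Usage sketch; the module path is an assumption (in `uhashring` this helper lives in a `monkey` module), and the patch must run before any `memcache.Client` is created:

```python
from uhashring.monkey import patch_memcache  # assumed import path

patch_memcache()

import memcache
client = memcache.Client(['127.0.0.1:11211', '127.0.0.2:11211'])
# key -> server selection now goes through the consistent HashRing,
# so adding/removing a server only remaps a fraction of the keys.
```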
def __init__(self):
|
self._distribution = Counter()<EOL>self._keys = []<EOL>self._nodes = {}<EOL>self._replicas = <NUM_LIT:4><EOL>self._ring = {}<EOL>if version_info >= (<NUM_LIT:3>, ):<EOL><INDENT>self._listbytes = lambda x: x<EOL><DEDENT>
|
Create a new HashRing.
|
f5998:c0:m0
|
def hashi(self, key, replica=<NUM_LIT:0>):
|
dh = self._listbytes(md5(str(key).encode('<STR_LIT:utf-8>')).digest())<EOL>rd = replica * <NUM_LIT:4><EOL>return (<EOL>(dh[<NUM_LIT:3> + rd] << <NUM_LIT>) | (dh[<NUM_LIT:2> + rd] << <NUM_LIT:16>) |<EOL>(dh[<NUM_LIT:1> + rd] << <NUM_LIT:8>) | dh[<NUM_LIT:0> + rd])<EOL>
|
Returns a ketama compatible hash from the given key.
|
f5998:c0:m1
|
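An unmasked sketch of `hashi`: the masked shift is presumably 24, completing a little-endian 32-bit read of four md5 digest bytes, which is the ketama construction (one 16-byte digest yields four ring points via `replica` 0..3):

```python
from hashlib import md5

def ketama_hash(key, replica=0):
    dh = md5(str(key).encode('utf-8')).digest()
    rd = replica * 4  # replica r reads digest bytes [4r, 4r + 4)
    return (dh[3 + rd] << 24) | (dh[2 + rd] << 16) | (dh[1 + rd] << 8) | dh[0 + rd]

# Four distinct 32-bit positions from a single digest:
print([ketama_hash('node1-0', replica=r) for r in range(4)])
```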
def _hashi_weight_generator(self, node_name, node_conf):
|
ks = (node_conf['<STR_LIT>'] * len(self._nodes) *<EOL>node_conf['<STR_LIT>']) // self._weight_sum<EOL>for w in range(<NUM_LIT:0>, ks):<EOL><INDENT>w_node_name = '<STR_LIT>' % (node_name, w)<EOL>for i in range(<NUM_LIT:0>, self._replicas):<EOL><INDENT>yield self.hashi(w_node_name, replica=i)<EOL><DEDENT><DEDENT>
|
Calculate the weight factor of the given node and
yield its hash key for every configured replica.
:param node_name: the node name.
|
f5998:c0:m2
|
@staticmethod<EOL><INDENT>def _listbytes(data):<DEDENT>
|
return map(ord, data)<EOL>
|
Python 2 compatible int iterator from str.
:param data: the string to int iterate upon.
|
f5998:c0:m3
|
def _create_ring(self, nodes):
|
_weight_sum = <NUM_LIT:0><EOL>for node_conf in self._nodes.values():<EOL><INDENT>_weight_sum += node_conf['<STR_LIT>']<EOL><DEDENT>self._weight_sum = _weight_sum<EOL>_distribution = Counter()<EOL>_keys = []<EOL>_ring = {}<EOL>for node_name, node_conf in self._nodes.items():<EOL><INDENT>for h in self._hashi_weight_generator(node_name, node_conf):<EOL><INDENT>_ring[h] = node_name<EOL>insort(_keys, h)<EOL>_distribution[node_name] += <NUM_LIT:1><EOL><DEDENT><DEDENT>self._distribution = _distribution<EOL>self._keys = _keys<EOL>self._ring = _ring<EOL>
|
Generate a ketama compatible continuum/ring.
|
f5998:c0:m4
|
def _remove_node(self, node_name):
|
try:<EOL><INDENT>self._nodes.pop(node_name)<EOL><DEDENT>except Exception:<EOL><INDENT>raise KeyError('<STR_LIT>'.format(<EOL>node_name, self._nodes.keys()))<EOL><DEDENT>else:<EOL><INDENT>self._create_ring(self._nodes)<EOL><DEDENT>
|
Remove the given node from the continuum/ring.
:param node_name: the node name.
|
f5998:c0:m5
|
def __init__(self, hash_fn):
|
self._distribution = Counter()<EOL>self._keys = []<EOL>self._nodes = {}<EOL>self._ring = {}<EOL>if hash_fn and not hasattr(hash_fn, '<STR_LIT>'):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>self._hash_fn = hash_fn or (<EOL>lambda key: int(md5(str(key).encode('<STR_LIT:utf-8>')).hexdigest(), <NUM_LIT:16>))<EOL>
|
Create a new HashRing.
:param hash_fn: use this callable function to hash keys.
|
f5999:c0:m0
|
def hashi(self, key):
|
return self._hash_fn(key)<EOL>
|
Returns an integer hash of the given key (derived from its md5 digest by default, or from the custom hash_fn if one was given).
|
f5999:c0:m1
|
def _create_ring(self, nodes):
|
for node_name, node_conf in nodes:<EOL><INDENT>for w in range(<NUM_LIT:0>, node_conf['<STR_LIT>'] * node_conf['<STR_LIT>']):<EOL><INDENT>self._distribution[node_name] += <NUM_LIT:1><EOL>self._ring[self.hashi('<STR_LIT>' % (node_name, w))] = node_name<EOL><DEDENT><DEDENT>self._keys = sorted(self._ring.keys())<EOL>
|
Generate a ketama compatible continuum/ring.
|
f5999:c0:m2
|
def _remove_node(self, node_name):
|
try:<EOL><INDENT>node_conf = self._nodes.pop(node_name)<EOL><DEDENT>except Exception:<EOL><INDENT>raise KeyError('<STR_LIT>'.format(<EOL>node_name, self._nodes.keys()))<EOL><DEDENT>else:<EOL><INDENT>self._distribution.pop(node_name)<EOL>for w in range(<NUM_LIT:0>, node_conf['<STR_LIT>'] * node_conf['<STR_LIT>']):<EOL><INDENT>del self._ring[self.hashi('<STR_LIT>' % (node_name, w))]<EOL><DEDENT>self._keys = sorted(self._ring.keys())<EOL><DEDENT>
|
Remove the given node from the continuum/ring.
:param node_name: the node name.
|
f5999:c0:m3
|
def lex(args):
|
if len(args) == <NUM_LIT:0> or args[<NUM_LIT:0>] == SHOW:<EOL><INDENT>return [(SHOW, None)]<EOL><DEDENT>elif args[<NUM_LIT:0>] == LOG:<EOL><INDENT>return [(LOG, None)]<EOL><DEDENT>elif args[<NUM_LIT:0>] == ECHO:<EOL><INDENT>return [(ECHO, None)]<EOL><DEDENT>elif args[<NUM_LIT:0>] == SET and args[<NUM_LIT:1>] == RATE:<EOL><INDENT>return tokenizeSetRate(args[<NUM_LIT:2>:])<EOL><DEDENT>elif args[<NUM_LIT:0>] == SET and args[<NUM_LIT:1>] == DAYS:<EOL><INDENT>return tokenizeSetDays(args[<NUM_LIT:2>:])<EOL><DEDENT>elif args[<NUM_LIT:0>] == TAKE:<EOL><INDENT>return tokenizeTake(args[<NUM_LIT:1>:])<EOL><DEDENT>elif args[<NUM_LIT:0>] == CANCEL:<EOL><INDENT>return tokenizeCancel(args[<NUM_LIT:1>:])<EOL><DEDENT>elif isMonth(args[<NUM_LIT:0>]):<EOL><INDENT>return tokenizeTake(args)<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>'.format('<STR_LIT:U+0020>'.join(args)))<EOL>return []<EOL><DEDENT>
|
Lex input and return a list of actions to perform.
|
f6007:m0
|
def isMonth(arg):
|
month = arg[:<NUM_LIT:3>].capitalize()<EOL>return month in calendar.month_abbr<EOL>
|
Determine if arg is a calendar month, e.g. 'Jan' returns True
|
f6007:m5
|
def execute(tokens):
|
if not validate_rc():<EOL><INDENT>print('<STR_LIT>')<EOL>echo_vacation_rc()<EOL>return<EOL><DEDENT>for action, value in tokens:<EOL><INDENT>if action == '<STR_LIT>':<EOL><INDENT>show()<EOL><DEDENT>elif action == '<STR_LIT>':<EOL><INDENT>log_vacation_days()<EOL><DEDENT>elif action == '<STR_LIT>':<EOL><INDENT>echo_vacation_rc()<EOL><DEDENT>elif action == '<STR_LIT>':<EOL><INDENT>take(value)<EOL><DEDENT>elif action == '<STR_LIT>':<EOL><INDENT>cancel(value)<EOL><DEDENT>elif action == '<STR_LIT>':<EOL><INDENT>setrate(value)<EOL><DEDENT>elif action == '<STR_LIT>':<EOL><INDENT>setdays(value)<EOL><DEDENT><DEDENT>
|
Perform the actions described by the input tokens.
|
f6008:m0
|
def unique(transactions):
|
seen = set()<EOL>return [x for x in transactions if not (x in seen or seen.add(x))]<EOL>
|
Remove any duplicate entries.
|
f6008:m6
|
def sort(transactions):
|
return sorted(transactions, key=lambda x: datetime.datetime.strptime(x.split('<STR_LIT::>')[<NUM_LIT:0>], '<STR_LIT>'))<EOL>
|
Return a list of transactions sorted by date.
|
f6008:m7
|
def validate_rc():
|
transactions = rc.read()<EOL>if not transactions:<EOL><INDENT>print('<STR_LIT>')<EOL>return False<EOL><DEDENT>transactions = sort(unique(transactions))<EOL>return validate_setup(transactions)<EOL>
|
Before we execute any actions, let's validate our .vacationrc.
|
f6008:m8
|
def validate_setup(transactions):
|
if not transactions:<EOL><INDENT>return True<EOL><DEDENT>try:<EOL><INDENT>first, second = transactions[:<NUM_LIT:2>]<EOL><DEDENT>except ValueError:<EOL><INDENT>print('<STR_LIT>')<EOL>return False<EOL><DEDENT>parts1, parts2 = first.split(), second.split()<EOL>if parts1[<NUM_LIT:0>] != parts2[<NUM_LIT:0>]:<EOL><INDENT>print('<STR_LIT>')<EOL>return False <EOL><DEDENT>if '<STR_LIT>' not in (parts1[<NUM_LIT:1>], parts2[<NUM_LIT:1>]) or '<STR_LIT>' not in (parts1[<NUM_LIT:1>], parts2[<NUM_LIT:1>]):<EOL><INDENT>print('<STR_LIT>')<EOL>return False<EOL><DEDENT>return True<EOL>
|
First two transactions must set rate & days.
|
f6008:m9
|
def _parse_transaction_entry(entry):
|
parts = entry.split()<EOL>date_string = parts[<NUM_LIT:0>]<EOL>try:<EOL><INDENT>date = datetime.datetime.strptime(date_string[:-<NUM_LIT:1>], '<STR_LIT>').date()<EOL><DEDENT>except ValueError:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(entry))<EOL><DEDENT>if len(parts) < <NUM_LIT:2>:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(entry))<EOL><DEDENT>action = parts[<NUM_LIT:1>].lower()<EOL>if action not in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>raise ValueError('<STR_LIT>'.format(entry))<EOL><DEDENT>try:<EOL><INDENT>value = float(parts[<NUM_LIT:2>])<EOL><DEDENT>except IndexError:<EOL><INDENT>value = None<EOL><DEDENT>except (ValueError, TypeError):<EOL><INDENT>raise ValueError('<STR_LIT>'.format(entry))<EOL><DEDENT>return (date, action, value)<EOL>
|
Validate & parse a transaction into (date, action, value) tuple.
|
f6008:m10
|
def stat_holidays(province='<STR_LIT>', year=<NUM_LIT>):
|
return holidays.Canada(state=province, years=year).keys()<EOL>
|
Returns a list of holiday dates for a province and year.
|
f6008:m11
|
def sum_transactions(transactions):
|
workdays_per_year = <NUM_LIT><EOL>previous_date = None<EOL>rate = <NUM_LIT:0><EOL>day_sum = <NUM_LIT:0><EOL>for transaction in transactions:<EOL><INDENT>date, action, value = _parse_transaction_entry(transaction)<EOL>if previous_date is None:<EOL><INDENT>previous_date = date<EOL><DEDENT>elapsed = workdays.networkdays(previous_date, date, stat_holidays()) - <NUM_LIT:1><EOL>if action == '<STR_LIT>':<EOL><INDENT>rate = float(value) / workdays_per_year<EOL><DEDENT>elif action == '<STR_LIT>':<EOL><INDENT>elapsed -= <NUM_LIT:1> <EOL>day_sum -= <NUM_LIT:1> <EOL><DEDENT>day_sum += rate * elapsed<EOL>if action == '<STR_LIT>':<EOL><INDENT>day_sum = value <EOL><DEDENT>previous_date = date<EOL><DEDENT>return day_sum<EOL>
|
Sums transactions into a total of remaining vacation days.
|
f6008:m12
|
def get_days_off(transactions):
|
days_off = []<EOL>for trans in transactions:<EOL><INDENT>date, action, _ = _parse_transaction_entry(trans)<EOL>if action == '<STR_LIT>':<EOL><INDENT>days_off.append(date)<EOL><DEDENT><DEDENT>return days_off<EOL>
|
Return the dates for any 'take day off' transactions.
|
f6008:m13
|
def log_vacation_days():
|
days_off = get_days_off(rc.read())<EOL>pretty_days = map(lambda day: day.strftime('<STR_LIT>'), days_off)<EOL>for day in pretty_days:<EOL><INDENT>print(day)<EOL><DEDENT>
|
Report the dates of all taken days off.
|
f6008:m14
|
def echo_vacation_rc():
|
contents = rc.read()<EOL>print('<STR_LIT>')<EOL>for line in contents:<EOL><INDENT>print(line.rstrip())<EOL><DEDENT>
|
Display the entire .vacationrc file.
|
f6008:m15
|
def get_rc_path():
|
return os.path.join(os.path.expanduser('<STR_LIT>'), '<STR_LIT>')<EOL>
|
Return the full .vacationrc path for convenience.
|
f6010:m0
|
def touch():
|
if not os.path.isfile(get_rc_path()):<EOL><INDENT>open(get_rc_path(), '<STR_LIT:a>').close()<EOL>print('<STR_LIT>'.format(get_rc_path()))<EOL><DEDENT>
|
Create a .vacationrc file if none exists.
|
f6010:m1
|
def read():
|
try:<EOL><INDENT>with open(get_rc_path(), '<STR_LIT:r>') as rc:<EOL><INDENT>return rc.readlines()<EOL><DEDENT><DEDENT>except IOError:<EOL><INDENT>print('<STR_LIT>')<EOL>return []<EOL><DEDENT>
|
Read file and return entries as a list.
|
f6010:m2
|
def write(entries):
|
try:<EOL><INDENT>with open(get_rc_path(), '<STR_LIT:w>') as rc:<EOL><INDENT>rc.writelines(entries)<EOL><DEDENT><DEDENT>except IOError:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>
|
Write an entire rc file.
|
f6010:m3
|
def validate():
|
transactions = read()<EOL>if not transactions:<EOL><INDENT>print('<STR_LIT>')<EOL>return False<EOL><DEDENT>return validate_setup(transactions)<EOL>
|
Before we execute any actions, let's validate our .vacationrc.
|
f6010:m4
|
def append(entry):
|
if not entry:<EOL><INDENT>return<EOL><DEDENT>try:<EOL><INDENT>with open(get_rc_path(), '<STR_LIT:a>') as f:<EOL><INDENT>if isinstance(entry, list):<EOL><INDENT>f.writelines(entry)<EOL><DEDENT>else:<EOL><INDENT>f.write(entry + '<STR_LIT:\n>')<EOL><DEDENT><DEDENT><DEDENT>except IOError:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>
|
Append either a list of strings or a string to our file.
|
f6010:m5
|
def delete(bad_entry):
|
entries = read()<EOL>kept_entries = [x for x in entries if x.rstrip() != bad_entry]<EOL>write(kept_entries)<EOL>
|
Removes an entry from the rc file.
|
f6010:m6
|
def readfiles():
|
tests = list(filter(lambda x: x.endswith('<STR_LIT>'), os.listdir(TESTPATH)))<EOL>tests.sort()<EOL>files = []<EOL>for test in tests:<EOL><INDENT>text = open(TESTPATH + test, '<STR_LIT:r>').read()<EOL>try:<EOL><INDENT>class_, desc, cause, workaround, code = [x.rstrip() for x in list(filter(None, re.split(SPLIT, text)))]<EOL>output = Output(test, class_, desc, cause, workaround, code, '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>files.append(output)<EOL><DEDENT>except IndexError:<EOL><INDENT>print('<STR_LIT>' + TESTPATH + test)<EOL><DEDENT><DEDENT>return files<EOL>
|
Reads test files
|
f6018:m0
|
def uimports(code):
|
for uimport in UIMPORTLIST:<EOL><INDENT>uimport = bytes(uimport, '<STR_LIT:utf8>')<EOL>code = code.replace(uimport, b'<STR_LIT:u>' + uimport)<EOL><DEDENT>return code<EOL>
|
converts CPython module names into MicroPython equivalents
|
f6018:m1
|
def indent(block, spaces):
|
new_block = '<STR_LIT>'<EOL>for line in block.split('<STR_LIT:\n>'):<EOL><INDENT>new_block += spaces + line + '<STR_LIT:\n>'<EOL><DEDENT>return new_block<EOL>
|
indents paragraphs of text for rst formatting
|
f6018:m3
|
def gen_table(contents):
|
xlengths = []<EOL>ylengths = []<EOL>for column in contents:<EOL><INDENT>col_len = <NUM_LIT:0><EOL>for entry in column:<EOL><INDENT>lines = entry.split('<STR_LIT:\n>')<EOL>for line in lines:<EOL><INDENT>col_len = max(len(line) + <NUM_LIT:2>, col_len)<EOL><DEDENT><DEDENT>xlengths.append(col_len)<EOL><DEDENT>for i in range(len(contents[<NUM_LIT:0>])):<EOL><INDENT>ymax = <NUM_LIT:0><EOL>for j in range(len(contents)):<EOL><INDENT>ymax = max(ymax, len(contents[j][i].split('<STR_LIT:\n>')))<EOL><DEDENT>ylengths.append(ymax)<EOL><DEDENT>table_divider = '<STR_LIT:+>' + '<STR_LIT>'.join(['<STR_LIT:->' * i + '<STR_LIT:+>' for i in xlengths]) + '<STR_LIT:\n>'<EOL>table = table_divider<EOL>for i in range(len(ylengths)):<EOL><INDENT>row = [column[i] for column in contents]<EOL>row = [entry + '<STR_LIT:\n>' * (ylengths[i]-len(entry.split('<STR_LIT:\n>'))) for entry in row]<EOL>row = [entry.split('<STR_LIT:\n>') for entry in row]<EOL>for j in range(ylengths[i]):<EOL><INDENT>k = <NUM_LIT:0><EOL>for entry in row:<EOL><INDENT>width = xlengths[k]<EOL>table += '<STR_LIT>'.join(['<STR_LIT>'.format(entry[j], width - <NUM_LIT:1>)])<EOL>k += <NUM_LIT:1><EOL><DEDENT>table += '<STR_LIT>'<EOL><DEDENT>table += table_divider<EOL><DEDENT>return table + '<STR_LIT:\n>'<EOL>
|
creates a table given any set of columns
|
f6018:m4
|
def gen_rst(results):
|
<EOL>try:<EOL><INDENT>os.mkdir(DOCPATH)<EOL><DEDENT>except OSError as e:<EOL><INDENT>if e.args[<NUM_LIT:0>] != errno.EEXIST and e.args[<NUM_LIT:0>] != errno.EISDIR:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>toctree = []<EOL>class_ = []<EOL>for output in results:<EOL><INDENT>section = output.class_.split('<STR_LIT:U+002C>')<EOL>for i in range(len(section)):<EOL><INDENT>section[i] = section[i].rstrip()<EOL>if section[i] in CLASSMAP:<EOL><INDENT>section[i] = CLASSMAP[section[i]]<EOL><DEDENT>if i >= len(class_) or section[i] != class_[i]:<EOL><INDENT>if i == <NUM_LIT:0>:<EOL><INDENT>filename = section[i].replace('<STR_LIT:U+0020>', '<STR_LIT:_>').lower()<EOL>rst = open(DOCPATH + filename + '<STR_LIT>', '<STR_LIT:w>')<EOL>rst.write(HEADER)<EOL>rst.write(section[i] + '<STR_LIT:\n>')<EOL>rst.write(RSTCHARS[<NUM_LIT:0>] * len(section[i]))<EOL>rst.write(time.strftime("<STR_LIT>", time.gmtime()))<EOL>toctree.append(filename)<EOL><DEDENT>else:<EOL><INDENT>rst.write(section[i] + '<STR_LIT:\n>')<EOL>rst.write(RSTCHARS[min(i, len(RSTCHARS)-<NUM_LIT:1>)] * len(section[i]))<EOL>rst.write('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>class_ = section<EOL>rst.write('<STR_LIT>' % output.name.rsplit('<STR_LIT:.>', <NUM_LIT:1>)[<NUM_LIT:0>])<EOL>rst.write(output.desc + '<STR_LIT:\n>')<EOL>rst.write('<STR_LIT>' * len(output.desc) + '<STR_LIT>')<EOL>if output.cause != '<STR_LIT>':<EOL><INDENT>rst.write('<STR_LIT>' + output.cause + '<STR_LIT>')<EOL><DEDENT>if output.workaround != '<STR_LIT>':<EOL><INDENT>rst.write('<STR_LIT>' + output.workaround + '<STR_LIT>')<EOL><DEDENT>rst.write('<STR_LIT>' + indent(output.code, TAB) + '<STR_LIT:\n>')<EOL>output_cpy = indent('<STR_LIT>'.join(output.output_cpy[<NUM_LIT:0>:<NUM_LIT:2>]), TAB).rstrip()<EOL>output_cpy = ('<STR_LIT>' if output_cpy != '<STR_LIT>' else '<STR_LIT>') + output_cpy<EOL>output_upy = indent('<STR_LIT>'.join(output.output_upy[<NUM_LIT:0>:<NUM_LIT:2>]), TAB).rstrip()<EOL>output_upy = ('<STR_LIT>' if output_upy != '<STR_LIT>' else '<STR_LIT>') + output_upy<EOL>table = gen_table([['<STR_LIT>', output_cpy], ['<STR_LIT>', output_upy]])<EOL>rst.write(table)<EOL><DEDENT>template = open(INDEXTEMPLATE, '<STR_LIT:r>')<EOL>index = open(DOCPATH + INDEX, '<STR_LIT:w>')<EOL>index.write(HEADER)<EOL>index.write(template.read())<EOL>for section in INDEXPRIORITY:<EOL><INDENT>if section in toctree:<EOL><INDENT>index.write(indent(section + '<STR_LIT>', TAB))<EOL>toctree.remove(section)<EOL><DEDENT><DEDENT>for section in toctree:<EOL><INDENT>index.write(indent(section + '<STR_LIT>', TAB))<EOL><DEDENT>
|
creates restructured text documents to display tests
|
f6018:m5
|
def main():
|
<EOL>os.environ['<STR_LIT>'] = TESTPATH<EOL>os.environ['<STR_LIT>'] = TESTPATH<EOL>files = readfiles()<EOL>results = run_tests(files)<EOL>gen_rst(results)<EOL>
|
Main function
|
f6018:m6
|
def init():
|
global __dev, __cfg_descr<EOL>devices = get_dfu_devices(idVendor=__VID, idProduct=__PID)<EOL>if not devices:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if len(devices) > <NUM_LIT:1>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>__dev = devices[<NUM_LIT:0>]<EOL>__dev.set_configuration()<EOL>usb.util.claim_interface(__dev, __DFU_INTERFACE)<EOL>__cfg_descr = None<EOL>for cfg in __dev.configurations():<EOL><INDENT>__cfg_descr = find_dfu_cfg_descr(cfg.extra_descriptors)<EOL>if __cfg_descr:<EOL><INDENT>break<EOL><DEDENT>for itf in cfg.interfaces():<EOL><INDENT>__cfg_descr = find_dfu_cfg_descr(itf.extra_descriptors)<EOL>if __cfg_descr:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>for attempt in range(<NUM_LIT:4>):<EOL><INDENT>status = get_status()<EOL>if status == __DFU_STATE_DFU_IDLE:<EOL><INDENT>break<EOL><DEDENT>elif (status == __DFU_STATE_DFU_DOWNLOAD_IDLE<EOL>or status == __DFU_STATE_DFU_UPLOAD_IDLE):<EOL><INDENT>abort_request()<EOL><DEDENT>else:<EOL><INDENT>clr_status()<EOL><DEDENT><DEDENT>
|
Initializes the found DFU device so that we can program it.
|
f6019:m1
|
def abort_request():
|
__dev.ctrl_transfer(<NUM_LIT>, __DFU_ABORT, <NUM_LIT:0>, __DFU_INTERFACE, None, __TIMEOUT)<EOL>
|
Sends an abort request.
|
f6019:m2
|
def clr_status():
|
__dev.ctrl_transfer(<NUM_LIT>, __DFU_CLRSTATUS, <NUM_LIT:0>, __DFU_INTERFACE,<EOL>None, __TIMEOUT)<EOL>
|
Clears any error status (perhaps left over from a previous session).
|
f6019:m3
|
def get_status():
|
stat = __dev.ctrl_transfer(<NUM_LIT>, __DFU_GETSTATUS, <NUM_LIT:0>, __DFU_INTERFACE,<EOL><NUM_LIT:6>, <NUM_LIT>)<EOL>return stat[<NUM_LIT:4>]<EOL>
|
Get the status of the last operation.
|
f6019:m4
|
def mass_erase():
|
<EOL>__dev.ctrl_transfer(<NUM_LIT>, __DFU_DNLOAD, <NUM_LIT:0>, __DFU_INTERFACE,<EOL>"<STR_LIT>", __TIMEOUT)<EOL>if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY:<EOL><INDENT>raise Exception("<STR_LIT>")<EOL><DEDENT>if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE:<EOL><INDENT>raise Exception("<STR_LIT>")<EOL><DEDENT>
|
Performs a MASS erase (i.e. erases the entire device).
|
f6019:m5
|
def page_erase(addr):
|
if __verbose:<EOL><INDENT>print("<STR_LIT>" % (addr))<EOL><DEDENT>buf = struct.pack("<STR_LIT>", <NUM_LIT>, addr)<EOL>__dev.ctrl_transfer(<NUM_LIT>, __DFU_DNLOAD, <NUM_LIT:0>, __DFU_INTERFACE, buf, __TIMEOUT)<EOL>if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY:<EOL><INDENT>raise Exception("<STR_LIT>")<EOL><DEDENT>if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE:<EOL><INDENT>raise Exception("<STR_LIT>")<EOL><DEDENT>
|
Erases a single page.
|
f6019:m6
|
def set_address(addr):
|
<EOL>buf = struct.pack("<STR_LIT>", <NUM_LIT>, addr)<EOL>__dev.ctrl_transfer(<NUM_LIT>, __DFU_DNLOAD, <NUM_LIT:0>, __DFU_INTERFACE, buf, __TIMEOUT)<EOL>if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY:<EOL><INDENT>raise Exception("<STR_LIT>")<EOL><DEDENT>if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE:<EOL><INDENT>raise Exception("<STR_LIT>")<EOL><DEDENT>
|
Sets the address for the next operation.
|
f6019:m7
|
def write_memory(addr, buf, progress=None, progress_addr=<NUM_LIT:0>, progress_size=<NUM_LIT:0>):
|
xfer_count = <NUM_LIT:0><EOL>xfer_bytes = <NUM_LIT:0><EOL>xfer_total = len(buf)<EOL>xfer_base = addr<EOL>while xfer_bytes < xfer_total:<EOL><INDENT>if __verbose and xfer_count % <NUM_LIT> == <NUM_LIT:0>:<EOL><INDENT>print ("<STR_LIT>" % (xfer_base + xfer_bytes,<EOL>xfer_bytes // <NUM_LIT>,<EOL>xfer_total // <NUM_LIT>))<EOL><DEDENT>if progress and xfer_count % <NUM_LIT:2> == <NUM_LIT:0>:<EOL><INDENT>progress(progress_addr, xfer_base + xfer_bytes - progress_addr,<EOL>progress_size)<EOL><DEDENT>set_address(xfer_base+xfer_bytes)<EOL>chunk = min(__cfg_descr.wTransferSize, xfer_total-xfer_bytes)<EOL>__dev.ctrl_transfer(<NUM_LIT>, __DFU_DNLOAD, <NUM_LIT:2>, __DFU_INTERFACE,<EOL>buf[xfer_bytes:xfer_bytes + chunk], __TIMEOUT)<EOL>if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY:<EOL><INDENT>raise Exception("<STR_LIT>")<EOL><DEDENT>if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE:<EOL><INDENT>raise Exception("<STR_LIT>")<EOL><DEDENT>xfer_count += <NUM_LIT:1><EOL>xfer_bytes += chunk<EOL><DEDENT>
|
Writes a buffer into memory. This routine assumes that memory has
already been erased.
|
f6019:m8
|
def write_page(buf, xfer_offset):
|
xfer_base = <NUM_LIT><EOL>set_address(xfer_base+xfer_offset)<EOL>__dev.ctrl_transfer(<NUM_LIT>, __DFU_DNLOAD, <NUM_LIT:2>, __DFU_INTERFACE, buf, __TIMEOUT)<EOL>if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY:<EOL><INDENT>raise Exception("<STR_LIT>")<EOL><DEDENT>if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE:<EOL><INDENT>raise Exception("<STR_LIT>")<EOL><DEDENT>if __verbose:<EOL><INDENT>print ("<STR_LIT>" % (xfer_base + xfer_offset))<EOL><DEDENT>
|
Writes a single page. This routine assumes that memory has already
been erased.
|
f6019:m9
|
def exit_dfu():
|
<EOL>set_address(<NUM_LIT>)<EOL>__dev.ctrl_transfer(<NUM_LIT>, __DFU_DNLOAD, <NUM_LIT:0>, __DFU_INTERFACE,<EOL>None, __TIMEOUT)<EOL>try:<EOL><INDENT>if get_status() != __DFU_STATE_DFU_MANIFEST:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT>usb.util.dispose_resources(__dev)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>
|
Exit DFU mode, and start running the program.
|
f6019:m10
|
def named(values, names):
|
return dict(zip(names.split(), values))<EOL>
|
Creates a dict with `names` as fields, and `values` as values.
|
f6019:m11
|
def consume(fmt, data, names):
|
size = struct.calcsize(fmt)<EOL>return named(struct.unpack(fmt, data[:size]), names), data[size:]<EOL>
|
Parses the struct defined by `fmt` from `data`, stores the parsed fields
into a dict keyed by `names`. Returns the dict, and the data
with the struct stripped off.
|
f6019:m12
|
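A usage sketch for `named` and `consume` together; the format string and field names here are illustrative, not the ones masked in `read_dfu_file`:

```python
import struct

data = struct.pack('<5sBI', b'DfuSe', 1, 8) + b'rest'

# Peel one struct off the front; the remainder is returned for further parsing.
prefix, data = consume('<5sBI', data, 'signature version size')
print(prefix)  # {'signature': b'DfuSe', 'version': 1, 'size': 8}
print(data)    # b'rest'
```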
def cstring(string):
|
return string.decode('<STR_LIT:utf-8>').split('<STR_LIT>', <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>
|
Extracts a null-terminated string from a byte array.
|
f6019:m13
|
def compute_crc(data):
|
return <NUM_LIT> & -zlib.crc32(data) - <NUM_LIT:1><EOL>
|
Computes the CRC32 value for the data passed in.
|
f6019:m14
|
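Assuming the masked literal is `0xFFFFFFFF`, the expression is the 32-bit complement of the standard CRC32 (since `-x - 1 == ~x` for Python ints), which is the checksum a DFU suffix carries; an equivalent sketch:

```python
import zlib

def dfu_crc(data):
    # ~crc32 masked to 32 bits; identical to 0xFFFFFFFF & (-zlib.crc32(data) - 1)
    return 0xFFFFFFFF & ~zlib.crc32(data)

assert dfu_crc(b'hello') == 0xFFFFFFFF & (-zlib.crc32(b'hello') - 1)
```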
def read_dfu_file(filename):
|
print("<STR_LIT>".format(filename))<EOL>with open(filename, '<STR_LIT:rb>') as fin:<EOL><INDENT>data = fin.read()<EOL><DEDENT>crc = compute_crc(data[:-<NUM_LIT:4>])<EOL>elements = []<EOL>dfu_prefix, data = consume('<STR_LIT>', data,<EOL>'<STR_LIT>')<EOL>print ("<STR_LIT>"<EOL>"<STR_LIT>" % dfu_prefix)<EOL>for target_idx in range(dfu_prefix['<STR_LIT>']):<EOL><INDENT>img_prefix, data = consume('<STR_LIT>', data,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>img_prefix['<STR_LIT>'] = target_idx<EOL>if img_prefix['<STR_LIT>']:<EOL><INDENT>img_prefix['<STR_LIT:name>'] = cstring(img_prefix['<STR_LIT:name>'])<EOL><DEDENT>else:<EOL><INDENT>img_prefix['<STR_LIT:name>'] = '<STR_LIT>'<EOL><DEDENT>print('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% img_prefix)<EOL>target_size = img_prefix['<STR_LIT:size>']<EOL>target_data, data = data[:target_size], data[target_size:]<EOL>for elem_idx in range(img_prefix['<STR_LIT>']):<EOL><INDENT>elem_prefix, target_data = consume('<STR_LIT>', target_data, '<STR_LIT>')<EOL>elem_prefix['<STR_LIT>'] = elem_idx<EOL>print('<STR_LIT>'<EOL>% elem_prefix)<EOL>elem_size = elem_prefix['<STR_LIT:size>']<EOL>elem_data = target_data[:elem_size]<EOL>target_data = target_data[elem_size:]<EOL>elem_prefix['<STR_LIT:data>'] = elem_data<EOL>elements.append(elem_prefix)<EOL><DEDENT>if len(target_data):<EOL><INDENT>print("<STR_LIT>" % target_idx)<EOL><DEDENT><DEDENT>dfu_suffix = named(struct.unpack('<STR_LIT>', data[:<NUM_LIT:16>]),<EOL>'<STR_LIT>')<EOL>print ('<STR_LIT>'<EOL>'<STR_LIT>' % dfu_suffix)<EOL>if crc != dfu_suffix['<STR_LIT>']:<EOL><INDENT>print("<STR_LIT>" % crc)<EOL>return<EOL><DEDENT>data = data[<NUM_LIT:16>:]<EOL>if data:<EOL><INDENT>print("<STR_LIT>")<EOL>return<EOL><DEDENT>return elements<EOL>
|
Reads a DFU file, and parses the individual elements from the file.
Returns an array of elements. Each element is a dictionary with the
following keys:
num - The element index
address - The address that the element data should be written to.
size - The size of the element data.
data - The element data.
If an error occurs while parsing the file, then None is returned.
|
f6019:m15
|
def get_dfu_devices(*args, **kwargs):
|
<EOL>return list(usb.core.find(*args, find_all=True,<EOL>custom_match=FilterDFU(), **kwargs))<EOL>
|
Returns a list of USB devices which are currently in DFU mode.
Additional filters (like idProduct and idVendor) can be passed in to
refine the search.
|
f6019:m16
|
def get_memory_layout(device):
|
cfg = device[<NUM_LIT:0>]<EOL>intf = cfg[(<NUM_LIT:0>, <NUM_LIT:0>)]<EOL>mem_layout_str = get_string(device, intf.iInterface)<EOL>mem_layout = mem_layout_str.split('<STR_LIT:/>')<EOL>result = []<EOL>for mem_layout_index in range(<NUM_LIT:1>, len(mem_layout), <NUM_LIT:2>):<EOL><INDENT>addr = int(mem_layout[mem_layout_index], <NUM_LIT:0>)<EOL>segments = mem_layout[mem_layout_index + <NUM_LIT:1>].split('<STR_LIT:U+002C>')<EOL>seg_re = re.compile(r'<STR_LIT>')<EOL>for segment in segments:<EOL><INDENT>seg_match = seg_re.match(segment)<EOL>num_pages = int(seg_match.groups()[<NUM_LIT:0>], <NUM_LIT:10>)<EOL>page_size = int(seg_match.groups()[<NUM_LIT:1>], <NUM_LIT:10>)<EOL>multiplier = seg_match.groups()[<NUM_LIT:2>]<EOL>if multiplier == '<STR_LIT>':<EOL><INDENT>page_size *= <NUM_LIT><EOL><DEDENT>if multiplier == '<STR_LIT:M>':<EOL><INDENT>page_size *= <NUM_LIT> * <NUM_LIT><EOL><DEDENT>size = num_pages * page_size<EOL>last_addr = addr + size - <NUM_LIT:1><EOL>result.append(named((addr, last_addr, size, num_pages, page_size),<EOL>"<STR_LIT>"))<EOL>addr += size<EOL><DEDENT><DEDENT>return result<EOL>
|
Returns an array which identifies the memory layout. Each entry
of the array will contain a dictionary with the following keys:
addr - Address of this memory segment
last_addr - Last address contained within the memory segment.
size - size of the segment, in bytes
num_pages - number of pages in the segment
page_size - size of each page, in bytes
|
f6019:m17
|
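A self-contained sketch of the parse `get_memory_layout` performs on a typical STM32 DFU interface string; the regex and the multiplier letters are reconstructions of the masked literals, following the usual `name/addr/pages*sizeK...` layout format:

```python
import re

mem_layout_str = '@Internal Flash  /0x08000000/04*016Kg,01*064Kg,07*128Kg'
_, addr_str, segs = mem_layout_str.split('/')
addr = int(addr_str, 0)                     # 0x08000000

seg_re = re.compile(r'(\d+)\*(\d+)(.)(.)')  # pages * size, multiplier, access
for segment in segs.split(','):
    pages, size, mult, _access = seg_re.match(segment).groups()
    pages, size = int(pages, 10), int(size, 10)
    size *= {'K': 1024, 'M': 1024 * 1024}.get(mult, 1)
    print(hex(addr), pages, 'pages of', size, 'bytes')
    addr += pages * size
```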
def list_dfu_devices(*args, **kwargs):
|
devices = get_dfu_devices(*args, **kwargs)<EOL>if not devices:<EOL><INDENT>print("<STR_LIT>")<EOL>return<EOL><DEDENT>for device in devices:<EOL><INDENT>print("<STR_LIT>"<EOL>.format(device.bus, device.address,<EOL>device.idVendor, device.idProduct))<EOL>layout = get_memory_layout(device)<EOL>print("<STR_LIT>")<EOL>for entry in layout:<EOL><INDENT>print("<STR_LIT>"<EOL>.format(entry['<STR_LIT>'], entry['<STR_LIT>'],<EOL>entry['<STR_LIT>'] // <NUM_LIT>))<EOL><DEDENT><DEDENT>
|
Prints a list of devices detected in DFU mode.
|
f6019:m18
|
def write_elements(elements, mass_erase_used, progress=None):
|
mem_layout = get_memory_layout(__dev)<EOL>for elem in elements:<EOL><INDENT>addr = elem['<STR_LIT>']<EOL>size = elem['<STR_LIT:size>']<EOL>data = elem['<STR_LIT:data>']<EOL>elem_size = size<EOL>elem_addr = addr<EOL>if progress:<EOL><INDENT>progress(elem_addr, <NUM_LIT:0>, elem_size)<EOL><DEDENT>while size > <NUM_LIT:0>:<EOL><INDENT>write_size = size<EOL>if not mass_erase_used:<EOL><INDENT>for segment in mem_layout:<EOL><INDENT>if addr >= segment['<STR_LIT>'] and addr <= segment['<STR_LIT>']:<EOL><INDENT>page_size = segment['<STR_LIT>']<EOL>page_addr = addr & ~(page_size - <NUM_LIT:1>)<EOL>if addr + write_size > page_addr + page_size:<EOL><INDENT>write_size = page_addr + page_size - addr<EOL><DEDENT>page_erase(page_addr)<EOL>break<EOL><DEDENT><DEDENT><DEDENT>write_memory(addr, data[:write_size], progress,<EOL>elem_addr, elem_size)<EOL>data = data[write_size:]<EOL>addr += write_size<EOL>size -= write_size<EOL>if progress:<EOL><INDENT>progress(elem_addr, addr - elem_addr, elem_size)<EOL><DEDENT><DEDENT><DEDENT>
|
Writes the indicated elements into the target memory,
erasing as needed.
|
f6019:m19
|
def cli_progress(addr, offset, size):
|
width = <NUM_LIT><EOL>done = offset * width // size<EOL>print("<STR_LIT>"<EOL>.format(addr, size, '<STR_LIT:=>' * done, '<STR_LIT:U+0020>' * (width - done),<EOL>offset * <NUM_LIT:100> // size), end="<STR_LIT>")<EOL>try:<EOL><INDENT>sys.stdout.flush()<EOL><DEDENT>except OSError:<EOL><INDENT>pass <EOL><DEDENT>if offset == size:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT>
|
Prints a progress report suitable for use on the command line.
|
f6019:m20
|
def main():
|
global __verbose<EOL>parser = argparse.ArgumentParser(description='<STR_LIT>')<EOL>parser.add_argument(<EOL>"<STR_LIT>", "<STR_LIT>",<EOL>help="<STR_LIT>",<EOL>action="<STR_LIT:store_true>",<EOL>default=False<EOL>)<EOL>parser.add_argument(<EOL>"<STR_LIT>", "<STR_LIT>",<EOL>help="<STR_LIT>",<EOL>action="<STR_LIT:store_true>",<EOL>default=False<EOL>)<EOL>parser.add_argument(<EOL>"<STR_LIT>", "<STR_LIT>",<EOL>help="<STR_LIT>",<EOL>dest="<STR_LIT:path>",<EOL>default=False<EOL>)<EOL>parser.add_argument(<EOL>"<STR_LIT>", "<STR_LIT>",<EOL>help="<STR_LIT>",<EOL>action="<STR_LIT:store_true>",<EOL>default=False<EOL>)<EOL>args = parser.parse_args()<EOL>__verbose = args.verbose<EOL>if args.list:<EOL><INDENT>list_dfu_devices(idVendor=__VID, idProduct=__PID)<EOL>return<EOL><DEDENT>init()<EOL>if args.mass_erase:<EOL><INDENT>print ("<STR_LIT>")<EOL>mass_erase()<EOL><DEDENT>if args.path:<EOL><INDENT>elements = read_dfu_file(args.path)<EOL>if not elements:<EOL><INDENT>return<EOL><DEDENT>print("<STR_LIT>")<EOL>write_elements(elements, args.mass_erase, progress=cli_progress)<EOL>print("<STR_LIT>")<EOL>exit_dfu()<EOL>return<EOL><DEDENT>print("<STR_LIT>")<EOL>
|
Test program for verifying this file's functionality.
|
f6019:m21
|
def run_loop(leds=all_leds):
|
print('<STR_LIT>')<EOL>while <NUM_LIT:1>:<EOL><INDENT>try:<EOL><INDENT>if switch():<EOL><INDENT>[led.on() for led in leds]<EOL><DEDENT>else:<EOL><INDENT>[led.off() for led in leds]<EOL><DEDENT><DEDENT>except OSError: <EOL><INDENT>break<EOL><DEDENT><DEDENT>
|
Start the loop.
:param `leds`: Which LEDs to light up upon switch press.
:type `leds`: sequence of LED objects
|
f7001:m0
|
def find_c_file(obj_file, vpath):
|
c_file = None<EOL>relative_c_file = os.path.splitext(obj_file)[<NUM_LIT:0>] + "<STR_LIT>"<EOL>relative_c_file = relative_c_file.lstrip('<STR_LIT>')<EOL>for p in vpath:<EOL><INDENT>possible_c_file = os.path.join(p, relative_c_file)<EOL>if os.path.exists(possible_c_file):<EOL><INDENT>c_file = possible_c_file<EOL>break<EOL><DEDENT><DEDENT>return c_file<EOL>
|
Search vpaths for the c file that matches the provided object_file.
:param str obj_file: object file to find the matching c file for
:param List[str] vpath: List of base paths, similar to gcc vpath
:return: str path to c file or None
|
f7008:m0
|
def find_module_registrations(c_file):
|
global pattern<EOL>if c_file is None:<EOL><INDENT>return set()<EOL><DEDENT>with io.open(c_file, encoding='<STR_LIT:utf-8>') as c_file_obj:<EOL><INDENT>return set(re.findall(pattern, c_file_obj.read()))<EOL><DEDENT>
|
Find any MP_REGISTER_MODULE definitions in the provided c file.
:param str c_file: path to c file to check
:return: List[(module_name, obj_module, enabled_define)]
|
f7008:m1
|
def generate_module_table_header(modules):
|
<EOL>mod_defs = []<EOL>print("<STR_LIT>")<EOL>for module_name, obj_module, enabled_define in modules:<EOL><INDENT>mod_def = "<STR_LIT>".format(module_name.upper())<EOL>mod_defs.append(mod_def)<EOL>print((<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>).format(module_name=module_name, obj_module=obj_module,<EOL>enabled_define=enabled_define, mod_def=mod_def)<EOL>)<EOL><DEDENT>print("<STR_LIT>")<EOL>for mod_def in mod_defs:<EOL><INDENT>print("<STR_LIT>".format(mod_def=mod_def))<EOL><DEDENT>print("<STR_LIT>")<EOL>
|
Generate header with module table entries for builtin modules.
:param List[(module_name, obj_module, enabled_define)] modules: module defs
:return: None
|
f7008:m2
|
def parse_pin(name_str):
|
if len(name_str) < <NUM_LIT:1>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if name_str[<NUM_LIT:0>] != '<STR_LIT:P>':<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>pin_str = name_str[<NUM_LIT:1>:].split('<STR_LIT:/>')[<NUM_LIT:0>]<EOL>if not pin_str.isdigit():<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>return int(pin_str)<EOL>
|
Parses a string and returns a pin-num.
|
f7022:m0
|
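Quick usage of `parse_pin`; the masked prefix literal is shown as `P`, so pin names of the form `P<number>[/...]` parse as follows:

```python
print(parse_pin('P5'))       # 5
print(parse_pin('P13/SCK'))  # 13 -- anything after '/' is ignored
# parse_pin('A5')            # raises ValueError: name must start with 'P'
```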
def ptr(self):
|
if self.fn_num is None:<EOL><INDENT>return self.func<EOL><DEDENT>return '<STR_LIT>'.format(self.func, self.fn_num)<EOL>
|
Returns the numbered function (e.g. USART6) for this AF.
|
f7022:c0:m2
|
def print(self):
|
if self.supported:<EOL><INDENT>print('<STR_LIT>', end='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>', end='<STR_LIT>')<EOL><DEDENT>fn_num = self.fn_num<EOL>if fn_num is None:<EOL><INDENT>fn_num = <NUM_LIT:0><EOL><DEDENT>print('<STR_LIT>'.format(self.idx,<EOL>self.func, fn_num, self.pin_type, self.ptr(), self.af_str))<EOL>
|
Prints the C representation of this AF.
|
f7022:c0:m4
|