sentence1
stringlengths 52
3.87M
| sentence2
stringlengths 1
47.2k
| label
stringclasses 1
value |
|---|---|---|
def multis_2_mono(table):
    """
    Flatten every multiline cell of a table into a single line.

    Parameters
    ----------
    table : list of list of str
        Rows of string cells; mutated in place.

    Returns
    -------
    table : list of lists of str
        The same table object, with newlines replaced by spaces.
    """
    for row in table:
        for i, cell in enumerate(row):
            row[i] = cell.replace('\n', ' ')
    return table
|
Converts each multiline string in a table to single line.
Parameters
----------
table : list of list of str
A list of rows containing strings
Returns
-------
table : list of lists of str
|
entailment
|
def get_html_row_count(spans):
    """Get the number of rows covered by a list of span groups."""
    if spans == []:
        return 0
    # Accumulate row counts per starting column (span[0][1]).
    counts = {}
    for span in spans:
        ordered = sorted(span)
        key = str(ordered[0][1])
        counts[key] = counts.get(key, 0) + get_span_row_count(ordered)
    return max(counts.values())
|
Get the number of rows
|
entailment
|
def levenshtein_distance(word1, word2):
    """
    Computes the Levenshtein distance.
    [Reference]: https://en.wikipedia.org/wiki/Levenshtein_distance
    [Article]: Levenshtein, Vladimir I. (February 1966). "Binary codes capable of correcting deletions,
    insertions,and reversals". Soviet Physics Doklady 10 (8): 707–710.
    [Implementation]: https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python
    """
    # Keep the longer word first so the row buffer is as short as possible.
    if len(word1) < len(word2):
        word1, word2 = word2, word1
    if not word2:
        return len(word1)
    prev = list(range(len(word2) + 1))
    for i, c1 in enumerate(word1):
        curr = [i + 1]
        for j, c2 in enumerate(word2):
            curr.append(min(
                prev[j + 1] + 1,          # insertion
                curr[j] + 1,              # deletion
                prev[j] + (c1 != c2),     # substitution (or match)
            ))
        prev = curr
    return prev[-1]
|
Computes the Levenshtein distance.
[Reference]: https://en.wikipedia.org/wiki/Levenshtein_distance
[Article]: Levenshtein, Vladimir I. (February 1966). "Binary codes capable of correcting deletions,
insertions,and reversals". Soviet Physics Doklady 10 (8): 707–710.
[Implementation]: https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python
|
entailment
|
def better_ts_function(f):
    '''Decorator which check if timeseries has a better
    implementation of the function.

    If ``ts`` exposes an attribute named after ``f``, that attribute is
    called; otherwise ``f`` itself handles the call.
    '''
    fname = f.__name__

    def wrapper(ts, *args, **kwargs):
        native = getattr(ts, fname, None)
        if native:
            return native(*args, **kwargs)
        return f(ts, *args, **kwargs)

    wrapper.__name__ = fname
    return wrapper
|
Decorator which check if timeseries has a better
implementation of the function.
|
entailment
|
def zscore(ts, **kwargs):
    r'''Rolling Z-Score statistics.

    The Z-score is more formally known as ``standardised residuals``.
    To calculate the standardised residuals of a data set,
    the average value and the standard deviation of the data value
    have to be estimated.

    .. math::

        z = \frac{x - \mu(x)}{\sigma(x)}

    Keyword arguments are forwarded to the timeseries' ``rollmean`` and
    ``rollstddev``; ``name`` (if given) renames the resulting series.
    '''
    # NOTE: the docstring above is a raw string; previously "\f" in
    # "\frac" was interpreted as a form-feed escape, corrupting the math.
    m = ts.rollmean(**kwargs)
    s = ts.rollstddev(**kwargs)
    result = (ts - m) / s
    name = kwargs.get('name', None)
    if name:
        result.name = name
    return result
|
Rolling Z-Score statistics.
The Z-score is more formally known as ``standardised residuals``.
To calculate the standardised residuals of a data set,
the average value and the standard deviation of the data value
have to be estimated.
.. math::
z = \frac{x - \mu(x)}{\sigma(x)}
|
entailment
|
def prange(ts, **kwargs):
    '''Rolling Percentage range.

    Value between 0 and 1 indicating the position of each point within
    the rolling min/max range.
    '''
    lo = ts.rollmin(**kwargs)
    hi = ts.rollmax(**kwargs)
    return (ts - lo) / (hi - lo)
|
Rolling Percentage range.
Value between 0 and 1 indicating the position in the rolling range.
|
entailment
|
def bindata(data, maxbins = 30, reduction = 0.1):
'''
data must be numeric list with a len above 20
This function counts the number of data points in a reduced array
'''
tole = 0.01
N = len(data)
assert N > 20
vmin = min(data)
vmax = max(data)
DV = vmax - vmin
tol = tole*DV
vmax += tol
if vmin >= 0:
vmin -= tol
vmin = max(0.0,vmin)
else:
vmin -= tol
n = min(maxbins,max(2,int(round(reduction*N))))
DV = vmax - vmin
bbin = npy.linspace(vmin,vmax,n+1)
sso = npy.searchsorted(bbin,npy.sort(data))
x = []
y = []
for i in range(0,n):
x.append(0.5*(bbin[i+1]+bbin[i]))
y.append(0.0)
dy = 1.0/N
for i in sso:
y[i-1] += dy/(bbin[i]-bbin[i-1])
return (x,y)
|
data must be numeric list with a len above 20
This function counts the number of data points in a reduced array
|
entailment
|
def binOp(op, indx, amap, bmap, fill_vec):
    '''
    Combines the values from two map objects keyed by ``indx`` using
    the ``op`` operator. Rows where either side is missing, where ``op``
    raises, or where ``op`` yields ``None`` are filled with ``fill_vec``.
    '''
    def combine(key):
        left = amap.get(key, None)
        right = bmap.get(key, None)
        if left is None or right is None:
            # Missing on either side: emit the fill row.
            return fill_vec
        try:
            merged = op(left, right)
        except Exception:
            merged = None
        return fill_vec if merged is None else merged

    rows = [combine(key) for key in indx]
    return np.vstack(rows)
|
Combines the values from two map objects using the indx values
using the op operator. In situations where there is a missing value
it will use the callable function handle_missing
|
entailment
|
def _toVec(shape, val):
'''
takes a single value and creates a vecotor / matrix with that value filled
in it
'''
mat = np.empty(shape)
mat.fill(val)
return mat
|
takes a single value and creates a vector / matrix with that value filled
in it
|
entailment
|
def center_line(space, line):
    """
    Pad text with leading and trailing spaces to centre it within an
    allowed width.

    Parameters
    ----------
    space : int
        The maximum character width allowed for the text. If the length
        of text is more than this value, no space will be added.
    line : str
        The text that will be centered.

    Returns
    -------
    line : str
        The text with the surrounding space added to it.
    """
    text = line.strip()
    gap = space - len(text)
    # Extra odd character of padding goes on the right.
    left_pad = " " * int(math.floor(gap / 2))
    right_pad = " " * int(math.ceil(gap / 2))
    return ''.join([left_pad, text, right_pad])
|
Add leading & trailing space to text to center it within an allowed
width
Parameters
----------
space : int
The maximum character width allowed for the text. If the length
of text is more than this value, no space will be added.\
line : str
The text that will be centered.
Returns
-------
line : str
The text with the leading space added to it
|
entailment
|
def register(self, function):
    """Register a function in the function registry.

    The function will be automatically instantiated if not already an
    instance. The function's ``name`` attribute is used as the key.
    """
    # Explicit branch instead of the old `isclass(f) and f() or f`
    # trick, which silently fell back to the class itself whenever the
    # created instance was falsy.
    if inspect.isclass(function):
        function = function()
    name = function.name
    self[name] = function
|
Register a function in the function registry.
The function will be automatically instantiated if not already an
instance.
|
entailment
|
def unregister(self, name):
    """Unregister a function, given either its name or the function
    object itself. Returns the removed entry, or None if absent.
    """
    # Accept the function object itself by falling back to its .name.
    key = getattr(name, 'name', name)
    return self.pop(key, None)
|
Unregister function by name.
|
entailment
|
def row_includes_spans(table, row, spans):
    """
    Determine if there are spans within a row
    Parameters
    ----------
    table : list of lists of str
    row : int
    spans : list of lists of lists of int
    Returns
    -------
    bool
        Whether or not a table's row includes spans
    """
    return any(
        [row, column] in span
        for column in range(len(table[row]))
        for span in spans
    )
|
Determine if there are spans within a row
Parameters
----------
table : list of lists of str
row : int
spans : list of lists of lists of int
Returns
-------
bool
Whether or not a table's row includes spans
|
entailment
|
def _setup_states(state_definitions, prev=()):
    """Create a StateList object from a 'states' Workflow attribute."""
    states = list(prev)
    for state_def in state_definitions:
        if len(state_def) != 2:
            raise TypeError(
                "The 'state' attribute of a workflow should be "
                "a two-tuple of strings; got %r instead." % (state_def,)
            )
        name, title = state_def
        new_state = State(name, title)
        if any(existing.name == name for existing in states):
            # A state with this name was inherited: replace it in place.
            states = [
                new_state if existing.name == name else existing
                for existing in states
            ]
        else:
            states.append(new_state)
    return StateList(states)
|
Create a StateList object from a 'states' Workflow attribute.
|
entailment
|
def _setup_transitions(tdef, states, prev=()):
    """Create a TransitionList object from a 'transitions' Workflow attribute.
    Args:
        tdef: list of transition definitions
        states (StateList): already parsed state definitions.
        prev (TransitionList): transition definitions from a parent.
    Returns:
        TransitionList: the list of transitions defined in the 'tdef' argument.
    """
    transitions = list(prev)
    for definition in tdef:
        if len(definition) != 3:
            raise TypeError(
                "Elements of the 'transition' attribute of a "
                "workflow should be three-tuples; got %r instead." % (definition,)
            )
        name, source, target = definition
        # A single source (string or State) is promoted to a list.
        if is_string(source) or isinstance(source, State):
            source = [source]
        sources = [states[src] for src in source]
        tr = Transition(name, sources, states[target])
        if any(existing.name == tr.name for existing in transitions):
            # Replacing a transition inherited from a parent class.
            transitions = [
                tr if existing.name == tr.name else existing
                for existing in transitions
            ]
        else:
            transitions.append(tr)
    return TransitionList(transitions)
|
Create a TransitionList object from a 'transitions' Workflow attribute.
Args:
tdef: list of transition definitions
states (StateList): already parsed state definitions.
prev (TransitionList): transition definitions from a parent.
Returns:
TransitionList: the list of transitions defined in the 'tdef' argument.
|
entailment
|
def transition(trname='', field='', check=None, before=None, after=None):
    """Decorator to declare a function as a transition implementation."""
    # Guard against using @transition without parentheses, in which case
    # the decorated function itself lands in `trname`.
    if is_callable(trname):
        raise ValueError(
            "The @transition decorator should be called as "
            "@transition(['transition_name'], **kwargs)")
    if any((check, before, after)):
        warnings.warn(
            "The use of check=, before= and after= in @transition decorators is "
            "deprecated in favor of @transition_check, @before_transition and "
            "@after_transition decorators.",
            DeprecationWarning,
            stacklevel=2)
    return TransitionWrapper(trname, field=field, check=check, before=before, after=after)
|
Decorator to declare a function as a transition implementation.
|
entailment
|
def _make_hook_dict(fun):
"""Ensure the given function has a xworkflows_hook attribute.
That attribute has the following structure:
>>> {
... 'before': [('state', <TransitionHook>), ...],
... }
"""
if not hasattr(fun, 'xworkflows_hook'):
fun.xworkflows_hook = {
HOOK_BEFORE: [],
HOOK_AFTER: [],
HOOK_CHECK: [],
HOOK_ON_ENTER: [],
HOOK_ON_LEAVE: [],
}
return fun.xworkflows_hook
|
Ensure the given function has a xworkflows_hook attribute.
That attribute has the following structure:
>>> {
... 'before': [('state', <TransitionHook>), ...],
... }
|
entailment
|
def _match_state(self, state):
"""Checks whether a given State matches self.names."""
return (self.names == '*'
or state in self.names
or state.name in self.names)
|
Checks whether a given State matches self.names.
|
entailment
|
def _match_transition(self, transition):
"""Checks whether a given Transition matches self.names."""
return (self.names == '*'
or transition in self.names
or transition.name in self.names)
|
Checks whether a given Transition matches self.names.
|
entailment
|
def applies_to(self, transition, from_state=None):
    """Whether this hook applies to the given transition/state.
    Args:
        transition (Transition): the transition to check
        from_state (State or None): the state to check. If absent, the check
            is 'might this hook apply to the related transition, given a
            valid source state'.
    """
    if '*' in self.names:
        return True
    if self.kind in (HOOK_BEFORE, HOOK_AFTER, HOOK_CHECK):
        # Transition-level hooks match on the transition itself.
        return self._match_transition(transition)
    if self.kind == HOOK_ON_ENTER:
        # Entering hooks match on the target state.
        return self._match_state(transition.target)
    if from_state is None:
        # No source provided: does the hook match *any* possible source?
        return any(self._match_state(src) for src in transition.source)
    return self._match_state(from_state)
|
Whether this hook applies to the given transition/state.
Args:
transition (Transition): the transition to check
from_state (State or None): the state to check. If absent, the check
is 'might this hook apply to the related transition, given a
valid source state'.
|
entailment
|
def _pre_transition_checks(self):
    """Run the pre-transition checks."""
    current_state = getattr(self.instance, self.field_name)
    # The transition must be reachable from the instance's current state.
    if current_state not in self.transition.source:
        raise InvalidTransitionError(
            "Transition '%s' isn't available from state '%s'." %
            (self.transition.name, current_state.name))
    # Every registered 'check' hook must accept the instance.
    for check_hook in self._filter_hooks(HOOK_CHECK):
        if not check_hook(self.instance):
            raise ForbiddenTransition(
                "Transition '%s' was forbidden by "
                "custom pre-transition check." % self.transition.name)
|
Run the pre-transition checks.
|
entailment
|
def _filter_hooks(self, *hook_kinds):
"""Filter a list of hooks, keeping only applicable ones."""
hooks = sum((self.hooks.get(kind, []) for kind in hook_kinds), [])
return sorted(hook for hook in hooks
if hook.applies_to(self.transition, self.current_state))
|
Filter a list of hooks, keeping only applicable ones.
|
entailment
|
def _post_transition(self, result, *args, **kwargs):
    """Performs post-transition actions."""
    # Run both 'after' and 'on-enter' hooks with the transition result.
    applicable = self._filter_hooks(HOOK_AFTER, HOOK_ON_ENTER)
    for hook in applicable:
        hook(self.instance, result, *args, **kwargs)
|
Performs post-transition actions.
|
entailment
|
def load_parent_implems(self, parent_implems):
    """Import previously defined implementations.
    Args:
        parent_implems (ImplementationList): List of implementations defined
            in a parent class.
    """
    for name, attribute, implementation in parent_implems.get_custom_implementations():
        # Copy so the parent's implementation object is never mutated.
        self.implementations[name] = implementation.copy()
        self.transitions_at[name] = attribute
        self.custom_implems.add(name)
|
Import previously defined implementations.
Args:
parent_implems (ImplementationList): List of implementations defined
in a parent class.
|
entailment
|
def add_implem(self, transition, attribute, function, **kwargs):
    """Add an implementation.
    Args:
        transition (Transition): the transition for which the implementation
            is added
        attribute (str): the name of the attribute where the implementation
            will be available
        function (callable): the actual implementation function
        **kwargs: extra arguments for the related ImplementationProperty.
    """
    prop = ImplementationProperty(
        field_name=self.state_field,
        transition=transition,
        workflow=self.workflow,
        implementation=function,
        **kwargs)
    # Record both the implementation and where it will live.
    self.implementations[transition.name] = prop
    self.transitions_at[transition.name] = attribute
    return prop
|
Add an implementation.
Args:
transition (Transition): the transition for which the implementation
is added
attribute (str): the name of the attribute where the implementation
will be available
function (callable): the actual implementation function
**kwargs: extra arguments for the related ImplementationProperty.
|
entailment
|
def should_collect(self, value):
    """Decide whether a given value should be collected."""
    # Only values decorated with @transition are candidates.
    if not isinstance(value, TransitionWrapper):
        return False
    # It must relate to a transition of this workflow.
    if value.trname not in self.workflow.transitions:
        return False
    # And be unbound, or bound to this very state field.
    return not value.field or value.field == self.state_field
|
Decide whether a given value should be collected.
|
entailment
|
def collect(self, attrs):
    """Collect the implementations from a given attributes dict."""
    for name, value in attrs.items():
        if not self.should_collect(value):
            continue
        transition = self.workflow.transitions[value.trname]
        already_custom = (
            value.trname in self.implementations
            and value.trname in self.custom_implems)
        if already_custom and name != self.transitions_at[value.trname]:
            # We already have an implementation registered elsewhere.
            other_implem_at = self.transitions_at[value.trname]
            raise ValueError(
                "Error for attribute %s: it defines implementation "
                "%s for transition %s, which is already implemented "
                "at %s." % (name, value, transition, other_implem_at))
        implem = self.add_implem(transition, name, value.func)
        self.custom_implems.add(transition.name)
        # Attach any hooks declared on the wrapper itself.
        for kind, hook_fn in ((HOOK_CHECK, value.check),
                              (HOOK_BEFORE, value.before),
                              (HOOK_AFTER, value.after)):
            if hook_fn:
                implem.add_hook(Hook(kind, hook_fn))
|
Collect the implementations from a given attributes dict.
|
entailment
|
def get_custom_implementations(self):
    """Retrieve a list of custom implementations.
    Yields:
        (str, str, ImplementationProperty) tuples: The name of the attribute
        an implementation lives at, the name of the related transition,
        and the related implementation.
    """
    for trname in self.custom_implems:
        yield (trname, self.transitions_at[trname], self.implementations[trname])
|
Retrieve a list of custom implementations.
Yields:
(str, str, ImplementationProperty) tuples: The name of the attribute
an implementation lives at, the name of the related transition,
and the related implementation.
|
entailment
|
def register_function_hooks(self, func):
    """Looks at an object method and registers it for relevant transitions."""
    for hook_kind, hooks in func.xworkflows_hook.items():
        for field_name, hook in hooks:
            # Skip hooks explicitly bound to a different state field.
            if field_name and field_name != self.state_field:
                continue
            matching = (t for t in self.workflow.transitions
                        if hook.applies_to(t))
            for transition in matching:
                self.implementations[transition.name].add_hook(hook)
|
Looks at an object method and registers it for relevant transitions.
|
entailment
|
def _may_override(self, implem, other):
    """Checks whether an ImplementationProperty may override an attribute."""
    if isinstance(other, ImplementationProperty):
        # Another custom implementation: must target the same transition
        # on the same state field.
        return (other.transition == implem.transition
                and other.field_name == self.state_field)
    if isinstance(other, TransitionWrapper):
        # The wrapper that produced this implementation may be replaced.
        if other.trname != implem.transition.name:
            return False
        if other.field and other.field != self.state_field:
            return False
        return other.func == implem.implementation
    # Anything else must not be clobbered.
    return False
|
Checks whether an ImplementationProperty may override an attribute.
|
entailment
|
def fill_attrs(self, attrs):
    """Update the 'attrs' dict with generated ImplementationProperty."""
    for trname, attrname in self.transitions_at.items():
        implem = self.implementations[trname]
        if attrname in attrs:
            existing = attrs[attrname]
            # Only overwrite attributes we are allowed to replace.
            if not self._may_override(implem, existing):
                raise ValueError(
                    "Can't override transition implementation %s=%r with %r" %
                    (attrname, existing, implem))
        attrs[attrname] = implem
    return attrs
|
Update the 'attrs' dict with generated ImplementationProperty.
|
entailment
|
def transform(self, attrs):
    """Perform all actions on a given attribute dict.

    Order matters: custom implementations are collected from ``attrs``
    first, default implementations are then generated for any remaining
    transitions, and finally every implementation is written back into
    ``attrs``.
    """
    self.collect(attrs)
    self.add_missing_implementations()
    self.fill_attrs(attrs)
|
Perform all actions on a given attribute dict.
|
entailment
|
def log_transition(self, transition, from_state, instance, *args, **kwargs):
    """Log a transition.
    Args:
        transition (Transition): the name of the performed transition
        from_state (State): the source state
        instance (object): the modified object
    Kwargs:
        Any passed when calling the transition
    """
    logger = logging.getLogger('xworkflows.transitions')
    # repr() of arbitrary objects may not be encodable; degrade gracefully.
    try:
        instance_repr = u(repr(instance), 'ignore')
    except (UnicodeEncodeError, UnicodeDecodeError):
        instance_repr = u("<bad repr>")
    template = u("%s performed transition %s.%s (%s -> %s)")
    logger.info(
        template, instance_repr, self.__class__.__name__,
        transition.name, from_state.name, transition.target.name)
|
Log a transition.
Args:
transition (Transition): the name of the performed transition
from_state (State): the source state
instance (object): the modified object
Kwargs:
Any passed when calling the transition
|
entailment
|
def _add_workflow(mcs, field_name, state_field, attrs):
    """Attach a workflow to the attribute list (create a StateProperty)."""
    prop = StateProperty(state_field.workflow, field_name)
    attrs[field_name] = prop
|
Attach a workflow to the attribute list (create a StateProperty).
|
entailment
|
def _find_workflows(mcs, attrs):
"""Finds all occurrences of a workflow in the attributes definitions.
Returns:
dict(str => StateField): maps an attribute name to a StateField
describing the related Workflow.
"""
workflows = {}
for attribute, value in attrs.items():
if isinstance(value, Workflow):
workflows[attribute] = StateField(value)
return workflows
|
Finds all occurrences of a workflow in the attributes definitions.
Returns:
dict(str => StateField): maps an attribute name to a StateField
describing the related Workflow.
|
entailment
|
def _add_transitions(mcs, field_name, workflow, attrs, implems=None):
    """Collect and enhance transition definitions to a workflow.
    Modifies the 'attrs' dict in-place.
    Args:
        field_name (str): name of the field transitions should update
        workflow (Workflow): workflow we're working on
        attrs (dict): dictionary of attributes to be updated.
        implems (ImplementationList): Implementation list from parent
            classes (optional)
    Returns:
        ImplementationList: The new implementation list for this field.
    """
    merged = ImplementationList(field_name, workflow)
    # Inherit any implementations declared on parent classes first.
    if implems:
        merged.load_parent_implems(implems)
    merged.transform(attrs)
    return merged
|
Collect and enhance transition definitions to a workflow.
Modifies the 'attrs' dict in-place.
Args:
field_name (str): name of the field transitions should update
workflow (Workflow): workflow we're working on
attrs (dict): dictionary of attributes to be updated.
implems (ImplementationList): Implementation list from parent
classes (optional)
Returns:
ImplementationList: The new implementation list for this field.
|
entailment
|
def update(self):
    "Updates cartesian coordinates for drawing tree graph"
    nnodes = self.ttree.nnodes
    # allocate fresh arrays sized to the current tree
    self.edges = np.zeros((nnodes - 1, 2), dtype=int)
    self.verts = np.zeros((nnodes, 2), dtype=float)
    self.lines = []
    self.coords = []
    # recompute layout information in dependency order
    self.update_idxs()           # get dimensions of tree
    self.update_fixed_order()    # in case ntips changed
    self.assign_vertices()       # get node locations
    self.assign_coordinates()    # get edge locations
    self.reorient_coordinates()  # rotate/flip for the chosen orientation
|
Updates cartesian coordinates for drawing tree graph
|
entailment
|
def update_idxs(self):
    "set root idx highest, tip idxs lowest ordered as ladderized"
    idx = self.ttree.nnodes - 1
    # internal nodes first (levelorder): the root gets the highest idx
    for node in self.ttree.treenode.traverse("levelorder"):
        if node.is_leaf():
            continue
        node.add_feature("idx", idx)
        if not node.name:
            node.name = str(idx)
        idx -= 1
    # then tips: the remaining (lowest) indices, down to 0
    for node in self.ttree.treenode.get_leaves():
        node.add_feature("idx", idx)
        if not node.name:
            node.name = str(idx)
        idx -= 1
|
set root idx highest, tip idxs lowest ordered as ladderized
|
entailment
|
def update_fixed_order(self):
    "after pruning fixed order needs update to match new nnodes/ntips."
    fixed_order = self.ttree._fixed_order
    self.ttree_fixed_order = None
    self.ttree_fixed_idx = list(range(self.ttree.ntips))
    if fixed_order:
        # drop tip labels that no longer exist in the (pruned) tree
        keep = self.ttree.get_tip_labels()
        self.ttree._set_fixed_order(
            [label for label in fixed_order if label in keep])
    else:
        self.ttree._fixed_idx = list(range(self.ttree.ntips))
|
after pruning fixed order needs update to match new nnodes/ntips.
|
entailment
|
def assign_vertices(self):
    """
    Sets .edges, .verts for node positions.
    X and Y positions here refer to base assumption that tree is right
    facing, reorient_coordinates() will handle re-translating this.

    Fills self.edges with (parent_idx, child_idx) rows and self.verts
    with one (x, y) row per node idx. Assumes update_idxs() has already
    assigned .idx features to every node.
    """
    # shortname
    uselen = bool(self.ttree.style.use_edge_lengths)
    # postorder: children then parents (nidxs from 0 up)
    # store edge array for connecting child nodes to parent nodes
    nidx = 0
    for node in self.ttree.treenode.traverse("postorder"):
        if not node.is_root():
            self.edges[nidx, :] = [node.up.idx, node.idx]
            nidx += 1
    # store verts array with x,y positions of nodes (lengths of branches)
    # we want tips to align at the right face (larger axis number)
    _root = self.ttree.treenode.get_tree_root()
    _treeheight = _root.get_distance(_root.get_farthest_leaf()[0])
    # set node x, y
    tidx = len(self.ttree) - 1
    for node in self.ttree.treenode.traverse("postorder"):
        # Just leaves: x positions are evenly spread and ordered on axis
        if node.is_leaf() and (not node.is_root()):
            # set y-positions (heights). Distance from root or zero
            node.y = _treeheight - _root.get_distance(node)
            if not uselen:
                node.y = 0.0
            # set x-positions (order of samples)
            if self.ttree._fixed_order:
                # fixed ordering supplied by the user (multitree plots)
                node.x = self.ttree._fixed_order.index(node.name)# - tidx
            else:
                # otherwise tips are placed in ladderized traversal order
                node.x = tidx
                tidx -= 1
            # store the x,y vertex positions
            self.verts[node.idx] = [node.x, node.y]
        # All internal node positions are not evenly spread or ordered
        else:
            # height is either distance or nnodes from root
            node.y = _treeheight - _root.get_distance(node)
            if not uselen:
                # unit-length mode: one level above the tallest child
                node.y = max([i.y for i in node.children]) + 1
            # x position is halfway between childrens x-positions
            if node.children:
                nch = node.children
                node.x = sum(i.x for i in nch) / float(len(nch))
            else:
                # NOTE(review): childless non-leaf only occurs for a
                # single-node tree — verify this branch is reachable.
                node.x = tidx
            # store the x,y vertex positions
            self.verts[node.idx] = [node.x, node.y]
|
Sets .edges, .verts for node positions.
X and Y positions here refer to base assumption that tree is right
facing, reorient_coordinates() will handle re-translating this.
|
entailment
|
def reorient_coordinates(self):
    """
    Returns a modified .verts array with new coordinates for nodes.
    This does not need to modify .edges. The order of nodes, and therefore
    of verts rows is still the same because it is still based on the tree
    branching order (ladderized usually).
    """
    # trivial trees need no reorientation
    if len(self.ttree) < 2:
        return
    orient = self.ttree.style.orient
    if orient in ('down', 0):
        # 'down' is the native layout; nothing to change
        return
    if orient in ('right', 3):
        # swap x/y and negate the new x, for both verts and coords
        for attr in ('verts', 'coords'):
            arr = getattr(self, attr)
            rotated = np.zeros(arr.shape)
            rotated[:, 1] = arr[:, 0]
            rotated[:, 0] = arr[:, 1] * -1
            setattr(self, attr, rotated)
    elif orient in ('left', 1):
        raise NotImplementedError("todo: left facing")
    else:
        raise NotImplementedError("todo: up facing")
|
Returns a modified .verts array with new coordinates for nodes.
This does not need to modify .edges. The order of nodes, and therefore
of verts rows is still the same because it is still based on the tree
branching order (ladderized usually).
|
entailment
|
def tsiterator(ts, dateconverter=None, desc=None,
               clean=False, start_value=None, **kwargs):
    '''An iterator of timeseries as tuples.'''
    convert = dateconverter or default_converter
    # header row: date column followed by the series names
    yield ['Date'] + ts.names()
    if clean == 'full':
        # full_clean already applies the date conversion
        for dt, value in full_clean(ts, convert, desc, start_value):
            yield (dt,) + tuple(value)
    else:
        source = ts.clean() if clean else ts
        for dt, value in source.items(desc=desc, start_value=start_value):
            yield (convert(dt),) + tuple(value)
|
An iterator of timeseries as tuples.
|
entailment
|
def set_baselines(self):
    """
    Modify coords to shift tree position for x,y baseline arguments. This
    is useful for arranging trees onto a Canvas with other plots, but
    still sharing a common cartesian axes coordinates.
    """
    if not self.style.xbaseline:
        return
    # vertical layouts shift along x; horizontal layouts along y
    axis = 0 if self.style.orient in ("up", "down") else 1
    self.coords.coords[:, axis] += self.style.xbaseline
    self.coords.verts[:, axis] += self.style.xbaseline
|
Modify coords to shift tree position for x,y baseline arguments. This
is useful for arranging trees onto a Canvas with other plots, but
still sharing a common cartesian axes coordinates.
|
entailment
|
def add_tip_labels_to_axes(self):
    """
    Add text offset from tips of tree with correction for orientation,
    and fixed_order which is usually used in multitree plotting.
    """
    # get tip-coords and replace if using fixed_order
    xpos = self.ttree.get_tip_coordinates('x')
    ypos = self.ttree.get_tip_coordinates('y')
    if self.style.orient in ("up", "down"):
        # vertical layout: x is the tip ordering axis, y the height axis
        if self.ttree._fixed_order:
            xpos = list(range(self.ttree.ntips))
            ypos = ypos[self.ttree._fixed_idx]
        if self.style.tip_labels_align:
            # aligned labels all sit at the zero line
            ypos = np.zeros(self.ttree.ntips)
    if self.style.orient in ("right", "left"):
        # horizontal layout: roles of x and y are swapped
        if self.ttree._fixed_order:
            xpos = xpos[self.ttree._fixed_idx]
            ypos = list(range(self.ttree.ntips))
        if self.style.tip_labels_align:
            xpos = np.zeros(self.ttree.ntips)
    # pop fill from color dict if using color
    # (per-label colors are passed separately and would conflict)
    tstyle = deepcopy(self.style.tip_labels_style)
    if self.style.tip_labels_colors:
        tstyle.pop("fill")
    # add tip names to coordinates calculated above
    # labels are rotated -90 degrees for vertical layouts
    self.axes.text(
        xpos,
        ypos,
        self.tip_labels,
        angle=(0 if self.style.orient in ("right", "left") else -90),
        style=tstyle,
        color=self.style.tip_labels_colors,
    )
    # get stroke-width for aligned tip-label lines (optional)
    # copy stroke-width from the edge_style unless user set it
    if not self.style.edge_align_style.get("stroke-width"):
        self.style.edge_align_style["stroke-width"] = (
            self.style.edge_style["stroke-width"])
|
Add text offset from tips of tree with correction for orientation,
and fixed_order which is usually used in multitree plotting.
|
entailment
|
def add_tip_lines_to_axes(self):
    "add lines to connect tips to zero axis for tip_labels_align=True"
    # get tip-coords and align-coords from verts
    xpos, ypos, aedges, averts = self.get_tip_label_coords()
    if not self.style.tip_labels_align:
        return
    self.axes.graph(
        aedges,
        vcoordinates=averts,
        estyle=self.style.edge_align_style,
        vlshow=False,
        vsize=0,
    )
|
add lines to connect tips to zero axis for tip_labels_align=True
|
entailment
|
def fit_tip_labels(self):
    """
    Modifies display range to ensure tip labels fit. This is a bit hackish
    still. The problem is that the 'extents' range of the rendered text
    is totally correct. So we add a little buffer here. Should add for
    user to be able to modify this if needed. If not using edge lengths
    then need to use unit length for treeheight.
    """
    # buffer: fraction of tree height, or unit distance when edge
    # lengths are not used
    if self.style.use_edge_lengths:
        addon = self.ttree.treenode.height * .85
    else:
        addon = self.ttree.treenode.get_farthest_leaf(True)[1]
    # widen the display range on the label side of the tree
    if self.style.tip_labels:
        if self.style.orient == "right":
            self.axes.x.domain.max = addon
        elif self.style.orient == "down":
            self.axes.y.domain.min = -addon
|
Modifies display range to ensure tip labels fit. This is a bit hackish
still. The problem is that the 'extents' range of the rendered text
is totally correct. So we add a little buffer here. Should add for
user to be able to modify this if needed. If not using edge lengths
then need to use unit length for treeheight.
|
entailment
|
def assign_node_colors_and_style(self):
    """
    Resolve conflict of 'node_color' and 'node_style['fill'] args which are
    redundant. Default is node_style.fill unless user entered node_color.
    To enter multiple colors user must use node_color not style fill.
    Either way, we build a list of colors to pass to Drawing.node_colors
    which is then written to the marker as a fill CSS attribute.
    """
    # SET node_colors and POP node_style.fill
    colors = self.style.node_colors
    style = self.style.node_style
    if colors is None:
        # no node_color arg: use the single style fill (if any)
        if style["fill"] in (None, "none"):
            style.pop("fill")
        else:
            if isinstance(style["fill"], (list, tuple)):
                raise ToytreeError(
                    "Use node_color not node_style for multiple node colors")
            # check the color; convert numpy/toyplot color records to CSS
            color = style["fill"]
            if isinstance(color, (np.ndarray, np.void, list, tuple)):
                color = toyplot.color.to_css(color)
            self.node_colors = [color] * self.ttree.nnodes
    # otherwise parse node_color
    else:
        style.pop("fill")
        if isinstance(colors, str):
            # check the color
            # NOTE(review): a str is never ndarray/void/list/tuple, so the
            # conversion branch below looks unreachable -- confirm intent
            color = colors
            if isinstance(color, (np.ndarray, np.void, list, tuple)):
                color = toyplot.color.to_css(color)
            self.node_colors = [color] * self.ttree.nnodes
        elif isinstance(colors, (list, tuple)):
            # assumes self.node_colors was pre-initialized to nnodes length
            # -- TODO confirm against __init__
            if len(colors) != len(self.node_colors):
                raise ToytreeError("node_colors arg is the wrong length")
            for cidx in range(len(self.node_colors)):
                color = colors[cidx]
                if isinstance(color, (np.ndarray, np.void, list, tuple)):
                    color = toyplot.color.to_css(color)
                self.node_colors[cidx] = color
    # use CSS none for stroke=None
    if self.style.node_style["stroke"] is None:
        self.style.node_style.stroke = "none"
    # apply node markers; default marker is a circle 'o' for every node
    markers = self.style.node_markers
    if markers is None:
        self.node_markers = ["o"] * self.ttree.nnodes
    else:
        if isinstance(markers, str):
            self.node_markers = [markers] * self.ttree.nnodes
        elif isinstance(markers, (list, tuple)):
            for cidx in range(len(self.node_markers)):
                self.node_markers[cidx] = markers[cidx]
|
Resolve conflict of 'node_color' and 'node_style['fill'] args which are
redundant. Default is node_style.fill unless user entered node_color.
To enter multiple colors user must use node_color not style fill.
Either way, we build a list of colors to pass to Drawing.node_colors
which is then written to the marker as a fill CSS attribute.
|
entailment
|
def assign_node_labels_and_sizes(self):
    "assign features of nodes to be plotted based on user kwargs"
    # shorthand
    nvals = self.ttree.get_node_values()
    # False == Hide nodes and labels unless user entered size
    if self.style.node_labels is False:
        self.node_labels = ["" for i in nvals]
        if self.style.node_sizes is not None:
            if isinstance(self.style.node_sizes, (list, tuple, np.ndarray)):
                assert len(self.node_sizes) == len(self.style.node_sizes)
                self.node_sizes = self.style.node_sizes
            elif isinstance(self.style.node_sizes, (int, str)):
                self.node_sizes = (
                    [int(self.style.node_sizes)] * len(nvals)
                )
            # a single-space label keeps the marker visible without text
            self.node_labels = [" " if i else "" for i in self.node_sizes]
    # True == Show nodes, label=idx, and show hover
    elif self.style.node_labels is True:
        # turn on node hover even if user did not set it explicit
        self.style.node_hover = True
        # get idx labels
        self.node_labels = self.ttree.get_node_values('idx', 1, 1)
        # use default node size as a list if not provided
        if not self.style.node_sizes:
            self.node_sizes = [18] * len(nvals)
        else:
            assert isinstance(self.style.node_sizes, (int, str))
            self.node_sizes = (
                [int(self.style.node_sizes)] * len(nvals)
            )
    # User entered lists or other for node labels or sizes; check lengths.
    else:
        # make node labels into a list of values
        if isinstance(self.style.node_labels, list):
            assert len(self.style.node_labels) == len(nvals)
            self.node_labels = self.style.node_labels
        # check if user entered a feature else use entered val
        elif isinstance(self.style.node_labels, str):
            self.node_labels = [self.style.node_labels] * len(nvals)
            if self.style.node_labels in self.ttree.features:
                self.node_labels = self.ttree.get_node_values(
                    self.style.node_labels, 1, 0)
        # default to idx at internals if nothing else
        else:
            self.node_labels = self.ttree.get_node_values("idx", 1, 0)
        # make node sizes as a list; set to zero if node label is ""
        if isinstance(self.style.node_sizes, list):
            assert len(self.style.node_sizes) == len(nvals)
            self.node_sizes = self.style.node_sizes
        elif isinstance(self.style.node_sizes, (str, int, float)):
            self.node_sizes = [int(self.style.node_sizes)] * len(nvals)
        else:
            self.node_sizes = [18] * len(nvals)
        # override node sizes to hide based on node labels
        for nidx, node in enumerate(self.node_labels):
            if self.node_labels[nidx] == "":
                self.node_sizes[nidx] = 0
    # ensure string type
    self.node_labels = [str(i) for i in self.node_labels]
|
assign features of nodes to be plotted based on user kwargs
|
entailment
|
def assign_tip_labels_and_colors(self):
    "assign tip labels based on user provided kwargs"
    # COLOR
    # tip color overrides tipstyle.fill
    if self.style.tip_labels_colors:
        #if self.style.tip_labels_style.fill:
        #    self.style.tip_labels_style.fill = None
        if self.ttree._fixed_order:
            if isinstance(self.style.tip_labels_colors, (list, np.ndarray)):
                # reorder the colors to match the fixed tip order
                cols = np.array(self.style.tip_labels_colors)
                orde = cols[self.ttree._fixed_idx]
                self.style.tip_labels_colors = list(orde)
    # LABELS
    # False == hide tip labels
    if self.style.tip_labels is False:
        self.style.tip_labels_style["-toyplot-anchor-shift"] = "0px"
        self.tip_labels = ["" for i in self.ttree.get_tip_labels()]
    # LABELS
    # user entered something...
    else:
        # if user did not change label-offset then shift it here
        if not self.style.tip_labels_style["-toyplot-anchor-shift"]:
            self.style.tip_labels_style["-toyplot-anchor-shift"] = "15px"
        # if user entered list in get_tip_labels order reverse it for plot
        if isinstance(self.style.tip_labels, list):
            self.tip_labels = self.style.tip_labels
        # True assigns tip labels from tree
        else:
            if self.ttree._fixed_order:
                self.tip_labels = self.ttree._fixed_order
            else:
                self.tip_labels = self.ttree.get_tip_labels()
|
assign tip labels based on user provided kwargs
|
entailment
|
def assign_edge_colors_and_widths(self):
    """
    Resolve conflict between the 'edge_colors'/'edge_widths' args and the
    redundant edge_style entries 'stroke'/'stroke-width'. Builds per-edge
    lists self.edge_colors and self.edge_widths, popping the style keys so
    values are applied per edge rather than globally. Empty entries fall
    back to "#262626" (color) and 2 (width) at the end.
    """
    # node_color overrides fill. Tricky to catch cuz it can be many types.
    # SET edge_widths and POP edge_style.stroke-width
    if self.style.edge_widths is None:
        if not self.style.edge_style["stroke-width"]:
            self.style.edge_style.pop("stroke-width")
            self.style.edge_style.pop("stroke")
            self.edge_widths = [None] * self.nedges
        else:
            if isinstance(self.style.edge_style["stroke-width"], (list, tuple)):
                raise ToytreeError(
                    "Use edge_widths not edge_style for multiple edge widths")
            # check the color
            width = self.style.edge_style["stroke-width"]
            self.style.edge_style.pop("stroke-width")
            self.edge_widths = [width] * self.nedges
    else:
        self.style.edge_style.pop("stroke-width")
        if isinstance(self.style.edge_widths, (str, int)):
            self.edge_widths = [int(self.style.edge_widths)] * self.nedges
        elif isinstance(self.style.edge_widths, (list, tuple)):
            # NOTE(review): assumes self.edge_widths was pre-initialized to
            # nedges length -- TODO confirm against __init__
            if len(self.style.edge_widths) != self.nedges:
                raise ToytreeError("edge_widths arg is the wrong length")
            for cidx in range(self.nedges):
                self.edge_widths[cidx] = self.style.edge_widths[cidx]
    # SET edge_colors and POP edge_style.stroke
    if self.style.edge_colors is None:
        if self.style.edge_style["stroke"] is None:
            self.style.edge_style.pop("stroke")
            self.edge_colors = [None] * self.nedges
        else:
            if isinstance(self.style.edge_style["stroke"], (list, tuple)):
                raise ToytreeError(
                    "Use edge_colors not edge_style for multiple edge colors")
            # check the color; convert numpy/toyplot color records to CSS
            color = self.style.edge_style["stroke"]
            if isinstance(color, (np.ndarray, np.void, list, tuple)):
                color = toyplot.color.to_css(color)
            self.style.edge_style.pop("stroke")
            self.edge_colors = [color] * self.nedges
    # otherwise parse node_color
    else:
        self.style.edge_style.pop("stroke")
        if isinstance(self.style.edge_colors, (str, int)):
            # check the color
            color = self.style.edge_colors
            if isinstance(color, (np.ndarray, np.void, list, tuple)):
                color = toyplot.color.to_css(color)
            self.edge_colors = [color] * self.nedges
        elif isinstance(self.style.edge_colors, (list, tuple)):
            if len(self.style.edge_colors) != self.nedges:
                raise ToytreeError("edge_colors arg is the wrong length")
            for cidx in range(self.nedges):
                self.edge_colors[cidx] = self.style.edge_colors[cidx]
    # do not allow empty edge_colors or widths
    self.edge_colors = [i if i else "#262626" for i in self.edge_colors]
    self.edge_widths = [i if i else 2 for i in self.edge_widths]
|
Resolve conflict of 'node_color' and 'node_style['fill'] args which are
redundant. Default is node_style.fill unless user entered node_color.
To enter multiple colors user must use node_color not style fill.
Either way, we build a list of colors to pass to Drawing.node_colors
which is then written to the marker as a fill CSS attribute.
|
entailment
|
def add_nodes_to_axes(self):
    """
    Creates a new marker for every node from idx indexes and lists of
    node_values, node_colors, node_sizes, node_style, node_labels_style.
    Pulls from node_color and adds to a copy of the style dict for each
    node to create marker.
    Node_colors has priority to overwrite node_style['fill']
    """
    # bail out if not any visible nodes (e.g., none w/ size>0)
    if all([i == "" for i in self.node_labels]):
        return
    # build markers for each node.
    marks = []
    for nidx in self.ttree.get_node_values('idx', 1, 1):
        # select node value from deconstructed lists
        nlabel = self.node_labels[nidx]
        nsize = self.node_sizes[nidx]
        nmarker = self.node_markers[nidx]
        # get styledict copies (deep copies so per-node edits don't leak)
        nstyle = deepcopy(self.style.node_style)
        nlstyle = deepcopy(self.style.node_labels_style)
        # and mod style dict copies from deconstructed lists
        nstyle["fill"] = self.node_colors[nidx]
        # create mark if text or node
        if (nlabel or nsize):
            mark = toyplot.marker.create(
                shape=nmarker,
                label=str(nlabel),
                size=nsize,
                mstyle=nstyle,
                lstyle=nlstyle,
            )
        else:
            # empty string == no marker drawn for this node
            mark = ""
        # store the nodes/marks
        marks.append(mark)
    # node_hover == True to show all features interactive
    if self.style.node_hover is True:
        title = self.get_hover()
    elif isinstance(self.style.node_hover, list):
        # todo: return advice if improperly formatted
        title = self.style.node_hover
    # if hover is false then no hover
    else:
        title = None
    # add nodes
    self.axes.scatterplot(
        self.coords.verts[:, 0],
        self.coords.verts[:, 1],
        marker=marks,
        title=title,
    )
|
Creates a new marker for every node from idx indexes and lists of
node_values, node_colors, node_sizes, node_style, node_labels_style.
Pulls from node_color and adds to a copy of the style dict for each
node to create marker.
Node_colors has priority to overwrite node_style['fill']
|
entailment
|
def get_tip_label_coords(self):
    """
    Get starting position of tip labels text based on locations of the
    leaf nodes on the tree and style offset and align options. Node
    positions are found using the .verts attribute of coords and is
    already oriented for the tree face direction.

    Returns (tip_xpos, tip_ypos, align_edges, align_verts); the last two
    are None unless tip_labels_align is set.
    """
    # number of tips
    ns = self.ttree.ntips
    # x-coordinate of tips assuming down-face
    # (first ns rows of verts are the tip nodes)
    tip_xpos = self.coords.verts[:ns, 0]
    tip_ypos = self.coords.verts[:ns, 1]
    align_edges = None
    align_verts = None
    # handle orientations
    if self.style.orient in (0, 'down'):
        # align tips at zero
        if self.style.tip_labels_align:
            tip_yend = np.zeros(ns)
            # each edge connects the aligned copy (index i+ns) to tip i
            align_edges = np.array([
                (i + len(tip_ypos), i) for i in range(len(tip_ypos))
            ])
            align_verts = np.array(
                list(zip(tip_xpos, tip_ypos)) + \
                list(zip(tip_xpos, tip_yend))
            )
            tip_ypos = tip_yend
    else:
        # tip labels align finds the zero axis for orientation...
        if self.style.tip_labels_align:
            tip_xend = np.zeros(ns)
            align_edges = np.array([
                (i + len(tip_xpos), i) for i in range(len(tip_xpos))
            ])
            align_verts = np.array(
                list(zip(tip_xpos, tip_ypos)) + \
                list(zip(tip_xend, tip_ypos))
            )
            tip_xpos = tip_xend
    return tip_xpos, tip_ypos, align_edges, align_verts
|
Get starting position of tip labels text based on locations of the
leaf nodes on the tree and style offset and align options. Node
positions are found using the .verts attribute of coords and is
already oriented for the tree face direction.
|
entailment
|
def get_dims_from_tree_size(self):
    "Calculate reasonable canvas height and width for tree given N tips"
    ntips = len(self.ttree)
    span = 18 * ntips
    # The dimension running along the tip axis may grow up to 1000px,
    # the orthogonal one is capped at 500px. Heights have a floor of
    # 275px, widths a floor of 350px. User-supplied values are kept.
    if self.style.orient in ("right", "left"):
        height_cap, width_cap = 1000, 500
    else:
        height_cap, width_cap = 500, 1000
    if not self.style.height:
        self.style.height = max(275, min(height_cap, span))
    if not self.style.width:
        self.style.width = max(350, min(width_cap, span))
|
Calculate reasonable canvas height and width for tree given N tips
|
entailment
|
def get_longest_line_length(text):
    """Return the length of the longest line in *text* (a paragraph)."""
    # str.split("\n") on an empty string yields [""], so the result is 0
    return max(len(line) for line in text.split("\n"))
|
Get the length of the longest line in a paragraph
|
entailment
|
def isnumeric(obj):
    '''
    Return True if *obj* is a numeric value.

    Accepts ``decimal.Decimal`` instances and anything convertible by
    ``float`` (ints, floats, numeric strings); returns False otherwise.
    '''
    from decimal import Decimal
    # isinstance (not type equality) so Decimal subclasses also qualify
    if isinstance(obj, Decimal):
        return True
    try:
        float(obj)
    except (TypeError, ValueError):
        # was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; only conversion failures mean "not numeric"
        return False
    return True
|
Return true if obj is a numeric value
|
entailment
|
def significant_format(number, decimal_sep='.', thousand_sep=',', n=3):
    """Format a number according to a given number of significant figures.
    """
    text = significant(number, n)
    # determine the sign from the numeric value, then strip any leading
    # minus from the significant() output so grouping works on digits only
    sign = '-' if float(number) < 0 else ''
    if text.startswith('-'):
        text = text[1:]
    int_part, _, dec_part = text.partition('.')
    if dec_part:
        dec_part = decimal_sep + dec_part
    if thousand_sep:
        # split the integer digits into groups of three from the right
        groups = []
        while len(int_part) > 3:
            groups.insert(0, int_part[-3:])
            int_part = int_part[:-3]
        groups.insert(0, int_part)
        int_part = thousand_sep.join(groups)
    return sign + int_part + dec_part
|
Format a number according to a given number of significant figures.
|
entailment
|
def to_text_string(obj, encoding=None):
    """Convert `obj` to (unicode) text string"""
    if PY2:
        # Python 2
        return unicode(obj) if encoding is None else unicode(obj, encoding)
    # Python 3
    if encoding is None:
        return str(obj)
    if isinstance(obj, str):
        # In case this function is not used properly, this could happen
        # (str(text, encoding) would raise TypeError)
        return obj
    return str(obj, encoding)
|
Convert `obj` to (unicode) text string
|
entailment
|
def text_to_qcolor(text):
    """
    Create a QColor from specified string
    Avoid warning from Qt when an invalid QColor is instantiated

    Returns a default (invalid) QColor when the input is not a valid
    '#rrggbb' hex string or a known Qt color name.
    """
    color = QColor()
    if not is_string(text): # testing for QString (PyQt API#1)
        text = str(text)
    if not is_text_string(text):
        # not convertible to text: return the default (invalid) color
        return color
    if text.startswith('#') and len(text)==7:
        # hex form '#rrggbb': validate every character before handing
        # the string to Qt (the '#' itself is in the allowed set)
        correct = '#0123456789abcdef'
        for char in text:
            if char.lower() not in correct:
                return color
    elif text not in list(QColor.colorNames()):
        # not a known named color
        return color
    color.setNamedColor(text)
    return color
|
Create a QColor from specified string
Avoid warning from Qt when an invalid QColor is instantiated
|
entailment
|
def tuple_to_qfont(tup):
    """
    Create a QFont from tuple:
    (family [string], size [int], italic [bool], bold [bool])

    Returns None when the tuple does not match that shape.
    """
    # shape check first (guards the unpack below)
    if not isinstance(tup, tuple) or len(tup) != 4:
        return None
    family, size, italic, bold = tup
    # per-field type validation
    if not (is_text_string(family) and isinstance(size, int)
            and isinstance(italic, bool) and isinstance(bold, bool)):
        return None
    font = QFont()
    font.setFamily(family)
    font.setPointSize(size)
    font.setItalic(italic)
    font.setBold(bold)
    return font
|
Create a QFont from tuple:
(family [string], size [int], italic [bool], bold [bool])
|
entailment
|
def fedit(data, title="", comment="", icon=None, parent=None, apply=None,
          ok=True, cancel=True, result='list', outfile=None, type='form',
          scrollbar=False, background_color=None, widget_color=None):
    """
    Create form dialog and return result
    (if Cancel button is pressed, return None)
    :param tuple data: datalist, datagroup (see below)
    :param str title: form title
    :param str comment: header comment
    :param QIcon icon: dialog box icon
    :param QWidget parent: parent widget
    :param str ok: customized ok button label
    :param str cancel: customized cancel button label
    :param tuple apply: (label, function) customized button label and callback
    :param function apply: function taking two arguments (result, widgets)
    :param str result: result serialization ('list', 'dict', 'OrderedDict',
                       'JSON' or 'XML')
    :param str outfile: write result to the file outfile.[py|json|xml]
    :param str type: layout type ('form' or 'questions')
    :param bool scrollbar: vertical scrollbar
    :param str background_color: color of the background
    :param str widget_color: color of the widgets
    :return: Serialized result (data type depends on `result` parameter)
    datalist: list/tuple of (field_name, field_value)
    datagroup: list/tuple of (datalist *or* datagroup, title, comment)
    Tips:
      * one field for each member of a datalist
      * one tab for each member of a top-level datagroup
      * one page (of a multipage widget, each page can be selected with a
        combo box) for each member of a datagroup inside a datagroup
    Supported types for field_value:
      - int, float, str, unicode, bool
      - colors: in Qt-compatible text form, i.e. in hex format or name (red,...)
        (automatically detected from a string)
      - list/tuple:
          * the first element will be the selected index (or value)
          * the other elements can be couples (key, value) or only values
    """
    # Create a QApplication instance if no instance currently exists
    # (e.g. if the module is used directly from the interpreter)
    test_travis = os.environ.get('TEST_CI_WIDGETS', None)
    if test_travis is not None:
        # CI mode: auto-quit the application after one second
        app = QApplication.instance()
        if app is None:
            app = QApplication([])
        timer = QTimer(app)
        timer.timeout.connect(app.quit)
        timer.start(1000)
    elif QApplication.startingUp():
        # no application yet: create one and install a Qt translator
        _app = QApplication([])
        translator_qt = QTranslator()
        translator_qt.load('qt_' + QLocale.system().name(),
                           QLibraryInfo.location(QLibraryInfo.TranslationsPath))
        _app.installTranslator(translator_qt)
    # validate the serialization format, falling back to 'list'
    serial = ['list', 'dict', 'OrderedDict', 'JSON', 'XML']
    if result not in serial:
        print("Warning: '%s' not in %s, default to list" %
              (result, ', '.join(serial)), file=sys.stderr)
        result = 'list'
    # validate the layout type, falling back to 'form'
    layouts = ['form', 'questions']
    if type not in layouts:
        print("Warning: '%s' not in %s, default to form" %
              (type, ', '.join(layouts)), file=sys.stderr)
        type = 'form'
    dialog = FormDialog(data, title, comment, icon, parent, apply, ok, cancel,
                        result, outfile, type, scrollbar, background_color,
                        widget_color)
    # exec_() returns truthy only when the dialog was accepted
    if dialog.exec_():
        return dialog.get()
|
Create form dialog and return result
(if Cancel button is pressed, return None)
:param tuple data: datalist, datagroup (see below)
:param str title: form title
:param str comment: header comment
:param QIcon icon: dialog box icon
:param QWidget parent: parent widget
:param str ok: customized ok button label
:param str cancel: customized cancel button label
:param tuple apply: (label, function) customized button label and callback
:param function apply: function taking two arguments (result, widgets)
:param str result: result serialization ('list', 'dict', 'OrderedDict',
'JSON' or 'XML')
:param str outfile: write result to the file outfile.[py|json|xml]
:param str type: layout type ('form' or 'questions')
:param bool scrollbar: vertical scrollbar
:param str background_color: color of the background
:param str widget_color: color of the widgets
:return: Serialized result (data type depends on `result` parameter)
datalist: list/tuple of (field_name, field_value)
datagroup: list/tuple of (datalist *or* datagroup, title, comment)
Tips:
* one field for each member of a datalist
* one tab for each member of a top-level datagroup
* one page (of a multipage widget, each page can be selected with a
combo box) for each member of a datagroup inside a datagroup
Supported types for field_value:
- int, float, str, unicode, bool
- colors: in Qt-compatible text form, i.e. in hex format or name (red,...)
(automatically detected from a string)
- list/tuple:
* the first element will be the selected index (or value)
* the other elements can be couples (key, value) or only values
|
entailment
|
def get_dialog(self):
    """Return the FormDialog ancestor of this widget."""
    # walk up the parent chain until a QDialog is reached
    ancestor = self.parent()
    while not isinstance(ancestor, QDialog):
        ancestor = ancestor.parent()
    return ancestor
|
Return FormDialog instance
|
entailment
|
def get(self):
    """Return the form result, or write it to ``self.outfile``.

    When ``outfile`` is set, the serialized data is written to
    ``outfile.py`` / ``outfile.json`` / ``outfile.xml`` depending on
    ``self.result`` and nothing is returned; otherwise the data is
    returned directly.
    """
    # It is important to avoid accessing the Qt C++ object as it has
    # probably already been destroyed, due to the Qt.WA_DeleteOnClose
    # attribute -- only plain Python attributes are touched here.
    if not self.outfile:
        return self.data
    # use context managers so the file is closed even on error; the
    # original called fd.close() after the elif chain, which raised
    # NameError for an unrecognized self.result and leaked the handle
    # on exceptions
    if self.result in ['list', 'dict', 'OrderedDict']:
        with open(self.outfile + '.py', 'w') as fd:
            fd.write(str(self.data))
    elif self.result == 'JSON':
        with open(self.outfile + '.json', 'w') as fd:
            data = json.loads(self.data, object_pairs_hook=OrderedDict)
            json.dump(data, fd)
    elif self.result == 'XML':
        with open(self.outfile + '.xml', 'w') as fd:
            root = ET.fromstring(self.data)
            tree = ET.ElementTree(root)
            tree.write(fd, encoding='UTF-8')
|
Return form result
|
entailment
|
def ts_merge(series):
    '''Merge timeseries into a new :class:`~.TimeSeries` instance.
    :parameter series: an iterable over :class:`~.TimeSeries`.
    '''
    # take the first series and merge the rest (still an iterator) into it
    iterator = iter(series)
    first = next(iterator)
    return first.merge(iterator)
|
Merge timeseries into a new :class:`~.TimeSeries` instance.
:parameter series: an iterable over :class:`~.TimeSeries`.
|
entailment
|
def ts_bin_op(op_name, ts1, ts2, all=True, fill=None, name=None):
    '''Entry point for any arithmetic type function performed on a timeseries
    and/or a scalar.
    op_name - name of the function to be performed
    ts1, ts2 - timeseries or scalars that the function is to performed over
    all - whether all dates should be included in the result
    fill - the value that should be used to represent "missing values"
    name - the name of the resulting time series
    '''
    op = op_get(op_name)
    # default fill comes from global settings; a callable fill is used
    # as a factory, otherwise wrap the constant in a lambda
    fill = fill if fill is not None else settings.missing_value
    if hasattr(fill, '__call__'):
        fill_fn = fill
    else:
        fill_fn = lambda: fill
    name = name or '%s(%s,%s)' % (op_name, ts1, ts2)
    # dispatch on which operands are timeseries:
    # ts-ts, ts-scalar, scalar-ts, or plain scalar-scalar
    if is_timeseries(ts1):
        ts = ts1
        if is_timeseries(ts2):
            # NOTE(review): 'all' is only forwarded on the ts-ts path --
            # confirm that is intended
            dts, data = op_ts_ts(op_name, op, ts1, ts2, all, fill_fn)
        else:
            dts, data = op_ts_scalar(op_name, op, ts1, ts2, fill_fn)
    else:
        if is_timeseries(ts2):
            ts = ts2
            dts, data = op_scalar_ts(op_name, op, ts1, ts2, fill_fn)
        else:
            # both scalars: apply the operator directly
            return op(ts1, ts2)
    # build the result on whichever operand was a timeseries
    return ts.clone(date=dts, data=data, name=name)
|
Entry point for any arithmetic type function performed on a timeseries
and/or a scalar.
op_name - name of the function to be performed
ts1, ts2 - timeseries or scalars that the function is to performed over
all - whether all dates should be included in the result
fill - the value that should be used to represent "missing values"
name - the name of the resulting time series
|
entailment
|
def getalgo(self, operation, name):
    '''Return the algorithm for *operation* named *name*'''
    # two-level lookup; each missing key raises NotAvailable with a
    # message describing which level failed
    try:
        oper = self._algorithms[operation]
    except KeyError:
        raise NotAvailable('{0} not registered.'.format(operation))
    try:
        return oper[name]
    except KeyError:
        raise NotAvailable('{0} algorithm {1} not registered.'
                           .format(operation, name))
|
Return the algorithm for *operation* named *name*
|
entailment
|
def dates(self, desc=None):
    '''Returns an iterable over ``datetime.date`` instances
    in the timeseries.'''
    # convert each internal key back to a date via dateinverse
    for key in self.keys(desc=desc):
        yield self.dateinverse(key)
|
Returns an iterable over ``datetime.date`` instances
in the timeseries.
|
entailment
|
def items(self, desc=None, start_value=None, shift_by=None):
    '''Returns a python ``generator`` which can be used to iterate over
    :func:`dynts.TimeSeries.dates` and :func:`dynts.TimeSeries.values`
    returning a two dimensional
    tuple ``(date,value)`` in each iteration.
    Similar to the python dictionary items
    function.
    :parameter desc: if ``True`` the iteration starts from the more
        recent data and proceeds backwards.
    :parameter shift_by: optional parallel shift in values.
    :parameter start_value: optional start value of timeseries.
    '''
    if self:
        if shift_by is None and start_value is not None:
            # derive a per-column shift so the first available (non-NaN)
            # value of each series equals start_value; scan forward until
            # every column has a valid shift
            for cross in self.values():
                missings = 0
                if shift_by is None:
                    shift_by = []
                    for v in cross:
                        shift_by.append(start_value - v)
                        # v != v is the NaN test
                        if v != v:
                            missings += 1
                else:
                    # fill in shifts that are still NaN from earlier rows
                    for j in range(len(shift_by)):
                        s = shift_by[j]
                        v = cross[j]
                        if s != s:
                            if v == v:
                                shift_by[j] = start_value - v
                            else:
                                missings += 1
                if not missings:
                    break
        if shift_by:
            # yield shifted values (v is a numpy row; list adds elementwise)
            for d, v in zip(self.dates(desc=desc), self.values(desc=desc)):
                yield d, v + shift_by
        else:
            for d, v in zip(self.dates(desc=desc), self.values(desc=desc)):
                yield d, v
|
Returns a python ``generator`` which can be used to iterate over
:func:`dynts.TimeSeries.dates` and :func:`dynts.TimeSeries.values`
returning a two dimensional
tuple ``(date,value)`` in each iteration.
Similar to the python dictionary items
function.
:parameter desc: if ``True`` the iteration starts from the more
recent data and proceeds backwards.
:parameter shift_by: optional parallel shift in values.
:parameter start_value: optional start value of timeseries.
|
entailment
|
def series(self):
    '''Generator of single series data (no dates are included).

    Yields one column of :meth:`values` per series; yields nothing
    when the timeseries is empty.
    '''
    data = self.values()
    if len(data):
        for c in range(self.count()):
            yield data[:, c]
    # PEP 479: the original ``raise StopIteration`` inside a generator
    # becomes a RuntimeError on Python 3.7+; falling off the end (or a
    # bare return) is the correct way to finish the generator.
|
Generator of single series data (no dates are included).
|
entailment
|
def named_series(self, ordering=None):
    '''Generator of tuples with name and serie data.'''
    all_series = self.series()
    if not ordering:
        # no ordering requested: pair names and series as they come
        for pair in zip(self.names(), all_series):
            yield pair
        return
    # materialize the series so they can be indexed out of order
    all_series = list(all_series)
    remaining = dict((n, idx) for idx, n in enumerate(self.names()))
    # first the explicitly ordered names, popping them as emitted
    for name in ordering:
        if name in remaining:
            yield name, all_series[remaining.pop(name)]
    # then whatever was not mentioned, in original order
    for name in remaining:
        yield name, all_series[remaining[name]]
|
Generator of tuples with name and serie data.
|
entailment
|
def clone(self, date=None, data=None, name=None):
    '''Create a clone of timeseries.

    :parameter date: optional replacement dates; when omitted the raw
        internal keys are reused without conversion.
    :parameter data: optional replacement values (defaults to the
        current values).
    :parameter name: optional name for the clone (defaults to this
        series' name).
    '''
    name = name or self.name
    data = data if data is not None else self.values()
    ts = self.__class__(name)
    # preserve the dtype of the source series
    ts._dtype = self._dtype
    if date is None:
        # dates not provided
        ts.make(self.keys(), data, raw=True)
    else:
        ts.make(date, data)
    return ts
|
Create a clone of timeseries
|
entailment
|
def reduce(self, size, method='simple', **kwargs):
    '''Trim :class:`Timeseries` to a new *size* using the algorithm
    *method*. If *size* is greater or equal than len(self) it does nothing.'''
    # nothing to do when the series already fits
    if len(self) <= size:
        return self
    algorithm = self.getalgo('reduce', method)
    return algorithm(self, size, **kwargs)
|
Trim :class:`Timeseries` to a new *size* using the algorithm
*method*. If *size* is greater or equal than len(self) it does nothing.
|
entailment
|
def clean(self, algorithm=None):
    '''Create a new :class:`TimeSeries` with missing data removed or
    replaced by the *algorithm* provided.

    :parameter algorithm: optional callable
        ``algorithm(dend, vend, d, v, missings)`` yielding ``(date, value)``
        pairs used to fill a run of missing observations between the last
        valid point ``(dend, vend)`` and the next valid point ``(d, v)``
        (``None, None`` for a trailing gap).
    '''
    # all dates
    original_dates = list(self.dates())
    series = []
    all_dates = set()
    for serie in self.series():
        dstart, dend, vend = None, None, None
        new_dates = []
        new_values = []
        missings = []
        values = {}
        for d, v in zip(original_dates, serie):
            if v == v:  # v is not NaN
                if dstart is None:
                    dstart = d
                if missings:
                    # fill the accumulated gap via the algorithm
                    for dx, vx in algorithm(dend, vend, d, v, missings):
                        new_dates.append(dx)
                        new_values.append(vx)
                    missings = []
                dend = d
                vend = v
                values[d] = v
            elif dstart is not None and algorithm:
                # BUGFIX: original read ``missings.append((dt, v))`` but
                # ``dt`` is undefined here (NameError); the current date
                # of this observation is ``d``.
                missings.append((d, v))
        if missings:
            # trailing gap: no right-hand observation is available
            for dx, vx in algorithm(dend, vend, None, None, missings):
                new_dates.append(dx)
                new_values.append(vx)
                dend = dx
        series.append((dstart, dend, values))
        all_dates = all_dates.union(values)
    cdate = []
    cdata = []
    for dt in sorted(all_dates):
        cross = []
        for start, end, values in series:
            if start is None or (dt >= start and dt <= end):
                value = values.get(dt)
                if value is None:
                    # a gap inside this series' range: drop the whole row
                    cross = None
                    break
            else:
                # outside this series' observed range: pad with NaN
                value = nan
            cross.append(value)
        if cross:
            cdate.append(dt)
            cdata.append(cross)
    return self.clone(date=cdate, data=cdata)
|
Create a new :class:`TimeSeries` with missing data removed or
replaced by the *algorithm* provided
|
entailment
|
def isconsistent(self):
    '''Check if the timeseries is consistent'''
    # consistent means strictly increasing dates across every lagged pair
    return all(dt1 > dt0 for dt1, dt0 in laggeddates(self))
|
Check if the timeseries is consistent
|
entailment
|
def var(self, ddof=0):
    '''Calculate variance of timeseries. Return a vector containing
    the variances of each series in the timeseries.
    :parameter ddof: delta degree of freedom, the divisor used in the calculation
        is given by ``N - ddof`` where ``N`` represents the length
        of timeseries. Default ``0``.
    .. math::
        var = \\frac{\\sum_i^N (x - \\mu)^2}{N-ddof}

    Returns ``None`` when the timeseries is empty.
    '''
    N = len(self)
    if N:
        # assumes values() is a numpy array (elementwise v*v) with one
        # column per series -- TODO confirm shape against self.values()
        v = self.values()
        mu = sum(v)
        # one-pass form: (sum(x^2) - (sum x)^2 / N) / (N - ddof)
        return (sum(v*v) - mu*mu/N)/(N-ddof)
    else:
        return None
|
Calculate variance of timeseries. Return a vector containing
the variances of each series in the timeseries.
:parameter ddof: delta degree of freedom, the divisor used in the calculation
is given by ``N - ddof`` where ``N`` represents the length
of timeseries. Default ``0``.
.. math::
var = \\frac{\\sum_i^N (x - \\mu)^2}{N-ddof}
|
entailment
|
def sd(self):
    '''Calculate standard deviation of timeseries.

    Returns the element-wise square root of :meth:`var`, or ``None``
    when the timeseries is empty (``var`` returns ``None`` then).
    '''
    v = self.var()
    # var() returns None for an empty series; the original then crashed
    # on len(None) with a TypeError
    if v is None or not len(v):
        return None
    return np.sqrt(v)
|
Calculate standard deviation of timeseries
|
entailment
|
def apply(self, func, window=None, bycolumn=True, align=None, **kwargs):
    '''Apply function ``func`` to the timeseries.
    :keyword func: string indicating function to apply
    :keyword window: Rolling window, If not defined ``func`` is applied on
        the whole dataset. Default ``None``.
    :keyword bycolumn: If ``True``, function ``func`` is applied on
        each column separately. Default ``True``.
    :keyword align: string specifying whether the index of the result
        should be ``left`` or ``right`` (default) or ``centered``
        aligned compared to the rolling window of observations.
    :keyword kwargs: dictionary of auxiliary parameters used by
        function ``func``.

    :raises OutOfBound: when ``window`` is not in ``1..len(self)``.
    '''
    N = len(self)
    # a missing window means "apply over the whole series"
    window = window or N
    self.precondition(window <= N and window > 0, OutOfBound)
    return self._rollapply(func,
                           window=window,
                           align=align or self.default_align,
                           bycolumn=bycolumn,
                           **kwargs)
|
Apply function ``func`` to the timeseries.
:keyword func: string indicating function to apply
:keyword window: Rolling window, If not defined ``func`` is applied on
the whole dataset. Default ``None``.
:keyword bycolumn: If ``True``, function ``func`` is applied on
each column separately. Default ``True``.
:keyword align: string specifying whether the index of the result
should be ``left`` or ``right`` (default) or ``centered``
aligned compared to the rolling window of observations.
:keyword kwargs: dictionary of auxiliary parameters used by
function ``func``.
|
entailment
|
def rollapply(self, func, window=20, **kwargs):
    '''A generic :ref:`rolling function <rolling-function>`
    for function *func*.
    Same construct as :meth:`dynts.TimeSeries.apply` but with default
    ``window`` set to ``20``.
    '''
    # delegate to apply, forwarding the (possibly defaulted) window
    kwargs['window'] = window
    return self.apply(func, **kwargs)
|
A generic :ref:`rolling function <rolling-function>`
for function *func*.
Same construct as :meth:`dynts.TimeSeries.apply` but with default
``window`` set to ``20``.
|
entailment
|
def rollsd(self, scale=1, **kwargs):
    '''A :ref:`rolling function <rolling-function>` for
    standard-deviation values.
    Equivalent to ``self.rollapply('sd', **kwargs)``, optionally
    multiplied in place by *scale*.
    '''
    result = self.rollapply('sd', **kwargs)
    if scale != 1:
        result *= scale
    return result
|
A :ref:`rolling function <rolling-function>` for
standard-deviation values:
Same as::
self.rollapply('sd', **kwargs)
|
entailment
|
def unwind(self, values, backend, **kwargs):
    '''Unwind expression by applying *values* to the abstract nodes.
    The ``kwargs`` dictionary can contain data which can be used
    to override values
    '''
    # memoize: compute once on first call, reuse the cached value after
    try:
        return self._unwind_value
    except AttributeError:
        self._unwind_value = self._unwind(values, backend, **kwargs)
        return self._unwind_value
|
Unwind expression by applying *values* to the abstract nodes.
The ``kwargs`` dictionary can contain data which can be used
to override values
|
entailment
|
def removeduplicates(self, entries=None):
    '''
    Loop over children and remove duplicate entries.

    :parameter entries: optional mapping from a node's string
        representation to its canonical node; shared across the
        recursion so duplicates are detected globally.
    @return - a list of removed entries
    '''
    removed = []
    # PEP 8: identity comparison for None (was ``entries == None``)
    if entries is None:
        entries = {}
    new_children = []
    for child in self.children:
        key = str(child)
        canonical = entries.get(key)
        if canonical:
            # duplicate: keep the canonical node, record the removal
            new_children.append(canonical)
            removed.append(child)
        else:
            # recurse first so nested duplicates are collected too
            dups = child.removeduplicates(entries)
            if dups:
                removed.extend(dups)
            entries[key] = child
            new_children.append(child)
    self.children = new_children
    return removed
|
Loop over children and remove duplicate entries.
@return - a list of removed entries
|
entailment
|
def html2md(html_string):
    """
    Convert a string or html file to a markdown table string.

    Parameters
    ----------
    html_string : str
        Either the html string, or the filepath to the html

    Returns
    -------
    str
        The html table converted to a Markdown table

    Notes
    -----
    This function requires BeautifulSoup_ to work.

    Example
    -------
    >>> html_text = '''
    ... <table>
    ...     <tr>
    ...         <th>
    ...             Header 1
    ...         </th>
    ...         <th>
    ...             Header 2
    ...         </th>
    ...         <th>
    ...             Header 3
    ...         </th>
    ...     <tr>
    ...         <td>
    ...             <p>This is a paragraph</p>
    ...         </td>
    ...         <td>
    ...             Just text
    ...         </td>
    ...         <td>
    ...             Hot dog
    ...         </td>
    ...     </tr>
    ... </table>
    ... '''
    >>> import dashtable
    >>> print(dashtable.html2md(html_text))
    | Header 1            | Header 2  | Header 3 |
    |---------------------|-----------|----------|
    | This is a paragraph | Just text | Hot dog  |

    .. _BeautifulSoup: https://www.crummy.com/software/BeautifulSoup/
    """
    if os.path.isfile(html_string):
        # The argument is a path: load the markup from disk.  A context
        # manager guarantees the handle is closed even on error (the
        # previous open/readlines/close sequence leaked on exceptions).
        with open(html_string, 'r', encoding='utf-8') as file:
            html_string = file.read()
    # Span and header information is not needed for the Markdown output.
    table_data, spans, use_headers = html2data(html_string)
    if table_data == '':
        return ''
    return data2md(table_data)
|
Convert a string or html file to a markdown table string.
Parameters
----------
html_string : str
Either the html string, or the filepath to the html
Returns
-------
str
The html table converted to a Markdown table
Notes
-----
This function requires BeautifulSoup_ to work.
Example
-------
>>> html_text = '''
... <table>
... <tr>
... <th>
... Header 1
... </th>
... <th>
... Header 2
... </th>
... <th>
... Header 3
... </th>
... <tr>
... <td>
... <p>This is a paragraph</p>
... </td>
... <td>
... Just text
... </td>
... <td>
... Hot dog
... </td>
... </tr>
... </table>
... '''
>>> import dashtable
>>> print(dashtable.html2md(html_text))
| Header 1 | Header 2 | Header 3 |
|---------------------|-----------|----------|
| This is a paragraph | Just text | Hot dog |
.. _BeautifulSoup: https://www.crummy.com/software/BeautifulSoup/
|
entailment
|
def table_cells_2_spans(table, spans):
    """
    Convert the table to a list of spans, for consistency.

    This method combines the table data with the span data into a
    single, more consistent type.  Any normal cell becomes a span of
    just 1 column and 1 row.

    Parameters
    ----------
    table : list of lists of str
    spans : list of lists of int

    Returns
    -------
    table : list of lists of lists of int
        As you can imagine, this is pretty confusing for a human, which
        is why data2rst accepts table data and span data separately.
    """
    # Every cell not already covered by a span becomes its own 1x1 span.
    singles = [
        [[row, column]]
        for row in range(len(table))
        for column in range(len(table[row]))
        if not get_span(spans, row, column)
    ]
    singles.extend(spans)
    return sorted(singles)
|
Converts the table to a list of spans, for consistency.
This method combines the table data with the span data into a
single, more consistent type. Any normal cell will become a span
of just 1 column and 1 row.
Parameters
----------
table : list of lists of str
spans : list of lists of int
Returns
-------
table : list of lists of lists of int
As you can imagine, this is pretty confusing for a human which
is why data2rst accepts table data and span data separately.
|
entailment
|
def keys(self, desc=None):
    '''Return the index as a numpy array; ``asarray`` does not copy data.'''
    index = asarray(self.rc('index'))
    # Keep the loose ``== True`` comparison for behaviour parity: any
    # value equal to True (e.g. 1) triggers the reversed view.
    return reversed(index) if desc == True else index
|
numpy asarray does not copy data
|
entailment
|
def values(self, desc=None):
    '''Return the data as a numpy array; ``asarray`` does not copy data.'''
    if not self._ts:
        # No underlying data: return an empty (0, 0) array.
        return ndarray([0, 0])
    data = asarray(self._ts)
    # Loose ``== True`` kept for behaviour parity with the index variant.
    return reversed(data) if desc == True else data
|
numpy asarray does not copy data
|
entailment
|
def rcts(self, command, *args, **kwargs):
    '''Apply a rolling R function *command* to the time series and
    return the result wrapped in a new instance of the same class.'''
    # Constructor-only keywords are popped so that the remainder of
    # ``kwargs`` is forwarded to the underlying ``rc`` call.
    name = kwargs.pop('name', '')
    date = kwargs.pop('date', None)
    data = kwargs.pop('data', None)
    kwargs.pop('bycolumn', None)
    result = self.__class__(name=name, date=date, data=data)
    result._ts = self.rc(command, *args, **kwargs)
    return result
|
General function for applying a rolling R function to a time series
|
entailment
|
def get_html_column_count(html_string):
    """
    Get the number of columns in an html table.

    Parameters
    ----------
    html_string : str

    Returns
    -------
    int
        The number of columns in the table (0 when no table/rows are
        found; ``None`` when BeautifulSoup is not installed).
    """
    try:
        from bs4 import BeautifulSoup
    except ImportError:
        print("ERROR: You must have BeautifulSoup to use html2data")
        return
    soup = BeautifulSoup(html_string, 'html.parser')
    table = soup.find('table')
    if not table:
        return 0
    trs = table.findAll('tr')
    if len(trs) == 0:
        return 0
    column_counts = []
    # Iterate rows directly instead of indexing by range(len(...)).
    for i, tr in enumerate(trs):
        # The first row may use header cells; fall back to <td> if no
        # <th> cells are present.
        tds = tr.findAll('th') if i == 0 else []
        if not tds:
            tds = tr.findAll('td')
        count = 0
        for td in tds:
            # A spanned cell occupies ``colspan`` columns.
            count += int(td['colspan']) if td.has_attr('colspan') else 1
        column_counts.append(count)
    return max(column_counts)
|
Gets the number of columns in an html table.
Parameters
---------
html_string : str
Returns
-------
int
The number of columns in the table
|
entailment
|
def add_cushions(table):
    """
    Add a space to the start and end of each string in a list of lists.

    Parameters
    ----------
    table : list of lists of str
        A table of rows of strings. For example::

            [
                ['dog', 'cat', 'bicycle'],
                ['mouse', trumpet', '']
            ]

    Returns
    -------
    table : list of lists of str

    Note
    ----
    Each cell in an rst grid table should have a cushion of at least
    one space on each side of the string it contains. For example::

        +-----+-------+
        | foo | bar   |
        +-----+-------+
        | cat | steve |
        +-----+-------+

    is better than::

        +-----+---+
        |foo| bar |
        +-----+---+
        |cat|steve|
        +-----+---+
    """
    for row_cells in table:
        for index, cell in enumerate(row_cells):
            # Pad every non-empty line with one space on each side;
            # empty lines are left untouched.
            padded = [
                '' if line == '' else ' ' + line.rstrip() + ' '
                for line in cell.split('\n')
            ]
            row_cells[index] = '\n'.join(padded)
    return table
|
Add space to start and end of each string in a list of lists
Parameters
----------
table : list of lists of str
A table of rows of strings. For example::
[
['dog', 'cat', 'bicycle'],
['mouse', trumpet', '']
]
Returns
-------
table : list of lists of str
Note
----
Each cell in an rst grid table should have a cushion of at least
one space on each side of the string it contains. For example::
+-----+-------+
| foo | bar |
+-----+-------+
| cat | steve |
+-----+-------+
is better than::
+-----+---+
|foo| bar |
+-----+---+
|cat|steve|
+-----+---+
|
entailment
|
def rollsingle(self, func, window=20, name=None, fallback=False,
               align='right', **kwargs):
    '''Efficient rolling window calculation for min/max type functions.'''
    rname = 'roll_{0}'.format(func)
    # Resolve the rolling implementation, preferring the fast library
    # version unless a fallback is explicitly requested or required.
    if fallback:
        rfunc = getattr(lib.fallback, rname)
    else:
        rfunc = getattr(lib, rname, None)
        if not rfunc:
            rfunc = getattr(lib.fallback, rname)
    rolled = [list(rfunc(serie, window)) for serie in self.series()]
    data = np.array(rolled)
    name = name or self.makename(func, window=window)
    dates = asarray(self.dates())
    desc = settings.desc
    # ``(align == 'right' and not desc) or desc`` simplifies to
    # ``desc or align == 'right'`` by boolean absorption.
    if desc or align == 'right':
        dates = dates[window - 1:]
    else:
        dates = dates[:-window + 1]
    return self.clone(dates, data.transpose(), name=name)
|
Efficient rolling window calculation for min, max type functions
|
entailment
|
def find_ge(self, dt):
    '''Return the index of the leftmost value greater than or equal to *dt*.

    Building block of all searches.  If *dt* is greater than
    :func:`dynts.TimeSeries.end`, a
    :class:`dynts.exceptions.RightOutOfBound` exception is raised.
    *dt* must be a python ``datetime.date`` instance.
    '''
    position = bisect_left(self.dates, dt)
    if position == len(self.dates):
        raise RightOutOfBound
    return position
|
Building block of all searches. Find the index
corresponding to the leftmost value greater or equal to *dt*.
If *dt* is greater than the
:func:`dynts.TimeSeries.end` a :class:`dynts.exceptions.RightOutOfBound`
exception will raise.
*dt* must be a python datetime.date instance.
|
entailment
|
def find_le(self, dt):
    '''Return the index of the rightmost value less than or equal to *dt*.

    If *dt* is less than :func:`dynts.TimeSeries.end`, a
    :class:`dynts.exceptions.LeftOutOfBound` exception is raised.
    *dt* must be a python ``datetime.date`` instance.
    '''
    position = bisect_right(self.dates, dt)
    if not position:
        raise LeftOutOfBound
    return position - 1
|
Find the index corresponding to the rightmost
value less than or equal to *dt*.
If *dt* is less than :func:`dynts.TimeSeries.end`
a :class:`dynts.exceptions.LeftOutOfBound`
exception will raise.
*dt* must be a python datetime.date instance.
|
entailment
|
def upgrade():
    """Update database: create the ``transaction`` table and, where the
    dialect supports sequences, its id sequence."""
    columns = [
        sa.Column('issued_at', sa.DateTime(), nullable=True),
        sa.Column('id', sa.BigInteger(), nullable=False),
        sa.Column('remote_addr', sa.String(length=50), nullable=True),
    ]
    op.create_table('transaction', *columns)
    op.create_primary_key('pk_transaction', 'transaction', ['id'])
    # Sequences exist only on some dialects (e.g. PostgreSQL, Oracle).
    dialect = op._proxy.migration_context.dialect
    if dialect.supports_sequences:
        op.execute(CreateSequence(Sequence('transaction_id_seq')))
|
Update database.
|
entailment
|
def downgrade():
    """Downgrade database: drop the ``transaction`` table and, where the
    dialect supports sequences, its id sequence."""
    op.drop_table('transaction')
    dialect = op._proxy.migration_context.dialect
    if dialect.supports_sequences:
        op.execute(DropSequence(Sequence('transaction_id_seq')))
|
Downgrade database.
|
entailment
|
def t_NUMBER(self, t):
    r'([0-9]+\.?[0-9]*|\.[0-9]+)([eE](\+|-)?[0-9]+)?'
    # NOTE: the docstring above is the ply token regex -- do not edit it.
    try:
        text = t.value
        number = float(text)
        as_int = int(number)
        # Prefer an int when the value is integral; keep the source text.
        t.value = (as_int if as_int == number else number, text)
    except ValueError:
        print("Number %s is too large!" % t.value)
        t.value = 0
    return t
|
r'([0-9]+\.?[0-9]*|\.[0-9]+)([eE](\+|-)?[0-9]+)?
|
entailment
|
def t_ID(self, t):
    r'`[^`]*`|[a-zA-Z_][a-zA-Z_0-9:@]*'
    # NOTE: the docstring above is the ply token regex -- do not edit it.
    reserved = self.oper.get(t.value)  # check for reserved words
    if reserved is None:
        upper = t.value.upper()
        if upper == 'FALSE':
            t.type, t.value = 'BOOL', False
        elif upper == 'TRUE':
            t.type, t.value = 'BOOL', True
        else:
            t.type = 'ID'
    else:
        t.type = 'FUNCTION'
        t.value = reserved
    return t
|
r'`[^`]*`|[a-zA-Z_][a-zA-Z_0-9:@]*
|
entailment
|
def read_newick(newick, root_node=None, format=0):
    """
    Read a newick tree from either a string or a file and return
    an ETE tree structure.

    A previously existent node object can be passed as the root of the
    tree, which means that all its new children will belong to the same
    class as the root (this allows working with custom TreeNode objects).
    You can also take advantage of this behaviour to concatenate
    several tree structures.
    """
    # Check newick type as a string or filepath; Toytree parses urls to str.
    if not isinstance(newick, six.string_types):
        raise NewickError("'newick' argument must be either a filename or a newick string.")
    if os.path.exists(newick):
        if newick.endswith('.gz'):
            import gzip
            # BUGFIX: open in text mode ('rt'); the default 'rb' yielded
            # bytes, which broke the str comparisons below on Python 3.
            with gzip.open(newick, 'rt') as handle:
                nw = handle.read()
        else:
            # BUGFIX: mode 'rU' was removed in Python 3.11; plain 'r'
            # already gives universal-newline behaviour on Python 3.
            # Context managers ensure the handles are closed.
            with open(newick, 'r') as handle:
                nw = handle.read()
    else:
        nw = newick
    # Get re matcher for testing newick formats.
    matcher = compile_matchers(formatcode=format)
    nw = nw.strip()
    if not nw.startswith('(') and nw.endswith(';'):
        return _read_node_data(nw[:-1], root_node, "single", matcher, format)
    if not nw.startswith('(') or not nw.endswith(';'):
        raise NewickError('Unexisting tree file or Malformed newick tree structure.')
    return _read_newick_from_string(nw, root_node, matcher, format)
|
Reads a newick tree from either a string or a file, and returns
an ETE tree structure.
A previously existent node object can be passed as the root of the
tree, which means that all its new children will belong to the same
class as the root (This allows to work with custom TreeNode objects).
You can also take advantage from this behaviour to concatenate
several tree structures.
|
entailment
|
def _read_newick_from_string(nw, root_node, matcher, formatcode):
    """Read a newick string in the New Hampshire format.

    Splits the string on '(' and walks the resulting chunks, creating a
    child under the current parent for each opening parenthesis and
    moving back up one level for each closing one.  Node data parsing is
    delegated to ``_read_node_data`` using the pre-compiled *matcher*
    for *formatcode*.  Returns *root_node* with the tree attached.
    """
    # A well-formed newick string has balanced parentheses.
    if nw.count('(') != nw.count(')'):
        raise NewickError('Parentheses do not match. Broken tree structure?')
    # white spaces and separators are removed
    nw = re.sub("[\n\r\t]+", "", nw)
    current_parent = None
    # Each chunk represents the content of a parent node, and it could contain
    # leaves and closing parentheses.
    # We may find:
    # leaf, ..., leaf,
    # leaf, ..., leaf))),
    # leaf)), leaf, leaf))
    # leaf))
    # ) only if formatcode == 100
    for chunk in nw.split("(")[1:]:
        # If no node has been created so far, this is the root, so use the node.
        current_parent = root_node if current_parent is None else current_parent.add_child()
        subchunks = [ch.strip() for ch in chunk.split(",")]
        # We should expect that the chunk finished with a comma (if next chunk
        # is an internal sister node) or a subchunk containing closing parenthesis until the end of the tree.
        #[leaf, leaf, '']
        #[leaf, leaf, ')))', leaf, leaf, '']
        #[leaf, leaf, ')))', leaf, leaf, '']
        #[leaf, leaf, ')))', leaf), leaf, 'leaf);']
        if subchunks[-1] != '' and not subchunks[-1].endswith(';'):
            raise NewickError('Broken newick structure at: %s' %chunk)
        # lets process the subchunks. Every closing parenthesis will close a
        # node and go up one level.
        for i, leaf in enumerate(subchunks):
            if leaf.strip() == '' and i == len(subchunks) - 1:
                continue # "blah blah ,( blah blah"
            closing_nodes = leaf.split(")")
            # first part after splitting by ) always contain leaf info
            _read_node_data(closing_nodes[0], current_parent, "leaf", matcher, formatcode)
            # next contain closing nodes and data about the internal nodes.
            if len(closing_nodes)>1:
                for closing_internal in closing_nodes[1:]:
                    closing_internal = closing_internal.rstrip(";")
                    # read internal node data and go up one level
                    _read_node_data(closing_internal, current_parent, "internal", matcher, formatcode)
                    current_parent = current_parent.up
    return root_node
|
Reads a newick string in the New Hampshire format.
|
entailment
|
def _parse_extra_features(node, NHX_string):
"""
Reads node's extra data form its NHX string. NHX uses this
format: [&&NHX:prop1=value1:prop2=value2]
"""
NHX_string = NHX_string.replace("[&&NHX:", "")
NHX_string = NHX_string.replace("]", "")
for field in NHX_string.split(":"):
try:
pname, pvalue = field.split("=")
except ValueError as e:
raise NewickError('Invalid NHX format %s' %field)
node.add_feature(pname, pvalue)
|
Reads node's extra data from its NHX string. NHX uses this
format: [&&NHX:prop1=value1:prop2=value2]
|
entailment
|
def compile_matchers(formatcode):
    """
    Build the compiled regular expressions used to test newick node
    strings against format *formatcode*, one matcher per node type.
    """
    matchers = {}
    for node_type in ["leaf", "single", "internal"]:
        # Leaf/single nodes use the first two format slots; internal
        # nodes use the last two.
        offset = 0 if node_type in ("leaf", "single") else 2
        container1, converterFn1, flexible1 = NW_FORMAT[formatcode][offset][:3]
        container2, converterFn2, flexible2 = NW_FORMAT[formatcode][offset + 1][:3]
        if converterFn1 == str:
            FIRST_MATCH = "(" + _NAME_RE + ")"
        elif converterFn1 == float:
            FIRST_MATCH = "(" + _FLOAT_RE + ")"
        elif converterFn1 is None:
            FIRST_MATCH = '()'
        if converterFn2 == str:
            SECOND_MATCH = "(:" + _NAME_RE + ")"
        elif converterFn2 == float:
            SECOND_MATCH = "(:" + _FLOAT_RE + ")"
        elif converterFn2 is None:
            SECOND_MATCH = '()'
        if flexible1 and node_type != 'leaf':
            FIRST_MATCH += "?"
        if flexible2:
            SECOND_MATCH += "?"
        pattern = '^\s*%s\s*%s\s*(%s)?\s*$' % (FIRST_MATCH, SECOND_MATCH, _NHX_RE)
        matchers[node_type] = [container1, container2, converterFn1,
                               converterFn2, re.compile(pattern)]
    return matchers
|
Tests newick string against format types? and makes a re.compile
|
entailment
|
def _read_node_data(subnw, current_node, node_type, matcher, formatcode):
    """
    Read a leaf/single/internal node from a subpart of the original
    newick tree and attach the parsed features to it.

    *matcher[node_type]* supplies the containers, converter functions
    and compiled regex produced by ``compile_matchers``.  Raises
    ``NewickError`` on malformed input.
    """
    # Leaf data creates a new child; "single" and "internal" annotate
    # the current node in place.
    if node_type == "leaf" or node_type == "single":
        if node_type == "leaf":
            node = current_node.add_child()
        else:
            node = current_node
    else:
        node = current_node
    subnw = subnw.strip()
    # An empty leaf is only tolerated by the permissive format 100.
    if not subnw and node_type == 'leaf' and formatcode != 100:
        raise NewickError('Empty leaf node found')
    elif not subnw:
        return
    container1, container2, converterFn1, converterFn2, compiled_matcher = matcher[node_type]
    data = re.match(compiled_matcher, subnw)
    if data:
        data = data.groups()
        # This prevents ignoring errors even in flexible nodes:
        if subnw and data[0] is None and data[1] is None and data[2] is None:
            raise NewickError("Unexpected newick format '%s'" %subnw)
        # First capture: name or support, converted and stored.
        if data[0] is not None and data[0] != '':
            node.add_feature(container1, converterFn1(data[0].strip()))
        # Second capture: ':'-prefixed value; the leading ':' is dropped.
        if data[1] is not None and data[1] != '':
            node.add_feature(container2, converterFn2(data[1][1:].strip()))
        # Third capture: optional NHX block with extra features.
        if data[2] is not None \
           and data[2].startswith("[&&NHX"):
            _parse_extra_features(node, data[2])
    else:
        raise NewickError("Unexpected newick format '%s' " %subnw[0:50])
    return
|
Reads a leaf node from a subpart of the original newicktree
|
entailment
|
def write_newick(rootnode,
                 features=None,
                 format=1,
                 format_root_node=True,
                 is_leaf_fn=None,
                 dist_formatter=None,
                 support_formatter=None,
                 name_formatter=None):
    """
    Iteratively export a tree structure and return its NHX
    representation.

    :param rootnode: root of the tree to serialise.
    :param features: optional feature names to embed as NHX blocks.
    :param format: newick format code passed to ``format_node``.
    :param format_root_node: whether the root node itself is formatted.
    :param is_leaf_fn: optional predicate overriding the default
        "no children" leaf test.
    """
    newick = []
    # A node is a leaf when it has no children, unless a custom
    # predicate is supplied.
    leaf = is_leaf_fn if is_leaf_fn else lambda n: not bool(n.children)
    for postorder, node in rootnode.iter_prepostorder(is_leaf_fn=is_leaf_fn):
        if postorder:
            # Closing an internal node: emit ')' plus its data.
            newick.append(")")
            if node.up is not None or format_root_node:
                newick.append(format_node(node, "internal", format,
                                          dist_formatter=dist_formatter,
                                          support_formatter=support_formatter,
                                          name_formatter=name_formatter))
                newick.append(_get_features_string(node, features))
        else:
            # Separate siblings after the first one.
            if node is not rootnode and node != node.up.children[0]:
                newick.append(",")
            if leaf(node):
                # NOTE: a sanitised ``safe_name`` used to be computed here
                # via re.sub but was never used; removed as dead code.
                newick.append(format_node(node, "leaf", format,
                                          dist_formatter=dist_formatter,
                                          support_formatter=support_formatter,
                                          name_formatter=name_formatter))
                newick.append(_get_features_string(node, features))
            else:
                newick.append("(")
    newick.append(";")
    return ''.join(newick)
|
Iteratively export a tree structure and returns its NHX
representation.
|
entailment
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.