sentence1
stringlengths 52
3.87M
| sentence2
stringlengths 1
47.2k
| label
stringclasses 1
value |
|---|---|---|
def do_sqlite_connect(dbapi_connection, connection_record):
    """Turn on foreign key enforcement for a new SQLite connection.

    SQLite leaves foreign key checking disabled per connection, so it
    must be switched on explicitly each time a DBAPI connection is
    created.  See the "Foreign key support" section on
    https://docs.sqlalchemy.org/en/latest/dialects/sqlite.html#foreign-key-support
    """
    cur = dbapi_connection.cursor()
    cur.execute('PRAGMA foreign_keys=ON')
    cur.close()
|
Ensure SQLite checks foreign key constraints.
For further details see "Foreign key support" sections on
https://docs.sqlalchemy.org/en/latest/dialects/sqlite.html#foreign-key-support
|
entailment
|
def apply_driver_hacks(self, app, info, options):
    """Call before engine creation.

    Applies driver-specific tweaks on top of the parent class' hacks:
    for SQLite it disables pysqlite's implicit transaction handling and
    registers global engine event listeners; for all three supported
    drivers it registers an adapter so ``LocalProxy`` objects can be
    passed as SQL parameters.

    :param app: the application instance.
    :param info: parsed database URL; ``info.drivername`` selects the hack.
    :param options: engine creation options; mutated in place.
    """
    # Don't forget to apply hacks defined on parent object.
    super(SQLAlchemy, self).apply_driver_hacks(app, info, options)
    if info.drivername == 'sqlite':
        connect_args = options.setdefault('connect_args', {})
        if 'isolation_level' not in connect_args:
            # disable pysqlite's emitting of the BEGIN statement entirely.
            # also stops it from emitting COMMIT before any DDL.
            connect_args['isolation_level'] = None
        # Guard with event.contains so repeated engine creation does not
        # stack duplicate listeners on the global Engine class.
        if not event.contains(Engine, 'connect', do_sqlite_connect):
            event.listen(Engine, 'connect', do_sqlite_connect)
        if not event.contains(Engine, 'begin', do_sqlite_begin):
            event.listen(Engine, 'begin', do_sqlite_begin)
        from sqlite3 import register_adapter

        def adapt_proxy(proxy):
            """Get current object and try to adapt it again."""
            return proxy._get_current_object()

        register_adapter(LocalProxy, adapt_proxy)
    elif info.drivername == 'postgresql+psycopg2':  # pragma: no cover
        from psycopg2.extensions import adapt, register_adapter

        def adapt_proxy(proxy):
            """Get current object and try to adapt it again."""
            return adapt(proxy._get_current_object())

        register_adapter(LocalProxy, adapt_proxy)
    elif info.drivername == 'mysql+pymysql':  # pragma: no cover
        from pymysql import converters

        def escape_local_proxy(val, mapping):
            """Get current object and try to adapt it again."""
            # NOTE(review): closes over self.engine, so the encoding is
            # resolved lazily at escape time, not at registration time.
            return converters.escape_item(
                val._get_current_object(),
                self.engine.dialect.encoding,
                mapping=mapping,
            )

        converters.conversions[LocalProxy] = escape_local_proxy
        converters.encoders[LocalProxy] = escape_local_proxy
|
Call before engine creation.
|
entailment
|
def create(verbose):
    """Create every table known to the metadata, then stamp alembic."""
    click.secho('Creating all tables!', fg='yellow', bold=True)
    with click.progressbar(_db.metadata.sorted_tables) as progress:
        for tbl in progress:
            if verbose:
                click.echo(' Creating table {0}'.format(tbl))
            tbl.create(bind=_db.engine, checkfirst=True)
    create_alembic_version_table()
    click.secho('Created all tables!', fg='green')
|
Create tables.
|
entailment
|
def drop(verbose):
    """Drop every known table, in reverse dependency order."""
    click.secho('Dropping all tables!', fg='red', bold=True)
    with click.progressbar(reversed(_db.metadata.sorted_tables)) as progress:
        for tbl in progress:
            if verbose:
                click.echo(' Dropping table {0}'.format(tbl))
            tbl.drop(bind=_db.engine, checkfirst=True)
    drop_alembic_version_table()
    click.secho('Dropped all tables!', fg='green')
|
Drop tables.
|
entailment
|
def init():
    """Create the database if it does not exist yet."""
    click.secho('Creating database {0}'.format(_db.engine.url),
                fg='green')
    url = str(_db.engine.url)
    if not database_exists(url):
        create_database(url)
|
Create database.
|
entailment
|
def destroy():
    """Drop the database (or remove the SQLite file).

    For SQLite, ``drop_database`` deletes the database file and raises
    ``FileNotFoundError`` when that file was never created; that case is
    reported to the user instead of crashing the command.
    """
    click.secho('Destroying database {0}'.format(_db.engine.url),
                fg='red', bold=True)
    if _db.engine.name == 'sqlite':
        try:
            drop_database(_db.engine.url)
        except FileNotFoundError:
            # Fix: the exception was bound to an unused name before.
            # Nothing to remove -- the SQLite file was never initialised.
            click.secho('Sqlite database has not been initialised',
                        fg='red', bold=True)
    else:
        drop_database(_db.engine.url)
|
Drop database.
|
entailment
|
def rolling(self, op):
    """Fast rolling operation with O(log n) updates where n is the
    window size.

    Generator: yields ``op(ordered_window, missing)`` once for the
    initial window and once for every subsequent element of
    ``self.iterable``.  The ordered window is kept in a skiplist so
    each slide costs one remove plus one insert (O(log n) each).
    """
    missing = self.missing
    ismissing = self.ismissing
    window = self.window
    it = iter(self.iterable)
    # First `window` elements form the initial window.
    queue = deque(islice(it, window))
    # `e == e` is False only for NaN, so NaN entries are kept in the
    # queue (to preserve positions) but excluded from the skiplist.
    ol = self.skiplist((e for e in queue if e == e))
    yield op(ol,missing)
    for newelem in it:
        # Slide the window: retire the oldest element, admit the newest.
        oldelem = queue.popleft()
        if not ismissing(oldelem):
            ol.remove(oldelem)
        queue.append(newelem)
        if not ismissing(newelem):
            ol.insert(newelem)
        yield op(ol, missing)
|
Fast rolling operation with O(log n) updates where n is the
window size
|
entailment
|
def get_span_column_count(span):
    """
    Find the length of a colspan.
    Parameters
    ----------
    span : list of lists of int
        The [row, column] pairs that make up the span
    Returns
    -------
    columns : int
        The number of columns included in the span
    Example
    -------
    Consider this table::
        +------+------------------+
        | foo  | bar              |
        +------+--------+---------+
        | spam | goblet | berries |
        +------+--------+---------+
    ::
        >>> span = [[0, 1], [0, 2]]
        >>> print(get_span_column_count(span))
        2
    """
    # Fix: the previous running-maximum scan only counted a new column
    # when its index exceeded the last counted one, which undercounts
    # whenever the pairs are not ordered with non-decreasing columns
    # (e.g. multi-row spans listed column-major, or unsorted input).
    # Counting distinct column indices is order-independent.
    return len({pair[1] for pair in span})
|
Find the length of a colspan.
Parameters
----------
span : list of lists of int
The [row, column] pairs that make up the span
Returns
-------
columns : int
The number of columns included in the span
Example
-------
Consider this table::
+------+------------------+
| foo | bar |
+------+--------+---------+
| spam | goblet | berries |
+------+--------+---------+
::
>>> span = [[0, 1], [0, 2]]
>>> print(get_span_column_count(span))
2
|
entailment
|
def to_dict(self):
    "returns self as a dictionary with _underscore subdicts corrected."
    return {
        (key[1:] if key.startswith("_") else key): value
        for key, value in self.__dict__.items()
    }
|
returns self as a dictionary with _underscore subdicts corrected.
|
entailment
|
def get_span_char_width(span, column_widths):
    """
    Sum the widths of the columns that make up the span, plus the extra.
    Parameters
    ----------
    span : list of lists of int
        list of [row, column] pairs that make up the span
    column_widths : list of int
        The widths of the columns that make up the table
    Returns
    -------
    total_width : int
        The total width of the span
    """
    first_column = span[0][1]
    count = get_span_column_count(span)
    # One separator character sits between each adjacent pair of columns.
    spanned = column_widths[first_column:first_column + count]
    return sum(spanned) + (count - 1)
|
Sum the widths of the columns that make up the span, plus the extra.
Parameters
----------
span : list of lists of int
list of [row, column] pairs that make up the span
column_widths : list of int
The widths of the columns that make up the table
Returns
-------
total_width : int
The total width of the span
|
entailment
|
def rebuild_encrypted_properties(old_key, model, properties):
    """Rebuild a model's EncryptedType properties when the SECRET_KEY is changed.

    Reads every row's encrypted columns while the old key is temporarily
    installed, then rewrites them under the new (current) key.

    :param old_key: old SECRET_KEY.
    :param model: the affected db model.
    :param properties: list of properties to rebuild.
    """
    inspector = reflection.Inspector.from_engine(db.engine)
    primary_key_names = inspector.get_primary_keys(model.__tablename__)
    new_secret_key = current_app.secret_key
    db.session.expunge_all()
    try:
        with db.session.begin_nested():
            # Temporarily restore the old key so the encrypted columns
            # can be decrypted while reading the existing rows.
            current_app.secret_key = old_key
            db_columns = []
            for primary_key in primary_key_names:
                db_columns.append(getattr(model, primary_key))
            for prop in properties:
                db_columns.append(getattr(model, prop))
            old_rows = db.session.query(*db_columns).all()
    except Exception as e:
        current_app.logger.error(
            'Exception occurred while reading encrypted properties. '
            'Try again before starting the server with the new secret key.')
        raise e
    finally:
        # Always restore the new key, even if reading failed.
        current_app.secret_key = new_secret_key
        db.session.expunge_all()
    # Re-write each row under the new key; rows are addressed by their
    # primary key values captured above.
    for old_row in old_rows:
        primary_keys, old_entries = old_row[:len(primary_key_names)], \
            old_row[len(primary_key_names):]
        primary_key_fields = dict(zip(primary_key_names, primary_keys))
        update_values = dict(zip(properties, old_entries))
        model.query.filter_by(**primary_key_fields).\
            update(update_values)
    db.session.commit()
|
Rebuild a model's EncryptedType properties when the SECRET_KEY is changed.
:param old_key: old SECRET_KEY.
:param model: the affected db model.
:param properties: list of properties to rebuild.
|
entailment
|
def create_alembic_version_table():
    """Create the ``alembic_version`` table if it is missing.

    After creating it, stamp every real head revision so alembic
    considers the schema up to date.
    """
    alembic = current_app.extensions['invenio-db'].alembic
    if alembic.migration_context._has_version_table():
        return
    alembic.migration_context._ensure_version_table()
    for head in alembic.script_directory.revision_map._real_heads:
        alembic.migration_context.stamp(alembic.script_directory, head)
|
Create alembic_version table.
|
entailment
|
def drop_alembic_version_table():
    """Drop the ``alembic_version`` table when it exists."""
    engine = _db.engine
    if engine.dialect.has_table(engine, 'alembic_version'):
        version_table = _db.Table(
            'alembic_version', _db.metadata, autoload_with=engine)
        version_table.drop(bind=engine)
|
Drop alembic_version table.
|
entailment
|
def versioning_model_classname(manager, model):
    """Get the name of the versioned model class."""
    if not manager.options.get('use_module_name', True):
        return '%sVersion' % (model.__name__,)
    # Title-case the dotted module path and strip the dots to build a
    # unique prefix, e.g. 'my.mod' -> 'MyMod'.
    module_prefix = model.__module__.title().replace('.', '')
    return '%s%sVersion' % (module_prefix, model.__name__)
|
Get the name of the versioned model class.
|
entailment
|
def versioning_models_registered(manager, base):
    """Return True if all versioning models have been registered."""
    registry = base._decl_class_registry.keys()
    expected_names = (versioning_model_classname(manager, cls)
                      for cls in manager.pending_classes)
    return all(name in registry for name in expected_names)
|
Return True if all versioning models have been registered.
|
entailment
|
def vector_to_symmetric(v):
    '''Convert an iterable into a symmetric matrix.

    The input supplies the lower triangle (diagonal included) row by
    row; each off-diagonal value is mirrored across the diagonal.

    :param v: sized iterable with ``N*(N+1)/2`` elements.
    :return: an ``N x N`` symmetric ``ndarray``.
    :raises ValueError: if ``len(v)`` is not a triangular number.
    '''
    n_elems = len(v)
    # Invert N*(N+1)/2 == n_elems to recover the matrix dimension.
    N = (int(sqrt(1 + 8 * n_elems)) - 1) // 2
    if N * (N + 1) // 2 != n_elems:
        raise ValueError('Cannot convert vector to symmetric matrix')
    sym = ndarray((N, N))
    values = iter(v)
    for r in range(N):
        for c in range(r + 1):
            # Fix: `iterator.next()` is Python 2 only; Python 3 uses the
            # next() builtin.
            sym[r, c] = sym[c, r] = next(values)
    return sym
|
Convert an iterable into a symmetric matrix.
|
entailment
|
def cov(self, ddof=None, bias=0):
    '''The covariance matrix from the aggregate sample.

    :parameter ddof: If not ``None`` normalization is by (N - ddof),
        where N is the number of observations; this overrides the value
        implied by bias. The default value is None.
    :parameter bias: when true (and ``ddof`` is None) normalize by N
        instead of N - 1.
    '''
    n = self.n
    # ddof takes precedence over bias when both are given.
    if ddof is not None:
        denominator = n - ddof
    elif bias:
        denominator = n
    else:
        denominator = n - 1
    centered = self.sxx - outer(self.sx, self.sx) / n
    return centered / denominator
|
The covariance matrix from the aggregate sample. It accepts an
optional parameter for the degree of freedoms.
:parameter ddof: If not ``None`` normalization is by (N - ddof), where N is
the number of observations; this overrides the value implied by bias.
The default value is None.
|
entailment
|
def corr(self):
    '''The correlation matrix, derived from the covariance matrix.'''
    covariance = self.cov()
    size = covariance.shape[0]
    result = ndarray((size, size))
    for row in range(size):
        # A variable is perfectly correlated with itself.
        result[row, row] = 1.
        for col in range(row):
            scale = sqrt(covariance[row, row] * covariance[col, col])
            result[row, col] = result[col, row] = covariance[row, col] / scale
    return result
|
The correlation matrix
|
entailment
|
def calmar(sharpe, T = 1.0):
    '''
    Calculate the Calmar ratio for a Weiner process

    @param sharpe: Annualized Sharpe ratio
    @param T: Time interval in years
    '''
    half_drift = 0.5 * T * sharpe * sharpe
    return half_drift / qp(half_drift)
|
Calculate the Calmar ratio for a Weiner process
@param sharpe: Annualized Sharpe ratio
@param T: Time interval in years
|
entailment
|
def calmarnorm(sharpe, T, tau = 1.0):
    '''
    Multiplicator for normalizing calmar ratio to period tau
    '''
    numerator = calmar(sharpe, tau)
    denominator = calmar(sharpe, T)
    return numerator / denominator
|
Multiplicator for normalizing calmar ratio to period tau
|
entailment
|
def upgrade():
    """Upgrade database.

    Rename indexes, unique constraints and foreign keys of every table
    known to the target metadata so their names follow
    ``NAMING_CONVENTION``.  Live schema objects are matched to metadata
    constraints by their column signature, not by name.
    """
    op.execute('COMMIT') # See https://bitbucket.org/zzzeek/alembic/issue/123
    ctx = op.get_context()
    metadata = ctx.opts['target_metadata']
    metadata.naming_convention = NAMING_CONVENTION
    metadata.bind = ctx.connection.engine
    insp = Inspector.from_engine(ctx.connection.engine)
    for table_name in insp.get_table_names():
        # Skip tables the application metadata does not know about.
        if table_name not in metadata.tables:
            continue
        table = metadata.tables[table_name]
        # Index the reflected schema objects by column signature so that
        # each metadata constraint can find its live counterpart.
        ixs = {}
        uqs = {}
        fks = {}
        for ix in insp.get_indexes(table_name):
            ixs[tuple(ix['column_names'])] = ix
        for uq in insp.get_unique_constraints(table_name):
            uqs[tuple(uq['column_names'])] = uq
        for fk in insp.get_foreign_keys(table_name):
            fks[(tuple(fk['constrained_columns']), fk['referred_table'])] = fk
        with op.batch_alter_table(
                table_name, naming_convention=NAMING_CONVENTION) as batch_op:
            for c in list(table.constraints) + list(table.indexes):
                key = None
                if isinstance(c, sa.schema.ForeignKeyConstraint):
                    key = (tuple(c.column_keys), c.referred_table.name)
                    fk = fks.get(key)
                    # Recreate only when the live name differs from the
                    # conventional one.
                    if fk and c.name != fk['name']:
                        batch_op.drop_constraint(
                            fk['name'], type_='foreignkey')
                        batch_op.create_foreign_key(
                            op.f(c.name), fk['referred_table'],
                            fk['constrained_columns'],
                            fk['referred_columns'],
                            **fk['options']
                        )
                elif isinstance(c, sa.schema.UniqueConstraint):
                    key = tuple(c.columns.keys())
                    uq = uqs.get(key)
                    if uq and c.name != uq['name']:
                        batch_op.drop_constraint(uq['name'], type_='unique')
                        batch_op.create_unique_constraint(
                            op.f(c.name), uq['column_names'])
                elif isinstance(c, sa.schema.CheckConstraint):
                    # CHECK constraints cannot be renamed generically.
                    util.warn('Update {0.table.name} CHECK {0.name} '
                              'manually'.format(c))
                elif isinstance(c, sa.schema.Index):
                    key = tuple(c.columns.keys())
                    ix = ixs.get(key)
                    if ix and c.name != ix['name']:
                        batch_op.drop_index(ix['name'])
                        batch_op.create_index(
                            op.f(c.name), ix['column_names'],
                            unique=ix['unique'],
                        )
                elif isinstance(c, sa.schema.PrimaryKeyConstraint) or \
                        c.name == '_unnamed_':
                    # NOTE we don't care about primary keys since they have
                    # specific syntax.
                    pass
                else:
                    raise RuntimeError('Missing {0!r}'.format(c))
|
Upgrade database.
|
entailment
|
def data2simplerst(table, spans=[[[0, 0]]], use_headers=True, headers_row=0):
    """
    Convert table data to a simple rst table
    Parameters
    ----------
    table : list of lists of str
        A table of strings.
    spans : list of lists of lists of int
        A list of spans. A span is a list of [Row, Column] pairs of
        table cells that are joined together.
    use_headers : bool, optional
        Whether or not to include headers in the table. A header is
        a cell that is underlined with "="
    headers_row : int
        The row that will be the headers. In a simple rst table, the
        headers do not need to be at the top.
    Returns
    -------
    str
        The simple rst table
    Example
    -------
    >>> table = [
    ...     ["Inputs", "", "Output"],
    ...     ["A", "B", "A or B"],
    ...     ["False", "False", "False"],
    ...     ["True", "False", "True"],
    ...     ["False", "True", "True"],
    ...     ["True", "True", "True"],
    ... ]
    >>> spans = [
    ...     [ [0, 0], [0, 1] ]
    ... ]
    >>> print(data2simplerst(table, spans, headers_row=1))
    ====== ===== ======
    Inputs Output
    ------------- ------
    A B A or B
    ====== ===== ======
    False False False
    True False True
    False True True
    True True True
    ====== ===== ======
    """
    table = copy.deepcopy(table)
    # Validate the table and every non-default span before rendering.
    table_ok = check_table(table)
    if not table_ok == "":
        return "ERROR: " + table_ok
    if not spans == [[[0, 0]]]:
        for span in spans:
            span_ok = check_span(span, table)
            if not span_ok == "":
                return "ERROR: " + span_ok
    table = ensure_table_strings(table)
    table = multis_2_mono(table)
    output = []
    # Column width = the longest cell text in that column.
    column_widths = []
    for col in table[0]:
        column_widths.append(0)
    for row in range(len(table)):
        for column in range(len(table[row])):
            if len(table[row][column]) > column_widths[column]:
                column_widths[column] = len(table[row][column])
    # Build the '=' border used at the top, bottom and under headers.
    underline = ''
    for col in column_widths:
        underline = ''.join([underline + col * '=', ' '])
    output.append(underline)
    for row in range(len(table)):
        string = ''
        column = 0
        while column < len(table[row]):
            span = get_span(spans, row, column)
            # Only the first cell of a span emits text; the other cells
            # of the span are empty and skipped below.
            if (span and span[0] == [row, column] and
                    not table[row][column] == ''):
                span_col_count = get_span_column_count(span)
                end_col = column + span_col_count
                width = sum(column_widths[column:end_col])
                width += 2 * (span_col_count - 1)
                string += center_line(width, table[row][column]) + ' '
            elif table[row][column] == '':
                pass
            else:
                string += center_line(
                    column_widths[column], table[row][column]) + ' '
            column += 1
        output.append(string)
        if row == headers_row and use_headers:
            output.append(underline)
        else:
            # Rows containing spans get a '-' rule so the grouping is
            # visible in the simple-table syntax.
            if row_includes_spans(table, row, spans):
                new_underline = ''
                column = 0
                while column < len(table[row]):
                    span = get_span(spans, row, column)
                    if (span and span[0] == [row, column] and
                            not table[row][column] == ''):
                        span_col_count = get_span_column_count(span)
                        end_column = column + span_col_count
                        width = sum(column_widths[column:end_column])
                        width += 2 * (span_col_count - 1)
                        new_underline += (width * '-') + ' '
                    elif table[row][column] == '':
                        pass
                    else:
                        new_underline += (column_widths[column] * '-') + ' '
                    column += 1
                output.append(new_underline)
    for i in range(len(output)):
        output[i] = output[i].rstrip()
    output.append(underline)
    return '\n'.join(output)
|
Convert table data to a simple rst table
Parameters
----------
table : list of lists of str
A table of strings.
spans : list of lists of lists of int
A list of spans. A span is a list of [Row, Column] pairs of
table cells that are joined together.
use_headers : bool, optional
Whether or not to include headers in the table. A header is
a cell that is underlined with "="
headers_row : int
The row that will be the headers. In a simple rst table, the
headers do not need to be at the top.
Returns
-------
str
The simple rst table
Example
-------
>>> table = [
... ["Inputs", "", "Output"],
... ["A", "B", "A or B"],
... ["False", "False", "False"],
... ["True", "False", "True"],
... ["False", "True", "True"],
... ["True", "True", "True"],
... ]
>>> spans = [
... [ [0, 0], [0, 1] ]
... ]
>>> print(data2simplerst(table, spans, headers_row=1))
====== ===== ======
Inputs Output
------------- ------
A B A or B
====== ===== ======
False False False
True False True
False True True
True True True
====== ===== ======
|
entailment
|
def add_links(converted_text, html):
    """
    Add the links to the bottom of the text.

    Scans the anchors of ``html`` and appends rst link targets,
    footnotes and citations to ``converted_text``.

    :param converted_text: the rst text produced so far.
    :param html: the original html to harvest ``<a>`` tags from.
    :return: ``converted_text`` with the link section appended,
        trailing whitespace stripped.
    """
    soup = BeautifulSoup(html, 'html.parser')
    # Anchors with these classes are emitted as footnotes/citations,
    # not as plain link targets.
    link_exceptions = [
        'footnote-reference',
        'fn-backref',
        'citation-reference'
    ]
    footnotes = {}
    citations = {}
    backrefs = {}
    links = soup.find_all('a')
    for link in links:
        href = link.get('href')
        text = process_tag(link)
        classes = dict(link.attrs).get('class', '')
        if 'footnote-reference' in classes:
            footnotes[href] = '#' + link.get('id')
        elif 'citation-reference' in classes:
            text = process_tag(link)
            citations[text] = '#' + link.get('id')
        elif 'fn-backref' in classes:
            # The footnote/citation body lives in the next table cell.
            sibling = link.findNext('td')
            text = process_tag(sibling)
            backrefs[href] = text
        excepted_link = False
        for class_type in classes:
            if class_type in link_exceptions:
                excepted_link = True
        if not excepted_link:
            # Normalize the reference name before emitting the target.
            if text.endswith('_'):
                text = text[0:-1]
            if len(text.split(' ')) > 1:
                text = text[1:-1]
            converted_text += '.. _' + text + ': ' + href + '\n'
    if len(footnotes.keys()) > 0:
        converted_text += '\n'
    for key in footnotes.keys():
        text = backrefs[footnotes[key]]
        converted_text += '.. [' + key + '] ' + text + '\n'
    if len(citations.keys()) > 0:
        converted_text += '\n'
    for key in citations.keys():
        text = backrefs[citations[key]]
        converted_text += '.. ' + key[0:-1] + ' ' + text + '\n'
    return converted_text.rstrip()
|
Add the links to the bottom of the text
|
entailment
|
def load(self, providers, symbols, start, end, logger, backend, **kwargs):
    '''Load symbols data.

    :keyword providers: Dictionary of registered data providers.
    :keyword symbols: list of symbols to load.
    :keyword start: start date.
    :keyword end: end date.
    :keyword logger: instance of :class:`logging.Logger` or ``None``.
    :keyword backend: :class:`dynts.TimeSeries` backend name.

    There is no need to override this function, just use one of
    the three hooks available (:meth:`preprocess`, :meth:`onresult`,
    :meth:`onfinishload`).

    :raises MissingDataProvider: when a symbol resolves to no provider.
    :return: whatever :meth:`onfinishload` returns for the mapping of
        symbol -> result.
    '''
    # Preconditioning on dates
    logger = logger or logging.getLogger(self.__class__.__name__)
    start, end = self.dates(start, end)
    data = {}
    for sym in symbols:
        # Get ticker, field and provider
        symbol = self.parse_symbol(sym, providers)
        provider = symbol.provider
        if not provider:
            raise MissingDataProvider(
                'data provider for %s not available' % sym
            )
        # preprocess hook may split/replace the requested interval.
        pre = self.preprocess(symbol, start, end, logger, backend, **kwargs)
        if pre.intervals:
            # Load each sub-interval and merge into a single result.
            result = None
            for st, en in pre.intervals:
                logger.info('Loading %s from %s. From %s to %s',
                            symbol.ticker, provider, st, en)
                res = provider.load(symbol, st, en, logger, backend,
                                    **kwargs)
                if result is None:
                    result = res
                else:
                    result.update(res)
        else:
            # No intervals: preprocess already produced the result.
            result = pre.result
        # onresult hook
        result = self.onresult(symbol, result, logger, backend, **kwargs)
        data[sym] = result
    # last hook
    return self.onfinishload(data, logger, backend, **kwargs)
|
Load symbols data.
:keyword providers: Dictionary of registered data providers.
:keyword symbols: list of symbols to load.
:keyword start: start date.
:keyword end: end date.
:keyword logger: instance of :class:`logging.Logger` or ``None``.
:keyword backend: :class:`dynts.TimeSeries` backend name.
There is no need to override this function, just use one
the three hooks available.
|
entailment
|
def dates(self, start, end):
    '''Internal function which performs pre-conditioning on dates.

    :keyword start: start date.
    :keyword end: end date.

    Makes sure *start* and *end* are consistent: always returns a
    ``(start, end)`` tuple with ``start <= end`` and ``end`` never
    after today.  It never fails; missing or inconsistent dates fall
    back to a default history window.  There should be no reason to
    override this function.
    '''
    today = date.today()
    end = safetodate(end) or today
    if end > today:
        end = today
    start = safetodate(start)
    if not start or start > end:
        # Default window: settings.months_history months back.
        history_days = int(round(30.4 * settings.months_history))
        start = end - timedelta(days=history_days)
    return start, end
|
Internal function which perform pre-conditioning on dates:
:keyword start: start date.
:keyword end: end date.
This function makes sure the *start* and *end* date are consistent.
It *never fails* and always return a two-element tuple
containing *start*, *end* with *start* less or equal *end*
and *end* never after today.
There should be no reason to override this function.
|
entailment
|
def parse_symbol(self, symbol, providers):
    '''Parse a symbol into ticker, field and provider parts.

    Must return an instance of :attr:`symboldata`.

    :keyword symbol: string associated with market data to load.
    :keyword providers: dictionary of :class:`dynts.data.DataProvider`
        instances available.

    For example ``intc``, ``intc:open``, ``intc:volume:google`` and
    ``intc:google`` are all valid inputs producing the triplets
    ``intc,None,yahoo``, ``intc,open,yahoo``, ``intc,volume,google``
    and ``intc,None,google`` respectively (assuming ``yahoo`` is
    :attr:`dynts.conf.Settings.default_provider`).
    This function is called before retrieving data.

    :raises BadSymbol: when the symbol has too many parts or names an
        unknown provider.
    '''
    symbol = str(symbol)
    parts = symbol.split(settings.field_separator)
    known_providers = providers.keys()
    ticker, field, provider = symbol, None, None
    if len(parts) == 2:
        ticker = parts[0]
        # The second part is either a provider name or a field.
        if parts[1] in known_providers:
            provider = parts[1]
        else:
            field = parts[1]
    elif len(parts) == 3:
        ticker = parts[0]
        # Provider and field may appear in either order.
        if parts[1] in known_providers:
            provider, field = parts[1], parts[2]
        elif parts[2] in known_providers:
            provider, field = parts[2], parts[1]
        else:
            raise BadSymbol(
                'Could not parse %s. Unrecognized provider.' % symbol)
    elif len(parts) > 3:
        raise BadSymbol('Could not parse %s.' % symbol)
    return self.symbol_for_ticker(ticker, field, provider, providers)
|
Parse a symbol to obtain information regarding ticker,
field and provider. Must return an instance of :attr:`symboldata`.
:keyword symbol: string associated with market data to load.
:keyword providers: dictionary of :class:`dynts.data.DataProvider`
instances available.
For example::
intc
intc:open
intc:volume:google
intc:google
are all valid inputs returning a :class:`SymbolData` instance with
the following triplet of information::
intc,None,yahoo
intc,open,yahoo
intc,volume,google
intc,None,google
assuming ``yahoo`` is the provider in
:attr:`dynts.conf.Settings.default_provider`.
This function is called before retrieving data.
|
entailment
|
def symbol_for_ticker(self, ticker, field, provider, providers):
    '''Build a *symboldata* instance for *ticker*.

    Resolves the provider name (falling back to the configured default)
    to the actual provider instance in *providers*.
    '''
    name = provider or settings.default_provider
    resolved = providers.get(name, None) if name else name
    return self.symboldata(ticker, field, resolved)
|
Return an instance of *symboldata* containing
information about the data provider, the data provider ticker name
and the data provider field.
|
entailment
|
def preprocess(self, ticker, start, end, logger, backend, **kwargs):
    '''Preprocess **hook**, called **before requesting data** from a
    dataprovider.

    Must return an instance of :attr:`TimeSerieLoader.preprocessdata`.
    The default implementation wraps the requested range into a single
    interval. Override it to modify the intervals: when the intervals
    are ``None`` or empty, :func:`dynts.data.DataProvider.load` is not
    called; otherwise it is called once per interval.
    '''
    single_interval = ((start, end),)
    return self.preprocessdata(intervals=single_interval)
|
Preprocess **hook**. This is first loading hook and it is
**called before requesting data** from a dataprovider.
It must return an instance of :attr:`TimeSerieLoader.preprocessdata`.
By default it returns::
self.preprocessdata(intervals = ((start,end),))
It could be overritten to modify the intervals.
If the intervals is ``None`` or an empty container,
the :func:`dynts.data.DataProvider.load` method won't be called,
otherwise it will be called as many times as the number of intervals
in the return tuple (by default once).
|
entailment
|
def register(self, provider):
    '''Register a new data provider.

    *provider* must be a DataProvider instance (or a class, which is
    instantiated first). An existing provider with the same code is
    replaced.
    '''
    instance = provider() if isinstance(provider, type) else provider
    self[instance.code] = instance
|
Register a new data provider. *provider* must be an instance of
DataProvider. If provider name is already available, it will be replaced.
|
entailment
|
def unregister(self, provider):
    '''Remove an existing data provider from the registry.

    *provider* may be a DataProvider class, instance, or code string;
    returns the removed provider or ``None`` when absent.
    '''
    target = provider() if isinstance(provider, type) else provider
    if isinstance(target, DataProvider):
        target = target.code
    return self.pop(str(target).upper(), None)
|
Unregister an existing data provider.
*provider* must be an instance of DataProvider.
If provider name is already available, it will be replaced.
|
entailment
|
def parse(timeseries_expression, method=None, functions=None, debug=False):
    '''Parse a :ref:`timeseries expression <dsl-script>` string.

    On success returns an instance of :class:`dynts.dsl.Expr` which can
    be used to populate timeseries or scatters once data is available.
    Parsing is implemented with the ply_ module (lex/yacc for Python).

    :parameter timeseries_expression: expression string to parse.
    :parameter method: Not yet used.
    :parameter functions: dictionary of functions used while parsing;
        defaults to :data:`dynts.function_registry` when ``None``.
    :parameter debug: debug flag for ply_. Default ``False``.
    :raises ExpressionError: when no parser backend is installed.

    .. _ply: http://www.dabeaz.com/ply/
    '''
    if not parsefunc:
        raise ExpressionError('Could not parse. No parser installed.')
    if functions is None:
        functions = function_registry
    normalized = str(timeseries_expression).lower()
    return parsefunc(normalized, functions, method, debug)
|
Function for parsing :ref:`timeseries expressions <dsl-script>`.
If succesful, it returns an instance of :class:`dynts.dsl.Expr` which
can be used to to populate timeseries or scatters once data is available.
Parsing is implemented using the ply_ module,
an implementation of lex and yacc parsing tools for Python.
:parameter expression: A :ref:`timeseries expressions <dsl-script>` string.
:parameter method: Not yet used.
:parameter functions: dictionary of functions to use when parsing.
If not provided the :data:`dynts.function_registry`
will be used.
Default ``None``.
:parameter debug: debug flag for ply_. Default ``False``.
For examples and usage check the :ref:`dsl documentation <dsl>`.
.. _ply: http://www.dabeaz.com/ply/
|
entailment
|
def evaluate(expression, start=None, end=None, loader=None, logger=None,
             backend=None, **kwargs):
    '''Evaluate a timeseries ``expression`` into a DSL result.

    The ``expression`` is parsed (when given as a string), its symbols
    are sent to the :class:`dynts.data.TimeSerieLoader` for retrieval,
    and an instance of :class:`~.DSLResult` is returned from which
    timeseries and/or scatters can be obtained.

    :parameter expression: expression string or a parsed
        :class:`dynts.dsl.Expr` instance.
    :parameter start: Start date or ``None``.
    :parameter end: End date or ``None`` (today is used downstream).
    :parameter loader: optional :class:`dynts.data.TimeSerieLoader`
        class or instance. Default ``None``.
    :parameter logger: optional python logging instance. Default ``None``.
    :parameter backend: :class:`dynts.TimeSeries` backend name or ``None``.
    :raises CouldNotParse: when the expression is empty or malformed.

    Typical usage::

        >>> from dynts import api
        >>> r = api.evaluate('min(GS,window=30)')
        >>> ts = r.ts()
    '''
    if isinstance(expression, str):
        expression = parse(expression)
    if not expression or expression.malformed():
        raise CouldNotParse(expression)
    symbols = expression.symbols()
    start = todate(start) if start else start
    end = todate(end) if end else end
    data = providers.load(symbols, start, end, loader=loader,
                          logger=logger, backend=backend, **kwargs)
    return DSLResult(expression, data, backend=backend)
|
Evaluate a timeseries ``expression`` into
an instance of :class:`dynts.dsl.dslresult` which can be used
to obtain timeseries and/or scatters.
This is probably the most used function of the library.
:parameter expression: A timeseries expression string or an instance
of :class:`dynts.dsl.Expr` obtained using the :func:`~.parse`
function.
:parameter start: Start date or ``None``.
:parameter end: End date or ``None``. If not provided today values is used.
:parameter loader: Optional :class:`dynts.data.TimeSerieLoader`
class or instance to use.
Default ``None``.
:parameter logger: Optional python logging instance, used if you required
logging.
Default ``None``.
:parameter backend: :class:`dynts.TimeSeries` backend name or ``None``.
The ``expression`` is parsed and the :class:`~.Symbol` are sent to the
:class:`dynts.data.TimeSerieLoader` instance for retrieving
actual timeseries data.
It returns an instance of :class:`~.DSLResult`.
Typical usage::
>>> from dynts import api
>>> r = api.evaluate('min(GS,window=30)')
>>> r
min(GS,window=30)
>>> ts = r.ts()
|
entailment
|
def grid2data(text):
    """
    Convert Grid table to data (the kind used by Dashtable)
    Parameters
    ----------
    text : str
        The text must be a valid rst table
    Returns
    -------
    table : list of lists of str
    spans : list of lists of lists of int
        A span is a list of [row, column] pairs that define a group of
        combined table cells
    use_headers : bool
        Whether or not the table was using headers
    Notes
    -----
    This function requires docutils_.
    .. _docutils: http://docutils.sourceforge.net/
    Example
    -------
    >>> text = '''
    ... +------------+------------+-----------+
    ... | Header 1 | Header 2 | Header 3 |
    ... +============+============+===========+
    ... | body row 1 | column 2 | column 3 |
    ... +------------+------------+-----------+
    ... | body row 2 | Cells may span columns.|
    ... +------------+------------+-----------+
    ... | body row 3 | Cells may | - Cells |
    ... +------------+ span rows. | - contain |
    ... | body row 4 | | - blocks. |
    ... +------------+------------+-----------+
    ... '''
    >>> import dashtable
    >>> table, spans, use_headers = dashtable.grid2data(text)
    >>> from pprint import pprint
    >>> pprint(table)
    [['Header 1', 'Header 2', 'Header 3'],
    ['body row 1', 'column 2', 'column 3'],
    ['body row 2', 'Cells may span columns.', ''],
    ['body row 3', 'Cells may\\nspan rows.', '- Cells\\n- contain\\n- blocks.'],
    ['body row 4', '', '']]
    >>> print(spans)
    [[[2, 1], [2, 2]], [[3, 1], [4, 1]], [[3, 2], [4, 2]]]
    >>> print(use_headers)
    True
    """
    # docutils is an optional dependency used only by this function.
    try:
        import docutils.statemachine
        import docutils.parsers.rst.tableparser
    except ImportError:
        print("ERROR: You must install the docutils library to use grid2data")
        return
    text = text.strip()
    lines = text.split('\n')
    for i in range(len(lines)):
        lines[i] = lines[i].strip()
    parser = docutils.parsers.rst.tableparser.GridTableParser()
    # parse() yields (column widths, header rows, body rows).
    grid_data = parser.parse(docutils.statemachine.StringList(list(lines)))
    grid_data = list(grid_data)
    column_widths = grid_data.pop(0)
    column_count = len(column_widths)
    if len(grid_data[0]) > 0:
        # Header rows present: fold them into the body row list.
        use_headers = True
        headers = grid_data[0][0]
        row_count = len(grid_data[1]) + 1
        grid_data[1].insert(0, headers)
        grid_data.pop(0)
    else:
        use_headers = False
        grid_data.pop(0)
        row_count = len(grid_data[0])
    grid_data = grid_data[0]
    table = make_empty_table(row_count, column_count)
    spans = []
    for row in range(len(grid_data)):
        for column in range(len(grid_data[row])):
            # Each docutils cell is (morerows, morecols, offset, lines);
            # cells absorbed by a span are None and raise TypeError on
            # indexing, which is how they are skipped here.
            try:
                text = '\n'.join(grid_data[row][column][3]).rstrip()
                table[row][column] = text
                extra_rows = grid_data[row][column][0]
                extra_columns = grid_data[row][column][1]
                span = make_span(row, column, extra_rows, extra_columns)
                span = sorted(span)
                # Deduplicate [row, column] pairs within the span.
                span = list(span for span,_ in itertools.groupby(span))
                if not len(span) == 1:
                    spans.append(span)
            except TypeError:
                pass
    spans = sorted(spans)
    return table, spans, use_headers
|
Convert Grid table to data (the kind used by Dashtable)
Parameters
----------
text : str
The text must be a valid rst table
Returns
-------
table : list of lists of str
spans : list of lists of lists of int
A span is a list of [row, column] pairs that define a group of
combined table cells
use_headers : bool
Whether or not the table was using headers
Notes
-----
This function requires docutils_.
.. _docutils: http://docutils.sourceforge.net/
Example
-------
>>> text = '''
... +------------+------------+-----------+
... | Header 1 | Header 2 | Header 3 |
... +============+============+===========+
... | body row 1 | column 2 | column 3 |
... +------------+------------+-----------+
... | body row 2 | Cells may span columns.|
... +------------+------------+-----------+
... | body row 3 | Cells may | - Cells |
... +------------+ span rows. | - contain |
... | body row 4 | | - blocks. |
... +------------+------------+-----------+
... '''
>>> import dashtable
>>> table, spans, use_headers = dashtable.grid2data(text)
>>> from pprint import pprint
>>> pprint(table)
[['Header 1', 'Header 2', 'Header 3'],
['body row 1', 'column 2', 'column 3'],
['body row 2', 'Cells may span columns.', ''],
['body row 3', 'Cells may\\nspan rows.', '- Cells\\n- contain\\n- blocks.'],
['body row 4', '', '']]
>>> print(spans)
[[[2, 1], [2, 2]], [[3, 1], [4, 1]], [[3, 2], [4, 2]]]
>>> print(use_headers)
True
|
entailment
|
def get_consensus_tree(self, cutoff=0.0, best_tree=None):
    """
    Return an extended majority-rule consensus tree as a Toytree object.

    Node 'support' labels record how often each clade occurs across the
    trees in the input treelist. Clades with support below 'cutoff' are
    collapsed into polytomies.

    Params
    ------
    cutoff (float; default=0.0):
        Proportion below which clades are collapsed in the majority rule
        consensus tree (e.g., 0.5 means 50%).
    best_tree (Toytree; optional):
        A tree that support values should be calculated for and added to
        (e.g., support for clades of a best ML tree from 100 bootstrap
        trees). Not yet implemented; passing a tree raises
        NotImplementedError.
    """
    # the best_tree mapping mode is not implemented yet
    if best_tree:
        raise NotImplementedError("best_tree option not yet supported.")
    consensus = ConsensusTree(self.treelist, cutoff)
    consensus.update()
    return consensus.ttree
|
Returns an extended majority rule consensus tree as a Toytree object.
Node labels include 'support' values showing the occurrence of clades
in the consensus tree across trees in the input treelist.
Clades with support below 'cutoff' are collapsed into polytomies.
If you enter an optional 'best_tree' then support values from
the treelist calculated for clades in this tree, and the best_tree is
returned with support values added to nodes.
Params
------
cutoff (float; default=0.0):
Cutoff below which clades are collapsed in the majority rule
consensus tree. This is a proportion (e.g., 0.5 means 50%).
best_tree (Toytree; optional):
A tree that support values should be calculated for and added to.
For example, you want to calculate how often clades in your best
ML tree are supported in 100 bootstrap trees.
|
entailment
|
def draw_tree_grid(self,
    nrows=None,
    ncols=None,
    start=0,
    fixed_order=False,
    shared_axis=False,
    **kwargs):
    """
    Draw a slice of the treelist into a non-overlapping nrows x ncols grid.

    Parameters:
    -----------
    nrows (int):
        Number of grid rows. Default=automatically set.
    ncols (int):
        Number of grid columns. Default=automatically set.
    start (int):
        Starting index of tree slice from .treelist.
    fixed_order (bool or list):
        If True, all trees share the tip order of the first tree; a list
        fixes an explicit tip order; False keeps each tree's own order.
    shared_axis (bool):
        Passed through to the TreeGrid drawing.
    kwargs (dict):
        Toytree .draw() arguments as a dictionary.

    Returns
    -------
    (canvas, axes) tuple, or (None, None) if the treelist is empty.
    """
    # return nothing if tree is empty
    if not self.treelist:
        print("Treelist is empty")
        return None, None
    # make a copy of the treelist so we don't modify the original
    if not fixed_order:
        treelist = self.copy().treelist
    else:
        # fixed_order=True means "use the first tree's tip order"
        if fixed_order is True:
            fixed_order = self.treelist[0].get_tip_labels()
        treelist = [
            ToyTree(i, fixed_order=fixed_order)
            for i in self.copy().treelist
        ]
    # apply kwargs styles to the individual tree styles
    for tree in treelist:
        tree.style.update(kwargs)
    # get reasonable values for nrows/ncols given treelist length;
    # only the missing dimension(s) are filled in.
    if not (ncols or nrows):
        ncols = 5
        nrows = 1
    elif not (ncols and nrows):
        if ncols:
            if ncols == 1:
                if self.ntrees <= 5:
                    nrows = self.ntrees
                else:
                    nrows = 2
            else:
                if self.ntrees <= 10:
                    nrows = 2
                else:
                    nrows = 3
        if nrows:
            if nrows == 1:
                if self.ntrees <= 5:
                    ncols = self.ntrees
                else:
                    ncols = 5
            else:
                if self.ntrees <= 10:
                    ncols = 5
                else:
                    ncols = 3
    else:
        # both dimensions were given by the caller
        pass
    # Return TreeGrid object for debugging
    draw = TreeGrid(treelist)
    if kwargs.get("debug"):
        return draw
    # Call update to draw plot. Kwargs still here for width, height, axes
    canvas, axes = draw.update(nrows, ncols, start, shared_axis, **kwargs)
    return canvas, axes
|
Draw a slice of x*y trees into a x,y grid non-overlapping.
Parameters:
-----------
x (int):
Number of grid cells in x dimension. Default=automatically set.
y (int):
Number of grid cells in y dimension. Default=automatically set.
start (int):
Starting index of tree slice from .treelist.
kwargs (dict):
Toytree .draw() arguments as a dictionary.
|
entailment
|
def draw_cloud_tree(self,
    axes=None,
    html=False,
    fixed_order=True,
    **kwargs):
    """
    Draw a series of trees overlapping each other in coordinate space.

    The order of tip_labels is fixed in cloud trees so that trees with
    discordant relationships can be seen in conflict. To change the tip
    order use the 'fixed_order' argument in toytree.mtree() when creating
    the MultiTree object.

    Parameters:
        axes (toyplot.Cartesian): toyplot Cartesian axes object.
        html (bool): whether to return the drawing as html (default=PNG).
        fixed_order (True or list): True uses the first tree's tip order;
            a list fixes an explicit order. False raises an Exception.
        **kwargs (dict): styling options should be input as a dictionary.

    Returns
    -------
    (canvas, axes) tuple, or (None, None) on an empty treelist or when
    the trees do not share the same tip set.
    """
    # return nothing if tree is empty
    if not self.treelist:
        print("Treelist is empty")
        return None, None
    # cloud trees only make sense when all trees share the same tips
    if not self.all_tips_shared:
        print("All trees in treelist do not share the same tips")
        return None, None
    # fixed_order is mandatory here (tips must align across trees)
    if not fixed_order:
        raise Exception(
            "fixed_order must be either True or a list with the tip order")
    # set fixed order on a copy of the tree list
    if isinstance(fixed_order, (list, tuple)):
        pass
    elif fixed_order is True:
        fixed_order = self.treelist[0].get_tip_labels()
    else:
        raise Exception(
            "fixed_order argument must be True or a list with the tip order")
    treelist = [
        ToyTree(i, fixed_order=fixed_order) for i in self.copy().treelist
    ]
    # give advice if user tries to enter tip_labels
    if kwargs.get("tip_labels"):
        print(TIP_LABELS_ADVICE)
    # set autorender format to png so we don't bog down notebooks;
    # the finally block restores the global toyplot config afterwards.
    try:
        changed_autoformat = False
        if not html:
            toyplot.config.autoformat = "png"
            changed_autoformat = True
        # dict of global cloud tree style
        mstyle = STYLES['m']
        # if trees in treelist already have some then we don't quash...
        mstyle.update(
            {i: j for (i, j) in kwargs.items() if
            (j is not None) & (i != "tip_labels")}
        )
        for tree in treelist:
            tree.style.update(mstyle)
        # Send a copy of MultiTree to init Drawing object.
        draw = CloudTree(treelist, **kwargs)
        # and create drawing
        if kwargs.get("debug"):
            return draw
        # allow user axes, and kwargs for width, height
        canvas, axes = draw.update(axes)
        return canvas, axes
    finally:
        if changed_autoformat:
            toyplot.config.autoformat = "html"
|
Draw a series of trees overlapping each other in coordinate space.
The order of tip_labels is fixed in cloud trees so that trees with
discordant relationships can be seen in conflict. To change the tip
order use the 'fixed_order' argument in toytree.mtree() when creating
the MultiTree object.
Parameters:
axes (toyplot.Cartesian): toyplot Cartesian axes object.
html (bool): whether to return the drawing as html (default=PNG).
edge_styles: (list): option to enter a list of edge dictionaries.
**kwargs (dict): styling options should be input as a dictionary.
|
entailment
|
def hash_trees(self):
    """Hash ladderized tree topologies.

    Writes each tree as a topology-only newick string (tree_format=9),
    hashes it, and fills self.treedict with {first-seen index: count}
    for every distinct topology in self.treelist.
    """
    seen = {}
    for idx, tree in enumerate(self.treelist):
        newick = tree.write(tree_format=9)
        digest = md5(newick.encode("utf-8")).hexdigest()
        if digest in seen:
            # duplicate topology: bump the count on its first index
            self.treedict[seen[digest]] += 1
        else:
            seen[digest] = idx
            self.treedict[idx] = 1
|
hash ladderized tree topologies
|
entailment
|
def find_clades(self):
    """Count clade occurrences across all unique topologies.

    Builds a boolean bit-vector per clade (one bit per tip name) and
    counts occurrences weighted by topology multiplicity from
    self.treedict. Results are stored on self.namedict and
    self.clade_counts (sorted by frequency, descending).
    """
    # index names from the first tree
    ndict = {j: i for i, j in enumerate(self.names)}
    namedict = {i: j for i, j in enumerate(self.names)}
    # store counts
    clade_counts = {}
    for tidx, ncopies in self.treedict.items():
        # testing on unrooted trees is easiest but for some reason slow
        ttree = self.treelist[tidx].unroot()
        # traverse over tree
        for node in ttree.treenode.traverse('preorder'):
            bits = np.zeros(len(ttree), dtype=np.bool_)
            for child in node.iter_leaf_names():
                bits[ndict[child]] = True
            # get bit string and its reverse (a clade and its complement
            # are the same bipartition on an unrooted tree)
            bitstring = bits.tobytes()
            revstring = np.invert(bits).tobytes()
            # add to clades first time, then check for inverse next hits
            if bitstring in clade_counts:
                clade_counts[bitstring] += ncopies
            else:
                if revstring not in clade_counts:
                    clade_counts[bitstring] = ncopies
                else:
                    clade_counts[revstring] += ncopies
    # convert counts to frequencies over the full treelist
    for key, val in clade_counts.items():
        clade_counts[key] = val / float(len(self.treelist))
    ## return in sorted order
    self.namedict = namedict
    self.clade_counts = sorted(
        clade_counts.items(),
        key=lambda x: x[1],
        reverse=True)
|
Count clade occurrences.
|
entailment
|
def filter_clades(self):
    """Remove conflicting clades and those < cutoff to get majority rule.

    Iterates clades in descending frequency order (self.clade_counts is
    pre-sorted), keeping a clade only if it is compatible with all
    higher-frequency clades already kept. Results go to
    self.fclade_counts as (bit-array, frequency) tuples.
    """
    passed = []
    # bit-vectors as int arrays; one row per clade
    carrs = np.array([list(i[0]) for i in self.clade_counts], dtype=int)
    freqs = np.array([i[1] for i in self.clade_counts])
    for idx in range(carrs.shape[0]):
        conflict = False
        if freqs[idx] < self.cutoff:
            continue
        for pidx in passed:
            # overlapping tips: any position set in both sums to 2
            intersect = np.max(carrs[idx] + carrs[pidx]) > 1
            # is either one a subset of the other?
            subset_test0 = np.all(carrs[idx] - carrs[pidx] >= 0)
            subset_test1 = np.all(carrs[pidx] - carrs[idx] >= 0)
            if intersect:
                # overlap without nesting means the clades conflict
                if (not subset_test0) and (not subset_test1):
                    conflict = True
        if not conflict:
            passed.append(idx)
    rclades = []
    for idx in passed:
        rclades.append((carrs[idx], freqs[idx]))
    self.fclade_counts = rclades
|
Remove conflicting clades and those < cutoff to get majority rule
|
entailment
|
def build_trees(self):
    """Build an unrooted consensus tree from filtered clade counts.

    Assembles TreeNode objects bottom-up from the clades stored in
    self.fclade_counts, recording each clade's support (as an integer
    percentage) on the node's dist and support attributes. Stores the
    resulting tree as a ToyTree on self.ttree and the node list on
    self.nodelist.
    """
    # map from frozenset-of-tip-indices -> assembled TreeNode
    nodes = {}
    idxarr = np.arange(len(self.fclade_counts[0][0]))
    queue = []
    ## create dict of clade counts and set keys
    countdict = defaultdict(int)
    for clade, count in self.fclade_counts:
        # NOTE: the np.bool alias was deprecated in NumPy 1.20 and
        # removed in 1.24; the builtin bool is the correct dtype here.
        mask = np.int_(list(clade)).astype(bool)
        ccx = idxarr[mask]
        queue.append((len(ccx), frozenset(ccx)))
        countdict[frozenset(ccx)] = count
    while queue:
        # process smallest clades first so children exist before parents
        queue.sort()
        (clade_size, clade) = queue.pop(0)
        new_queue = []
        # search for ancestors of clade
        for (_, ancestor) in queue:
            if clade.issubset(ancestor):
                # update ancestor such that, in the following example:
                # ancestor == {1, 2, 3, 4}
                # clade == {2, 3}
                # new_ancestor == {1, {2, 3}, 4}
                new_ancestor = (ancestor - clade) | frozenset([clade])
                countdict[new_ancestor] = countdict.pop(ancestor)
                ancestor = new_ancestor
            new_queue.append((len(ancestor), ancestor))
        # if the clade is a tip, then we have a name
        if clade_size == 1:
            name = list(clade)[0]
            name = self.namedict[name]
        else:
            name = None
        # the clade will not be in nodes if it is a tip
        children = [nodes.pop(c) for c in clade if c in nodes]
        node = TreeNode(name=name)
        for child in children:
            node.add_child(child)
        if not node.is_leaf():
            # store support as integer percentage on both attributes
            node.dist = int(round(100 * countdict[clade]))
            node.support = int(round(100 * countdict[clade]))
        else:
            node.dist = int(100)
            node.support = int(100)
        nodes[clade] = node
        queue = new_queue
    nodelist = list(nodes.values())
    tre = nodelist[0]
    #tre.unroot()
    ## return the tree and other trees if present
    self.ttree = ToyTree(tre.write(format=0))
    self.ttree._coords.update()
    self.nodelist = nodelist
|
Build an unrooted consensus tree from filtered clade counts.
|
entailment
|
def sounds_like(self, word1, word2):
    """Compare the phonetic representations of 2 words, and return a boolean value."""
    first = self.phonetics(word1)
    second = self.phonetics(word2)
    return first == second
|
Compare the phonetic representations of 2 words, and return a boolean value.
|
entailment
|
def distance(self, word1, word2, metric='levenshtein'):
    """Get the similarity of the words, using the supported distance metrics."""
    # guard clause: unknown metrics fail fast with a helpful message
    if metric not in self.distances:
        raise DistanceMetricError('Distance metric not supported! Choose from levenshtein, hamming.')
    distance_func = self.distances[metric]
    return distance_func(self.phonetics(word1), self.phonetics(word2))
|
Get the similarity of the words, using the supported distance metrics.
|
entailment
|
def get_output_row_heights(table, spans):
    """
    Get the heights of the rows of the output table.

    Parameters
    ----------
    table : list of lists of str
    spans : list of lists of int

    Returns
    -------
    heights : list of int
        The heights of each row in the output table (in line counts).
    """
    heights = []
    for row in table:
        heights.append(-1)
    # first pass: single-row cells set the minimum height per row
    for row in range(len(table)):
        for column in range(len(table[row])):
            text = table[row][column]
            span = get_span(spans, row, column)
            row_count = get_span_row_count(span)
            height = len(text.split('\n'))
            if row_count == 1 and height > heights[row]:
                heights[row] = height
    # second pass: grow rows until multi-row spans fit their text
    for row in range(len(table)):
        for column in range(len(table[row])):
            span = get_span(spans, row, column)
            row_count = get_span_row_count(span)
            if row_count > 1:
                text_row = span[0][0]
                text_column = span[0][1]
                end_row = text_row + row_count
                text = table[text_row][text_column]
                # spanned rows share (row_count - 1) border lines
                height = len(text.split('\n')) - (row_count - 1)
                add_row = 0
                # distribute the extra lines round-robin over the rows
                while height > sum(heights[text_row:end_row]):
                    heights[text_row + add_row] += 1
                    if add_row + 1 < row_count:
                        add_row += 1
                    else:
                        add_row = 0
    return heights
|
Get the heights of the rows of the output table.
Parameters
----------
table : list of lists of str
spans : list of lists of int
Returns
-------
heights : list of int
The heights of each row in the output table
|
entailment
|
def smedian(olist,nobs):
    '''Generalised median for odd and even numbers of samples.

    ``olist`` is assumed to be sorted and ``nobs`` is the number of
    observations. Returns NaN when there are no observations.
    '''
    if not nobs:
        return NaN
    midpoint = nobs // 2
    value = olist[midpoint]
    # even sample count: average the two central values
    if nobs % 2 == 0:
        value = 0.5 * (value + olist[midpoint - 1])
    return value
|
Generalised media for odd and even number of samples
|
entailment
|
def roll_mean(input, window):
    '''Apply a rolling mean function to an array.

    This is a simple rolling aggregation: NaN entries are excluded and
    the mean is taken over the valid observations in each window.

    Parameters
    ----------
    input : 1d numpy array
    window : int
        Rolling window size; must not exceed ``len(input)``.

    Returns
    -------
    numpy array of length ``len(input) - window + 1``; an entry is NaN
    when its window holds no valid observations.

    Raises
    ------
    ValueError
        If ``window`` is larger than the input length.
    '''
    # removed unused loop counter `i` from the original implementation
    nobs, j, sum_x = 0, 0, 0.
    N = len(input)
    if window > N:
        raise ValueError('Out of bound')
    output = np.ndarray(N-window+1, dtype=input.dtype)
    # prime the first window; `val == val` is False only for NaN
    for val in input[:window]:
        if val == val:
            nobs += 1
            sum_x += val
    output[j] = NaN if not nobs else sum_x / nobs
    # slide the window: drop input[j], add the incoming value
    for val in input[window:]:
        prev = input[j]
        if prev == prev:
            sum_x -= prev
            nobs -= 1
        if val == val:
            nobs += 1
            sum_x += val
        j += 1
        output[j] = NaN if not nobs else sum_x / nobs
    return output
|
Apply a rolling mean function to an array.
This is a simple rolling aggregation.
|
entailment
|
def roll_sd(input, window, scale = 1.0, ddof = 0):
    '''Apply a rolling standard deviation function to an array.

    This is a simple rolling aggregation of sums and squared sums,
    skipping NaN entries.

    Parameters
    ----------
    input : 1d numpy array
    window : int
        Rolling window size; must not exceed ``len(input)``.
    scale : float
        Multiplier applied to the variance before the square root
        (e.g., an annualisation factor).
    ddof : int
        Delta degrees of freedom used in the divisor ``nobs - ddof``.

    Returns
    -------
    numpy array of length ``len(input) - window + 1``; an entry is NaN
    when ``nobs - ddof <= 0`` for its window.

    Raises
    ------
    ValueError
        If ``window`` is larger than the input length.
    '''
    # removed unused loop counter `i` from the original implementation
    nobs, j, sx, sxx = 0, 0, 0., 0.
    N = len(input)
    sqrt = np.sqrt
    if window > N:
        raise ValueError('Out of bound')
    output = np.ndarray(N-window+1, dtype=input.dtype)
    # prime the first window; `val == val` is False only for NaN
    for val in input[:window]:
        if val == val:
            nobs += 1
            sx += val
            sxx += val*val
    nn = nobs - ddof
    output[j] = NaN if nn <= 0 else sqrt(scale * (sxx - sx*sx/nobs) / nn)
    # slide the window: drop input[j], add the incoming value
    for val in input[window:]:
        prev = input[j]
        if prev == prev:
            sx -= prev
            sxx -= prev*prev
            nobs -= 1
        if val == val:
            nobs += 1
            sx += val
            sxx += val*val
        j += 1
        nn = nobs - ddof
        output[j] = NaN if nn <= 0 else sqrt(scale * (sxx - sx*sx/nobs) / nn)
    return output
|
Apply a rolling standard deviation function
to an array. This is a simple rolling aggregation of squared
sums.
|
entailment
|
def check_span(span, table):
    """
    Ensure the span is valid.

    A span is a list of [row, column] pairs. These coordinates
    must form a rectangular shape. For example, this span will cause an
    error because it is not rectangular in shape.::

        span = [[0, 1], [0, 2], [1, 0]]

    Spans must be

    * Rectangular
    * A list of lists of int
    * Contiguous (each cell adjacent to another cell in the span)
    * Within the bounds of the table

    Parameters
    ----------
    span : list of lists of int
    table : list of lists of str

    Return
    ------
    exception_string : str
        A message that states there was something wrong; the empty
        string if the span is valid.
    """
    if not type(span) is list:
        return "Spans must be a list of lists"
    for pair in span:
        if not type(pair) is list:
            return "Spans must be a list of lists of int"
        if not len(pair) == 2:
            return "Spans must be a [Row, Column] pair of integers"
    # a rectangle covers exactly rows * columns cells
    total_rows = get_span_row_count(span)
    total_columns = get_span_column_count(span)
    if not len(span) == total_rows * total_columns:
        return ''.join(["Spans must be rectangular in shape. ",
                        str(span) + " is invalid"])
    if max(span, key=lambda x: x[0])[0] > len(table) - 1:
        return ' '.join(["One of the span's rows extends beyond the",
                         "bounds of the table:", str(span)])
    if max(span, key=lambda x: x[1])[1] > len(table[0]) - 1:
        return ' '.join(["One of the span's columns extends beyond the",
                         "bounds of the table:", str(span)])
    # contiguity check: grow `checked` one adjacent cell at a time
    test_span = copy.deepcopy(span)
    checked = [test_span.pop(0)]
    while len(test_span) > 0:
        row = test_span[0][0]
        col = test_span[0][1]
        matched = False
        for i in range(len(checked)):
            # adjacency: same row and neighboring column, or vice versa
            if row == checked[i][0] and abs(col - checked[i][1]) == 1:
                matched = True
            elif abs(row - checked[i][0]) == 1 and col == checked[i][1]:
                matched = True
        if matched:
            checked.append(test_span.pop(0))
        else:
            # no cell in `checked` touches the next cell: disconnected
            checked.extend(test_span)
            return 'This span is not valid: ' + str(checked)
    return ""
|
Ensure the span is valid.
A span is a list of [row, column] pairs. These coordinates
must form a rectangular shape. For example, this span will cause an
error because it is not rectangular in shape.::
span = [[0, 1], [0, 2], [1, 0]]
Spans must be
* Rectanglular
* A list of lists of int
*
Parameters
----------
span : list of lists of int
table : list of lists of str
Return
------
exception_string : str
A message that states there was something wrong.
|
entailment
|
def merge_all_cells(cells):
    """
    Loop through list of cells and piece them together one by one.

    Repeatedly scans for any pair of cells that can be merged (as
    reported by get_merge_direction) until a single cell remains.

    Parameters
    ----------
    cells : list of dashtable.data2rst.Cell

    Returns
    -------
    grid_table : str
        The final grid table (text of the single remaining cell).
    """
    current = 0
    while len(cells) > 1:
        count = 0
        while count < len(cells):
            cell1 = cells[current]
            cell2 = cells[count]
            merge_direction = get_merge_direction(cell1, cell2)
            if not merge_direction == "NONE":
                merge_cells(cell1, cell2, merge_direction)
                # removing an earlier element shifts `current` left
                if current > count:
                    current -= 1
                cells.pop(count)
            else:
                count += 1
        # advance to the next anchor cell, wrapping around
        current += 1
        if current >= len(cells):
            current = 0
    return cells[0].text
|
Loop through list of cells and piece them together one by one
Parameters
----------
cells : list of dashtable.data2rst.Cell
Returns
-------
grid_table : str
The final grid table
|
entailment
|
def bpp2newick(bppnewick):
    """Convert bpp newick format to normal newick.

    Strips the " #<float>" annotations that bpp writes before ':' and
    ';' separators, and collapses ": " to ":".
    """
    cleaned = re.sub(r" #[-+]?[0-9]*\.?[0-9]*[:]", ":", bppnewick)
    cleaned = re.sub(r" #[-+]?[0-9]*\.?[0-9]*[;]", ";", cleaned)
    return re.sub(r": ", ":", cleaned)
|
converts bpp newick format to normal newick
|
entailment
|
def return_small_clade(treenode):
    """Return a tip node from the smaller clade under `treenode`.

    Used to produce balanced trees: walks downward, always descending
    into the child with fewer leaves, until a leaf is reached.
    """
    node = treenode
    while node.children:
        left, right = node.children
        # ties resolve to the first child, matching sorted()[0]
        node = min([left, right], key=lambda x: len(x.get_leaves()))
    return node
|
used to produce balanced trees, returns a tip node from the smaller clade
|
entailment
|
def fuzzy_match_tipnames(ttree, names, wildcard, regex, mrca=True, mono=True):
    """
    Select tip names or an internal mrca node using fuzzy matching.

    Used in multiple internal functions (e.g., .root() and .drop_tips())
    so that every name does not need to be written out by hand. Exactly
    one of the three selectors should be provided; they are checked in
    the order names > regex > wildcard.

    Parameters
    ----------
    ttree: the tree to search.
    names: verbose list (or a single str/int) of tip names.
    wildcard: substring matched against tip names.
    regex: regular expression matched (re.match) against tip names.
    mrca: return the mrca node of selected tipnames instead of the names.
    mono: raise ToytreeError if selected tipnames are not monophyletic.
    """
    # require arguments
    if not any([names, wildcard, regex]):
        raise ToytreeError(
            "must enter an outgroup, wildcard selector, or regex pattern")
    # get list of **nodes** from {list, wildcard, or regex}
    tips = []
    if names:
        if isinstance(names, (str, int)):
            names = [names]
        notfound = [i for i in names if i not in ttree.get_tip_labels()]
        if any(notfound):
            raise ToytreeError(
                "Sample {} is not in the tree".format(notfound))
        tips = [i for i in ttree.treenode.get_leaves() if i.name in names]
    # use regex to match tipnames
    elif regex:
        tips = [
            i for i in ttree.treenode.get_leaves() if re.match(regex, i.name)
        ]
        if not any(tips):
            raise ToytreeError("No Samples matched the regular expression")
    # use wildcard substring matching
    elif wildcard:
        tips = [i for i in ttree.treenode.get_leaves() if wildcard in i.name]
        if not any(tips):
            raise ToytreeError("No Samples matched the wildcard")
    # build list of **tipnames** from matched nodes
    if not tips:
        raise ToytreeError("no matching tipnames")
    tipnames = [i.name for i in tips]
    # if a single tipname matched no need to check for monophyly
    if len(tips) == 1:
        if mrca:
            return tips[0]
        else:
            return tipnames
    # if multiple nodes matched, check if they're monophyletic
    mbool, mtype, mnames = (
        ttree.treenode.check_monophyly(
            tipnames, "name", ignore_missing=True)
    )
    # get mrca node
    node = ttree.treenode.get_common_ancestor(tips)
    # raise an error if required to be monophyletic but not
    if mono:
        if not mbool:
            raise ToytreeError(
                "Taxon list cannot be paraphyletic")
    # return tips or nodes
    if not mrca:
        return tipnames
    else:
        return node
|
Used in multiple internal functions (e.g., .root()) and .drop_tips())
to select an internal mrca node, or multiple tipnames, using fuzzy matching
so that every name does not need to be written out by hand.
name: verbose list
wildcard: matching unique string
regex: regex expression
mrca: return mrca node of selected tipnames.
mono: raise error if selected tipnames are not monophyletic
|
entailment
|
def node_scale_root_height(self, treeheight=1):
    """
    Return a toytree copy with every edge rescaled so that the root
    height equals the value entered for treeheight.
    """
    scaled = self._ttree.copy()
    original_height = scaled.treenode.height
    # rescale every edge by the same factor treeheight / original_height
    for node in scaled.treenode.traverse():
        node.dist = (node.dist / original_height) * treeheight
    scaled._coords.update()
    return scaled
|
Returns a toytree copy with all nodes scaled so that the root
height equals the value entered for treeheight.
|
entailment
|
def node_slider(self, seed=None):
    """
    Returns a toytree copy with node heights modified while retaining
    the same topology but not necessarily node branching order.
    Node heights are moved up or down uniformly between their parent
    and highest child node heights in 'levelorder' from root to tips.
    The total tree height is retained at 1.0, only relative edge
    lengths change.

    Parameters
    ----------
    seed : int, optional
        Seed for the random number generator (module-level random state).
    """
    # I don't think user's should need to access prop
    prop = 0.999
    assert isinstance(prop, float), "prop must be a float"
    assert prop < 1, "prop must be a proportion >0 and < 1."
    random.seed(seed)
    ctree = self._ttree.copy()
    for node in ctree.treenode.traverse():
        ## slide internal nodes (skip root and tips)
        if node.up and node.children:
            ## get min and max slides bounded by children and parent
            minjit = max([i.dist for i in node.children]) * prop
            maxjit = (node.up.height * prop) - node.height
            newheight = random.uniform(-minjit, maxjit)
            ## slide children by lengthening/shortening their edges
            for child in node.children:
                child.dist += newheight
            ## slide self to match, keeping total height unchanged
            node.dist -= newheight
    ctree._coords.update()
    return ctree
|
Returns a toytree copy with node heights modified while retaining
the same topology but not necessarily node branching order.
Node heights are moved up or down uniformly between their parent
and highest child node heights in 'levelorder' from root to tips.
The total tree height is retained at 1.0, only relative edge
lengths change.
|
entailment
|
def node_multiplier(self, multiplier=0.5, seed=None):
    """
    Return a toytree copy with all node dists multiplied by a single
    constant sampled uniformly between (multiplier, 1/multiplier).
    """
    random.seed(seed)
    ctree = self._ttree.copy()
    # one factor is drawn for the whole tree, not one per node
    low, high = sorted([multiplier, 1. / multiplier])
    factor = random.uniform(low, high)
    for node in ctree.treenode.traverse():
        node.dist = node.dist * factor
    ctree._coords.update()
    return ctree
|
Returns a toytree copy with all nodes multiplied by a constant
sampled uniformly between (multiplier, 1/multiplier).
|
entailment
|
def make_ultrametric(self, strategy=1):
    """
    Returns a tree with branch lengths transformed so that the tree is
    ultrametric. Strategies include (1) tip-align: extend tips to the length
    of the fartest tip from the root; (2) non-parametric rate-smoothing:
    minimize ancestor-descendant local rates on branches to align tips (
    not yet supported); and (3) penalized-likelihood: not yet supported.

    Raises
    ------
    NotImplementedError
        For any strategy other than 1.
    """
    ctree = self._ttree.copy()
    if strategy == 1:
        for node in ctree.treenode.traverse():
            if node.is_leaf():
                # NOTE(review): extends each tip edge by its own height;
                # the "+ 1" offset looks suspicious for pure tip-align —
                # confirm against intended behavior before changing.
                node.dist = node.height + 1
    else:
        raise NotImplementedError(
            "Strategy {} not yet implemented. Seeking developers."
            .format(strategy))
    return ctree
|
Returns a tree with branch lengths transformed so that the tree is
ultrametric. Strategies include (1) tip-align: extend tips to the length
of the fartest tip from the root; (2) non-parametric rate-smoothing:
minimize ancestor-descendant local rates on branches to align tips (
not yet supported); and (3) penalized-likelihood: not yet supported.
|
entailment
|
def coaltree(ntips, ne=None, seed=None):
    """
    Returns a coalescent tree with ntips samples and waiting times
    between coalescent events drawn from the kingman coalescent:
    (4N)/(k*(k-1)), where N is population size and k is sample size.
    Edge lengths on the tree are in generations.

    If no Ne argument is entered then edge lengths are returned in units
    of 2*Ne, i.e., coalescent time units.

    Parameters
    ----------
    ntips : int
        Number of sampled tips.
    ne : int, optional
        Effective population size; None returns coalescent units.
    seed : int, optional
        Random number generator seed.
    """
    # seed generator
    random.seed(seed)
    # convert units: use a placeholder Ne and rescale at the end
    coalunits = False
    if not ne:
        coalunits = True
        ne = 10000
    # build tree: generate N tips as separate Nodes then attach together
    # at internal nodes drawn randomly from coalescent waiting times.
    tips = [
        toytree.tree().treenode.add_child(name=str(i))
        for i in range(ntips)
    ]
    while len(tips) > 1:
        rtree = toytree.tree()
        # pick two random lineages to coalesce
        tip1 = tips.pop(random.choice(range(len(tips))))
        tip2 = tips.pop(random.choice(range(len(tips))))
        # NOTE(review): rate uses the fixed total ntips, not the current
        # number of remaining lineages — confirm this is intentional.
        kingman = (4. * ne) / float(ntips * (ntips - 1))
        dist = random.expovariate(1. / kingman)
        rtree.treenode.add_child(tip1, dist=tip2.height + dist)
        rtree.treenode.add_child(tip2, dist=tip1.height + dist)
        tips.append(rtree.treenode)
    # build new tree from the newick string
    self = toytree.tree(tips[0].write())
    self.treenode.ladderize()
    # make tree edges in units of 2N (then N doesn't matter!)
    if coalunits:
        for node in self.treenode.traverse():
            node.dist /= (2. * ne)
    # ensure tips are at zero (they sometime vary just slightly)
    for node in self.treenode.traverse():
        if node.is_leaf():
            node.dist += node.height
    # set tipnames
    for tip in self.get_tip_labels():
        node = self.treenode.search_nodes(name=tip)[0]
        node.name = "r{}".format(node.name)
    # decompose fills in internal node names and idx
    self._coords.update()
    return self
|
Returns a coalescent tree with ntips samples and waiting times
between coalescent events drawn from the kingman coalescent:
(4N)/(k*(k-1)), where N is population size and k is sample size.
Edge lengths on the tree are in generations.
If no Ne argument is entered then edge lengths are returned in units
of 2*Ne, i.e., coalescent time units.
|
entailment
|
def unittree(ntips, treeheight=1.0, seed=None):
    """
    Returns a random tree topology w/ N tips and a root height set to
    1 or a user-entered treeheight value. Descendant nodes are evenly
    spaced between the root and time 0.

    Parameters
    -----------
    ntips (int):
        The number of tips in the randomly generated tree
    treeheight(float):
        Scale tree height (all edges) so that root is at this height.
    seed (int):
        Random number generator seed.
    """
    # seed generator
    random.seed(seed)
    # generate tree with N tips.
    tmptree = TreeNode()
    tmptree.populate(ntips)
    self = toytree.tree(newick=tmptree.write())
    # ladderize, make ultrametric, and scale the root to treeheight
    self = (
        self
        .ladderize()
        .mod.make_ultrametric()
        .mod.node_scale_root_height(treeheight)
    )
    # set tipnames randomly (doesn't have to match idx)
    nidx = list(range(self.ntips))
    random.shuffle(nidx)
    for tidx, node in enumerate(self.treenode.get_leaves()):
        node.name = "r{}".format(nidx[tidx])
    # fill internal node names and idx
    self._coords.update()
    return self
|
Returns a random tree topology w/ N tips and a root height set to
1 or a user-entered treeheight value. Descendant nodes are evenly
spaced between the root and time 0.
Parameters
-----------
ntips (int):
The number of tips in the randomly generated tree
treeheight(float):
Scale tree height (all edges) so that root is at this height.
seed (int):
Random number generator seed.
|
entailment
|
def imbtree(ntips, treeheight=1.0):
    """
    Return an imbalanced (comb-like) tree topology.
    """
    # start from a two-tip cherry and repeatedly nest the current tree
    # as the sister of each new tip to build a ladder/comb shape
    comb = toytree.tree()
    comb.treenode.add_child(name="0")
    comb.treenode.add_child(name="1")
    for tip in range(2, ntips):
        parent = toytree.tree()
        parent.treenode.add_child(name=str(tip))
        parent.treenode.add_child(comb.treenode)
        comb = parent
    # rebuild as a toytree, make ultrametric, and scale the root height
    tre = toytree.tree(comb.write(tree_format=9))
    tre = tre.mod.make_ultrametric()
    self = tre.mod.node_scale_root_height(treeheight)
    self._coords.update()
    return self
|
Return an imbalanced (comb-like) tree topology.
|
entailment
|
def baltree(ntips, treeheight=1.0):
    """
    Returns a balanced tree topology.

    Parameters
    ----------
    ntips : int
        Number of tips; must be even.
    treeheight : float
        Scale tree height so that the root is at this height.

    Raises
    ------
    ToytreeError
        If ntips is odd.
    """
    # require even number of tips
    if ntips % 2:
        raise ToytreeError("balanced trees must have even number of tips.")
    # make first cherry
    rtree = toytree.tree()
    rtree.treenode.add_child(name="0")
    rtree.treenode.add_child(name="1")
    # add tips in a balanced way
    for i in range(2, ntips):
        # get node to split (a tip in the currently smaller clade)
        node = return_small_clade(rtree.treenode)
        # add two children
        node.add_child(name=node.name)
        node.add_child(name=str(i))
        # rename ancestral node (it is no longer a tip)
        node.name = None
    # rename tips so names are in order
    idx = 0
    for node in rtree.treenode.traverse("postorder"):
        if node.is_leaf():
            node.name = str(idx)
            idx += 1
    # get toytree from newick
    tre = toytree.tree(rtree.write(tree_format=9))
    tre = tre.mod.make_ultrametric()
    self = tre.mod.node_scale_root_height(treeheight)
    self._coords.update()
    return self
|
Returns a balanced tree topology.
|
entailment
|
def get_span_char_height(span, row_heights):
    """
    Get the height of a span in the number of newlines it fills.

    Parameters
    ----------
    span : list of list of int
        A list of [row, column] pairs that make up the span
    row_heights : list of int
        A list of the number of newlines for each row in the table

    Returns
    -------
    total_height : int
        The height of the span in number of newlines
    """
    first_row = span[0][0]
    n_rows = get_span_row_count(span)
    # sum the heights of every row the span covers, plus one extra line
    # for each internal row boundary the span absorbs.
    return sum(row_heights[first_row:first_row + n_rows]) + (n_rows - 1)
|
Get the height of a span in the number of newlines it fills.
Parameters
----------
span : list of list of int
A list of [row, column] pairs that make up the span
row_heights : list of int
A list of the number of newlines for each row in the table
Returns
-------
total_height : int
The height of the span in number of newlines
|
entailment
|
def html2data(html_string):
    """
    Convert an html table to a data table and spans.

    Parameters
    ----------
    html_string : str
        The string containing the html table

    Returns
    -------
    table : list of lists of str
    spans : list of lists of lists of int
        A span is a list of [row, column] pairs that define what cells
        are merged in a table.
    use_headers : bool
    """
    spans = extract_spans(html_string)
    column_count = get_html_column_count(html_string)
    # row count must be computed before single-cell spans are dropped
    row_count = get_html_row_count(spans)
    # single-cell "spans" carry no merge information; discard them
    spans = [span for span in spans if len(span) > 1]
    table = extract_table(html_string, row_count, column_count)
    use_headers = headers_present(html_string)
    return table, spans, use_headers
|
Convert an html table to a data table and spans.
Parameters
----------
html_string : str
The string containing the html table
Returns
-------
table : list of lists of str
spans : list of lists of lists of int
A span is a list of [row, column] pairs that define what cells
are merged in a table.
use_headers : bool
|
entailment
|
def newick(self, tree_format=0):
    """
    Return the newick representation of the tree in its current state.

    Parameters
    ----------
    tree_format (int): an ete-style newick format code (default 0).
    """
    # The previous version built a 'features'/'extrafeat' set from the
    # root's first child but never used it; that dead code is removed.
    return self.treenode.write(format=tree_format)
|
Returns newick representation of the tree in its current state.
|
entailment
|
def get_edge_values(self, feature='idx'):
    """
    Return edge values in the order they are plotted (see .get_edges()).

    Parameters
    ----------
    feature (str): the node feature to collect per edge (default 'idx').
    """
    # edges[:, 1] holds the child-node idx of each plotted edge; an
    # edge takes its value from its child node. getattr with a default
    # yields "" for nodes lacking the feature, as before.
    return [
        getattr(self.treenode.search_nodes(idx=child_idx)[0], feature, "")
        for child_idx in self._coords.edges[:, 1]
    ]
|
Returns edge values in the order they are plotted (see .get_edges())
|
entailment
|
def get_edge_values_from_dict(self, node_value_dict=None, include_stem=True):
    """
    Enter a dictionary mapping node 'idx' or tuple of tipnames to values
    that you want mapped to the stem and descendant edges of that node.
    Edge values are returned in proper plot order to be entered to the
    edge_colors or edge_widths arguments to draw(). To see node idx values
    use node_labels=True in draw(). If dictionary keys are integers it is
    assumed they are node idxs.

    Note: it is safer to use tip labels to identify clades than node idxs
    since tree transformations (e.g., rooting) can change the mapping of
    idx values to nodes on the tree.

    This function is most convenient for applying values to clades. To
    instead map values to specific edges (e.g., a single internal edge)
    it will be easier to use tre.get_edge_values() and then to set the
    values of the internal edges manually.

    Example 1:
        tre = toytree.tree("((a,b),(c,d));")
        tre.get_edge_values_from_dict({5: 'green', 6: 'red'})
        # ['green', 'green', 'green', 'red', 'red', 'red']
    Example 2:
        tre = toytree.tree("((a,b),(c,d));")
        tre.get_edge_values_from_dict({(a, b): 'green', (c, d): 'red'})
        # ['green', 'green', 'green', 'red', 'red', 'red']
    """
    # map node idxs to the order in which edges are plotted
    # (get_edge_values() defaults to feature='idx', so this maps each
    # child-node idx to its plotted edge position)
    idxs = {j: i for (i, j) in enumerate(self.get_edge_values())}
    # one slot per edge; edges with no mapped value stay None
    values = [None] * self._coords.edges.shape[0]
    if node_value_dict is None:
        return values
    # convert tipname (str/tuple) keys to node idxs via fuzzy matching;
    # other keys are assumed to already be node idxs
    rmap = {}
    for (key, val) in node_value_dict.items():
        if isinstance(key, (str, tuple)):
            node = fuzzy_match_tipnames(self, key, None, None, True, False)
            rmap[node.idx] = val
        else:
            rmap[key] = val
    node_value_dict = rmap
    # walk the tree; when a mapped node is found, paint its stem edge
    # (optional) and all of its descendant edges with the mapped value
    for node in self.treenode.traverse("levelorder"):
        if node.idx in node_value_dict:
            # add value to stem edge (the root has no stem edge)
            if include_stem:
                if not node.is_root():
                    values[idxs[node.idx]] = node_value_dict[node.idx]
            # add value to descendants edges
            for desc in node.get_descendants():
                values[idxs[desc.idx]] = node_value_dict[node.idx]
    return values
|
Enter a dictionary mapping node 'idx' or tuple of tipnames to values
that you want mapped to the stem and descendant edges that node.
Edge values are returned in proper plot order to be entered to the
edge_colors or edge_widths arguments to draw(). To see node idx values
use node_labels=True in draw(). If dictionary keys are integers it is
assumed they are node idxs.
Note: it is safer to use tip labels to identify clades than node idxs
since tree transformations (e.g., rooting) can change the mapping of
idx values to nodes on the tree.
This function is most convenient for applying values to clades. To
instead map values to specific edges (e.g., a single internal edge)
it will be easier to use tre.get_edge_values() and then to set the
values of the internal edges manually.
Example 1:
tre = toytree.tree("((a,b),(c,d));")
tre.get_edge_values_from_dict({5: 'green', 6: 'red'})
# ['green', 'green', 'green', 'red', 'red', 'red']
Example 2:
tre = toytree.tree("((a,b),(c,d));")
tre.get_edge_values_from_dict({(a, b): 'green', (c, d): 'red'})
# ['green', 'green', 'green', 'red', 'red', 'red']
|
entailment
|
def get_mrca_idx_from_tip_labels(self, names=None, wildcard=None, regex=None):
    """
    Return the node idx label of the most recent common ancestor node
    for the clade that includes the selected tips. Arguments can use
    fuzzy name matching: a list of tip names, wildcard selector, or
    regex string.
    """
    # at least one selector must be provided (and be non-empty)
    if not any((names, wildcard, regex)):
        raise ToytreeError("at least one argument required")
    mrca = fuzzy_match_tipnames(self, names, wildcard, regex, True, False)
    return mrca.idx
|
Returns the node idx label of the most recent common ancestor node
for the clade that includes the selected tips. Arguments can use fuzzy
name matching: a list of tip names, wildcard selector, or regex string.
|
entailment
|
def get_node_values(
    self,
    feature=None,
    show_root=False,
    show_tips=False,
):
    """
    Returns node values from tree object in node plot order. To modify
    values you must modify the .treenode object directly by setting new
    'features'. For example:

        for node in ttree.treenode.traverse():
            node.add_feature("PP", 100)

    By default node and tip values are hidden (set to "") so that they
    are not shown on the tree plot. To include values for these nodes
    use the 'show_root'=True, or 'show_tips'=True arguments.

        tree.get_node_values("support", True, True)
    """
    # access nodes in the order they will be plotted (reversed idx order)
    ndict = self.get_node_dict(return_internal=True, return_nodes=True)
    nodes = [ndict[i] for i in range(self.nnodes)[::-1]]
    # get features; nodes lacking the feature yield empty strings
    if feature:
        vals = [i.__getattribute__(feature) if hasattr(i, feature)
                else "" for i in nodes]
    else:
        vals = [" " for i in nodes]
    # apply hiding rules: blank out root and/or tip values unless shown
    if not show_root:
        vals = [i if not j.is_root() else "" for i, j in zip(vals, nodes)]
    if not show_tips:
        vals = [i if not j.is_leaf() else "" for i, j in zip(vals, nodes)]
    # convert float to ints for prettier printing unless all floats
    # raise exception and skip if there are true strings (names)
    try:
        if all([Decimal(str(i)) % 1 == 0 for i in vals if i]):
            vals = [int(i) if isinstance(i, float) else i for i in vals]
    except Exception:
        # non-numeric values (e.g., node names) cannot be parsed as
        # Decimal; deliberately leave vals unchanged in that case.
        pass
    return vals
|
Returns node values from tree object in node plot order. To modify
values you must modify the .treenode object directly by setting new
'features'. For example
for node in ttree.treenode.traverse():
node.add_feature("PP", 100)
By default node and tip values are hidden (set to "") so that they
are not shown on the tree plot. To include values for these nodes
use the 'show_root'=True, or 'show_tips'=True arguments.
tree.get_node_values("support", True, True)
|
entailment
|
def get_node_dict(self, return_internal=False, return_nodes=False):
    """
    Return node labels as a dictionary mapping {idx: name} where idx is
    the order of nodes in 'preorder' traversal. Used internally by the
    func .get_node_values() to return values in proper order.

    return_internal: if True all nodes are returned, if False only tips.
    return_nodes: if True returns TreeNodes, if False return node names.
    """
    # a single comprehension replaces the original four-way branch:
    # the filter and the mapped value are both selected inline.
    return {
        node.idx: (node if return_nodes else node.name)
        for node in self.treenode.traverse("preorder")
        if return_internal or node.is_leaf()
    }
|
Return node labels as a dictionary mapping {idx: name} where idx is
the order of nodes in 'preorder' traversal. Used internally by the
func .get_node_values() to return values in proper order.
return_internal: if True all nodes are returned, if False only tips.
return_nodes: if True returns TreeNodes, if False return node names.
|
entailment
|
def get_tip_coordinates(self, axis=None):
    """
    Returns coordinates of the tip positions for a tree. If no argument
    for axis then a 2-d array is returned. The first column is the x
    coordinates the second column is the y-coordinates. If you enter an
    argument for axis then a 1-d array will be returned of just that axis.
    """
    # tips occupy the first ntips rows of the node coordinate array
    tips = self.get_node_coordinates()[:self.ntips]
    if axis == 'x':
        return tips[:, 0]
    if axis == 'y':
        return tips[:, 1]
    return tips
|
Returns coordinates of the tip positions for a tree. If no argument
for axis then a 2-d array is returned. The first column is the x
coordinates the second column is the y-coordinates. If you enter an
argument for axis then a 1-d array will be returned of just that axis.
|
entailment
|
def get_tip_labels(self, idx=None):
    """
    Returns tip labels in the order they will be plotted on the tree,
    i.e., starting from zero axis and counting up by units of 1 (bottom
    to top in right-facing trees; left to right in down-facing). If
    'idx' is indicated then a list of tip labels descended from that
    node will be returned, instead of all tip labels. This is useful in
    combination with other functions that select nodes/clades of the
    tree based on a list of tip labels. You can use the toytree draw()
    command with tip_labels='idx' or tip_labels=True to see idx labels
    plotted on nodes.

    Parameters:
        idx (int): index label of a node.

    Example:
        # select a clade of the tree and use it for rooting.
        tiplist = tre.get_descenants_from_idx(21)
        tre.root(names=tiplist)
    """
    # bugfix: use an explicit None check; 'if not idx' incorrectly
    # treated idx=0 (a valid node index) as "no node selected".
    if idx is None:
        return self.treenode.get_leaf_names()[::-1]
    treenode = self.treenode.search_nodes(idx=idx)[0]
    return treenode.get_leaf_names()[::-1]
|
Returns tip labels in the order they will be plotted on the tree, i.e.,
starting from zero axis and counting up by units of 1 (bottom to top
in right-facing trees; left to right in down-facing). If 'idx' is
indicated then a list of tip labels descended from that node will be
returned, instead of all tip labels. This is useful in combination
with other functions that select nodes/clades of the tree based on a
list of tip labels. You can use the toytree draw() command with
tip_labels='idx' or tip_labels=True to see idx labels plotted on nodes.
Parameters:
idx (int): index label of a node.
Example:
# select a clade of the tree and use it for rooting.
tiplist = tre.get_descenants_from_idx(21)
tre.root(names=tiplist)
|
entailment
|
def is_bifurcating(self, include_root=True):
    """
    Returns False if there is a polytomy in the tree, including if the
    tree is unrooted (basal polytomy), unless you use the
    include_root=False argument.
    """
    ntips = len(self)
    nnodes = sum(1 for _ in self.treenode.traverse())
    # a strictly bifurcating rooted tree has 2n-1 total nodes; an
    # unrooted one has 2n-2 (or 2n-1 if the root polytomy is excused).
    if self.is_rooted():
        return nnodes == 2 * ntips - 1
    if include_root:
        return nnodes - 1 == 2 * ntips - 2
    return nnodes == 2 * ntips - 2
|
Returns False if there is a polytomy in the tree, including if the tree
is unrooted (basal polytomy), unless you use the include_root=False
argument.
|
entailment
|
def ladderize(self, direction=0):
    """
    Ladderize tree (order descendants) so that top child has fewer
    descendants than the bottom child in a left to right tree plot.
    To reverse this pattern use direction=1.
    """
    # work on a deep copy so the original tree is left untouched
    newtree = deepcopy(self)
    newtree.treenode.ladderize(direction=direction)
    # any previously fixed tip order is invalidated by the reordering
    newtree._fixed_order = None
    newtree._coords.update()
    return newtree
|
Ladderize tree (order descendants) so that top child has fewer
descendants than the bottom child in a left to right tree plot.
To reverse this pattern use direction=1.
|
entailment
|
def collapse_nodes(self, min_dist=1e-6, min_support=0):
    """
    Returns a copy of the tree where internal nodes with dist <= min_dist
    or support < min_support are deleted, resulting in a collapsed tree.

    Examples:
        newtre = tre.collapse_nodes(min_dist=0.001)
        newtre = tre.collapse_nodes(min_support=50)
    """
    nself = self.copy()
    for node in nself.treenode.traverse():
        # only internal nodes can be collapsed
        if not node.is_leaf():
            # use logical 'or' (short-circuiting) rather than the
            # bitwise '|' that was applied to booleans before
            if (node.dist <= min_dist) or (node.support < min_support):
                node.delete()
    nself._coords.update()
    return nself
|
Returns a copy of the tree where internal nodes with dist <= min_dist
are deleted, resulting in a collapsed tree. e.g.:
newtre = tre.collapse_nodes(min_dist=0.001)
newtre = tre.collapse_nodes(min_support=50)
|
entailment
|
def drop_tips(self, names=None, wildcard=None, regex=None):
    """
    Returns a copy of the tree with the selected tips removed. The
    entered value can be a name or list of names. To prune on an
    internal node to create a subtree see the .prune() function instead.

    Parameters:
        tips: list of tip names.

    Example:
        ptre = tre.drop_tips(['a', 'b'])
    """
    newtree = self.copy()
    # nothing selected: return an unmodified copy
    if not any((names, wildcard, regex)):
        return newtree
    # resolve the selector arguments to a list of matching tip names
    tipnames = fuzzy_match_tipnames(
        ttree=newtree,
        names=names,
        wildcard=wildcard,
        regex=regex,
        mrca=False,
        mono=False,
    )
    if len(tipnames) == len(newtree):
        raise ToytreeError("You cannot drop all tips from the tree.")
    if not tipnames:
        raise ToytreeError("No tips selected.")
    # keep everything that was not matched and prune in place
    keep = [name for name in newtree.get_tip_labels() if name not in tipnames]
    newtree.treenode.prune(keep, preserve_branch_length=True)
    newtree._coords.update()
    return newtree
|
Returns a copy of the tree with the selected tips removed. The entered
value can be a name or list of names. To prune on an internal node to
create a subtree see the .prune() function instead.
Parameters:
tips: list of tip names.
# example:
ptre = tre.drop_tips(['a', 'b'])
|
entailment
|
def rotate_node(
    self,
    names=None,
    wildcard=None,
    regex=None,
    idx=None,
    # modify_tree=False,
):
    """
    Returns a ToyTree with the selected node rotated for plotting.

    NOTE(review): tip colors may not align correctly if nodes are
    rotated (known limitation). The 'idx' argument is currently
    accepted but unused by the body — selection goes through the
    names/wildcard/regex fuzzy matcher only.
    """
    # map tip name -> current plotted position (0-based)
    revd = {j: i for (i, j) in enumerate(self.get_tip_labels())}
    neworder = {}
    # get node to rotate
    treenode = fuzzy_match_tipnames(
        self, names, wildcard, regex, True, True)
    # the two clades being swapped are the children of the node's parent
    children = treenode.up.children
    names = [[j.name for j in i.get_leaves()] for i in children]
    nidxs = [[revd[i] for i in j] for j in names]
    # get size of the big clade; if the first clade is larger, rotate
    # by the size of the smaller one instead
    move = max((len(i) for i in nidxs))
    if len(nidxs[0]) > len(nidxs[1]):
        move = min((len(i) for i in nidxs))
    # cycle the combined position list by 'move' steps so the two
    # clades exchange their plotted positions
    cnames = list(itertools.chain(*names))
    tdict = {i: None for i in cnames}
    cycle = itertools.cycle(itertools.chain(*nidxs))
    for m in range(move):
        next(cycle)
    for t in cnames:
        tdict[t] = next(cycle)
    # merge rotated positions with the unchanged remaining tips
    for key in revd:
        if key in tdict:
            neworder[key] = tdict[key]
        else:
            neworder[key] = revd[key]
    revd = {j: i for (i, j) in neworder.items()}
    neworder = [revd[i] for i in range(self.ntips)]
    # returns a new tree (i.e., copy) modified w/ a fixed order
    nself = ToyTree(self.newick, fixed_order=neworder)
    nself._coords.update()
    return nself
|
Returns a ToyTree with the selected node rotated for plotting.
tip colors may not align correctly if nodes are rotated...
|
entailment
|
def resolve_polytomy(
    self,
    dist=1.0,
    support=100,
    recursive=True):
    """
    Returns a copy of the tree with all polytomies randomly resolved.
    Does not transform tree in-place.
    """
    # copy first so the caller's tree is unchanged
    newtree = self.copy()
    newtree.treenode.resolve_polytomy(
        default_dist=dist,
        default_support=support,
        recursive=recursive,
    )
    newtree._coords.update()
    return newtree
|
Returns a copy of the tree with all polytomies randomly resolved.
Does not transform tree in-place.
|
entailment
|
def unroot(self):
    """
    Returns a copy of the tree unrooted. Does not transform tree in-place.
    """
    newtree = self.copy()
    newtree.treenode.unroot()
    # re-ladderize for a tidy default plotting order
    newtree.treenode.ladderize()
    newtree._coords.update()
    return newtree
|
Returns a copy of the tree unrooted. Does not transform tree in-place.
|
entailment
|
def root(self, names=None, wildcard=None, regex=None):
    """
    (Re-)root a tree by selecting an existing split in the tree, or
    creating a new node to split an edge in the tree. The rooting
    location is selected by entering the tips descendant from one child
    of the root split (e.g., names='a' or names=['a', 'b']). You can
    alternatively select a list of tip names using a fuzzy selector
    based on a unique shared string (wildcard="prz") or a regex
    matching pattern.

    Example:
        To root on a clade that includes the samples "1-A" and "1-B"
        you can do any of the following:
        rtre = tre.root(names=["1-A", "1-B"])
        rtre = tre.root(wildcard="1-")
        rtre = tre.root(regex="1-[A,B]")
    """
    # make a deepcopy of the tree
    nself = self.copy()
    # get treenode of the common ancestor of selected tips
    try:
        node = fuzzy_match_tipnames(
            nself, names, wildcard, regex, True, True)
    except ToytreeError:
        # selection was not monophyletic: try the reciprocal taxon set.
        # bugfix: the reciprocal set is computed from the matched
        # tipnames; the old code subtracted an undefined 'node'.
        tipnames = fuzzy_match_tipnames(
            nself, names, wildcard, regex, False, False)
        tipnames = list(set(self.get_tip_labels()) - set(tipnames))
        node = fuzzy_match_tipnames(
            nself, tipnames, None, None, True, True)
    # split root node if more than di- as this is the unrooted state
    if not nself.is_bifurcating():
        nself.treenode.resolve_polytomy()
    # root the object with ete's translate
    nself.treenode.set_outgroup(node)
    nself._coords.update()
    # collect node features, including any extras beyond the defaults
    testnode = nself.treenode.get_leaves()[0]
    features = {"name", "dist", "support", "height"}
    extrafeat = {i for i in testnode.features if i not in features}
    features.update(extrafeat)
    # if rooting created a new node, clean up its features
    nnode = [i for i in nself.treenode.traverse() if not hasattr(i, "idx")]
    if nnode:
        # nnode is the node that was added
        # rnode is the location where it *should* have been added
        nnode = nnode[0]
        rnode = [i for i in nself.treenode.children if i != node][0]
        # get idxs of existing nodes
        idxs = [int(i.idx) for i in nself.treenode.traverse()
                if hasattr(i, "idx")]
        # the outgroup node is a tip.
        # bugfix: the old test 'len(node.is_leaf()) == 1' raised
        # TypeError since is_leaf() returns a bool.
        if node.is_leaf():
            nnode.name = str("rerooted")
            # bugfix: assign the tip's name string, not the TreeNode
            # object itself (presumed intent — TODO confirm upstream).
            rnode.name = node.name
            rnode.add_feature("idx", max(idxs) + 1)
            rnode.dist *= 2
            sister = rnode.get_sisters()[0]
            sister.dist *= 2
            rnode.support = 100
            for feature in extrafeat:
                nnode.add_feature(feature, getattr(rnode, feature))
                rnode.del_feature(feature)
        # the outgroup node is internal
        else:
            nnode.add_feature("idx", max(idxs) + 1)
            nnode.name = str("rerooted")
            nnode.dist *= 2
            sister = nnode.get_sisters()[0]
            sister.dist *= 2
            nnode.support = 100
    # reorder descendants and refresh coordinates before returning
    nself.treenode.ladderize()
    nself._coords.update()
    return nself
|
(Re-)root a tree by selecting an existing split in the tree,
or creating a new node to split an edge in the tree. Rooting location
is selected by entering the tips descendant from one child of the root
split (e.g., names='a' or names=['a', 'b']). You can alternatively
select a list of tip names using a fuzzy selector based on a unique
shared string (wildcard="prz") or a regex matching pattern.
Example:
To root on a clade that includes the samples "1-A" and "1-B" you can
do any of the following:
rtre = tre.root(outgroup=["1-A", "1-B"])
rtre = tre.root(wildcard="1-")
rtre = tre.root(regex="1-[A,B]")
|
entailment
|
def draw(
    self,
    tree_style=None,
    height=None,
    width=None,
    axes=None,
    orient=None,
    tip_labels=None,
    tip_labels_colors=None,
    tip_labels_style=None,
    tip_labels_align=None,
    node_labels=None,
    node_labels_style=None,
    node_sizes=None,
    node_colors=None,
    node_style=None,
    node_hover=None,
    node_markers=None,
    edge_colors=None,
    edge_widths=None,
    edge_type=None,
    edge_style=None,
    edge_align_style=None,
    use_edge_lengths=None,
    scalebar=None,
    padding=None,
    xbaseline=0,
    ybaseline=0,
    **kwargs):
    """
    Plot a Toytree tree, returns a tuple of Toyplot (Canvas, Axes) objects.

    Parameters:
    -----------
    tree_style: str
        One of several preset styles for tree plotting. The default is 'n'
        (normal). Other options include 'c' (coalescent), 'd' (dark), and
        'm' (multitree). You also create your own TreeStyle objects.
        The tree_style sets a default set of styling on top of which other
        arguments passed to draw() will override when plotting.
    height: int (optional; default=None)
        If None the plot height is autosized. If 'axes' arg is used then
        tree is drawn on an existing Canvas, Axes and this arg is ignored.
    width: int (optional; default=None)
        Similar to height (above).
    axes: Toyplot.Cartesian (default=None)
        A toyplot cartesian axes object. If provided tree is drawn on it.
        If not provided then a new Canvas and Cartesian axes are created
        and returned with the tree plot added to it.
    use_edge_lengths: bool (default=False)
        Use edge lengths from .treenode (.get_edge_lengths) else
        edges are set to length >=1 to make tree ultrametric.
    tip_labels: [True, False, list]
        If True then the tip labels from .treenode are added to the plot.
        If False no tip labels are added. If a list of tip labels
        is provided it must be the same length as .get_tip_labels().
    tip_labels_colors:
        ...
    tip_labels_style:
        ...
    tip_labels_align:
        ...
    node_labels: [True, False, list]
        If True then nodes are shown, if False then nodes are suppressed
        If a list of node labels is provided it must be the same length
        and order as nodes in .get_node_values(). Node labels can be
        generated in the proper order using the the .get_node_labels()
        function from a Toytree tree to draw info from the tree features.
        For example: node_labels=tree.get_node_labels("support").
    node_sizes: [int, list, None]
        If None then nodes are not shown, otherwise, if node_labels
        then node_size can be modified. If a list of node sizes is
        provided it must be the same length and order as nodes in
        .get_node_dict().
    node_colors: [list]
        Use this argument only if you wish to set different colors for
        different nodes, in which case you must enter a list of colors
        as string names or HEX values the length and order of nodes in
        .get_node_dict(). If all nodes will be the same color then use
        instead the node_style dictionary:
        e.g., node_style={"fill": 'red'}
    node_style: [dict]
        ...
    node_hover: [True, False, list, dict]
        Default is True in which case node hover will show the node
        values. If False then no hover is shown. If a list or dict
        is provided (which should be in node order) then the values
        will be shown in order. If a dict then labels can be provided
        as well.
    """
    # allow ts as a shorthand for tree_style
    if kwargs.get("ts"):
        tree_style = kwargs.get("ts")
    # pass a copy of this tree so that any mods to .style are not saved
    nself = deepcopy(self)
    if tree_style:
        # only the first character of the style name is significant
        nself.style.update(TreeStyle(tree_style[0]))
    # update kwargs to merge it with user-entered arguments:
    userargs = {
        "height": height,
        "width": width,
        "orient": orient,
        "tip_labels": tip_labels,
        "tip_labels_colors": tip_labels_colors,
        "tip_labels_align": tip_labels_align,
        "tip_labels_style": tip_labels_style,
        "node_labels": node_labels,
        "node_labels_style": node_labels_style,
        "node_sizes": node_sizes,
        "node_colors": node_colors,
        "node_hover": node_hover,
        "node_style": node_style,
        "node_markers": node_markers,
        "edge_type": edge_type,
        "edge_colors": edge_colors,
        "edge_widths": edge_widths,
        "edge_style": edge_style,
        "edge_align_style": edge_align_style,
        "use_edge_lengths": use_edge_lengths,
        "scalebar": scalebar,
        "padding": padding,
        "xbaseline": xbaseline,
        "ybaseline": ybaseline,
    }
    kwargs.update(userargs)
    # drop unset (None) entries so style defaults are not clobbered
    censored = {i: j for (i, j) in kwargs.items() if j is not None}
    nself.style.update(censored)
    # warn user if they entered kwargs that weren't recognized:
    # (NOTE(review): handled extras like 'ts'/'debug' are also flagged
    # here since they are not in userargs)
    unrecognized = [i for i in kwargs if i not in userargs]
    if unrecognized:
        print("unrecognized arguments skipped: {}".format(unrecognized))
        print("check the docs, argument names may have changed.")
    # Init Drawing class object.
    draw = Drawing(nself)
    # Debug returns the object to test with.
    if kwargs.get("debug"):
        return draw
    # Make plot. If user provided explicit axes then include them.
    canvas, axes = draw.update(axes=axes)
    return canvas, axes
|
Plot a Toytree tree, returns a tuple of Toyplot (Canvas, Axes) objects.
Parameters:
-----------
tree_style: str
One of several preset styles for tree plotting. The default is 'n'
(normal). Other options include 'c' (coalescent), 'd' (dark), and
'm' (multitree). You also create your own TreeStyle objects.
The tree_style sets a default set of styling on top of which other
arguments passed to draw() will override when plotting.
height: int (optional; default=None)
If None the plot height is autosized. If 'axes' arg is used then
tree is drawn on an existing Canvas, Axes and this arg is ignored.
width: int (optional; default=None)
Similar to height (above).
axes: Toyplot.Cartesian (default=None)
A toyplot cartesian axes object. If provided tree is drawn on it.
If not provided then a new Canvas and Cartesian axes are created
and returned with the tree plot added to it.
use_edge_lengths: bool (default=False)
Use edge lengths from .treenode (.get_edge_lengths) else
edges are set to length >=1 to make tree ultrametric.
tip_labels: [True, False, list]
If True then the tip labels from .treenode are added to the plot.
If False no tip labels are added. If a list of tip labels
is provided it must be the same length as .get_tip_labels().
tip_labels_colors:
...
tip_labels_style:
...
tip_labels_align:
...
node_labels: [True, False, list]
If True then nodes are shown, if False then nodes are suppressed
If a list of node labels is provided it must be the same length
and order as nodes in .get_node_values(). Node labels can be
generated in the proper order using the the .get_node_labels()
function from a Toytree tree to draw info from the tree features.
For example: node_labels=tree.get_node_labels("support").
node_sizes: [int, list, None]
If None then nodes are not shown, otherwise, if node_labels
then node_size can be modified. If a list of node sizes is
provided it must be the same length and order as nodes in
.get_node_dict().
node_colors: [list]
Use this argument only if you wish to set different colors for
different nodes, in which case you must enter a list of colors
as string names or HEX values the length and order of nodes in
.get_node_dict(). If all nodes will be the same color then use
instead the node_style dictionary:
e.g., node_style={"fill": 'red'}
node_style: [dict]
...
node_hover: [True, False, list, dict]
Default is True in which case node hover will show the node
values. If False then no hover is shown. If a list or dict
is provided (which should be in node order) then the values
will be shown in order. If a dict then labels can be provided
as well.
|
entailment
|
def get_merge_direction(cell1, cell2):
    """
    Determine the side of cell1 that can be merged with cell2.

    This is based on the location of the two cells in the table as well
    as the compatibility of their height and width.

    For example these cells can merge::

        cell1 cell2 merge "RIGHT"
        +-----+ +------+ +-----+------+
        | foo | | dog | | foo | dog |
        | | +------+ | +------+
        | | | cat | | | cat |
        | | +------+ | +------+
        | | | bird | | | bird |
        +-----+ +------+ +-----+------+

    But these cells cannot merge::

        +-----+ +------+
        | foo | | dog |
        | | +------+
        | | | cat |
        | | +------+
        | |
        +-----+

    Parameters
    ----------
    cell1 : dashtable.data2rst.Cell
    cell2 : dashtable.data2rst.Cell

    Returns
    -------
    str
        The side onto which cell2 can be merged. Will be one of
        ["LEFT", "RIGHT", "BOTTOM", "TOP", "NONE"]
    """
    # compute the bounding edges of both cells once
    left1, top1 = cell1.column, cell1.row
    right1 = cell1.column + cell1.column_count
    bottom1 = cell1.row + cell1.row_count
    left2, top2 = cell2.column, cell2.row
    right2 = cell2.column + cell2.column_count
    bottom2 = cell2.row + cell2.row_count

    # shared row extent / shared column extent, reused by the checks
    same_rows = (top1 == top2) and (bottom1 == bottom2)
    same_cols = (left1 == left2) and (right1 == right2)

    # cell2 sits flush against cell1's right edge
    if (same_rows and right1 == left2 and
            cell1.right_sections >= cell2.left_sections):
        return "RIGHT"
    # cell2 sits flush above cell1
    if (same_cols and top1 == bottom2 and
            cell1.top_sections >= cell2.bottom_sections):
        return "TOP"
    # cell2 sits flush below cell1
    if (same_cols and bottom1 == top2 and
            cell1.bottom_sections >= cell2.top_sections):
        return "BOTTOM"
    # cell2 sits flush against cell1's left edge
    if (same_rows and left1 == right2 and
            cell1.left_sections >= cell2.right_sections):
        return "LEFT"
    return "NONE"
|
Determine the side of cell1 that can be merged with cell2.
This is based on the location of the two cells in the table as well
as the compatibility of their height and width.
For example these cells can merge::
cell1 cell2 merge "RIGHT"
+-----+ +------+ +-----+------+
| foo | | dog | | foo | dog |
| | +------+ | +------+
| | | cat | | | cat |
| | +------+ | +------+
| | | bird | | | bird |
+-----+ +------+ +-----+------+
But these cells cannot merge::
+-----+ +------+
| foo | | dog |
| | +------+
| | | cat |
| | +------+
| |
+-----+
Parameters
----------
cell1 : dashtable.data2rst.Cell
cell2 : dashtable.data2rst.Cell
Returns
-------
str
The side onto which cell2 can be merged. Will be one of
["LEFT", "RIGHT", "BOTTOM", "TOP", "NONE"]
|
entailment
|
def parse_nhx(NHX_string):
    """
    Parse NHX feature annotations into a {name: value} dict of strings.

    NHX format: [&&NHX:prop1=value1:prop2=value2]
    MB format: ((a[&Z=1,Y=2], b[&Z=1,Y=2]):1.0[&L=1,W=0], ...

    Note: the colon-separated fields are parsed even when no '[&&NHX:'
    wrapper is present; each field must look like 'name=value' or a
    NewickError is raised.
    """
    # store features
    ndict = {}
    # strip the NHX wrapper, if present, leaving 'k=v:k=v' pairs
    if "[&&NHX:" in NHX_string:
        NHX_string = NHX_string.replace("[&&NHX:", "")
        NHX_string = NHX_string.replace("]", "")
    for field in NHX_string.split(":"):
        try:
            pname, pvalue = field.split("=")
            ndict[pname] = pvalue
        except ValueError as e:
            # bugfix: 'e' was captured but unused; chain it so the
            # original parse failure is preserved for debugging
            raise NewickError('Invalid NHX format %s' % field) from e
    return ndict
|
NHX format: [&&NHX:prop1=value1:prop2=value2]
MB format: ((a[&Z=1,Y=2], b[&Z=1,Y=2]):1.0[&L=1,W=0], ...
|
entailment
|
def get_data_from_intree(self):
    """
    Load *data* from a file or string and store it as a list of strings
    in self.data.

    The data contents could be one newick string; a multiline NEXUS
    format for one tree; multiple newick strings on multiple lines; or
    multiple newick strings in a multiline NEXUS format. In any case,
    we will read in the data as a list of lines.
    """
    # load string: filename, URL, or raw data stream
    if isinstance(self.intree, (str, bytes)):
        # strip it
        self.intree = self.intree.strip()

        # is a URL: make a list by splitting a string
        if any(i in self.intree for i in ("http://", "https://")):
            response = requests.get(self.intree)
            response.raise_for_status()
            self.data = response.text.strip().split("\n")

        # is a file: read by lines to a list
        elif os.path.exists(self.intree):
            # bugfix: mode 'rU' was deprecated and removed in Python
            # 3.11; universal newlines is the default for mode 'r'.
            with open(self.intree, 'r') as indata:
                self.data = indata.readlines()

        # is a string: make into a list by splitting
        else:
            self.data = self.intree.split("\n")

    # load iterable: iterable of newick strings
    elif isinstance(self.intree, (list, set, tuple)):
        self.data = list(self.intree)
|
Load *data* from a file or string and return as a list of strings.
The data contents could be one newick string; a multiline NEXUS format
for one tree; multiple newick strings on multiple lines; or multiple
newick strings in a multiline NEXUS format. In any case, we will read
in the data as a list on lines.
|
entailment
|
def parse_nexus(self):
    """Extract newick strings and the translation dict from NEXUS data."""
    # only act when the input is actually a NEXUS file
    header = self.data[0].strip().upper()
    if header == "#NEXUS":
        nexus = NexusParser(self.data)
        self.data = nexus.newicks
        self.tdict = nexus.tdict
|
get newick data from NEXUS
|
entailment
|
def get_treenodes(self):
    """
    Build TreeNode objects from the loaded newick strings.

    Parses self.data[0] only in single-tree mode, or every entry in
    multitree mode, appending the resulting root nodes to self.treenodes.
    """
    # single- and multi-tree inputs share the same extraction logic;
    # the original duplicated it in both branches of an if/else.
    newicks = self.data if self.multitree else self.data[:1]
    for tre in newicks:
        # get one TreeNode from each Newick string
        extractor = Newick2TreeNode(tre.strip(), fmt=self.fmt)
        self.treenodes.append(extractor.newick_from_string())
|
test format of intree nex/nwk, extra features
|
entailment
|
def newick_from_string(self):
    "Reads a newick string in the New Hampshire format."
    # split on parentheses to traverse hierarchical tree structure;
    # each '(' opens one nesting level of the tree
    for chunk in self.data.split("(")[1:]:
        # add child to make this node a parent (the first chunk starts
        # at the pre-made root instead)
        self.current_parent = (
            self.root if self.current_parent is None else
            self.current_parent.add_child()
        )
        # get all parenth endings from this parenth start
        subchunks = [ch.strip() for ch in chunk.split(",")]
        # the last piece must either be empty (another '(' follows) or
        # terminate the tree with ';' — anything else is malformed
        if subchunks[-1] != '' and not subchunks[-1].endswith(';'):
            raise NewickError(
                'Broken newick structure at: {}'.format(chunk))
        # Every closing parenthesis will close a node and go up one level.
        for idx, leaf in enumerate(subchunks):
            # skip the empty trailing element produced when the chunk
            # ends right before a new '('
            if leaf.strip() == '' and idx == len(subchunks) - 1:
                continue
            closing_nodes = leaf.split(")")
            # parse features and apply to the node object
            self.apply_node_data(closing_nodes[0], "leaf")
            # remaining pieces contain closing parens and data about the
            # internal nodes being closed
            if len(closing_nodes) > 1:
                for closing_internal in closing_nodes[1:]:
                    closing_internal = closing_internal.rstrip(";")
                    # read internal node data and go up one level
                    self.apply_node_data(closing_internal, "internal")
                    self.current_parent = self.current_parent.up
    # root of the fully built tree
    return self.root
|
Reads a newick string in the New Hampshire format.
|
entailment
|
def extract_tree_block(self):
    """
    Iterate over lines of a NEXUS file to fill the translation table
    (self.tdict) and collect newick strings (self.newicks).

    Stops cleanly at end-of-data instead of raising an uncaught
    StopIteration on truncated files missing the closing 'end;', and no
    longer records the translate terminator ';' as a bogus tdict entry.
    """
    lines = iter(self.data)
    while 1:
        try:
            line = next(lines).strip()
        except StopIteration:
            break
        # enter trees block
        if line.lower() == "begin trees;":
            while 1:
                # iter through trees block; a truncated file simply ends
                try:
                    sub = next(lines).strip().split()
                except StopIteration:
                    break
                # skip if a blank line
                if not sub:
                    continue
                # look for translation table
                if sub[0].lower() == "translate":
                    while 1:
                        try:
                            sub = next(lines).strip().split()
                        except StopIteration:
                            break
                        # skip blanks inside the table
                        if not sub:
                            continue
                        # stop at the ';' terminator without recording it
                        if sub[0] == ";":
                            break
                        self.tdict[sub[0]] = sub[-1].strip(",")
                # parse tree lines
                if sub[0].lower().startswith("tree"):
                    self.newicks.append(sub[-1])
                # end of trees block
                if sub[0].lower() == "end;":
                    break
|
iterate through data file to extract trees
|
entailment
|
def parse_command_line():
    """Parse CLI args for the versioner tool.

    Returns the parsed argparse.Namespace. Prints help and exits with
    status 1 when invoked with no arguments.
    """
    ## create the parser with usage examples in the epilog
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
  * Example command-line usage:

  ## push test branch to conda --label=conda-test for travis CI
  ./versioner.py -p toytree -b test -t 0.1.7

  ## push master as a new tag to git and conda
  ./versioner.py -p toytree -b master -t 0.1.7 --deploy

  ## build other deps on conda at --label=conda-test
  ./versioner.py -p toyplot --no-git
  ./versioner.py -p pypng --no-git
    """)

    ## add arguments
    parser.add_argument('-v', '--version', action='version',
                        version="0.1")

    parser.add_argument('-p',  # "--package",
                        dest="package",
                        default="toytree",
                        type=str,
                        # was a copy-paste of the -t help text
                        help="the package name to build and upload")

    parser.add_argument('-b',  # "--branch",
                        dest="branch",
                        default="master",
                        type=str,
                        help="the branch to build conda package from")

    parser.add_argument('-t',  # "--tag",
                        dest="tag",
                        default="test",
                        type=str,
                        help="the tag to put in __init__ and use on conda")

    parser.add_argument("--deploy",
                        dest="deploy",
                        action='store_true',
                        help="push the tag to git and upload to conda main label")

    parser.add_argument("--no-git",
                        dest="nogit",
                        action='store_true',
                        help="skip git update and only build/upload to conda")

    ## if no args then print help message and exit
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    ## parse args
    args = parser.parse_args()
    return args
|
Parse CLI args.
|
entailment
|
def get_git_status(self):
    """
    Collect the git branch, the version recorded in __init__.py, and the
    commits made since that version's tag; report the result to stdout.
    """
    # current branch name
    self._get_git_branch()
    # version string recorded in the package __init__ file
    self._get_init_release_tag()
    # commits made since the tag matching that version
    try:
        self._get_log_commits()
    except Exception as inst:
        raise Exception(
            """
    Error: the version in __init__.py is {}, so 'git log' is
    looking for commits that have happened since that version, but
    it appears there is not existing tag for that version. You may
    need to roll back the version in __init__.py to what is actually
    commited. Check with `git tag`.
    --------
    {}
    """.format(self.init_version, inst))
    # report where the repo stands relative to the recorded version
    ahead = ("'{}' is {} commits ahead of origin/{}"
             .format(self.tag, len(self.commits), self.init_version))
    print("__init__.__version__ == '{}':".format(self.init_version))
    print(ahead)
|
Gets git and init versions and commits since the init version
|
entailment
|
def push_git_package(self):
    """
    Write the new tag into __init__.py and meta.yaml and push to git,
    rolling the tag back and exiting on any failure.
    """
    # abort early if the local branch conflicts with origin
    self._pull_branch_from_origin()
    # record commit messages in the release notes on deploy
    if self.deploy:
        self._write_commits_to_release_notes()
    # run each write/push step in order; revert the tag on failure
    try:
        for step in (self._write_new_tag_to_init,
                     self._write_branch_and_tag_to_meta_yaml,
                     self._push_new_tag_to_git):
            step()
    except Exception as inst:
        print("\n Error:\n", inst)
        self._revert_tag_in_init()
        sys.exit(2)
|
if no conflicts then write new tag to
|
entailment
|
def _pull_branch_from_origin(self):
"""
Pulls from origin/master, if you have unmerged conflicts
it will raise an exception. You will need to resolve these.
"""
try:
## self.repo.git.pull()
subprocess.check_call(["git", "pull", "origin", self.branch])
except Exception as inst:
sys.exit("""
Your HEAD commit conflicts with origin/{tag}.
Resolve, merge, and rerun versioner.py
""")
|
Pulls from origin/master, if you have unmerged conflicts
it will raise an exception. You will need to resolve these.
|
entailment
|
def _get_init_release_tag(self):
"""
parses init.py to get previous version
"""
self.init_version = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
open(self.init_file, "r").read(),
re.M).group(1)
|
parses init.py to get previous version
|
entailment
|
def _get_log_commits(self):
    """
    Calls git log to compile a change list since the version recorded
    in __init__.py; stores [sha, message] pairs in self.commits.
    """
    ## list commits made since the tag matching init_version
    cmd = "git log --pretty=oneline {}..".format(self.init_version)
    cmdlist = shlex.split(cmd)
    # check_output returns bytes on Python 3; decode before splitting,
    # otherwise .split("\n") raises TypeError
    commits = subprocess.check_output(cmdlist).decode("utf-8")
    ## Split each "<sha> <message>" line into its two halves
    self.commits = [x.split(" ", 1) for x in commits.split("\n")]
|
calls git log to complile a change list
|
entailment
|
def _write_commits_to_release_notes(self):
"""
writes commits to the releasenotes file by appending to the end
"""
with open(self.release_file, 'a') as out:
out.write("==========\n{}\n".format(self.tag))
for commit in self.commits:
try:
msg = commit[1]
if msg != "cosmetic":
out.write("-" + msg + "\n")
except:
pass
|
writes commits to the releasenotes file by appending to the end
|
entailment
|
def _write_new_tag_to_init(self):
"""
Write version to __init__.py by editing in place
"""
for line in fileinput.input(self.init_file, inplace=1):
if line.strip().startswith("__version__"):
line = "__version__ = \"" + self.tag + "\""
print(line.strip("\n"))
|
Write version to __init__.py by editing in place
|
entailment
|
def _write_branch_and_tag_to_meta_yaml(self):
"""
Write branch and tag to meta.yaml by editing in place
"""
## set the branch to pull source from
with open(self.meta_yaml.replace("meta", "template"), 'r') as infile:
dat = infile.read()
newdat = dat.format(**{'tag': self.tag, 'branch': self.branch})
with open(self.meta_yaml, 'w') as outfile:
outfile.write(newdat)
|
Write branch and tag to meta.yaml by editing in place
|
entailment
|
def _revert_tag_in_init(self):
"""
Write version to __init__.py by editing in place
"""
for line in fileinput.input(self.init_file, inplace=1):
if line.strip().startswith("__version__"):
line = "__version__ = \"" + self.init_version + "\""
print(line.strip("\n"))
print("reverted __init__.__version__ back to {}"
.format(self.init_version))
|
Write version to __init__.py by editing in place
|
entailment
|
def _push_new_tag_to_git(self):
    """
    Commit the release notes and __init__.py, push to the tracked branch,
    and on deploy also create and push an annotated version tag.
    """
    print("Pushing new version to git")
    commit_msg = ("Updating {}/__init__.py to version {}"
                  .format(self.package, self.tag))
    ## stage both edited files and commit them
    for cmd in (["git", "add", self.release_file],
                ["git", "add", self.init_file],
                ["git", "commit", "-m", commit_msg]):
        subprocess.call(cmd)
    ## push changes to origin <tracked branch>
    subprocess.call(["git", "push", "origin", self.branch])
    ## on deploy, tag the release and push the tag
    if self.deploy:
        subprocess.call(["git", "tag", "-a", self.tag,
                         "-m", "Updating version to {}".format(self.tag)])
        subprocess.call(["git", "push", "origin"])
|
tags a new release and pushes to origin/master
|
entailment
|
def build_conda_packages(self):
    """
    Build the Linux conda package for each python version, upload it,
    then convert to an OSX package and upload that too.
    """
    ## tmp dir for the OSX conversions
    bldir = "./tmp-bld"
    if not os.path.exists(bldir):
        os.makedirs(bldir)

    def _upload(pkg_path):
        # deploy uploads to the main label; otherwise force-push to 'test'
        if self.deploy:
            cmd = ["anaconda", "upload", pkg_path]
        else:
            cmd = ["anaconda", "upload", pkg_path, "--label", "test", "--force"]
        return subprocess.Popen(cmd).communicate()

    ## iterate over python builds
    for pybuild in ["2.7", "3"]:
        ## build Linux package and upload to anaconda.org
        build = api.build(
            "conda-recipe/{}".format(self.package),
            python=pybuild)
        _upload(build[0])
        ## convert the Linux build to OSX and upload the copy
        api.convert(build[0], output_dir=bldir, platforms=["osx-64"])
        _upload(os.path.join(bldir, "osx-64", os.path.basename(build[0])))

    ## cleanup tmpdir
    shutil.rmtree(bldir)
|
Run the Linux build and use converter to build OSX
|
entailment
|
def get_span_row_count(span):
    """
    Gets the number of rows included in a span

    Parameters
    ----------
    span : list of lists of int
        The [row, column] pairs that make up the span

    Returns
    -------
    rows : int
        The number of rows included in the span

    Example
    -------
    Consider this table::

        +--------+-----+
        | foo    | bar |
        +--------+     |
        | spam   |     |
        +--------+     |
        | goblet |     |
        +--------+-----+

    ::

        >>> span = [[0, 1], [1, 1], [2, 1]]
        >>> print(get_span_row_count(span))
        3
    """
    # count distinct row indices; unlike the original running comparison
    # this is correct even when the span cells are not sorted by row
    return len({cell[0] for cell in span})
|
Gets the number of rows included in a span
Parameters
----------
span : list of lists of int
The [row, column] pairs that make up the span
Returns
-------
rows : int
The number of rows included in the span
Example
-------
Consider this table::
+--------+-----+
| foo | bar |
+--------+ |
| spam | |
+--------+ |
| goblet | |
+--------+-----+
::
>>> span = [[0, 1], [1, 1], [2, 1]]
>>> print(get_span_row_count(span))
3
|
entailment
|
def asarray(x, dtype=None):
    '''Convert ``x`` into a ``numpy.ndarray``.'''
    iterable = scalarasiter(x)
    # already an ndarray: return unchanged
    if isinstance(iterable, ndarray):
        return iterable
    # materialize generators so len() works below
    if not hasattr(iterable, '__len__'):
        iterable = list(iterable)
    if dtype != object_type:
        return array(iterable, dtype=dtype)
    # object dtype: fill element-wise so numpy does not try to
    # broadcast or nest the individual values
    out = ndarray((len(iterable),), dtype=dtype)
    for idx, item in enumerate(iterable):
        out[idx] = item
    return out
|
Convert ``x`` into a ``numpy.ndarray``.
|
entailment
|
def ascolumn(x, dtype=None):
    '''Convert ``x`` into a ``column``-type ``numpy.ndarray``.'''
    arr = asarray(x, dtype)
    # already 2-D (or higher): nothing to reshape
    if len(arr.shape) >= 2:
        return arr
    return arr.reshape(len(arr), 1)
|
Convert ``x`` into a ``column``-type ``numpy.ndarray``.
|
entailment
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.