sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def run(self):
    """
    Tornado worker which handles HTTP requests.

    Entry point of the worker process: sets the process title, starts
    an HTTP server on the sockets inherited from the master and runs
    the Tornado IOLoop until stopped (SIGINT or loss of the parent).
    """
    setproctitle.setproctitle("{:s}: worker {:s}".format(
        self.context.config.name,
        self._tornado_app.settings['interface'].name))
    self.logger.info(
        "Worker '%s' has been started with pid %d",
        self._tornado_app.settings['interface'].name, os.getpid())
    # Configure logging
    self.context.config.configure_logging()
    # Create HTTP server instance
    self.http_server = tornado.httpserver.HTTPServer(self._tornado_app)
    # Initialize child
    self.context.initialize_child(TORNADO_WORKER, process=self)

    # Register SIGINT handler which will stop worker
    def sigint_handler(unused_signum, unused_frame):
        """
        Call :meth:`stop` method when SIGINT is reached.
        """
        io_loop = tornado.ioloop.IOLoop.instance()
        # add_callback_from_signal is the only IOLoop scheduling API
        # that is safe to call from inside a signal handler.
        io_loop.add_callback_from_signal(self.stop)
    signal.signal(signal.SIGINT, sigint_handler)

    # Register callback which is called when IOLoop is started
    def run_ioloop_callback():
        """
        Set ready flag. Callback is called when worker is started.
        """
        # _ready is presumably a shared multiprocessing value polled
        # by the parent process -- TODO confirm.
        self._ready.value = True
    tornado.ioloop.IOLoop.instance().add_callback(run_ioloop_callback)

    # Register job which will stop worker if parent process PID is changed
    def check_parent_callback():
        """
        Tornado's callback function which checks PID of the parent
        process. If PID of the parent process is changed (parent has
        stopped), call :meth:`stop` method.
        """
        if os.getppid() != self._parent_pid:
            self.stop()
    # Poll the parent PID every 250 ms so orphaned workers shut down.
    stop_callback = tornado.ioloop.PeriodicCallback(
        check_parent_callback, 250)
    stop_callback.start()
    # Run HTTP server
    self.http_server.add_sockets(self._sockets)
    # Run IOLoop; blocks until the worker is stopped.
    tornado.ioloop.IOLoop.instance().start()
def initialize(self):
    """
    Initialize instance attributes. You can override this method in
    the subclasses.

    Records the PID of the main process and registers both service
    processes and Tornado workers, in that order.
    """
    self.main_pid = os.getpid()
    for batch in (self.init_service_processes(), self.init_tornado_workers()):
        self.processes.extend(batch)
def sigusr1_handler(self, unused_signum, unused_frame):
    """
    Handle SIGUSR1 signal. Call function which is defined in the
    **settings.SIGUSR1_HANDLER**. If main process, forward the
    signal to all child processes.
    """
    # Only the main process forwards the signal to children; workers
    # just run the configured handler below.
    is_main_process = os.getpid() == self.main_pid
    for child in self.processes:
        if is_main_process and child.pid:
            try:
                os.kill(child.pid, signal.SIGUSR1)
            except ProcessLookupError:
                # Child already exited; nothing to forward.
                pass
    if self._sigusr1_handler_func is not None:
        self._sigusr1_handler_func(self.context)
def init_service_processes(self):
    """
    Prepare processes defined in the **settings.SERVICE_PROCESSES**.
    Return :class:`list` of the :class:`ProcessWrapper` instances.
    """
    processes = []
    # Each entry is a 3-tuple: (dotted class path, wait-until-ready
    # flag, startup timeout in seconds).
    for process_struct in getattr(
            self.context.config.settings, 'SERVICE_PROCESSES', ()):
        process_cls = import_object(process_struct[0])
        wait_unless_ready, timeout = process_struct[1], process_struct[2]
        self.logger.info("Init service process '%s'", process_cls.__name__)
        processes.append(
            ProcessWrapper(
                process_cls, (self.context,),
                wait_unless_ready=wait_unless_ready,
                timeout=timeout
            )
        )
    return processes
def init_tornado_workers(self):
    """
    Prepare worker instances for all Tornado applications. Return
    :class:`list` of the :class:`ProcessWrapper` instances.

    Sockets are bound here, in the master, and passed to workers so
    that all workers for an interface share the same listening
    sockets.
    """
    workers = []
    for tornado_app in get_tornado_apps(self.context, debug=False):
        interface = tornado_app.settings['interface']
        if not interface.port and not interface.unix_socket:
            raise ValueError(
                'Interface MUST listen either on TCP '
                'or UNIX socket or both')
        name, processes, host, port, unix_socket = (
            interface.name, interface.processes,
            interface.host, interface.port, interface.unix_socket)
        # A non-positive process count means "one worker per CPU".
        if processes <= 0:
            processes = tornado.process.cpu_count()
        sockets = []
        listen_on = []
        if port:
            sockets.extend(tornado.netutil.bind_sockets(port, host))
            listen_on.append("{:s}:{:d}".format(host, port))
        if unix_socket:
            sockets.append(tornado.netutil.bind_unix_socket(unix_socket))
            listen_on.append("{:s}".format(interface.unix_socket))
        self.logger.info(
            "Init %d worker(s) for interface '%s' (%s)",
            processes, name, ", ".join(listen_on))
        for dummy_i in six.moves.range(processes):
            worker = ProcessWrapper(
                TornadoProcess, (tornado_app, sockets),
                wait_unless_ready=True, timeout=5.0,
                name=name
            )
            workers.append(worker)
    return workers
def start_processes(self, max_restarts=-1):
    """
    Start processes and check their status. When some process crashes,
    start it again. *max_restarts* is maximum amount of the restarts
    across all processes (a negative value means unlimited).
    Supervises ``self.processes``, a :class:`list` of the
    :class:`ProcessWrapper` instances.
    """
    while 1:
        for process in self.processes:
            # NOTE(review): relies on ProcessWrapper truthiness --
            # presumably falsy while the process is not running;
            # confirm against the ProcessWrapper implementation.
            if not process:
                # When process has not been started, start it
                if not process.has_started:
                    process.start()
                continue
            # When process has stopped, start it again
            exitcode = process.exitcode
            if exitcode != 0:
                # Process has been signaled or crashed
                if exitcode > 0:
                    self.logger.error(
                        "Process '%s' with pid %d died with exitcode "
                        "%d", process.name, process.pid, exitcode
                    )
                else:
                    # Negative exitcode means the process was killed
                    # by a signal; map it to the signal name.
                    self.logger.error(
                        "Process '%s' with pid %d died due to %s",
                        process.name, process.pid,
                        SIGNALS_TO_NAMES_DICT[abs(exitcode)]
                    )
                # Max restarts has been reached, exit
                if not max_restarts:
                    self.logger.fatal("Too many child restarts")
                    break
                # Start process again
                process.start()
                # Decrement max_restarts counter (negative values
                # never reach zero, i.e. unlimited restarts).
                if max_restarts > 0:
                    max_restarts -= 1
            else:
                # Process has stopped without error
                self.logger.info(
                    "Process '%s' with pid %d has stopped",
                    process.name, process.pid
                )
                # Start process again
                process.start()
                self.logger.info(
                    "Process '%s' has been started with pid %d",
                    process.name, process.pid
                )
        else:
            # for/else: the loop finished without `break`, so keep
            # supervising -- sleep briefly and poll again.
            time.sleep(0.25)
            continue
        # Reached only via `break` above (restart budget exhausted):
        # leave the supervision loop.
        break
def command(self):
    """
    **runserver** command implementation.

    Renames the master process, supervises the child processes with a
    global budget of 100 restarts and, on Ctrl-C or supervisor exit,
    stops all children before returning.
    """
    setproctitle.setproctitle(
        "{:s}: master process '{:s}'".format(
            self.context.config.name, " ".join(sys.argv)
        ))
    # Init and start processes
    try:
        self.start_processes(max_restarts=100)
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the server; fall through
        # to the shutdown sequence below.
        pass
    # Stop processes
    for process in self.processes:
        process.stop()
def taxtable_to_tree(handle):
    """Read a CSV taxonomy from *handle* into a Tree.

    The first data row becomes the root; every following row is
    attached to its already-inserted parent node.
    """
    reader = csv.reader(handle, quoting=csv.QUOTE_NONNUMERIC)
    header = next(reader)
    root_row = dict(zip(header, next(reader)))
    tree = Tree(root_row['tax_id'], rank=root_row['rank'],
                tax_name=root_row['tax_name'])
    for row in reader:
        record = dict(zip(header, row))
        parent = tree.descendents[record['parent_id']]
        parent(Tree(record['tax_id'], rank=record['rank'],
                    tax_name=record['tax_name']))
    return tree
def lonely_company(taxonomy, tax_ids):
    """Return a set of species tax_ids which will makes those in *tax_ids* not lonely.

    The returned species will probably themselves be lonely.
    """
    company = []
    for tax_id in tax_ids:
        sibling = taxonomy.sibling_of(tax_id)
        company.append(taxonomy.species_below(sibling))
    return company
def solid_company(taxonomy, tax_ids):
    """Return a set of non-lonely species tax_ids that will make those in *tax_ids* not lonely."""
    company = []
    for tax_id in tax_ids:
        # nary_subtree may return None; treat that as "no company".
        subtree = taxonomy.nary_subtree(taxonomy.sibling_of(tax_id), 2)
        company.extend(subtree or [])
    return company
def eval_algorithm(curr, prev):
    """ Evaluates OBV
    Args:
        curr: Dict of current volume and close
        prev: Dict of previous OBV and close
    Returns:
        Float of OBV
    """
    # On-balance volume: add volume on up-days, subtract on down-days,
    # carry the previous value unchanged on flat days.
    if curr['close'] > prev['close']:
        delta = curr['volume']
    elif curr['close'] < prev['close']:
        delta = -curr['volume']
    else:
        delta = 0
    return prev['obv'] + delta
def eval_from_json(json):
    """ Evaluates OBV from JSON (typically Poloniex API response)
    Args:
        json: List of dates where each entry is a dict of raw market data.
    Returns:
        Float of OBV
    """
    closes = poloniex.get_attribute(json, 'close')
    volumes = poloniex.get_attribute(json, 'volume')
    # Accumulate OBV over consecutive days, starting from 0.
    obv = 0
    for i in range(1, len(json)):
        obv = OBV.eval_algorithm(
            {'close': closes[i], 'volume': volumes[i]},
            {'close': closes[i - 1], 'obv': obv})
    return obv
import contextlib


@contextlib.contextmanager
def scratch_file(unlink=True, **kwargs):
    """Create a temporary file and yield its name.

    Additional arguments are passed to :class:`tempfile.NamedTemporaryFile`.
    At the start of the with block a secure, temporary file is created
    and its name returned. At the end of the with block it is deleted
    (unless *unlink* is False).

    NOTE(review): this function is used as a ``with`` target elsewhere
    in the file (e.g. ``reroot`` and ``update_phylo_model``) but was a
    bare generator; the ``@contextlib.contextmanager`` decorator is
    required for that usage and has been added.
    """
    # delete=False: we close the handle immediately and hand out only
    # the name, so the file must survive the close.
    kwargs['delete'] = False
    tf = tempfile.NamedTemporaryFile(**kwargs)
    tf.close()
    try:
        yield tf.name
    finally:
        if unlink:
            os.unlink(tf.name)
def open(self, name, *mode):
    """
    Return an open file object for a file in the reference package.
    """
    path = self.file_path(name)
    return self.file_factory(path, *mode)
def open_resource(self, resource, *mode):
    """
    Return an open file object for a particular named resource in this
    reference package.
    """
    # Resolve the resource key to its on-disk file name, then reuse
    # the generic open().
    filename = self.resource_name(resource)
    return self.open(filename, *mode)
def resource_name(self, resource):
    """
    Return the name of the file within the reference package for a
    particular named resource.

    Raises ValueError if the resource is not in the manifest.
    """
    files = self.contents['files']
    if resource not in files:
        raise ValueError("No such resource %r in refpkg" % (resource,))
    return files[resource]
def resource_md5(self, resource):
    """Return the stored MD5 sum for a particular named resource.

    Raises ValueError if the resource is not in the manifest.
    """
    md5_sums = self.contents['md5']
    if resource not in md5_sums:
        raise ValueError("No such resource %r in refpkg" % (resource,))
    return md5_sums[resource]
def _set_defaults(self):
"""
Set some default values in the manifest.
This method should be called after loading from disk, but before
checking the integrity of the reference package.
"""
self.contents.setdefault('log', [])
self.contents.setdefault('rollback', None)
self.contents.setdefault('rollforward', None) | Set some default values in the manifest.
This method should be called after loading from disk, but before
checking the integrity of the reference package. | entailment |
def _sync_to_disk(self):
    """Write any changes made on Refpkg to disk.

    Other methods of Refpkg that alter the contents of the package
    will call this method themselves. Generally you should never
    have to call it by hand. The only exception would be if
    another program has changed the Refpkg on disk while your
    program is running and you want to force your version over it.
    Otherwise it should only be called by other methods of refpkg.
    """
    with self.open_manifest('w') as h:
        # Pretty-print so the manifest stays human-readable/diffable.
        json.dump(self.contents, h, indent=4)
        # json.dump emits no trailing newline; add one.
        h.write('\n')
def _sync_from_disk(self):
    """Read any changes made on disk to this Refpkg.

    This is necessary if other programs are making changes to the
    Refpkg on disk and your program must be synchronized to them.

    Raises ValueError if the manifest is missing or the refpkg path
    is not a directory; re-raises any other I/O error.
    """
    try:
        fobj = self.open_manifest('r')
    except IOError as e:
        # Translate the two expected failure modes into ValueError
        # with a human-readable message; anything else is unexpected.
        if e.errno == errno.ENOENT:
            raise ValueError(
                "couldn't find manifest file in %s" % (self.path,))
        elif e.errno == errno.ENOTDIR:
            raise ValueError("%s is not a directory" % (self.path,))
        else:
            raise
    with fobj:
        self.contents = json.load(fobj)
    # Fill in optional keys, then validate the freshly loaded state.
    self._set_defaults()
    self._check_refpkg()
def _add_file(self, key, path):
    """Copy a file into the reference package.

    The original basename is kept unless a file of that name already
    exists in the package, in which case a unique name with the same
    stem and extension is generated.
    """
    filename = os.path.basename(path)
    base, ext = os.path.splitext(filename)
    if os.path.exists(self.file_path(filename)):
        # Use NamedTemporaryFile purely to generate a unique name in
        # the package directory; the temp file itself is deleted when
        # the context exits, and copyfile below recreates it.
        # NOTE(review): there is a small window between deletion and
        # copyfile where another process could claim the name.
        with tempfile.NamedTemporaryFile(
                dir=self.path, prefix=base, suffix=ext) as tf:
            filename = os.path.basename(tf.name)
    shutil.copyfile(path, self.file_path(filename))
    # Record the (possibly renamed) file under the resource key.
    self.contents['files'][key] = filename
def is_invalid(self):
    """Check if this RefPkg is invalid.

    Valid means that it contains a properly named manifest, and
    each of the files described in the manifest exists and has the
    proper MD5 hashsum.

    If the Refpkg is valid, is_invalid returns False. Otherwise it
    returns a nonempty string describing the error.
    """
    # Manifest file contains the proper keys
    for k in ['metadata', 'files', 'md5']:
        if not(k in self.contents):
            return "Manifest file missing key %s" % k
        if not(isinstance(self.contents[k], dict)):
            return "Key %s in manifest did not refer to a dictionary" % k
    if not('rollback' in self.contents):
        return "Manifest file missing key rollback"
    if not(isinstance(self.contents['rollback'], dict)) and self.contents[
            "rollback"] is not None:
        return ("Key rollback in manifest did not refer to a "
                "dictionary or None, found %s") % str(self.contents['rollback'])
    if not('rollforward' in self.contents):
        return "Manifest file missing key rollforward"
    # rollforward, when present, must be a [log_message, contents] pair.
    if self.contents['rollforward'] is not None:
        if not(isinstance(self.contents['rollforward'], list)):
            return "Key rollforward was not a list, found %s" % str(
                self.contents['rollforward'])
        elif len(self.contents['rollforward']) != 2:
            return "Key rollforward had wrong length, found %d" % \
                len(self.contents['rollforward'])
        elif not is_string(self.contents['rollforward'][0]):
            # NOTE(review): removed a stray debug print() of the
            # entry's type that had leaked into this validation path.
            return "Key rollforward's first entry was not a string, found %s" % \
                str(self.contents['rollforward'][0])
        elif not(isinstance(self.contents['rollforward'][1], dict)):
            return "Key rollforward's second entry was not a dict, found %s" % \
                str(self.contents['rollforward'][1])
    if not("log" in self.contents):
        return "Manifest file missing key 'log'"
    if not(isinstance(self.contents['log'], list)):
        return "Key 'log' in manifest did not refer to a list"
    # MD5 keys and filenames are in one to one correspondence
    if self.contents['files'].keys() != self.contents[
            'md5'].keys():
        return ("Files and MD5 sums in manifest do not "
                "match (files: %s, MD5 sums: %s)") % \
            (list(self.contents['files'].keys()),
             list(self.contents['md5'].keys()))
    # All files in the manifest exist and match the MD5 sums
    for key, filename in self.contents['files'].items():
        # we don't need to explicitly check for existence;
        # calculate_resource_md5 will open the file for us.
        expected_md5 = self.resource_md5(key)
        found_md5 = self.calculate_resource_md5(key)
        if found_md5 != expected_md5:
            return ("File %s referred to by key %s did "
                    "not match its MD5 sum (found: %s, expected %s)") % \
                (filename, key, found_md5, expected_md5)
    return False
def update_metadata(self, key, value):
    """Set *key* in the metadata to *value*.

    Returns the previous value of *key*, or None if the key was
    not previously set.
    """
    metadata = self.contents['metadata']
    previous = metadata.get(key)
    metadata[key] = value
    self._log('Updated metadata: %s=%s' % (key, value))
    return previous
def update_file(self, key, new_path):
    """Insert file *new_path* into the refpkg under *key*.

    The filename of *new_path* will be preserved in the refpkg
    unless it would conflict with a previously existing file, in
    which case a suffix is appended which makes it unique. The
    previous file, if there was one, is left in the refpkg. If
    you wish to delete it, see the ``strip`` method.

    The full path to the previous file referred to by *key* is
    returned, or ``None`` if *key* was not previously defined in
    the refpkg.
    """
    # Remember the old file (if any) before overwriting the mapping.
    if key in self.contents['files']:
        old_path = self.resource_path(key)
    else:
        old_path = None
    self._add_file(key, new_path)
    # Hash the source file, not the copy, and record it in the manifest.
    with open(new_path, 'rb') as f:
        md5_value = md5file(f)
    self.contents['md5'][key] = md5_value
    self._log('Updated file: %s=%s' % (key, new_path))
    # phylo_model is derived from tree_stats; warn when they may now
    # be out of sync.
    if key == 'tree_stats' and old_path:
        warnings.warn('Updating tree_stats, but not phylo_model.',
                      DerivedFileNotUpdatedWarning, stacklevel=2)
    return old_path
def reroot(self, rppr=None, pretend=False):
    """Reroot the phylogenetic tree.

    This operation calls ``rppr reroot`` to generate the rerooted
    tree, so you must have ``pplacer`` and its auxiliary tools
    ``rppr`` and ``guppy`` installed for it to work. You can
    specify the path to ``rppr`` by giving it as the *rppr*
    argument.

    If *pretend* is ``True``, the convexification is run, but the
    refpkg is not actually updated.

    Raises subprocess.CalledProcessError if rppr exits non-zero.
    """
    # Write the rerooted tree to a scratch file, then adopt it.
    with scratch_file(prefix='tree', suffix='.tre') as name:
        # Use a specific path to rppr, otherwise rely on $PATH
        subprocess.check_call([rppr or 'rppr', 'reroot',
                               '-c', self.path, '-o', name])
        if not(pretend):
            self.update_file('tree', name)
    self._log('Rerooting refpkg')
def update_phylo_model(self, stats_type, stats_file, frequency_type=None):
    """Parse a stats log and use it to update ``phylo_model``.

    ``pplacer`` expects its input to include the details of the
    phylogenetic model used for creating a tree in JSON format
    under the key ``phylo_model``, but no program actually outputs
    that format.

    This function takes a log generated by RAxML or FastTree, parses it,
    and inserts an appropriate JSON file into the refpkg. The first
    parameter must be 'RAxML', 'PhyML' or 'FastTree', depending on which
    program generated the log. It may also be None to attempt to guess
    which program generated the log.

    :param stats_type: Statistics file type. One of 'RAxML', 'FastTree', 'PhyML'
    :param stats_file: path to statistics/log file
    :param frequency_type: For ``stats_type == 'PhyML'``, amino acid
        alignments only: was the alignment inferred with ``model`` or
        ``empirical`` frequencies?
    :raises ValueError: on an unknown frequency type, a frequency type
        combined with a non-PhyML stats type, or an unrecognizable log.
    """
    # Validate argument combinations up front.
    if frequency_type not in (None, 'model', 'empirical'):
        raise ValueError(
            'Unknown frequency type: "{0}"'.format(frequency_type))
    if frequency_type and stats_type not in (None, 'PhyML'):
        raise ValueError('Frequency type should only be specified for '
                         'PhyML alignments.')
    # Guess the producing program from characteristic first lines.
    if stats_type is None:
        with open(stats_file) as fobj:
            for line in fobj:
                if line.startswith('FastTree'):
                    stats_type = 'FastTree'
                    break
                elif (line.startswith('This is RAxML') or
                        line.startswith('You are using RAxML')):
                    stats_type = 'RAxML'
                    break
                elif 'PhyML' in line:
                    stats_type = 'PhyML'
                    break
            else:
                # No marker found anywhere in the file.
                raise ValueError(
                    "couldn't guess log type for %r" % (stats_file,))
    # Pick the matching parser.
    if stats_type == 'RAxML':
        parser = utils.parse_raxml
    elif stats_type == 'FastTree':
        parser = utils.parse_fasttree
    elif stats_type == 'PhyML':
        parser = functools.partial(utils.parse_phyml,
                                   frequency_type=frequency_type)
    else:
        raise ValueError('invalid log type: %r' % (stats_type,))
    # Parse into a scratch JSON file, then adopt it as phylo_model.
    with scratch_file(prefix='phylo_model', suffix='.json') as name:
        with open(name, 'w') as phylo_model, open(stats_file) as h:
            json.dump(parser(h), phylo_model, indent=4)
        self.update_file('phylo_model', name)
def rollback(self):
    """Revert the previous modification to the refpkg.

    Raises ValueError if there is no stored rollback state. The
    reverted state is preserved under ``rollforward`` so the change
    can be reapplied with :meth:`rollforward`.
    """
    # This is slightly complicated because of Python's freakish
    # assignment semantics and because we don't store multiple
    # copies of the log.
    if self.contents['rollback'] is None:
        raise ValueError("No operation to roll back on refpkg")
    # The newest log entry describes the operation being undone; the
    # remainder is the log as it was before that operation.
    future_msg = self.contents['log'][0]
    rolledback_log = self.contents['log'][1:]
    # Snapshot the current state (minus its own rollback pointer) so
    # it can be restored by rollforward().
    rollforward = copy.deepcopy(self.contents)
    rollforward.pop('rollback')
    self.contents = self.contents['rollback']
    self.contents['log'] = rolledback_log
    self.contents['rollforward'] = [future_msg, rollforward]
    self._sync_to_disk()
def rollforward(self):
    """Restore a reverted modification to the refpkg.

    Raises ValueError if there is no stored rollforward state (i.e.
    rollback() was not the last state-changing operation).
    """
    if self.contents['rollforward'] is None:
        raise ValueError("No operation to roll forward on refpkg")
    # rollforward is a [log_message, snapshot] pair saved by rollback().
    new_log_message = self.contents['rollforward'][0]
    new_contents = self.contents['rollforward'][1]
    # Re-prepend the redone operation's log entry.
    new_contents['log'] = [new_log_message] + self.contents.pop('log')
    self.contents['rollforward'] = None
    # The state being left becomes the new rollback target (without
    # its own rollforward pointer, which was consumed above).
    new_contents['rollback'] = copy.deepcopy(self.contents)
    new_contents['rollback'].pop('rollforward')
    self.contents = new_contents
    self._sync_to_disk()
def strip(self):
    """Remove rollbacks, rollforwards, and all non-current files.

    When distributing a refpkg, you probably want to distribute as
    small a one as possible. strip removes everything from the
    refpkg which is not relevant to its current state.
    """
    self._sync_from_disk()
    # Anything on disk that the manifest does not currently reference
    # is an orphan left behind by earlier updates.
    current_filenames = set(self.contents['files'].values())
    all_filenames = set(os.listdir(self.path))
    to_delete = all_filenames.difference(current_filenames)
    # Never delete the manifest itself.
    to_delete.discard('CONTENTS.json')
    for f in to_delete:
        self._delete_file(f)
    # History is now meaningless: the files it referenced are gone.
    self.contents['rollback'] = None
    self.contents['rollforward'] = None
    self.contents['log'].insert(
        0, 'Stripped refpkg (removed %d files)' % len(to_delete))
    self._sync_to_disk()
def start_transaction(self):
    """Begin a transaction to group operations on the refpkg.

    All the operations until the next call to
    ``commit_transaction`` will be recorded as a single operation
    for rollback and rollforward, and recorded with a single line
    in the log.

    Raises ValueError if a transaction is already in progress.
    """
    if self.current_transaction:
        raise ValueError("There is already a transaction going")
    # Snapshot the pre-transaction state for rollback on commit.
    self.current_transaction = {
        'rollback': copy.deepcopy(self.contents),
        'log': '(Transaction left no log message)',
    }
def commit_transaction(self, log=None):
    """Commit a transaction, with *log* as the log entry."""
    rollback = self.current_transaction['rollback']
    # The rollback snapshot carries its own log/rollforward; those are
    # managed on the live contents instead.
    rollback.pop('log')
    rollback.pop('rollforward')
    self.contents['log'].insert(0, log or self.current_transaction['log'])
    self.contents['rollback'] = rollback
    self.contents['rollforward'] = None  # We can't roll forward anymore
    self.current_transaction = None
    self._sync_to_disk()
def is_ill_formed(self):
    """Stronger set of checks than is_invalid for Refpkg.

    Checks that FASTA, Stockholm, JSON, and CSV files under known
    keys are all valid as well as calling is_invalid. Returns
    either False or a string describing the error.
    """
    # First run the cheaper manifest/MD5 validation.
    m = self.is_invalid()
    if m:
        return m
    required_keys = ('aln_fasta', 'aln_sto', 'seq_info', 'tree',
                     'taxonomy', 'phylo_model')
    for k in required_keys:
        if k not in self.contents['files']:
            return "RefPkg has no key " + k
    # aln_fasta, seq_info, tree, and aln_sto must be valid FASTA,
    # CSV, Newick, and Stockholm files, respectively, and describe
    # the same sequences.
    with self.open_resource('aln_fasta') as f:
        # Cheap sanity check before parsing: FASTA must start with '>'.
        firstline = f.readline()
        if firstline.startswith('>'):
            f.seek(0)
        else:
            return 'aln_fasta file is not valid FASTA.'
        fasta_names = {seq.id for seq in fastalite(f)}
    with self.open_resource('seq_info') as f:
        lines = list(csv.reader(f))
        headers = set(lines[0])
        # Check required headers
        for req_header in 'seqname', 'tax_id':
            if req_header not in headers:
                return "seq_info is missing {0}".format(req_header)
        # Every row must have the same number of fields as the header.
        lengths = {len(line) for line in lines}
        if len(lengths) > 1:
            return "some lines in seq_info differ in field cout"
        # First column is assumed to be the sequence name -- TODO
        # confirm this matches the seq_info file convention.
        csv_names = {line[0] for line in lines[1:]}
    with self.open_resource('aln_sto') as f:
        try:
            sto_names = set(utils.parse_stockholm(f))
        except ValueError:
            return 'aln_sto file is not valid Stockholm.'
    try:
        tree = dendropy.Tree.get(
            path=self.resource_path('tree'),
            schema='newick',
            case_sensitive_taxon_labels=True,
            preserve_underscores=True)
        tree_names = set(tree.taxon_namespace.labels())
    except Exception:
        return 'tree file is not valid Newick.'
    # The three name sets must agree pairwise with the FASTA names.
    d = fasta_names.symmetric_difference(sto_names)
    if len(d) != 0:
        return "Names in aln_fasta did not match aln_sto. Mismatches: " + \
            ', '.join([str(x) for x in d])
    d = fasta_names.symmetric_difference(csv_names)
    if len(d) != 0:
        return "Names in aln_fasta did not match seq_info. Mismatches: " + \
            ', '.join([str(x) for x in d])
    d = fasta_names.symmetric_difference(tree_names)
    if len(d) != 0:
        return "Names in aln_fasta did not match nodes in tree. Mismatches: " + \
            ', '.join([str(x) for x in d])
    # Next make sure that taxonomy is valid CSV, phylo_model is valid JSON
    with self.open_resource('taxonomy') as f:
        lines = list(csv.reader(f))
        lengths = {len(line) for line in lines}
        if len(lengths) > 1:
            return ("Taxonomy is invalid: not all lines had "
                    "the same number of fields.")
    # I don't try to check if the taxids match up to those
    # mentioned in aln_fasta, since that would make taxtastic
    # depend on RefsetInternalFasta in romperroom.
    with self.open_resource('phylo_model') as f:
        try:
            json.load(f)
        except ValueError:
            return "phylo_model is not valid JSON."
    return False
def load_db(self):
    """Load the taxonomy into a sqlite3 database.

    This will set ``self.db`` to a sqlite3 database which contains all of
    the taxonomic information in the reference package.
    """
    db = taxdb.Taxdb()
    db.create_tables()
    reader = csv.DictReader(self.open_resource('taxonomy', 'rU'))
    # NOTE(review): reaches into DictReader's private _fieldnames to
    # avoid triggering the header read -- fragile across Python
    # versions; consider the public `fieldnames` property.
    db.insert_from_taxtable(lambda: reader._fieldnames, reader)
    curs = db.cursor()
    reader = csv.DictReader(self.open_resource('seq_info', 'rU'))
    # Map each sequence name to its tax_id.
    curs.executemany("INSERT INTO sequences VALUES (?, ?)",
                     ((row['seqname'], row['tax_id']) for row in reader))
    db.commit()
    self.db = db
def most_recent_common_ancestor(self, *ts):
    """Find the MRCA of some tax_ids.

    Returns the MRCA of the specified tax_ids, or raises ``NoAncestor`` if
    no ancestor of the specified tax_ids could be found.
    """
    # SQLite limits the number of bound parameters, so large inputs
    # go through a temporary table instead of an IN (...) clause.
    if len(ts) > 200:
        res = self._large_mrca(ts)
    else:
        res = self._small_mrca(ts)
    if res:
        # res is a one-element list of one-column rows; unpack both
        # levels at once to get the bare tax_id.
        (res,), = res
    else:
        raise NoAncestor()
    return res
def _large_mrca(self, ts):
    """Find the MRCA using a temporary table.

    Loads *ts* into a temp table, then selects the deepest-ranked
    taxon that is an ancestor of every input (i.e. appears as parent
    for all of them in the `parents` closure table).
    """
    cursor = self.db.cursor()
    cursor.execute("""
        DROP TABLE IF EXISTS _mrca_temp
    """)
    cursor.execute("""
        CREATE TEMPORARY TABLE _mrca_temp(
          child TEXT PRIMARY KEY REFERENCES taxa (tax_id) NOT NULL
        )
    """)
    cursor.executemany("""
        INSERT INTO _mrca_temp
        VALUES (?)
    """, ((tid,) for tid in ts))
    # HAVING COUNT(*) = len(ts) keeps only ancestors common to every
    # input; ORDER BY rank_order DESC picks the most specific one.
    # NOTE(review): assumes *ts* contains no duplicates, otherwise the
    # count can never be reached -- confirm with callers.
    cursor.execute("""
        SELECT parent
        FROM _mrca_temp
             JOIN parents USING (child)
             JOIN taxa
               ON parent = taxa.tax_id
             JOIN ranks USING (rank)
        GROUP BY parent
        HAVING COUNT(*) = ?
        ORDER BY rank_order DESC
        LIMIT 1
    """, (len(ts),))
    return cursor.fetchall()
def _small_mrca(self, ts):
    """Find a MRCA using query parameters.

    This only supports a limited number of tax_ids; ``_large_mrca`` will
    support an arbitrary number.
    """
    cursor = self.db.cursor()
    # Build one '?' placeholder per tax_id for the IN clause.
    qmarks = ', '.join('?' * len(ts))
    # Same query as _large_mrca, but inlining the inputs: keep only
    # ancestors of all inputs, most specific rank first.
    cursor.execute("""
        SELECT parent
        FROM parents
             JOIN taxa
               ON parent = taxa.tax_id
             JOIN ranks USING (rank)
        WHERE child IN (%s)
        GROUP BY parent
        HAVING COUNT(*) = ?
        ORDER BY rank_order DESC
        LIMIT 1
    """ % qmarks, ts + (len(ts),))
    return cursor.fetchall()
def file_abspath(self, resource):
    """Deprecated alias for *resource_path*."""
    warnings.warn(
        "file_abspath is deprecated; use resource_path instead",
        DeprecationWarning, stacklevel=2)
    # Delegate to the replacement API.
    return self.resource_path(resource)
def file_name(self, resource):
    """Deprecated alias for *resource_name*."""
    warnings.warn(
        "file_name is deprecated; use resource_name instead",
        DeprecationWarning, stacklevel=2)
    # Delegate to the replacement API.
    return self.resource_name(resource)
def file_md5(self, resource):
    """Deprecated: forwards to :meth:`resource_md5` with a warning."""
    warnings.warn("file_md5 is deprecated; use resource_md5 instead",
                  DeprecationWarning, stacklevel=2)
    return self.resource_md5(resource)
def get_app_settings(parser, known_args):
    """Return the application's **settings** module.

    The module path is taken from the ``-s/--settings`` command line
    argument, falling back to the ``SHELTER_SETTINGS_MODULE``
    environment variable; ``None`` is returned when neither is set.
    """
    parsed, _unused = parser.parse_known_args(known_args)
    module_path = parsed.settings
    if not module_path:
        module_path = os.environ.get('SHELTER_SETTINGS_MODULE', '')
    if not module_path:
        return None
    return importlib.import_module(module_path)
def get_management_commands(settings):
    """Return registered management commands as a :class:`dict`.

    Keys are command names and values are command classes.  Commands
    come from the built-in ``SHELTER_MANAGEMENT_COMMANDS`` plus the
    application's ``settings.MANAGEMENT_COMMANDS``.

    Raises :class:`ValueError` for a class that is not a subclass of
    :class:`BaseCommand`.
    """
    user_commands = getattr(settings, 'MANAGEMENT_COMMANDS', ())
    result = {}
    for path in itertools.chain(SHELTER_MANAGEMENT_COMMANDS, user_commands):
        cls = import_object(path)
        if not issubclass(cls, BaseCommand):
            raise ValueError("'%s' is not subclass of the BaseCommand" % path)
        result[cls.name] = cls
    return result
def get_config_class(settings):
    """Return the application's config class.

    Uses ``settings.CONFIG_CLASS`` when set, otherwise the default
    :class:`shelter.core.config.Config`.
    """
    path = getattr(settings, 'CONFIG_CLASS', '')
    return import_object(path) if path else Config
def main(args=None):
    """
    Run management command handled from command line.

    :param args: optional list of command line arguments; when ``None``
        argparse falls back to ``sys.argv``.

    Always exits the process: ``sys.exit(0)`` on success, ``sys.exit(1)``
    (or ``os._exit(1)`` when children processes are still alive) on
    failure.
    """
    # Base command line parser. Help is not allowed because command
    # line is parsed in two stages - in the first stage is found setting
    # module of the application, in the second stage are found management
    # command's arguments.
    parser = ArgumentParser(add_help=False)
    parser.add_argument(
        '-s', '--settings',
        dest='settings', action='store', type=str, default=None,
        help=_('application settings module')
    )
    # Get settings module
    try:
        settings = get_app_settings(parser, args)
    except ImportError as exc:
        parser.error(_("Invalid application settings module: {}").format(exc))
    # Get management commands and add their arguments into command
    # line parser (each command becomes a subparser)
    commands = get_management_commands(settings)
    subparsers = parser.add_subparsers(
        dest='action', help=_('specify action')
    )
    for command_cls in six.itervalues(commands):
        subparser = subparsers.add_parser(
            command_cls.name, help=command_cls.help)
        for command_args, kwargs in command_cls.arguments:
            subparser.add_argument(*command_args, **kwargs)
    # Get config class and add its arguments into command line parser
    if settings:
        config_cls = get_config_class(settings)
        if not issubclass(config_cls, Config):
            raise TypeError(
                "Config class must be subclass of the "
                "shelter.core.config.Config")
        for config_args, kwargs in config_cls.arguments:
            parser.add_argument(*config_args, **kwargs)
    else:
        config_cls = Config
    # Add help argument and parse command line (second parsing stage)
    parser.add_argument(
        '-h', '--help', action='help',
        help=_('show this help message and exit')
    )
    cmdline_args = parser.parse_args(args)
    if not cmdline_args.action:
        parser.error(_('No action'))
    # Run management command
    command_cls = commands[cmdline_args.action]
    if command_cls.settings_required and not settings:
        parser.error(_(
            "Settings module is not defined. You must either set "
            "'SHELTER_SETTINGS_MODULE' environment variable or "
            "'-s/--settings' command line argument."
        ))
    try:
        config = config_cls(settings, cmdline_args)
    except ImproperlyConfiguredError as exc:
        parser.error(str(exc))
    command = command_cls(config)
    try:
        command()
    except Exception:
        traceback.print_exc(file=sys.stderr)
        sys.stderr.flush()
        if multiprocessing.active_children():
            # If main process has children processes, exit immediately without
            # cleaning. It is a workaround, because parent process waits for
            # non-daemon children.
            os._exit(1)
        sys.exit(1)
    sys.exit(0) | Run management command handled from command line. | entailment
def as_length(self, value):
    """Return a copy of this vector scaled to length *value*."""
    scaled = self.copy()
    scaled.length = value
    return scaled
def as_percent(self, value):
    """Return a copy of this vector scaled by the decimal fraction *value*."""
    scaled = self.copy()
    scaled.length = value * self.length
    return scaled
def dot(self, vec):
    """Dot product with another vector of the same class.

    Raises TypeError for a non-vector operand.
    """
    if isinstance(vec, self.__class__):
        return np.dot(self, vec)
    raise TypeError('Dot product operand must be a vector')
def cross(self, vec):
    """Cross product with another vector of the same class.

    Raises TypeError for a non-vector operand.
    """
    if isinstance(vec, self.__class__):
        return self.__class__(np.cross(self, vec))
    raise TypeError('Cross product operand must be a vector')
def angle(self, vec, unit='rad'):
    """Return the angle between this vector and *vec*.

    unit: 'rad' (default) or 'deg' for the returned value.

    Raises TypeError for a non-vector operand, ValueError for an
    unknown unit, and ZeroDivisionError when either vector has zero
    length.
    """
    if not isinstance(vec, self.__class__):
        raise TypeError('Angle operand must be of class {}'
                        .format(self.__class__.__name__))
    if unit not in ['deg', 'rad']:
        raise ValueError('Only units of rad or deg are supported')
    denom = self.length * vec.length
    if denom == 0:
        raise ZeroDivisionError('Cannot calculate angle between '
                                'zero-length vector(s)')
    radians = np.arccos(self.dot(vec) / denom)
    if unit == 'rad':
        return radians
    return radians * 180 / np.pi
def phi(self):
    """Polar angle (inclination) of this vector in radians.

    Angle from the positive z-axis in spherical coordinates, in the
    range 0 <= phi <= pi.
    """
    radial = np.sqrt(self.x**2 + self.y**2)
    return np.arctan2(radial, self.z)
def cross(self, vec):
    """Cross product with another 2D vector of the same class.

    The scalar result of the 2D cross product becomes the z component
    of the returned :class:`Vector3`.

    Raises TypeError for a non-vector operand.
    """
    if not isinstance(vec, self.__class__):
        raise TypeError('Cross product operand must be a vector')
    # np.asscalar was removed in NumPy 1.23; ndarray.item() is the
    # supported equivalent for extracting the 0-d result.
    return Vector3(0, 0, np.cross(self, vec).item())
def length(self):
    """Array of per-row vector lengths, as a plain ndarray."""
    squared = np.sum(self**2, axis=1)
    return np.sqrt(squared).view(np.ndarray)
def dot(self, vec):
    """Row-wise dot product with another vector array.

    Raises TypeError for a non-VectorArray operand and ValueError when
    the operand element counts are incompatible (neither equal nor 1).
    """
    if not isinstance(vec, self.__class__):
        raise TypeError('Dot product operand must be a VectorArray')
    if self.nV != 1 and vec.nV != 1 and self.nV != vec.nV:
        raise ValueError('Dot product operands must have the same '
                         'number of elements.')
    # Sum the per-dimension products with the builtin sum():
    # np.sum() over a generator is deprecated and silently fell back
    # to the builtin anyway, ignoring the trailing axis argument.
    return sum(getattr(self, d) * getattr(vec, d) for d in self.dims)
def cross(self, vec):
    """Row-wise cross product with another Vector3Array.

    Operand element counts must be compatible: equal, or one of them 1.
    """
    if not isinstance(vec, Vector3Array):
        raise TypeError('Cross product operand must be a Vector3Array')
    compatible = self.nV == 1 or vec.nV == 1 or self.nV == vec.nV
    if not compatible:
        raise ValueError('Cross product operands must have the same '
                         'number of elements.')
    return Vector3Array(np.cross(self, vec))
def eval_rs(gains, losses):
    """Evaluate the RS term of the RSI algorithm.

    Args:
        gains: List of price gains.
        losses: List of price losses.

    Returns:
        Float of average gains over average losses (the average gain
        alone when the average loss is zero; an empty list averages
        to 1).
    """
    # Total number of days the data covers.
    total = len(gains) + len(losses)
    avg_gains = stats.avg(gains, count=total) if gains else 1
    avg_losses = stats.avg(losses, count=total) if losses else 1
    if avg_losses == 0:
        return avg_gains
    return avg_gains / avg_losses
def eval_from_json(json):
    """Evaluate RSI from JSON (typically a Poloniex API response).

    Args:
        json: List of dates where each entry is a dict of raw market
            data.

    Returns:
        Float between 0 and 100, a momentum indicator measuring the
        speed and change of price movements.
    """
    gains_losses = poloniex.get_gains_losses(poloniex.parse_changes(json))
    return RSI.eval_algorithm(gains_losses['gains'], gains_losses['losses'])
def sigusr1_handler(self, unused_signum, unused_frame):
    """Handle SIGUSR1 by calling ``settings.SIGUSR1_HANDLER`` (if any)."""
    handler = self._sigusr1_handler_func
    if handler is not None:
        handler(self.context)
def sigusr2_handler(self, unused_signum, unused_frame):
    """Handle SIGUSR2 by calling ``settings.SIGUSR2_HANDLER`` (if any).

    Bug fix: the guard previously tested ``_sigusr1_handler_func``
    (copy-paste from the SIGUSR1 handler), so this handler could raise
    ``TypeError`` or be silently skipped depending on whether a SIGUSR1
    handler was configured.
    """
    if self._sigusr2_handler_func is not None:
        self._sigusr2_handler_func(self.context)
def action(args):
    """
    Show information about reference packages.

    Prints sequence names, a taxon tally, sequence lengths, or a
    package summary depending on the flags in *args*.
    """
    log.info('loading reference package')
    pkg = refpkg.Refpkg(args.refpkg, create=False)
    # 'rU' (universal newlines) was removed in Python 3.11; plain text
    # mode already handles universal newlines.
    with open(pkg.file_abspath('seq_info'), 'r') as seq_info:
        seqinfo = list(csv.DictReader(seq_info))
    snames = [row['seqname'] for row in seqinfo]
    if args.seq_names:
        print('\n'.join(snames))
    elif args.tally:
        tally_taxa(pkg)
    elif args.lengths:
        print_lengths(pkg)
    else:
        print('number of sequences:', len(snames))
        print('package components\n', '\n'.join(sorted(pkg.file_keys())))
def json_to_url(json, symbol):
    """Convert chart JSON back to its Poloniex API URL.

    Args:
        json: JSON data as a list of dict dates, where the keys are
            the raw market statistics.
        symbol: String of currency pair, like a ticker symbol.

    Returns:
        String URL to the Poloniex API representing the given JSON.
    """
    start = json[0]['date']
    end = json[-1]['date']
    diff = end - start
    # Recover the candlestick period: pick the valid period whose
    # ratio to the observed (diff / number of entries) is closest to 1.
    periods = [300, 900, 1800, 7200, 14400, 86400]
    diffs = {p: abs(1 - (p / (diff / len(json)))) for p in periods}
    period = min(diffs, key=diffs.get)
    # The '&currency' fragment had been mangled into the '¤' character
    # by a bad HTML-entity decode; restored to the literal parameter.
    url = ('https://poloniex.com/public?command'
           '=returnChartData&currencyPair={0}&start={1}'
           '&end={2}&period={3}').format(symbol, start, end, period)
    return url
def chart_json(start, end, period, symbol):
    """Request chart data from the Poloniex API.

    Args:
        start: Int epoch date to START getting market stats from
            (further from the present).
        end: Int epoch date to STOP getting market stats from
            (closer to the present).
        period: Int width of each chart candlestick in seconds.
            Valid values: 300, 900, 1800, 7200, 14400, 86400.
        symbol: String of currency pair, like a ticker symbol.

    Returns:
        Tuple of (JSON data, URL to JSON).  The JSON data is a list of
        dict dates, where the keys are the raw market statistics.

    Raises:
        SystemExit: on an API error or an unusable response.
    """
    # The '&currency' fragment had been mangled into '¤' by a bad
    # HTML-entity decode; restored to the literal parameter.
    url = ('https://poloniex.com/public?command'
           '=returnChartData&currencyPair={0}&start={1}'
           '&end={2}&period={3}').format(symbol, start, end, period)
    logger.debug(' HTTP Request URL:\n{0}'.format(url))
    json = requests.get(url).json()
    logger.debug(' JSON:\n{0}'.format(json))
    if 'error' in json:
        logger.error(' Invalid parameters in URL for HTTP response')
        raise SystemExit
    # Check emptiness BEFORE indexing json[0] (the original order could
    # raise IndexError on an empty response).
    elif len(json) < 1:
        logger.error(' Not enough dates to calculate changes')
        raise SystemExit
    # Iterate the dict's values: iterating the dict itself yields keys,
    # so the all-zero check never fired as written.
    elif all(val == 0 for val in json[0].values()):
        logger.error(' Bad HTTP response. Time unit too short?')
        raise SystemExit
    return json, url
def parse_changes(json):
    """Get successive closing-price changes from JSON.

    Args:
        json: JSON data as a list of dict dates, where the keys are
            the raw market statistics.

    Returns:
        List of floats of close-to-close changes between entries.
    """
    closes = [entry['close'] for entry in json]
    changes = [curr - prev for prev, curr in zip(closes, closes[1:])]
    logger.debug('Market Changes (from JSON):\n{0}'.format(changes))
    return changes
def get_gains_losses(changes):
    """Split price changes into gains and losses.

    Args:
        changes: List of floats of price changes between entries.

    Returns:
        Dict of changes with keys 'gains' and 'losses'.  All values
        are positive (zero changes are filed under losses).
    """
    split = {'gains': [], 'losses': []}
    for delta in changes:
        if delta > 0:
            split['gains'].append(delta)
        else:
            split['losses'].append(delta * -1)
    logger.debug('Gains: {0}'.format(split['gains']))
    logger.debug('Losses: {0}'.format(split['losses']))
    return split
def get_attribute(json, attr):
    """Collect the values of one attribute from JSON.

    Args:
        json: JSON data as a list of dict dates, where the keys are
            the raw market statistics.
        attr: String name of the attribute to collect.

    Returns:
        List of values of the specified attribute, in order.
    """
    values = [entry[attr] for entry in json]
    logger.debug('{0}s (from JSON):\n{1}'.format(attr, values))
    return values
def get_json_shift(year, month, day, unit, count, period, symbol):
    """Get JSON for a date-shifted window from the Poloniex API.

    Args:
        year: Int between 1 and 9999.
        month: Int between 1 and 12.
        day: Int between 1 and 31.
        unit: String time unit for *count*.
            Valid values: 'hour', 'day', 'week', 'month', 'year'.
        count: Int of units to look back in the historical data.
        period: Int width of each chart candlestick in seconds.
        symbol: String of currency pair, like a ticker symbol.

    Returns:
        JSON: list of dates where each entry is a dict of raw market
        data.
    """
    epochs = date.get_end_start_epochs(year, month, day, 'last', unit, count)
    json, _unused_url = chart_json(
        epochs['shifted'], epochs['initial'], period, symbol)
    return json
def filter_ranks(results):
    """Yield only the first result row for each run of equal tax_ids."""
    first_field = operator.itemgetter(0)
    for _key, group in itertools.groupby(results, first_field):
        yield next(group)
def eval_algorithm(closing, low, high):
    """Evaluate the stochastic oscillator (SO).

    Args:
        closing: Float of current closing price.
        low: Float of lowest low closing price over some duration.
        high: Float of highest high closing price over some duration.

    Returns:
        Float SO between 0 and 100.
    """
    spread = high - low
    if spread == 0:
        # Degenerate window (high == low): avoid dividing by zero.
        return 100 * (closing - low)
    return 100 * (closing - low) / spread
def eval_from_json(json):
    """Evaluate SO from JSON (typically a Poloniex API response).

    Args:
        json: List of dates where each entry is a dict of raw market
            data.

    Returns:
        Float SO between 0 and 100.
    """
    latest_close = json[-1]['close']
    lowest_low = min(poloniex.get_attribute(json, 'low'))
    highest_high = max(poloniex.get_attribute(json, 'high'))
    return SO.eval_algorithm(latest_close, lowest_low, highest_high)
def avg(vals, count=None):
    """Return the average of *vals* over *count* entries.

    Args:
        vals: List of numbers to average.
        count: Int of the total count that vals was part of; defaults
            to ``len(vals)``.

    Returns:
        Float average value throughout the count.
    """
    # Use the builtin sum() instead of accumulating into a local that
    # shadowed the builtin name.
    if count is None:
        count = len(vals)
    return float(sum(vals)) / count
def db_connect(engine, schema=None, clobber=False):
    """Create database tables and return the declarative base.

    Attempts to create *schema* when one is given (a failure is only
    logged).  Existing tables are dropped first when *clobber* is True.

    Args:
        engine: sqlalchemy engine object.
        schema: optional schema name.
        clobber: drop existing tables before creating when True.

    Returns:
        The declarative base bound to the (possibly schema-qualified)
        metadata.
    """
    if schema is None:
        base = declarative_base()
    else:
        try:
            engine.execute(sqlalchemy.schema.CreateSchema(schema))
        except sqlalchemy.exc.ProgrammingError as err:
            # schema probably exists already; logging.warn() is a
            # deprecated alias removed in Python 3.13.
            logging.warning(err)
        base = declarative_base(metadata=MetaData(schema=schema))
    define_schema(base)
    if clobber:
        logging.info('Clobbering database tables')
        base.metadata.drop_all(bind=engine)
    logging.info('Creating database tables')
    base.metadata.create_all(bind=engine)
    return base
def read_nodes(rows, source_id=1):
    """
    Yield rows ready to insert into table "nodes".

    * rows - iterator of lists (eg, output from read_archive or
      read_dmp); the first row is assumed to be the root node.
    * source_id - value for the added "source_id" column.
    """
    ncbi_keys = ['tax_id', 'parent_id', 'rank', 'embl_code', 'division_id']
    extra_keys = ['source_id', 'is_valid']
    ncbi_cols = len(ncbi_keys)
    rank_col = ncbi_keys.index('rank')
    parent_col = ncbi_keys.index('parent_id')
    root = next(rows)
    root[rank_col] = 'root'
    # parent must be None so the recursive CTE computing lineages
    # terminates at the root
    root[parent_col] = None
    yield ncbi_keys + extra_keys
    for row in itertools.chain([root], rows):
        # replace whitespace in "rank" with underscores
        row[rank_col] = '_'.join(row[rank_col].split())
        # provide default values for source_id and is_valid
        yield row[:ncbi_cols] + [source_id, True]
def read_names(rows, source_id=1):
    """Yield rows ready to insert into table "names".

    Adds columns "is_primary" (True for each tax_id's scientific name)
    and "is_classified" (always None here; species are classified in a
    later pass).

    * rows - iterator of lists (eg, output from read_archive or read_dmp)

    From the NCBI docs, names.dmp columns are:
    tax_id -- the id of node associated with this name
    name_txt -- name itself
    unique name -- the unique variant of this name if name not unique
    name class -- (synonym, common name, ...)
    """
    ncbi_keys = ['tax_id', 'tax_name', 'unique_name', 'name_class']
    extra_keys = ['source_id', 'is_primary', 'is_classified']
    tax_id_col = ncbi_keys.index('tax_id')
    name_col = ncbi_keys.index('tax_name')
    unique_col = ncbi_keys.index('unique_name')
    class_col = ncbi_keys.index('name_class')
    yield ncbi_keys + extra_keys
    for _tid, group in itertools.groupby(rows, itemgetter(tax_id_col)):
        primaries = 0
        for row in group:
            is_primary = row[class_col] == 'scientific name'
            if row[unique_col]:
                # use the unique variant to avoid primary key collisions
                row[name_col] = row[unique_col]
            primaries += is_primary
            yield row + [source_id, is_primary, None]
        # each tax_id must have exactly one scientific name
        assert primaries == 1
def fetch_data(dest_dir='.', clobber=False, url=DATA_URL):
    """Download the NCBI archive needed to build the local taxonomy
    database.

    * dest_dir - directory in which to save output files (created if
      necessary).
    * clobber - when False, skip the download if the target of url
      already exists in dest_dir.
    * url - url to archive; default is ncbi.DATA_URL.

    Returns (fname, downloaded), where fname is the name of the
    downloaded zip archive, and downloaded is True if a new file was
    downloaded, False otherwise.

    see ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump_readme.txt
    """
    dest_dir = os.path.abspath(dest_dir)
    try:
        os.mkdir(dest_dir)
    except OSError:
        # directory already exists (or cannot be created, in which
        # case the download below fails loudly)
        pass
    fout = os.path.join(dest_dir, os.path.split(url)[-1])
    if os.access(fout, os.F_OK) and not clobber:
        logging.info(fout + ' exists; not downloading')
        return (fout, False)
    logging.info('downloading {} to {}'.format(url, fout))
    request.urlretrieve(url, fout)
    return (fout, True)
def read_archive(archive, fname):
    """Yield unique rows from one file inside a zip archive.

    * archive - path to the zip archive.
    * fname - name of the compressed file within the archive.

    Deduplication here is equivalent to an upsert/ignore, but avoids
    needing a database-specific implementation.
    """
    zf = zipfile.ZipFile(archive)
    raw = zf.open(fname, 'r')
    text = io.TextIOWrapper(raw)
    seen = set()
    for line in text:
        stripped = line.rstrip('\t|\n')
        if stripped in seen:
            continue
        yield stripped.split('\t|\t')
        seen.add(stripped)
def prepend_schema(self, name):
    """Return *name* qualified with ``self.schema`` when one is set."""
    if self.schema:
        return '.'.join([self.schema, name])
    return name
def load_table(self, table, rows, colnames=None, limit=None):
    """Load *rows* into table *table*.

    When *colnames* is not provided, the first element of *rows* must
    provide the column names.  At most *limit* rows are inserted
    (``None`` means all).
    """
    conn = self.engine.raw_connection()
    cur = conn.cursor()
    if not colnames:
        colnames = next(rows)
    sql = 'INSERT INTO {table} ({colnames}) VALUES ({placeholders})'.format(
        table=self.tables[table],
        colnames=', '.join(colnames),
        placeholders=', '.join([self.placeholder] * len(colnames)))
    cur.executemany(sql, itertools.islice(rows, limit))
    conn.commit()
def load_archive(self, archive):
    """Load data from the zip archive of the NCBI taxonomy.

    Populates, in order: the "source" row for NCBI, the ranks table,
    and the nodes, names and merged tables parsed from the archive's
    nodes.dmp, names.dmp and merged.dmp files.

    * archive - path to the zip archive (eg, from fetch_data()).
    """
    # source
    self.load_table(
        'source',
        rows=[('ncbi', DATA_URL)],
        colnames=['name', 'description'],
    )
    # look up the id assigned to the 'ncbi' source row just inserted
    conn = self.engine.raw_connection()
    cur = conn.cursor()
    cmd = "select id from {source} where name = 'ncbi'".format(**self.tables)
    cur.execute(cmd)
    source_id = cur.fetchone()[0]
    # ranks
    # NOTE(review): 'log.info' here vs 'logging.info' below looks
    # inconsistent - confirm which logger is intended.
    log.info('loading ranks')
    self.load_table(
        'ranks',
        rows=((rank, i) for i, rank in enumerate(RANKS)),
        colnames=['rank', 'height'],
    )
    # nodes
    logging.info('loading nodes')
    nodes_rows = read_nodes(
        read_archive(archive, 'nodes.dmp'), source_id=source_id)
    self.load_table('nodes', rows=nodes_rows)
    # names
    logging.info('loading names')
    names_rows = read_names(
        read_archive(archive, 'names.dmp'), source_id=source_id)
    self.load_table('names', rows=names_rows)
    # merged
    logging.info('loading merged')
    merged_rows = read_merged(read_archive(archive, 'merged.dmp'))
    self.load_table('merged', rows=merged_rows) | Load data from the zip archive of the NCBI taxonomy. | entailment
def date_to_delorean(year, month, day):
    """Convert a calendar date to a Delorean instance in UTC.

    Args:
        year: int between 1 and 9999.
        month: int between 1 and 12.
        day: int between 1 and 31.

    Returns:
        Delorean instance in UTC for the date.
    """
    moment = dt(year, month, day)
    return Delorean(datetime=moment, timezone='UTC')
def date_to_epoch(year, month, day):
    """Convert a calendar date to an epoch in UTC.

    Args:
        year: int between 1 and 9999.
        month: int between 1 and 12.
        day: int between 1 and 31.

    Returns:
        Int epoch in UTC for the date.
    """
    delorean = date_to_delorean(year, month, day)
    return int(delorean.epoch)
def shift_epoch(delorean, direction, unit, count):
    """Get the epoch resulting from a time shift of a Delorean.

    Args:
        delorean: Delorean datetime instance to shift from.
        direction: 'last' (backwards) or 'next' (forwards).
        unit: 'hour', 'day', 'week', 'month' or 'year'.
        count: Int number of units to shift.

    Returns:
        Int epoch in UTC of the shifted Delorean.
    """
    shifted = delorean._shift_date(direction, unit, count)
    return int(shifted.epoch)
def generate_epochs(delorean, direction, unit, count):
    """Generate epochs for successive shifts of a Delorean.

    Args:
        delorean: Delorean datetime instance to shift from.
        direction: 'last' (backwards) or 'next' (forwards).
        unit: 'hour', 'day', 'week', 'month' or 'year'.
        count: Int number of epochs to generate (shifts 0..count-1).

    Yields:
        Int epoch in UTC of each shifted Delorean.
    """
    for offset in range(count):
        shifted = delorean._shift_date(direction, unit, offset)
        yield int(shifted.epoch)
def get_end_start_epochs(year, month, day, direction, unit, count):
    """Get the epoch of a start date and of a shifted date.

    Args:
        year: Int between 1 and 9999 (falsy means "use today").
        month: Int between 1 and 12.
        day: Int between 1 and 31.
        direction: 'last' (backwards) or 'next' (forwards).
        unit: 'hour', 'day', 'week', 'month' or 'year'.
        count: Int of units to shift by.

    Returns:
        Dict of int epochs in UTC with keys 'initial' and 'shifted'.
    """
    if year or month or day:  # a (partial) date was specified
        year = year or 2017
        month = month or 1
        day = day or 1
        initial_delorean = date_to_delorean(year, month, day)
    else:
        # No date specified: use now, plus one extra unit because the
        # current market period is still open.
        count += 1
        initial_delorean = now_delorean()
    return {
        'initial': int(initial_delorean.epoch),
        'shifted': shift_epoch(initial_delorean, direction, unit, count),
    }
def add_child(self, child):
    """Attach *child* to this node and register it in the shared index."""
    assert child != self
    # Share this node's rank list and tax_id index with the child.
    child.parent, child.ranks, child.index = self, self.ranks, self.index
    assert child.tax_id not in self.index
    self.index[child.tax_id] = child
    self.children.add(child)
def remove_child(self, child):
    """Detach ``child`` (and its whole subtree) from this node and index."""
    assert child in self.children
    self.children.remove(child)
    self.index.pop(child.tax_id)
    if child.parent is self:
        child.parent = None
    if child.index is self.index:
        child.index = None
    # Drop every descendant of the detached child from the shared index.
    for descendant in child:
        if descendant is not child:
            self.index.pop(descendant.tax_id)
            if descendant.index is self.index:
                descendant.index = None
def drop(self):
    """
    Detach this node from the taxonomy.  Child subtrees are re-attached
    to this node's parent and the sequences held here move to the parent.
    Not valid for the root node.
    """
    if self.is_root:
        raise ValueError("Cannot drop root node!")
    parent = self.parent
    # Re-home every child (and therefore its subtree) under the parent.
    for orphan in self.children:
        orphan.parent = parent
        parent.children.add(orphan)
    self.children = set()
    # The parent inherits any sequences attached directly to this node.
    parent.sequence_ids.update(self.sequence_ids)
    self.sequence_ids = set()
    parent.remove_child(self)
adding them to the node's parent, and moving sequences at this node
to the parent.
Not valid for root node. | entailment |
def prune_unrepresented(self):
    """Delete childless, sequence-less nodes from the subtree below."""
    # Walk children-first so parents emptied by earlier removals can be
    # pruned in the same pass.
    for node in self.depth_first_iter(self_first=False):
        if node is self:
            continue
        if not node.children and not node.sequence_ids:
            node.parent.remove_child(node)
def at_rank(self, rank):
    """Return the nearest node at ``rank`` on the path from here to root."""
    node = self
    while node:
        if node.rank == rank:
            return node
        node = node.parent
    raise ValueError("No node at rank {0} for {1}".format(
        rank, self.tax_id))
def depth_first_iter(self, self_first=True):
    """
    Yield this node and every descendant: pre-order when ``self_first``
    is true, post-order otherwise.
    """
    if self_first:
        yield self
    # Snapshot the child set so mutation during traversal is tolerated.
    for child in list(self.children):
        for node in child.depth_first_iter(self_first):
            yield node
    if not self_first:
        yield self
self. | entailment |
def path(self, tax_ids):
    """Descend along ``tax_ids`` (which must start at this node) and
    return the node at the end of the path.

    Raises ValueError (carrying the missing tax_id) on a broken link.
    """
    assert tax_ids[0] == self.tax_id
    if len(tax_ids) == 1:
        return self
    wanted = tax_ids[1]
    for child in self.children:
        if child.tax_id == wanted:
            return child.path(tax_ids[1:])
    raise ValueError(wanted)
def lineage(self):
    """Return every node from the root down to (and including) this one."""
    nodes = []
    node = self
    # Climb to the root collecting nodes, then flip to root-first order.
    while node:
        nodes.append(node)
        node = node.parent
    nodes.reverse()
    return nodes
def write_taxtable(self, out_fp, **kwargs):
    """
    Write a CSV taxtable covering this node, all of its descendants,
    and the lineage connecting this node to the root.
    """
    # Restrict the rank columns to ranks that actually occur, either in
    # the subtree below or on the path up to the root.
    seen_ranks = (frozenset(node.rank for node in self) |
                  frozenset(node.rank for node in self.lineage()))
    ranks = [rank for rank in self.ranks if rank in seen_ranks]
    assert len(seen_ranks) == len(ranks)

    def as_record(node):
        # By convention the root points at itself rather than at None.
        record = {
            'tax_id': node.tax_id,
            'tax_name': node.name,
            'parent_id': node.parent.tax_id if node.parent else node.tax_id,
            'rank': node.rank,
        }
        # One column per rank on this node's lineage.
        record.update((n.rank, n.tax_id) for n in node.lineage())
        return record

    fieldnames = ['tax_id', 'parent_id', 'rank', 'tax_name'] + ranks
    writer = csv.DictWriter(out_fp, fieldnames,
                            quoting=csv.QUOTE_NONNUMERIC, lineterminator='\n')
    writer.writeheader()
    # Ancestors first (excluding this node), then this node and its subtree.
    for ancestor in self.lineage()[:-1]:
        writer.writerow(as_record(ancestor))
    writer.writerows(as_record(node) for node in self)
including the lineage leading to this node. | entailment |
def populate_from_seqinfo(self, seqinfo):
    """Attach sequence names from an open seqinfo CSV to nodes below here.

    Rows whose tax_id is not present in this tree's index are skipped.
    """
    for record in csv.DictReader(seqinfo):
        target = self.index.get(record['tax_id'])
        if target:
            target.sequence_ids.add(record['seqname'])
def collapse(self, remove=False):
    """
    Pull every ``sequence_id`` in the subtree below this node up to
    this node.

    Args:
        remove: When True, the child subtrees are also deleted from the
            taxonomy after their sequences have been claimed.
    """
    descendants = iter(self)
    # The first item of a depth-first walk is this node itself; skip it.
    assert next(descendants) is self
    for descendant in descendants:
        self.sequence_ids.update(descendant.sequence_ids)
        descendant.sequence_ids.clear()
    if remove:
        # Iterate over a snapshot: remove_child() mutates self.children,
        # and removing from a set while iterating it raises RuntimeError.
        for node in list(self.children):
            self.remove_child(node)
If ``remove`` is True, nodes below this one are deleted from the
taxonomy. | entailment |
def write_seqinfo(self, out_fp, include_name=True):
    """
    Write a minimal seq_info CSV, suitable for use in taxtastic: just
    seqname and tax_id (plus tax_name when ``include_name`` is True).
    Useful for printing out the results of collapsing tax nodes.
    """
    fieldnames = ['seqname', 'tax_id']
    if include_name:
        fieldnames.append('tax_name')
    writer = csv.DictWriter(out_fp, fieldnames, quoting=csv.QUOTE_NONNUMERIC,
                            lineterminator='\n', extrasaction='ignore')
    writer.writeheader()
    # 'tax_name' is always in the records; extrasaction='ignore' drops it
    # from the output when include_name is False.
    writer.writerows({'seqname': seq_id,
                      'tax_id': node.tax_id,
                      'tax_name': node.name}
                     for node in self
                     for seq_id in node.sequence_ids)
Useful for printing out the results of collapsing tax nodes - super
bare bones, just tax_id and seqname.
If include_name is True, a column with the taxon name is included. | entailment |
def from_taxtable(cls, taxtable_fp):
    """
    Build a tree of ``cls`` nodes from an open handle to a taxtable, as
    generated by ``taxit taxtable``.
    """
    reader = csv.reader(taxtable_fp)
    headers = next(reader)
    # Rank/lineage columns start at the 'root' column.
    path_root = headers.index('root')
    rows = (collections.OrderedDict(list(zip(headers, row)))
            for row in reader)
    first = next(rows)
    root = cls(rank=first['rank'], tax_id=first['tax_id'],
               name=first['tax_name'])
    root.ranks = headers[path_root:]
    for row in rows:
        # The non-empty lineage cells give the path from the root to this
        # node; its parent is that path minus the last step.
        lineage_ids = [v for v in list(row.values())[path_root:] if v]
        parent = root.path(lineage_ids[:-1])
        parent.add_child(
            cls(row['rank'], row['tax_id'], name=row['tax_name']))
    return root
``taxit taxtable`` | entailment |
def from_taxdb(cls, con, root=None):
    """
    Build a tree of ``cls`` nodes from a taxonomy database connection.

    Args:
        con: DB-API connection with ``nodes`` and ``names`` tables.
        root: Optional tax_id to root the tree at.  Defaults to the
            self-parented row (the database root).
    """
    cursor = con.cursor()
    if root is None:
        # The root row is the one that is its own parent.
        cursor.execute(
            "SELECT tax_id, rank FROM nodes WHERE tax_id = parent_id")
    else:
        cursor.execute(
            "SELECT tax_id, rank FROM nodes WHERE tax_id = ?", [root])
    tax_id, rank = cursor.fetchone()
    root = cls(rank=rank, tax_id=tax_id)

    def add_lineage(parent):
        # Attach all primary-named children of ``parent`` before
        # recursing: the recursion re-executes (and resets) this cursor.
        cursor.execute("""SELECT tax_id, rank, tax_name
                       FROM nodes INNER JOIN names USING (tax_id)
                       WHERE parent_id = :1 and tax_id <> :1
                       AND names.is_primary = 1
                       """, [parent.tax_id])
        for child_id, child_rank, child_name in cursor:
            parent.add_child(
                cls(rank=child_rank, tax_id=child_id, name=child_name))
        for child in parent.children:
            add_lineage(child)

    add_lineage(root)
    return root
def parse_host(host):
    """
    Parse *host* in format ``"[hostname:]port"`` and return :class:`tuple`
    ``(address, port)``.

    >>> parse_host('localhost:4444')
    ('localhost', 4444)
    >>> parse_host(':4444')
    ('', 4444)
    >>> parse_host('4444')
    ('', 4444)
    >>> parse_host('2001:db8::1428:57ab:4444')
    ('2001:db8::1428:57ab', 4444)
    >>> parse_host('localhost')
    ValueError: Invalid port number 'localhost'
    """
    parts = host.split(':')
    # Everything before the final colon is the address (may itself
    # contain colons, e.g. an IPv6 literal).
    address = ':'.join(parts[:-1])
    try:
        port = int(parts[-1])
    except ValueError:
        port = None
    if not port or port < 1 or port > 65535:
        # Report the offending input text, not the parsed value -- the
        # parsed value is None when int() failed, which produced the
        # useless message "Invalid port number 'None'".
        raise ValueError("Invalid port number '%s'" % parts[-1])
    return address, port
``(address, port)``.
>>> parse_host('localhost:4444')
('localhost', 4444)
>>> parse_host(':4444')
('', 4444)
>>> parse_host('4444')
('', 4444)
>>> parse_host('2001:db8::1428:57ab:4444')
('2001:db8::1428:57ab', 4444)
>>> parse_host('localhost')
ValueError: Invalid port number 'localhost' | entailment |
def get_children(engine, parent_ids, rank='species', schema=None):
    """
    Recursively fetch descendants of the tax_ids in ``parent_ids`` down
    to the level of ``rank``.

    Args:
        engine: SQLAlchemy engine connected to a taxonomy database with
            ``nodes`` and ``names`` tables.
        parent_ids: Iterable of tax_ids whose children are wanted.
        rank: Rank at which to collect nodes (default 'species').
        schema: Optional schema name qualifying the nodes/names tables.

    Returns:
        Tuple ``(keys, species)``: the result column names and a list of
        row dicts whose rank equals ``rank``.
    """
    if not parent_ids:
        # Keep the return shape consistent (the original returned a bare
        # list here, breaking ``keys, species = ...`` unpacking).
        return [], []
    nodes = schema + '.nodes' if schema else 'nodes'
    names = schema + '.names' if schema else 'names'
    cmd = ('select tax_id, tax_name, rank '
           'from {} join {} using (tax_id) '
           'where parent_id = :tax_id and is_primary').format(nodes, names)
    species = []
    keys = []
    for parent_id in parent_ids:
        result = engine.execute(sqlalchemy.sql.text(cmd), tax_id=parent_id)
        keys = list(result.keys())
        rows = [dict(list(zip(keys, row))) for row in result.fetchall()]
        for r in rows:
            if r['rank'] == rank and 'sp.' not in r['tax_name']:
                species.append(r)
        others = [r for r in rows if r['rank'] not in (rank, 'no_rank')]
        if others:
            # Propagate rank and schema: the original recursion silently
            # reverted to the defaults, querying the wrong tables/rank.
            _, s = get_children(engine, [r['tax_id'] for r in others],
                                rank=rank, schema=schema)
            species.extend(s)
    return keys, species
rank of `rank` | entailment |
def get_config_items(self):
    """
    Return the current configuration as a :class:`tuple` of
    option-value pairs.

    ::

        (('option1', value1), ('option2', value2))
    """
    options = (
        'settings', 'context_class', 'interfaces', 'logging',
        'name', 'init_handler', 'sigusr1_handler', 'sigusr2_handler',
    )
    return tuple((option, getattr(self, option)) for option in options)
option-value pairs.
::
(('option1', value1), ('option2', value2)) | entailment |
def context_class(self):
    """
    Context as a :class:`shelter.core.context.Context` class or subclass.
    """
    if 'context_class' not in self._cached_values:
        # Resolve the dotted path from settings, falling back to the base
        # Context class when CONTEXT_CLASS is unset or empty.
        name = getattr(self.settings, 'CONTEXT_CLASS', '')
        self._cached_values['context_class'] = (
            import_object(name) if name else Context)
    return self._cached_values['context_class']
def interfaces(self):
    """
    Interfaces as a :class:`list` of the
    :class:`shelter.core.config.Config.Interface` instances.
    """
    if 'interfaces' not in self._cached_values:
        # Build into a local list and cache only on success -- the
        # original cached an empty list *before* populating it, so an
        # exception mid-loop left a partial result that later calls
        # silently returned.
        instances = []
        for name, interface in six.iteritems(self.settings.INTERFACES):
            listen = interface.get('LISTEN')
            unix_socket = interface.get('UNIX_SOCKET')
            if not listen and not unix_socket:
                raise ValueError(
                    'Interface MUST listen either on TCP '
                    'or UNIX socket or both')
            host, port = parse_host(listen) if listen else (None, None)
            processes = int(interface.get('PROCESSES', 1))
            urls_obj_name = interface.get('URLS', '')
            urls = import_object(urls_obj_name) if urls_obj_name else ()
            instances.append(
                self.Interface(
                    name, host, port, unix_socket, processes, urls)
            )
        self._cached_values['interfaces'] = instances
    return self._cached_values['interfaces']
:class:`shelter.core.config.Config.Interface` instances. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.