code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
if curr['close'] > prev['close']:
v = curr['volume']
elif curr['close'] < prev['close']:
v = curr['volume'] * -1
else:
v = 0
return prev['obv'] + v | def eval_algorithm(curr, prev) | Evaluates OBV
Args:
curr: Dict of current volume and close
prev: Dict of previous OBV and close
Returns:
Float of OBV | 3.429535 | 2.940453 | 1.166329 |
closes = poloniex.get_attribute(json, 'close')
volumes = poloniex.get_attribute(json, 'volume')
obv = 0
for date in range(1, len(json)):
curr = {'close': closes[date], 'volume': volumes[date]}
prev = {'close': closes[date - 1], 'obv': obv}
obv = OBV.eval_algorithm(curr, prev)
return obv | def eval_from_json(json) | Evaluates OBV from JSON (typically Poloniex API response)
Args:
json: List of dates where each entry is a dict of raw market data.
Returns:
Float of OBV | 3.946744 | 3.521236 | 1.120841 |
kwargs['delete'] = False
tf = tempfile.NamedTemporaryFile(**kwargs)
tf.close()
try:
yield tf.name
finally:
if unlink:
os.unlink(tf.name) | def scratch_file(unlink=True, **kwargs) | Create a temporary file and return its name.
Additional arguments are passed to :class:`tempfile.NamedTemporaryFile`
At the start of the with block a secure, temporary file is created
and its name returned. At the end of the with block it is
deleted. | 2.57915 | 3.549979 | 0.726526 |
return self.file_factory(self.file_path(name), *mode) | def open(self, name, *mode) | Return an open file object for a file in the reference package. | 7.465097 | 6.03422 | 1.237127 |
return self.open(self.resource_name(resource), *mode) | def open_resource(self, resource, *mode) | Return an open file object for a particular named resource in this
reference package. | 6.75007 | 6.142713 | 1.098874 |
if not(resource in self.contents['files']):
raise ValueError("No such resource %r in refpkg" % (resource,))
return self.contents['files'][resource] | def resource_name(self, resource) | Return the name of the file within the reference package for a
particular named resource. | 8.80514 | 6.02996 | 1.460232 |
if not(resource in self.contents['md5']):
raise ValueError("No such resource %r in refpkg" % (resource,))
return self.contents['md5'][resource] | def resource_md5(self, resource) | Return the stored MD5 sum for a particular named resource. | 7.319297 | 6.229578 | 1.174927 |
self.contents.setdefault('log', [])
self.contents.setdefault('rollback', None)
self.contents.setdefault('rollforward', None) | def _set_defaults(self) | Set some default values in the manifest.
This method should be called after loading from disk, but before
checking the integrity of the reference package. | 7.29424 | 8.036915 | 0.907592 |
with self.open_manifest('w') as h:
json.dump(self.contents, h, indent=4)
h.write('\n') | def _sync_to_disk(self) | Write any changes made on Refpkg to disk.
Other methods of Refpkg that alter the contents of the package
will call this method themselves. Generally you should never
have to call it by hand. The only exception would be if
another program has changed the Refpkg on disk while your
program is running and you want to force your version over it.
Otherwise it should only be called by other methods of refpkg. | 5.875987 | 6.387562 | 0.919911 |
try:
fobj = self.open_manifest('r')
except IOError as e:
if e.errno == errno.ENOENT:
raise ValueError(
"couldn't find manifest file in %s" % (self.path,))
elif e.errno == errno.ENOTDIR:
raise ValueError("%s is not a directory" % (self.path,))
else:
raise
with fobj:
self.contents = json.load(fobj)
self._set_defaults()
self._check_refpkg() | def _sync_from_disk(self) | Read any changes made on disk to this Refpkg.
This is necessary if other programs are making changes to the
Refpkg on disk and your program must be synchronized to them. | 3.768047 | 3.283997 | 1.147397 |
filename = os.path.basename(path)
base, ext = os.path.splitext(filename)
if os.path.exists(self.file_path(filename)):
with tempfile.NamedTemporaryFile(
dir=self.path, prefix=base, suffix=ext) as tf:
filename = os.path.basename(tf.name)
shutil.copyfile(path, self.file_path(filename))
self.contents['files'][key] = filename | def _add_file(self, key, path) | Copy a file into the reference package. | 2.796792 | 2.594376 | 1.078021 |
# Manifest file contains the proper keys
for k in ['metadata', 'files', 'md5']:
if not(k in self.contents):
return "Manifest file missing key %s" % k
if not(isinstance(self.contents[k], dict)):
return "Key %s in manifest did not refer to a dictionary" % k
if not('rollback' in self.contents):
return "Manifest file missing key rollback"
if not(isinstance(self.contents['rollback'], dict)) and self.contents[
"rollback"] is not None:
return ("Key rollback in manifest did not refer to a "
"dictionary or None, found %s") % str(self.contents['rollback'])
if not('rollforward' in self.contents):
return "Manifest file missing key rollforward"
if self.contents['rollforward'] is not None:
if not(isinstance(self.contents['rollforward'], list)):
return "Key rollforward was not a list, found %s" % str(
self.contents['rollforward'])
elif len(self.contents['rollforward']) != 2:
return "Key rollforward had wrong length, found %d" % \
len(self.contents['rollforward'])
elif not is_string(self.contents['rollforward'][0]):
print(type(self.contents['rollforward'][0]))
return "Key rollforward's first entry was not a string, found %s" % \
str(self.contents['rollforward'][0])
elif not(isinstance(self.contents['rollforward'][1], dict)):
return "Key rollforward's second entry was not a dict, found %s" % \
str(self.contents['rollforward'][1])
if not("log" in self.contents):
return "Manifest file missing key 'log'"
if not(isinstance(self.contents['log'], list)):
return "Key 'log' in manifest did not refer to a list"
# MD5 keys and filenames are in one to one correspondence
if self.contents['files'].keys() != self.contents[
'md5'].keys():
return ("Files and MD5 sums in manifest do not "
"match (files: %s, MD5 sums: %s)") % \
(list(self.contents['files'].keys()),
list(self.contents['md5'].keys()))
# All files in the manifest exist and match the MD5 sums
for key, filename in self.contents['files'].items():
# we don't need to explicitly check for existence;
# calculate_resource_md5 will open the file for us.
expected_md5 = self.resource_md5(key)
found_md5 = self.calculate_resource_md5(key)
if found_md5 != expected_md5:
return ("File %s referred to by key %s did "
"not match its MD5 sum (found: %s, expected %s)") % \
(filename, key, found_md5, expected_md5)
return False | def is_invalid(self) | Check if this RefPkg is invalid.
Valid means that it contains a properly named manifest, and
each of the files described in the manifest exists and has the
proper MD5 hashsum.
If the Refpkg is valid, is_invalid returns False. Otherwise it
returns a nonempty string describing the error. | 2.381523 | 2.320216 | 1.026423 |
old_value = self.contents['metadata'].get(key)
self.contents['metadata'][key] = value
self._log('Updated metadata: %s=%s' % (key, value))
return old_value | def update_metadata(self, key, value) | Set *key* in the metadata to *value*.
Returns the previous value of *key*, or None if the key was
not previously set. | 3.451414 | 3.736389 | 0.92373 |
if key in self.contents['files']:
old_path = self.resource_path(key)
else:
old_path = None
self._add_file(key, new_path)
with open(new_path, 'rb') as f:
md5_value = md5file(f)
self.contents['md5'][key] = md5_value
self._log('Updated file: %s=%s' % (key, new_path))
if key == 'tree_stats' and old_path:
warnings.warn('Updating tree_stats, but not phylo_model.',
DerivedFileNotUpdatedWarning, stacklevel=2)
return old_path | def update_file(self, key, new_path) | Insert file *new_path* into the refpkg under *key*.
The filename of *new_path* will be preserved in the refpkg
unless it would conflict with a previously existing file, in
which case a suffix is appended which makes it unique. The
previous file, if there was one, is left in the refpkg. If
you wish to delete it, see the ``strip`` method.
The full path to the previous file referred to by *key* is
returned, or ``None`` if *key* was not previously defined in
the refpkg. | 4.73264 | 4.894731 | 0.966885 |
with scratch_file(prefix='tree', suffix='.tre') as name:
# Use a specific path to rppr, otherwise rely on $PATH
subprocess.check_call([rppr or 'rppr', 'reroot',
'-c', self.path, '-o', name])
if not(pretend):
self.update_file('tree', name)
self._log('Rerooting refpkg') | def reroot(self, rppr=None, pretend=False) | Reroot the phylogenetic tree.
This operation calls ``rppr reroot`` to generate the rerooted
tree, so you must have ``pplacer`` and its auxiliary tools
``rppr`` and ``guppy`` installed for it to work. You can
specify the path to ``rppr`` by giving it as the *rppr*
argument.
If *pretend* is ``True``, the convexification is run, but the
refpkg is not actually updated. | 7.812106 | 6.76372 | 1.155001 |
if frequency_type not in (None, 'model', 'empirical'):
raise ValueError(
'Unknown frequency type: "{0}"'.format(frequency_type))
if frequency_type and stats_type not in (None, 'PhyML'):
raise ValueError('Frequency type should only be specified for '
'PhyML alignments.')
if stats_type is None:
with open(stats_file) as fobj:
for line in fobj:
if line.startswith('FastTree'):
stats_type = 'FastTree'
break
elif (line.startswith('This is RAxML') or
line.startswith('You are using RAxML')):
stats_type = 'RAxML'
break
elif 'PhyML' in line:
stats_type = 'PhyML'
break
else:
raise ValueError(
"couldn't guess log type for %r" % (stats_file,))
if stats_type == 'RAxML':
parser = utils.parse_raxml
elif stats_type == 'FastTree':
parser = utils.parse_fasttree
elif stats_type == 'PhyML':
parser = functools.partial(utils.parse_phyml,
frequency_type=frequency_type)
else:
raise ValueError('invalid log type: %r' % (stats_type,))
with scratch_file(prefix='phylo_model', suffix='.json') as name:
with open(name, 'w') as phylo_model, open(stats_file) as h:
json.dump(parser(h), phylo_model, indent=4)
self.update_file('phylo_model', name) | def update_phylo_model(self, stats_type, stats_file, frequency_type=None) | Parse a stats log and use it to update ``phylo_model``.
``pplacer`` expects its input to include the deatils of the
phylogenetic model used for creating a tree in JSON format
under the key ``phylo_model``, but no program actually outputs
that format.
This function takes a log generated by RAxML or FastTree, parses it,
and inserts an appropriate JSON file into the refpkg. The first
parameter must be 'RAxML', 'PhyML' or 'FastTree', depending on which
program generated the log. It may also be None to attempt to guess
which program generated the log.
:param stats_type: Statistics file type. One of 'RAxML', 'FastTree', 'PhyML'
:param stats_file: path to statistics/log file
:param frequency_type: For ``stats_type == 'PhyML'``, amino acid
alignments only: was the alignment inferred with ``model`` or
``empirical`` frequencies? | 2.866544 | 2.34813 | 1.220778 |
# This is slightly complicated because of Python's freakish
# assignment semantics and because we don't store multiple
# copies of the log.
if self.contents['rollback'] is None:
raise ValueError("No operation to roll back on refpkg")
future_msg = self.contents['log'][0]
rolledback_log = self.contents['log'][1:]
rollforward = copy.deepcopy(self.contents)
rollforward.pop('rollback')
self.contents = self.contents['rollback']
self.contents['log'] = rolledback_log
self.contents['rollforward'] = [future_msg, rollforward]
self._sync_to_disk() | def rollback(self) | Revert the previous modification to the refpkg. | 6.013135 | 5.168979 | 1.163312 |
if self.contents['rollforward'] is None:
raise ValueError("No operation to roll forward on refpkg")
new_log_message = self.contents['rollforward'][0]
new_contents = self.contents['rollforward'][1]
new_contents['log'] = [new_log_message] + self.contents.pop('log')
self.contents['rollforward'] = None
new_contents['rollback'] = copy.deepcopy(self.contents)
new_contents['rollback'].pop('rollforward')
self.contents = new_contents
self._sync_to_disk() | def rollforward(self) | Restore a reverted modification to the refpkg. | 3.91917 | 3.209668 | 1.221051 |
self._sync_from_disk()
current_filenames = set(self.contents['files'].values())
all_filenames = set(os.listdir(self.path))
to_delete = all_filenames.difference(current_filenames)
to_delete.discard('CONTENTS.json')
for f in to_delete:
self._delete_file(f)
self.contents['rollback'] = None
self.contents['rollforward'] = None
self.contents['log'].insert(
0, 'Stripped refpkg (removed %d files)' % len(to_delete))
self._sync_to_disk() | def strip(self) | Remove rollbacks, rollforwards, and all non-current files.
When distributing a refpkg, you probably want to distribute as
small a one as possible. strip removes everything from the
refpkg which is not relevant to its current state. | 4.690625 | 3.488477 | 1.344606 |
if self.current_transaction:
raise ValueError("There is already a transaction going")
else:
initial_state = copy.deepcopy(self.contents)
self.current_transaction = {'rollback': initial_state,
'log': '(Transaction left no log message)'} | def start_transaction(self) | Begin a transaction to group operations on the refpkg.
All the operations until the next call to
``commit_transaction`` will be recorded as a single operation
for rollback and rollforward, and recorded with a single line
in the log. | 10.734992 | 9.972151 | 1.076497 |
self.current_transaction['rollback'].pop('log')
self.current_transaction['rollback'].pop('rollforward')
self.contents['log'].insert(
0, log and log or self.current_transaction['log'])
self.contents['rollback'] = self.current_transaction['rollback']
self.contents['rollforward'] = None # We can't roll forward anymore
self.current_transaction = None
self._sync_to_disk() | def commit_transaction(self, log=None) | Commit a transaction, with *log* as the log entry. | 4.776512 | 4.511327 | 1.058782 |
m = self.is_invalid()
if m:
return m
required_keys = ('aln_fasta', 'aln_sto', 'seq_info', 'tree',
'taxonomy', 'phylo_model')
for k in required_keys:
if k not in self.contents['files']:
return "RefPkg has no key " + k
# aln_fasta, seq_info, tree, and aln_sto must be valid FASTA,
# CSV, Newick, and Stockholm files, respectively, and describe
# the same sequences.
with self.open_resource('aln_fasta') as f:
firstline = f.readline()
if firstline.startswith('>'):
f.seek(0)
else:
return 'aln_fasta file is not valid FASTA.'
fasta_names = {seq.id for seq in fastalite(f)}
with self.open_resource('seq_info') as f:
lines = list(csv.reader(f))
headers = set(lines[0])
# Check required headers
for req_header in 'seqname', 'tax_id':
if req_header not in headers:
return "seq_info is missing {0}".format(req_header)
lengths = {len(line) for line in lines}
if len(lengths) > 1:
return "some lines in seq_info differ in field cout"
csv_names = {line[0] for line in lines[1:]}
with self.open_resource('aln_sto') as f:
try:
sto_names = set(utils.parse_stockholm(f))
except ValueError:
return 'aln_sto file is not valid Stockholm.'
try:
tree = dendropy.Tree.get(
path=self.resource_path('tree'),
schema='newick',
case_sensitive_taxon_labels=True,
preserve_underscores=True)
tree_names = set(tree.taxon_namespace.labels())
except Exception:
return 'tree file is not valid Newick.'
d = fasta_names.symmetric_difference(sto_names)
if len(d) != 0:
return "Names in aln_fasta did not match aln_sto. Mismatches: " + \
', '.join([str(x) for x in d])
d = fasta_names.symmetric_difference(csv_names)
if len(d) != 0:
return "Names in aln_fasta did not match seq_info. Mismatches: " + \
', '.join([str(x) for x in d])
d = fasta_names.symmetric_difference(tree_names)
if len(d) != 0:
return "Names in aln_fasta did not match nodes in tree. Mismatches: " + \
', '.join([str(x) for x in d])
# Next make sure that taxonomy is valid CSV, phylo_model is valid JSON
with self.open_resource('taxonomy') as f:
lines = list(csv.reader(f))
lengths = {len(line) for line in lines}
if len(lengths) > 1:
return ("Taxonomy is invalid: not all lines had "
"the same number of fields.")
# I don't try to check if the taxids match up to those
# mentioned in aln_fasta, since that would make taxtastic
# depend on RefsetInternalFasta in romperroom.
with self.open_resource('phylo_model') as f:
try:
json.load(f)
except ValueError:
return "phylo_model is not valid JSON."
return False | def is_ill_formed(self) | Stronger set of checks than is_invalid for Refpkg.
Checks that FASTA, Stockholm, JSON, and CSV files under known
keys are all valid as well as calling is_invalid. Returns
either False or a string describing the error. | 3.290047 | 3.111177 | 1.057493 |
db = taxdb.Taxdb()
db.create_tables()
reader = csv.DictReader(self.open_resource('taxonomy', 'rU'))
db.insert_from_taxtable(lambda: reader._fieldnames, reader)
curs = db.cursor()
reader = csv.DictReader(self.open_resource('seq_info', 'rU'))
curs.executemany("INSERT INTO sequences VALUES (?, ?)",
((row['seqname'], row['tax_id']) for row in reader))
db.commit()
self.db = db | def load_db(self) | Load the taxonomy into a sqlite3 database.
This will set ``self.db`` to a sqlite3 database which contains all of
the taxonomic information in the reference package. | 5.959544 | 5.279292 | 1.128853 |
if len(ts) > 200:
res = self._large_mrca(ts)
else:
res = self._small_mrca(ts)
if res:
(res,), = res
else:
raise NoAncestor()
return res | def most_recent_common_ancestor(self, *ts) | Find the MRCA of some tax_ids.
Returns the MRCA of the specified tax_ids, or raises ``NoAncestor`` if
no ancestor of the specified tax_ids could be found. | 4.879308 | 3.897131 | 1.252025 |
cursor = self.db.cursor()
cursor.execute()
cursor.execute()
cursor.executemany(, ((tid,) for tid in ts))
cursor.execute(, (len(ts),))
return cursor.fetchall() | def _large_mrca(self, ts) | Find the MRCA using a temporary table. | 7.068772 | 5.922015 | 1.193643 |
cursor = self.db.cursor()
qmarks = ', '.join('?' * len(ts))
cursor.execute( % qmarks, ts + (len(ts),))
return cursor.fetchall() | def _small_mrca(self, ts) | Find a MRCA using query parameters.
This only supports a limited number of tax_ids; ``_large_mrca`` will
support an arbitrary number. | 7.344991 | 6.94098 | 1.058207 |
warnings.warn(
"file_abspath is deprecated; use resource_path instead",
DeprecationWarning, stacklevel=2)
return self.resource_path(resource) | def file_abspath(self, resource) | Deprecated alias for *resource_path*. | 3.353097 | 2.299108 | 1.458434 |
warnings.warn(
"file_name is deprecated; use resource_name instead",
DeprecationWarning, stacklevel=2)
return self.resource_name(resource) | def file_name(self, resource) | Deprecated alias for *resource_name*. | 3.297179 | 2.283864 | 1.443685 |
warnings.warn(
"file_md5 is deprecated; use resource_md5 instead",
DeprecationWarning, stacklevel=2)
return self.resource_md5(resource) | def file_md5(self, resource) | Deprecated alias for *resource_md5*. | 2.975116 | 2.176093 | 1.367183 |
args, dummy_remaining = parser.parse_known_args(known_args)
settings_module_path = (
args.settings or os.environ.get('SHELTER_SETTINGS_MODULE', ''))
if not settings_module_path:
return None
return importlib.import_module(settings_module_path) | def get_app_settings(parser, known_args) | Return **settings** module of the application according to
either command line argument or **SHELTER_SETTINGS_MODULE**
environment variable. | 3.980183 | 3.106332 | 1.281313 |
app_commands = getattr(settings, 'MANAGEMENT_COMMANDS', ())
commands = {}
for name in itertools.chain(SHELTER_MANAGEMENT_COMMANDS, app_commands):
command_obj = import_object(name)
if not issubclass(command_obj, BaseCommand):
raise ValueError("'%s' is not subclass of the BaseCommand" % name)
commands[command_obj.name] = command_obj
return commands | def get_management_commands(settings) | Find registered managemend commands and return their classes
as a :class:`dict`. Keys are names of the management command
and values are classes of the management command. | 3.104902 | 3.133584 | 0.990847 |
config_cls_name = getattr(settings, 'CONFIG_CLASS', '')
if config_cls_name:
config_cls = import_object(config_cls_name)
else:
config_cls = Config
return config_cls | def get_config_class(settings) | According to **settings.CONFIG_CLASS** return either config class
defined by user or default :class:`shelter.core.config.Config`. | 2.394411 | 2.389265 | 1.002154 |
# Base command line parser. Help is not allowed because command
# line is parsed in two stages - in the first stage is found setting
# module of the application, in the second stage are found management
# command's arguments.
parser = ArgumentParser(add_help=False)
parser.add_argument(
'-s', '--settings',
dest='settings', action='store', type=str, default=None,
help=_('application settings module')
)
# Get settings module
try:
settings = get_app_settings(parser, args)
except ImportError as exc:
parser.error(_("Invalid application settings module: {}").format(exc))
# Get management commands and add their arguments into command
# line parser
commands = get_management_commands(settings)
subparsers = parser.add_subparsers(
dest='action', help=_('specify action')
)
for command_cls in six.itervalues(commands):
subparser = subparsers.add_parser(
command_cls.name, help=command_cls.help)
for command_args, kwargs in command_cls.arguments:
subparser.add_argument(*command_args, **kwargs)
# Get config class and add its arguments into command line parser
if settings:
config_cls = get_config_class(settings)
if not issubclass(config_cls, Config):
raise TypeError(
"Config class must be subclass of the "
"shelter.core.config.Config")
for config_args, kwargs in config_cls.arguments:
parser.add_argument(*config_args, **kwargs)
else:
config_cls = Config
# Add help argument and parse command line
parser.add_argument(
'-h', '--help', action='help',
help=_('show this help message and exit')
)
cmdline_args = parser.parse_args(args)
if not cmdline_args.action:
parser.error(_('No action'))
# Run management command
command_cls = commands[cmdline_args.action]
if command_cls.settings_required and not settings:
parser.error(_(
"Settings module is not defined. You must either set "
"'SHELTER_SETTINGS_MODULE' environment variable or "
"'-s/--settings' command line argument."
))
try:
config = config_cls(settings, cmdline_args)
except ImproperlyConfiguredError as exc:
parser.error(str(exc))
command = command_cls(config)
try:
command()
except Exception:
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
if multiprocessing.active_children():
# If main process has children processes, exit immediately without
# cleaning. It is a workaround, because parent process waits for
# non-daemon children.
os._exit(1)
sys.exit(1)
sys.exit(0) | def main(args=None) | Run management command handled from command line. | 3.062242 | 2.997752 | 1.021513 |
new_vec = self.copy()
new_vec.length = value
return new_vec | def as_length(self, value) | Return a new vector scaled to given length | 4.976356 | 3.309248 | 1.503772 |
new_vec = self.copy()
new_vec.length = value * self.length
return new_vec | def as_percent(self, value) | Return a new vector scaled by given decimal percent | 6.181348 | 3.658236 | 1.689707 |
if not isinstance(vec, self.__class__):
raise TypeError('Dot product operand must be a vector')
return np.dot(self, vec) | def dot(self, vec) | Dot product with another vector | 4.02989 | 3.758269 | 1.072273 |
if not isinstance(vec, self.__class__):
raise TypeError('Cross product operand must be a vector')
return self.__class__(np.cross(self, vec)) | def cross(self, vec) | Cross product with another vector | 3.872038 | 3.908896 | 0.990571 |
if not isinstance(vec, self.__class__):
raise TypeError('Angle operand must be of class {}'
.format(self.__class__.__name__))
if unit not in ['deg', 'rad']:
raise ValueError('Only units of rad or deg are supported')
denom = self.length * vec.length
if denom == 0:
raise ZeroDivisionError('Cannot calculate angle between '
'zero-length vector(s)')
ang = np.arccos(self.dot(vec) / denom)
if unit == 'deg':
ang = ang * 180 / np.pi
return ang | def angle(self, vec, unit='rad') | Calculate the angle between two Vectors
unit: unit for returned angle, either 'rad' or 'deg'. Defaults to 'rad' | 2.695034 | 2.608196 | 1.033294 |
return np.arctan2(np.sqrt(self.x**2 + self.y**2), self.z) | def phi(self) | Polar angle / inclination of this vector in radians
Based on sperical coordinate space
returns angle between this vector and the positive z-azis
range: (0 <= phi <= pi) | 2.881401 | 2.667369 | 1.080241 |
if not isinstance(vec, self.__class__):
raise TypeError('Cross product operand must be a vector')
return Vector3(0, 0, np.asscalar(np.cross(self, vec))) | def cross(self, vec) | Cross product with another vector | 4.467375 | 4.416502 | 1.011519 |
return np.sqrt(np.sum(self**2, axis=1)).view(np.ndarray) | def length(self) | Array of vector lengths | 6.034784 | 4.601972 | 1.311348 |
if not isinstance(vec, self.__class__):
raise TypeError('Dot product operand must be a VectorArray')
if self.nV != 1 and vec.nV != 1 and self.nV != vec.nV:
raise ValueError('Dot product operands must have the same '
'number of elements.')
return np.sum((getattr(self, d)*getattr(vec, d) for d in self.dims), 1) | def dot(self, vec) | Dot product with another vector | 3.625724 | 3.64265 | 0.995353 |
if not isinstance(vec, Vector3Array):
raise TypeError('Cross product operand must be a Vector3Array')
if self.nV != 1 and vec.nV != 1 and self.nV != vec.nV:
raise ValueError('Cross product operands must have the same '
'number of elements.')
return Vector3Array(np.cross(self, vec)) | def cross(self, vec) | Cross product with another Vector3Array | 3.161865 | 2.839528 | 1.113518 |
# Number of days that the data was collected through
count = len(gains) + len(losses)
avg_gains = stats.avg(gains, count=count) if gains else 1
avg_losses = stats.avg(losses,count=count) if losses else 1
if avg_losses == 0:
return avg_gains
else:
return avg_gains / avg_losses | def eval_rs(gains, losses) | Evaluates the RS variable in RSI algorithm
Args:
gains: List of price gains.
losses: List of prices losses.
Returns:
Float of average gains over average losses. | 4.187425 | 4.201953 | 0.996542 |
changes = poloniex.get_gains_losses(poloniex.parse_changes(json))
return RSI.eval_algorithm(changes['gains'], changes['losses']) | def eval_from_json(json) | Evaluates RSI from JSON (typically Poloniex API response)
Args:
json: List of dates where each entry is a dict of raw market data.
Returns:
Float between 0 and 100, momentum indicator
of a market measuring the speed and change of price movements. | 14.037888 | 10.560596 | 1.32927 |
if self._sigusr1_handler_func is not None:
self._sigusr1_handler_func(self.context) | def sigusr1_handler(self, unused_signum, unused_frame) | Handle SIGUSR1 signal. Call function which is defined in the
**settings.SIGUSR1_HANDLER**. | 4.138304 | 3.454159 | 1.198064 |
if self._sigusr1_handler_func is not None:
self._sigusr2_handler_func(self.context) | def sigusr2_handler(self, unused_signum, unused_frame) | Handle SIGUSR2 signal. Call function which is defined in the
**settings.SIGUSR2_HANDLER**. | 5.168013 | 4.815669 | 1.073166 |
log.info('loading reference package')
pkg = refpkg.Refpkg(args.refpkg, create=False)
with open(pkg.file_abspath('seq_info'), 'rU') as seq_info:
seqinfo = list(csv.DictReader(seq_info))
snames = [row['seqname'] for row in seqinfo]
if args.seq_names:
print('\n'.join(snames))
elif args.tally:
tally_taxa(pkg)
elif args.lengths:
print_lengths(pkg)
else:
print('number of sequences:', len(snames))
print('package components\n', '\n'.join(sorted(pkg.file_keys()))) | def action(args) | Show information about reference packages. | 5.68461 | 5.31677 | 1.069185 |
start = json[0]['date']
end = json[-1]['date']
diff = end - start
# Get period by a ratio from calculated period to valid periods
# Ratio closest to 1 is the period
# Valid values: 300, 900, 1800, 7200, 14400, 86400
periods = [300, 900, 1800, 7200, 14400, 86400]
diffs = {}
for p in periods:
diffs[p] = abs(1 - (p / (diff / len(json)))) # Get ratio
period = min(diffs, key=diffs.get) # Find closest period
url = ('https://poloniex.com/public?command'
'=returnChartData¤cyPair={0}&start={1}'
'&end={2}&period={3}').format(symbol, start, end, period)
return url | def json_to_url(json, symbol) | Converts a JSON to a URL by the Poloniex API
Args:
json: JSON data as a list of dict dates, where the keys are
the raw market statistics.
symbol: String of currency pair, like a ticker symbol.
Returns:
String URL to Poloniex API representing the given JSON. | 2.968805 | 2.842385 | 1.044477 |
url = ('https://poloniex.com/public?command'
'=returnChartData¤cyPair={0}&start={1}'
'&end={2}&period={3}').format(symbol, start, end, period)
logger.debug(' HTTP Request URL:\n{0}'.format(url))
json = requests.get(url).json()
logger.debug(' JSON:\n{0}'.format(json))
if 'error' in json:
logger.error(' Invalid parameters in URL for HTTP response')
raise SystemExit
elif all(val == 0 for val in json[0]):
logger.error(' Bad HTTP response. Time unit too short?')
raise SystemExit
elif len(json) < 1: # time to short
logger.error(' Not enough dates to calculate changes')
raise SystemExit
return json, url | def chart_json(start, end, period, symbol) | Requests chart data from Poloniex API
Args:
start: Int epoch date to START getting market stats from.
Note that this epoch is FURTHER from the current date.
end: Int epoch date to STOP getting market stats from.
Note that this epoch is CLOSER to the current date.
period: Int defining width of each chart candlestick in seconds.
Valid values: 300, 900, 1800, 7200, 14400, 86400
symbol: String of currency pair, like a ticker symbol.
Returns:
Tuple of (JSON data, URL to JSON).
JSON data as a list of dict dates, where the keys are
the raw market statistics.
String URL to Poloniex API representing the given JSON. | 3.996554 | 4.12904 | 0.967914 |
changes = []
dates = len(json)
for date in range(1, dates):
last_close = json[date - 1]['close']
now_close = json[date]['close']
changes.append(now_close - last_close)
logger.debug('Market Changes (from JSON):\n{0}'.format(changes))
return changes | def parse_changes(json) | Gets price changes from JSON
Args:
json: JSON data as a list of dict dates, where the keys are
the raw market statistics.
Returns:
List of floats of price changes between entries in JSON. | 4.519928 | 4.076474 | 1.108784 |
res = {'gains': [], 'losses': []}
for change in changes:
if change > 0:
res['gains'].append(change)
else:
res['losses'].append(change * -1)
logger.debug('Gains: {0}'.format(res['gains']))
logger.debug('Losses: {0}'.format(res['losses']))
return res | def get_gains_losses(changes) | Categorizes changes into gains and losses
Args:
changes: List of floats of price changes between entries in JSON.
Returns:
Dict of changes with keys 'gains' and 'losses'.
All values are positive. | 1.951551 | 1.991947 | 0.97972 |
res = [json[entry][attr] for entry, _ in enumerate(json)]
logger.debug('{0}s (from JSON):\n{1}'.format(attr, res))
return res | def get_attribute(json, attr) | Gets the values of an attribute from JSON
Args:
json: JSON data as a list of dict dates, where the keys are
the raw market statistics.
attr: String of attribute in JSON file to collect.
Returns:
List of values of specified attribute from JSON | 7.023429 | 8.67731 | 0.809402 |
epochs = date.get_end_start_epochs(year, month, day, 'last', unit, count)
return chart_json(epochs['shifted'], epochs['initial'],
period, symbol)[0] | def get_json_shift(year, month, day, unit, count, period, symbol) | Gets JSON from shifted date by the Poloniex API
Args:
year: Int between 1 and 9999.
month: Int between 1 and 12.
day: Int between 1 and 31.
unit: String of time period unit for count argument.
How far back to check historical market data.
Valid values: 'hour', 'day', 'week', 'month', 'year'
count: Int of units.
How far back to check historical market data.
period: Int defining width of each chart candlestick in seconds.
symbol: String of currency pair, like a ticker symbol.
Returns: JSON, list of dates where each entry is a dict of raw market data. | 13.75563 | 16.293814 | 0.844224 |
for _, group in itertools.groupby(results, operator.itemgetter(0)):
yield next(group) | def filter_ranks(results) | Find just the first rank for all the results for a given tax_id. | 5.4416 | 5.171954 | 1.052136 |
if high - low == 0: # High and low are the same, zero division error
return 100 * (closing - low)
else:
return 100 * (closing - low) / (high - low) | def eval_algorithm(closing, low, high) | Evaluates the SO algorithm
Args:
closing: Float of current closing price.
low: Float of lowest low closing price throughout some duration.
high: Float of highest high closing price throughout some duration.
Returns:
Float SO between 0 and 100. | 3.936434 | 4.246295 | 0.927028 |
close = json[-1]['close'] # Latest closing price
low = min(poloniex.get_attribute(json, 'low')) # Lowest low
high = max(poloniex.get_attribute(json, 'high')) # Highest high
return SO.eval_algorithm(close, low, high) | def eval_from_json(json) | Evaluates SO from JSON (typically Poloniex API response)
Args:
json: List of dates where each entry is a dict of raw market data.
Returns:
Float SO between 0 and 100. | 6.660717 | 5.456916 | 1.220601 |
sum = 0
for v in vals:
sum += v
if count is None:
count = len(vals)
return float(sum) / count | def avg(vals, count=None) | Returns the average value
Args:
vals: List of numbers to calculate average from.
count: Int of total count that vals was part of.
Returns:
Float average value throughout a count. | 2.472703 | 3.209526 | 0.770426 |
if schema is None:
base = declarative_base()
else:
try:
engine.execute(sqlalchemy.schema.CreateSchema(schema))
except sqlalchemy.exc.ProgrammingError as err:
logging.warn(err)
base = declarative_base(metadata=MetaData(schema=schema))
define_schema(base)
if clobber:
logging.info('Clobbering database tables')
base.metadata.drop_all(bind=engine)
logging.info('Creating database tables')
base.metadata.create_all(bind=engine)
return base | def db_connect(engine, schema=None, clobber=False) | Create a connection object to a database. Attempt to establish a
schema. If there are existing tables, delete them if clobber is
True and return otherwise. Returns a sqlalchemy engine object. | 2.364954 | 2.53165 | 0.934155 |
ncbi_keys = ['tax_id', 'parent_id', 'rank', 'embl_code', 'division_id']
extra_keys = ['source_id', 'is_valid']
is_valid = True
ncbi_cols = len(ncbi_keys)
rank = ncbi_keys.index('rank')
parent_id = ncbi_keys.index('parent_id')
# assumes the first row is the root
row = next(rows)
row[rank] = 'root'
# parent must be None for termination of recursive CTE for
# calculating lineages
row[parent_id] = None
rows = itertools.chain([row], rows)
yield ncbi_keys + extra_keys
for row in rows:
# replace whitespace in "rank" with underscore
row[rank] = '_'.join(row[rank].split())
# provide default values for source_id and is_valid
yield row[:ncbi_cols] + [source_id, is_valid] | def read_nodes(rows, source_id=1) | Return an iterator of rows ready to insert into table "nodes".
* rows - iterator of lists (eg, output from read_archive or read_dmp) | 4.749639 | 4.57925 | 1.037209 |
ncbi_keys = ['tax_id', 'tax_name', 'unique_name', 'name_class']
extra_keys = ['source_id', 'is_primary', 'is_classified']
# is_classified applies to species only; we will set this value
# later
is_classified = None
tax_id = ncbi_keys.index('tax_id')
tax_name = ncbi_keys.index('tax_name')
unique_name = ncbi_keys.index('unique_name')
name_class = ncbi_keys.index('name_class')
yield ncbi_keys + extra_keys
for tid, grp in itertools.groupby(rows, itemgetter(tax_id)):
# confirm that each tax_id has exactly one scientific name
num_primary = 0
for r in grp:
is_primary = r[name_class] == 'scientific name'
# fix primary key uniqueness violation
if r[unique_name]:
r[tax_name] = r[unique_name]
num_primary += is_primary
yield (r + [source_id, is_primary, is_classified])
assert num_primary == 1 | def read_names(rows, source_id=1) | Return an iterator of rows ready to insert into table
"names". Adds columns "is_primary" (identifying the primary name
for each tax_id with a vaule of 1) and "is_classified" (always None).
* rows - iterator of lists (eg, output from read_archive or read_dmp)
* unclassified_regex - a compiled re matching "unclassified" names
From the NCBI docs:
Taxonomy names file (names.dmp):
tax_id -- the id of node associated with this name
name_txt -- name itself
unique name -- the unique variant of this name if name not unique
name class -- (synonym, common name, ...) | 3.922478 | 3.462138 | 1.132964 |
dest_dir = os.path.abspath(dest_dir)
try:
os.mkdir(dest_dir)
except OSError:
pass
fout = os.path.join(dest_dir, os.path.split(url)[-1])
if os.access(fout, os.F_OK) and not clobber:
downloaded = False
logging.info(fout + ' exists; not downloading')
else:
downloaded = True
logging.info('downloading {} to {}'.format(url, fout))
request.urlretrieve(url, fout)
return (fout, downloaded) | def fetch_data(dest_dir='.', clobber=False, url=DATA_URL) | Download data from NCBI required to generate local taxonomy
database. Default url is ncbi.DATA_URL
* dest_dir - directory in which to save output files (created if necessary).
* clobber - don't download if False and target of url exists in dest_dir
* url - url to archive; default is ncbi.DATA_URL
Returns (fname, downloaded), where fname is the name of the
downloaded zip archive, and downloaded is True if a new files was
downloaded, false otherwise.
see ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump_readme.txt | 2.234592 | 2.153458 | 1.037676 |
# Note that deduplication here is equivalent to an upsert/ignore,
# but avoids requirement for a database-specific implementation.
zfile = zipfile.ZipFile(archive)
contents = zfile.open(fname, 'r')
fobj = io.TextIOWrapper(contents)
seen = set()
for line in fobj:
line = line.rstrip('\t|\n')
if line not in seen:
yield line.split('\t|\t')
seen.add(line) | def read_archive(archive, fname) | Return an iterator of unique rows from a zip archive.
* archive - path to the zip archive.
* fname - name of the compressed file within the archive. | 5.798751 | 5.328074 | 1.088339 |
return '.'.join([self.schema, name]) if self.schema else name | def prepend_schema(self, name) | Prepend schema name to 'name' when a schema is specified | 6.377383 | 6.130764 | 1.040226 |
conn = self.engine.raw_connection()
cur = conn.cursor()
colnames = colnames or next(rows)
cmd = 'INSERT INTO {table} ({colnames}) VALUES ({placeholders})'.format(
table=self.tables[table],
colnames=', '.join(colnames),
placeholders=', '.join([self.placeholder] * len(colnames)))
cur.executemany(cmd, itertools.islice(rows, limit))
conn.commit() | def load_table(self, table, rows, colnames=None, limit=None) | Load 'rows' into table 'table'. If 'colnames' is not provided, the
first element of 'rows' must provide column names. | 2.515399 | 2.448653 | 1.027258 |
# source
self.load_table(
'source',
rows=[('ncbi', DATA_URL)],
colnames=['name', 'description'],
)
conn = self.engine.raw_connection()
cur = conn.cursor()
cmd = "select id from {source} where name = 'ncbi'".format(**self.tables)
cur.execute(cmd)
source_id = cur.fetchone()[0]
# ranks
log.info('loading ranks')
self.load_table(
'ranks',
rows=((rank, i) for i, rank in enumerate(RANKS)),
colnames=['rank', 'height'],
)
# nodes
logging.info('loading nodes')
nodes_rows = read_nodes(
read_archive(archive, 'nodes.dmp'), source_id=source_id)
self.load_table('nodes', rows=nodes_rows)
# names
logging.info('loading names')
names_rows = read_names(
read_archive(archive, 'names.dmp'), source_id=source_id)
self.load_table('names', rows=names_rows)
# merged
logging.info('loading merged')
merged_rows = read_merged(read_archive(archive, 'merged.dmp'))
self.load_table('merged', rows=merged_rows) | def load_archive(self, archive) | Load data from the zip archive of the NCBI taxonomy. | 2.709187 | 2.547845 | 1.063325 |
return Delorean(datetime=dt(year, month, day), timezone='UTC') | def date_to_delorean(year, month, day) | Converts date arguments to a Delorean instance in UTC
Args:
year: int between 1 and 9999.
month: int between 1 and 12.
day: int between 1 and 31.
Returns:
Delorean instance in UTC of date. | 5.4263 | 9.213726 | 0.588937 |
return int(date_to_delorean(year, month, day).epoch) | def date_to_epoch(year, month, day) | Converts a date to epoch in UTC
Args:
year: int between 1 and 9999.
month: int between 1 and 12.
day: int between 1 and 31.
Returns:
Int epoch in UTC from date. | 6.648227 | 14.236984 | 0.466969 |
return int(delorean._shift_date(direction, unit, count).epoch) | def shift_epoch(delorean, direction, unit, count) | Gets the resulting epoch after a time shift of a Delorean
Args:
delorean: Delorean datetime instance to shift from.
direction: String to shift time forwards or backwards.
Valid values: 'last', 'next'.
unit: String of time period unit for count argument.
What unit in direction should be shifted?
Valid values: 'hour', 'day', 'week', 'month', 'year'.
count: Int of units.
How many units to shift in direction?
Returns:
Int epoch in UTC from a shifted Delorean | 5.665673 | 11.941191 | 0.474465 |
for shift in range(count):
yield int(delorean._shift_date(direction, unit, shift).epoch) | def generate_epochs(delorean, direction, unit, count) | Generates epochs from a shifted Delorean instance
Args:
delorean: Delorean datetime instance to shift from.
direction: String to shift time forwards or backwards.
Valid values: 'last', 'next'.
unit: String of time period unit for count argument.
What unit in direction should be shifted?
Valid values: 'hour', 'day', 'week', 'month', 'year'.
count: Int of units.
How many units to shift in direction?
Returns:
Generator of count int epochs in UTC from a shifted Delorean | 6.055884 | 7.610402 | 0.795738 |
if year or month or day: # Date is specified
if not year:
year = 2017
if not month:
month = 1
if not day:
day = 1
initial_delorean = date_to_delorean(year, month, day)
else: # Date is not specified, get current date
count += 1 # Get another date because market is still open
initial_delorean = now_delorean()
initial_epoch = int(initial_delorean.epoch)
shifted_epoch = shift_epoch(initial_delorean, direction, unit, count)
return { 'initial': initial_epoch, 'shifted': shifted_epoch } | def get_end_start_epochs(year, month, day, direction, unit, count) | Gets epoch from a start date and epoch from a shifted date
Args:
year: Int between 1 and 9999.
month: Int between 1 and 12.
day: Int between 1 and 31.
direction: String to shift time forwards or backwards.
Valid values: 'last', 'next'.
unit: String of time period unit for count argument.
How far back to check historical market data.
Valid values: 'hour', 'day', 'week', 'month', 'year'.
count: Int of units.
How far back to check historical market data?
Returns:
Dict of int epochs in UTC with keys 'initial' and 'shifted' | 3.665261 | 3.637011 | 1.007767 |
assert child != self
child.parent = self
child.ranks = self.ranks
child.index = self.index
assert child.tax_id not in self.index
self.index[child.tax_id] = child
self.children.add(child) | def add_child(self, child) | Add a child to this node. | 3.955246 | 3.689059 | 1.072156 |
assert child in self.children
self.children.remove(child)
self.index.pop(child.tax_id)
if child.parent is self:
child.parent = None
if child.index is self.index:
child.index = None
# Remove child subtree from index
for n in child:
if n is child:
continue
self.index.pop(n.tax_id)
if n.index is self.index:
n.index = None | def remove_child(self, child) | Remove a child from this node. | 3.029916 | 2.857254 | 1.060429 |
if self.is_root:
raise ValueError("Cannot drop root node!")
parent = self.parent
for child in self.children:
child.parent = parent
parent.children.add(child)
self.children = set()
parent.sequence_ids.update(self.sequence_ids)
self.sequence_ids = set()
parent.remove_child(self) | def drop(self) | Remove this node from the taxonomy, maintaining child subtrees by
adding them to the node's parent, and moving sequences at this node
to the parent.
Not valid for root node. | 3.291667 | 2.515982 | 1.308303 |
for node in self.depth_first_iter(self_first=False):
if (not node.children and
not node.sequence_ids and
node is not self):
node.parent.remove_child(node) | def prune_unrepresented(self) | Remove nodes without sequences or children below this node. | 6.842136 | 4.801699 | 1.424941 |
s = self
while s:
if s.rank == rank:
return s
s = s.parent
raise ValueError("No node at rank {0} for {1}".format(
rank, self.tax_id)) | def at_rank(self, rank) | Find the node above this node at rank ``rank`` | 4.527223 | 4.069249 | 1.112545 |
if self_first:
yield self
for child in list(self.children):
for i in child.depth_first_iter(self_first):
yield i
if not self_first:
yield self | def depth_first_iter(self, self_first=True) | Iterate over nodes below this node, optionally yielding children before
self. | 2.25521 | 2.085345 | 1.081457 |
assert tax_ids[0] == self.tax_id
if len(tax_ids) == 1:
return self
n = tax_ids[1]
try:
child = next(i for i in self.children if i.tax_id == n)
except StopIteration:
raise ValueError(n)
return child.path(tax_ids[1:]) | def path(self, tax_ids) | Get the node at the end of the path described by tax_ids. | 2.764449 | 2.496443 | 1.107355 |
if not self.parent:
return [self]
else:
L = self.parent.lineage()
L.append(self)
return L | def lineage(self) | Return all nodes between this node and the root, including this one. | 3.447517 | 2.427375 | 1.420266 |
ranks_represented = frozenset(i.rank for i in self) | \
frozenset(i.rank for i in self.lineage())
ranks = [i for i in self.ranks if i in ranks_represented]
assert len(ranks_represented) == len(ranks)
def node_record(node):
parent_id = node.parent.tax_id if node.parent else node.tax_id
d = {'tax_id': node.tax_id,
'tax_name': node.name,
'parent_id': parent_id,
'rank': node.rank}
L = {i.rank: i.tax_id for i in node.lineage()}
d.update(L)
return d
header = ['tax_id', 'parent_id', 'rank', 'tax_name'] + ranks
w = csv.DictWriter(out_fp, header, quoting=csv.QUOTE_NONNUMERIC,
lineterminator='\n')
w.writeheader()
# All nodes leading to this one
for i in self.lineage()[:-1]:
w.writerow(node_record(i))
w.writerows(node_record(i) for i in self) | def write_taxtable(self, out_fp, **kwargs) | Write a taxtable for this node and all descendants,
including the lineage leading to this node. | 2.888235 | 2.667843 | 1.082611 |
for row in csv.DictReader(seqinfo):
node = self.index.get(row['tax_id'])
if node:
node.sequence_ids.add(row['seqname']) | def populate_from_seqinfo(self, seqinfo) | Populate sequence_ids below this node from a seqinfo file object. | 6.068035 | 4.272031 | 1.42041 |
descendants = iter(self)
# Skip this node
assert next(descendants) is self
for descendant in descendants:
self.sequence_ids.update(descendant.sequence_ids)
descendant.sequence_ids.clear()
if remove:
for node in self.children:
self.remove_child(node) | def collapse(self, remove=False) | Move all ``sequence_ids`` in the subtree below this node to this node.
If ``remove`` is True, nodes below this one are deleted from the
taxonomy. | 4.326859 | 3.844297 | 1.125527 |
header = ['seqname', 'tax_id']
if include_name:
header.append('tax_name')
w = csv.DictWriter(out_fp, header, quoting=csv.QUOTE_NONNUMERIC,
lineterminator='\n', extrasaction='ignore')
w.writeheader()
rows = ({'seqname': seq_id,
'tax_id': node.tax_id,
'tax_name': node.name}
for node in self
for seq_id in node.sequence_ids)
w.writerows(rows) | def write_seqinfo(self, out_fp, include_name=True) | Write a simple seq_info file, suitable for use in taxtastic.
Useful for printing out the results of collapsing tax nodes - super
bare bones, just tax_id and seqname.
If include_name is True, a column with the taxon name is included. | 2.783122 | 2.470942 | 1.126341 |
r = csv.reader(taxtable_fp)
headers = next(r)
rows = (collections.OrderedDict(list(zip(headers, i))) for i in r)
row = next(rows)
root = cls(rank=row['rank'], tax_id=row[
'tax_id'], name=row['tax_name'])
path_root = headers.index('root')
root.ranks = headers[path_root:]
for row in rows:
rank, tax_id, name = [
row[i] for i in ('rank', 'tax_id', 'tax_name')]
path = [_f for _f in list(row.values())[path_root:] if _f]
parent = root.path(path[:-1])
parent.add_child(cls(rank, tax_id, name=name))
return root | def from_taxtable(cls, taxtable_fp) | Generate a node from an open handle to a taxtable, as generated by
``taxit taxtable`` | 3.561902 | 3.571021 | 0.997447 |
cursor = con.cursor()
if root is None:
cursor.execute(
"SELECT tax_id, rank FROM nodes WHERE tax_id = parent_id")
else:
cursor.execute(
"SELECT tax_id, rank FROM nodes WHERE tax_id = ?", [root])
tax_id, rank = cursor.fetchone()
root = cls(rank=rank, tax_id=tax_id)
def add_lineage(parent):
cursor.execute(, [parent.tax_id])
for tax_id, rank, name in cursor:
node = cls(rank=rank, tax_id=tax_id, name=name)
parent.add_child(node)
for child in parent.children:
add_lineage(child)
add_lineage(root)
return root | def from_taxdb(cls, con, root=None) | Generate a TaxNode from a taxonomy database | 2.278156 | 2.201107 | 1.035005 |
parts = host.split(':')
address = ':'.join(parts[:-1])
try:
port = int(parts[-1])
except ValueError:
port = None
if not port or port < 1 or port > 65535:
raise ValueError("Invalid port number '%s'" % port)
return address, port | def parse_host(host) | Parse *host* in format ``"[hostname:]port"`` and return :class:`tuple`
``(address, port)``.
>>> parse_host('localhost:4444')
('localhost', 4444)
>>> parse_host(':4444')
('', 4444)
>>> parse_host('4444')
('', 4444)
>>> parse_host('2001:db8::1428:57ab:4444')
('2001:db8::1428:57ab', 4444)
>>> parse_host('localhost')
ValueError: Invalid port number 'localhost' | 2.2916 | 2.344236 | 0.977547 |
if not parent_ids:
return []
nodes = schema + '.nodes' if schema else 'nodes'
names = schema + '.names' if schema else 'names'
cmd = ('select tax_id, tax_name, rank '
'from {} join {} using (tax_id) '
'where parent_id = :tax_id and is_primary').format(nodes, names)
species = []
for parent_id in parent_ids:
result = engine.execute(sqlalchemy.sql.text(cmd), tax_id=parent_id)
keys = list(result.keys())
rows = [dict(list(zip(keys, row))) for row in result.fetchall()]
for r in rows:
if r['rank'] == rank and 'sp.' not in r['tax_name']:
species.append(r)
others = [r for r in rows if r['rank'] not in (rank, 'no_rank')]
if others:
_, s = get_children(engine, [r['tax_id'] for r in others])
species.extend(s)
return keys, species | def get_children(engine, parent_ids, rank='species', schema=None) | Recursively fetch children of tax_ids in `parent_ids` until the
rank of `rank` | 3.075901 | 3.069138 | 1.002204 |
return (
('settings', self.settings),
('context_class', self.context_class),
('interfaces', self.interfaces),
('logging', self.logging),
('name', self.name),
('init_handler', self.init_handler),
('sigusr1_handler', self.sigusr1_handler),
('sigusr2_handler', self.sigusr2_handler),
) | def get_config_items(self) | Return current configuration as a :class:`tuple` with
option-value pairs.
::
(('option1', value1), ('option2', value2)) | 3.033105 | 3.064859 | 0.989639 |
if 'context_class' not in self._cached_values:
context_cls_name = getattr(self.settings, 'CONTEXT_CLASS', '')
if context_cls_name:
context_class = import_object(context_cls_name)
else:
context_class = Context
self._cached_values['context_class'] = context_class
return self._cached_values['context_class'] | def context_class(self) | Context as a :class:`shelter.core.context.Context` class or subclass. | 2.372105 | 2.171319 | 1.092472 |
if 'interfaces' not in self._cached_values:
self._cached_values['interfaces'] = []
for name, interface in six.iteritems(self.settings.INTERFACES):
listen = interface.get('LISTEN')
unix_socket = interface.get('UNIX_SOCKET')
if not listen and not unix_socket:
raise ValueError(
'Interface MUST listen either on TCP '
'or UNIX socket or both')
host, port = parse_host(listen) if listen else (None, None)
processes = int(interface.get('PROCESSES', 1))
urls_obj_name = interface.get('URLS', '')
if urls_obj_name:
urls = import_object(urls_obj_name)
else:
urls = ()
self._cached_values['interfaces'].append(
self.Interface(
name, host, port, unix_socket, processes, urls)
)
return self._cached_values['interfaces'] | def interfaces(self) | Interfaces as a :class:`list`of the
:class:`shelter.core.config.Config.Interface` instances. | 3.224209 | 3.106827 | 1.037782 |
log.info('loading reference package')
pairs = [p.split('=', 1) for p in args.changes]
if args.metadata:
rp = refpkg.Refpkg(args.refpkg, create=False)
rp.start_transaction()
for key, value in pairs:
rp.update_metadata(key, value)
rp.commit_transaction('Updated metadata: ' +
', '.join(['%s=%s' % (a, b)
for a, b in pairs]))
else:
for key, filename in pairs:
if not(os.path.exists(filename)):
print("No such file: %s" % filename)
exit(1)
rp = refpkg.Refpkg(args.refpkg, create=False)
rp.start_transaction()
for key, filename in pairs:
if key == 'tree_stats':
with warnings.catch_warnings():
warnings.simplefilter(
"ignore", refpkg.DerivedFileNotUpdatedWarning)
rp.update_file(key, os.path.abspath(filename))
# Trigger model update
log.info('Updating phylo_model to match tree_stats')
rp.update_phylo_model(args.stats_type, filename,
args.frequency_type)
else:
rp.update_file(key, os.path.abspath(filename))
rp.commit_transaction('Updates files: ' +
', '.join(['%s=%s' % (a, b)
for a, b in pairs]))
return 0 | def action(args) | Updates a Refpkg with new files.
*args* should be an argparse object with fields refpkg (giving the
path to the refpkg to operate on) and changes (a series of strings
of the form 'key=file' giving the key to update in the refpkg and
the file to store under that key)." | 3.486638 | 3.026599 | 1.151999 |
parts = name.split('.')
if len(parts) < 2:
raise ValueError("Invalid name '%s'" % name)
module_name = ".".join(parts[:-1])
obj_name = parts[-1]
module = importlib.import_module(module_name)
return getattr(module, obj_name) | def import_object(name) | Import module and return object from it. *name* is :class:`str` in
format ``module.path.ObjectClass``.
::
>>> import_command('module.path.ObjectClass')
<class 'module.path.ObjectClass'> | 1.779508 | 2.170672 | 0.819796 |
parser = argparse.ArgumentParser(description=DESCRIPTION)
base_parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-V', '--version', action='version',
version='taxit v' + version,
help='Print the version number and exit')
parser.add_argument('-v', '--verbose',
action='count', dest='verbosity', default=1,
help='Increase verbosity of screen output (eg, -v is verbose, '
'-vv more so)')
parser.add_argument('-q', '--quiet',
action='store_const', dest='verbosity', const=0,
help='Suppress output')
##########################
# Setup all sub-commands #
##########################
subparsers = parser.add_subparsers(dest='subparser_name')
# Begin help sub-command
parser_help = subparsers.add_parser(
'help', help='Detailed help for actions using `help <action>`')
parser_help.add_argument('action', nargs=1)
# End help sub-command
actions = {}
for name, mod in subcommands.itermodules(
os.path.split(subcommands.__file__)[0]):
# set up subcommand help text. The first line of the dosctring
# in the module is displayed as the help text in the
# script-level help message (`script -h`). The entire
# docstring is displayed in the help message for the
# individual subcommand ((`script action -h`)).
subparser = subparsers.add_parser(
name,
prog='taxit {}'.format(name),
help=mod.__doc__.lstrip().split('\n', 1)[0],
description=mod.__doc__,
formatter_class=RawDescriptionHelpFormatter,
parents=[base_parser])
mod.build_parser(subparser)
actions[name] = mod.action
# Determine we have called ourself (e.g. "help <action>")
# Set arguments to display help if parameter is set
# *or*
# Set arguments to perform an action with any specified options.
arguments = parser.parse_args(argv)
# Determine which action is in play.
action = arguments.subparser_name
# Support help <action> by simply having this function call itself and
# translate the arguments into something that argparse can work with.
if action == 'help':
return parse_arguments([str(arguments.action[0]), '-h'])
return actions[action], arguments | def parse_arguments(argv) | Create the argument parser | 4.714449 | 4.687517 | 1.005746 |
Session = sessionmaker(bind=self.engine)
session = Session()
try:
for statement in statements:
session.execute(statement)
except exc as err:
session.rollback()
raise rasie_as(str(err))
else:
session.commit()
finally:
session.close() | def execute(self, statements, exc=IntegrityError, rasie_as=ValueError) | Execute ``statements`` in a session, and perform a rollback on
error. ``exc`` is a single exception object or a tuple of
objects to be used in the except clause. The error message is
re-raised as the exception specified by ``raise_as``. | 1.890742 | 1.961812 | 0.963773 |
s = select([self.nodes.c.parent_id, self.nodes.c.rank],
self.nodes.c.tax_id == tax_id)
res = s.execute()
output = res.fetchone()
if not output:
msg = 'value "{}" not found in nodes.tax_id'.format(tax_id)
raise ValueError(msg)
else:
return output | def _node(self, tax_id) | Returns parent_id, rank
FIXME: expand return rank to include custom 'below' ranks built when
get_lineage is caled | 3.216457 | 2.846644 | 1.129912 |
s = select([self.names.c.tax_name],
and_(self.names.c.tax_id == tax_id,
self.names.c.is_primary))
res = s.execute()
output = res.fetchone()
if not output:
msg = 'value "{}" not found in names.tax_id'.format(tax_id)
raise ValueError(msg)
else:
return output[0] | def primary_from_id(self, tax_id) | Returns primary taxonomic name associated with tax_id | 3.180064 | 2.965457 | 1.072369 |
names = self.names
s1 = select([names.c.tax_id, names.c.is_primary],
names.c.tax_name == tax_name)
log.debug(str(s1))
res = s1.execute().fetchone()
if res:
tax_id, is_primary = res
else:
msg = '"{}" not found in names.tax_names'.format(tax_name)
raise ValueError(msg)
if not is_primary:
s2 = select([names.c.tax_name],
and_(names.c.tax_id == tax_id,
names.c.is_primary))
tax_name = s2.execute().fetchone()[0]
return tax_id, tax_name, bool(is_primary) | def primary_from_name(self, tax_name) | Return tax_id and primary tax_name corresponding to tax_name. | 2.515228 | 2.449901 | 1.026665 |
cmd = .format(x=self.placeholder, merged=self.merged)
with self.engine.connect() as con:
result = con.execute(cmd, (tax_id, tax_id))
return result.fetchone()[0] | def _get_merged(self, tax_id) | Returns tax_id into which `tax_id` has been merged or `tax_id` of
not obsolete. | 6.539242 | 5.835496 | 1.120597 |
# Be sure we aren't working with an obsolete tax_id
if merge_obsolete:
tax_id = self._get_merged(tax_id)
# Note: joining with ranks seems like a no-op, but for some
# reason it results in a faster query using sqlite, as well as
# an ordering from leaf --> root. Might be a better idea to
# sort explicitly if this is the expected behavior, but it
# seems like for the most part, the lineage is converted to a
# dict and the order is irrelevant.
cmd = .format(self.placeholder, nodes=self.nodes, ranks=self.ranks_table)
# with some versions of sqlite3, an error is raised when no
# rows are returned; with others, an empty list is returned.
try:
with self.engine.connect() as con:
result = con.execute(cmd, (tax_id,))
# reorder so that root is first
lineage = result.fetchall()[::-1]
except sqlalchemy.exc.ResourceClosedError:
lineage = []
if not lineage:
raise ValueError('tax id "{}" not found'.format(tax_id))
return lineage | def _get_lineage(self, tax_id, merge_obsolete=True) | Return a list of [(rank, tax_id)] describing the lineage of
tax_id. If ``merge_obsolete`` is True and ``tax_id`` has been
replaced, use the corresponding value in table merged. | 6.958379 | 6.742452 | 1.032025 |
try:
with self.engine.connect() as con:
# insert tax_ids into a temporary table
temptab = self.prepend_schema(random_name(12))
cmd = 'CREATE TEMPORARY TABLE "{tab}" (old_tax_id text)'.format(
tab=temptab)
con.execute(cmd)
log.info('inserting tax_ids into temporary table')
# TODO: couldn't find an equivalent of "executemany" - does one exist?
cmd = 'INSERT INTO "{tab}" VALUES ({x})'.format(
tab=temptab, x=self.placeholder)
for tax_id in tax_ids:
con.execute(cmd, tax_id)
log.info('executing recursive CTE')
cmd = Template().render(
temptab=temptab,
merge_obsolete=merge_obsolete,
merged=self.merged,
nodes=self.nodes,
names=self.names,
)
result = con.execute(cmd)
rows = result.fetchall()
con.execute('DROP TABLE "{}"'.format(temptab))
log.info('returning lineages')
if not rows:
raise ValueError('no tax_ids were found')
else:
returned = {row[0] for row in rows}
# TODO: compare set membership, not lengths
if len(returned) < len(tax_ids):
msg = ('{} tax_ids were provided '
'but only {} were returned').format(
len(tax_ids), len(returned))
log.error('Input tax_ids not represented in output:')
log.error(sorted(set(tax_ids) - returned))
raise ValueError(msg)
return rows
except sqlalchemy.exc.ResourceClosedError:
raise ValueError('tax id "{}" not found'.format(tax_id)) | def _get_lineage_table(self, tax_ids, merge_obsolete=True) | Return a list of [(rank, tax_id, tax_name)] describing the lineage
of tax_id. If ``merge_obsolete`` is True and ``tax_id`` has
been replaced, use the corresponding value in table merged. | 3.925654 | 3.937801 | 0.996915 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.