sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def action(args):
"""Updates a Refpkg with new files.
*args* should be an argparse object with fields refpkg (giving the
path to the refpkg to operate on) and changes (a series of strings
of the form 'key=file' giving the key to update in the refpkg and
the file to store under that key)."
"""
log.info('loading reference package')
pairs = [p.split('=', 1) for p in args.changes]
if args.metadata:
rp = refpkg.Refpkg(args.refpkg, create=False)
rp.start_transaction()
for key, value in pairs:
rp.update_metadata(key, value)
rp.commit_transaction('Updated metadata: ' +
', '.join(['%s=%s' % (a, b)
for a, b in pairs]))
else:
for key, filename in pairs:
if not(os.path.exists(filename)):
print("No such file: %s" % filename)
exit(1)
rp = refpkg.Refpkg(args.refpkg, create=False)
rp.start_transaction()
for key, filename in pairs:
if key == 'tree_stats':
with warnings.catch_warnings():
warnings.simplefilter(
"ignore", refpkg.DerivedFileNotUpdatedWarning)
rp.update_file(key, os.path.abspath(filename))
# Trigger model update
log.info('Updating phylo_model to match tree_stats')
rp.update_phylo_model(args.stats_type, filename,
args.frequency_type)
else:
rp.update_file(key, os.path.abspath(filename))
rp.commit_transaction('Updates files: ' +
', '.join(['%s=%s' % (a, b)
for a, b in pairs]))
return 0 | Updates a Refpkg with new files.
*args* should be an argparse object with fields refpkg (giving the
path to the refpkg to operate on) and changes (a series of strings
of the form 'key=file' giving the key to update in the refpkg and
the file to store under that key)." | entailment |
def import_object(name):
"""
Import module and return object from it. *name* is :class:`str` in
format ``module.path.ObjectClass``.
::
>>> import_command('module.path.ObjectClass')
<class 'module.path.ObjectClass'>
"""
parts = name.split('.')
if len(parts) < 2:
raise ValueError("Invalid name '%s'" % name)
module_name = ".".join(parts[:-1])
obj_name = parts[-1]
module = importlib.import_module(module_name)
return getattr(module, obj_name) | Import module and return object from it. *name* is :class:`str` in
format ``module.path.ObjectClass``.
::
>>> import_command('module.path.ObjectClass')
<class 'module.path.ObjectClass'> | entailment |
def parse_arguments(argv):
"""Create the argument parser
"""
parser = argparse.ArgumentParser(description=DESCRIPTION)
base_parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-V', '--version', action='version',
version='taxit v' + version,
help='Print the version number and exit')
parser.add_argument('-v', '--verbose',
action='count', dest='verbosity', default=1,
help='Increase verbosity of screen output (eg, -v is verbose, '
'-vv more so)')
parser.add_argument('-q', '--quiet',
action='store_const', dest='verbosity', const=0,
help='Suppress output')
##########################
# Setup all sub-commands #
##########################
subparsers = parser.add_subparsers(dest='subparser_name')
# Begin help sub-command
parser_help = subparsers.add_parser(
'help', help='Detailed help for actions using `help <action>`')
parser_help.add_argument('action', nargs=1)
# End help sub-command
actions = {}
for name, mod in subcommands.itermodules(
os.path.split(subcommands.__file__)[0]):
# set up subcommand help text. The first line of the dosctring
# in the module is displayed as the help text in the
# script-level help message (`script -h`). The entire
# docstring is displayed in the help message for the
# individual subcommand ((`script action -h`)).
subparser = subparsers.add_parser(
name,
prog='taxit {}'.format(name),
help=mod.__doc__.lstrip().split('\n', 1)[0],
description=mod.__doc__,
formatter_class=RawDescriptionHelpFormatter,
parents=[base_parser])
mod.build_parser(subparser)
actions[name] = mod.action
# Determine we have called ourself (e.g. "help <action>")
# Set arguments to display help if parameter is set
# *or*
# Set arguments to perform an action with any specified options.
arguments = parser.parse_args(argv)
# Determine which action is in play.
action = arguments.subparser_name
# Support help <action> by simply having this function call itself and
# translate the arguments into something that argparse can work with.
if action == 'help':
return parse_arguments([str(arguments.action[0]), '-h'])
return actions[action], arguments | Create the argument parser | entailment |
def execute(self, statements, exc=IntegrityError, rasie_as=ValueError):
"""Execute ``statements`` in a session, and perform a rollback on
error. ``exc`` is a single exception object or a tuple of
objects to be used in the except clause. The error message is
re-raised as the exception specified by ``raise_as``.
"""
Session = sessionmaker(bind=self.engine)
session = Session()
try:
for statement in statements:
session.execute(statement)
except exc as err:
session.rollback()
raise rasie_as(str(err))
else:
session.commit()
finally:
session.close() | Execute ``statements`` in a session, and perform a rollback on
error. ``exc`` is a single exception object or a tuple of
objects to be used in the except clause. The error message is
re-raised as the exception specified by ``raise_as``. | entailment |
def _node(self, tax_id):
"""
Returns parent_id, rank
FIXME: expand return rank to include custom 'below' ranks built when
get_lineage is caled
"""
s = select([self.nodes.c.parent_id, self.nodes.c.rank],
self.nodes.c.tax_id == tax_id)
res = s.execute()
output = res.fetchone()
if not output:
msg = 'value "{}" not found in nodes.tax_id'.format(tax_id)
raise ValueError(msg)
else:
return output | Returns parent_id, rank
FIXME: expand return rank to include custom 'below' ranks built when
get_lineage is caled | entailment |
def primary_from_id(self, tax_id):
"""
Returns primary taxonomic name associated with tax_id
"""
s = select([self.names.c.tax_name],
and_(self.names.c.tax_id == tax_id,
self.names.c.is_primary))
res = s.execute()
output = res.fetchone()
if not output:
msg = 'value "{}" not found in names.tax_id'.format(tax_id)
raise ValueError(msg)
else:
return output[0] | Returns primary taxonomic name associated with tax_id | entailment |
def primary_from_name(self, tax_name):
"""
Return tax_id and primary tax_name corresponding to tax_name.
"""
names = self.names
s1 = select([names.c.tax_id, names.c.is_primary],
names.c.tax_name == tax_name)
log.debug(str(s1))
res = s1.execute().fetchone()
if res:
tax_id, is_primary = res
else:
msg = '"{}" not found in names.tax_names'.format(tax_name)
raise ValueError(msg)
if not is_primary:
s2 = select([names.c.tax_name],
and_(names.c.tax_id == tax_id,
names.c.is_primary))
tax_name = s2.execute().fetchone()[0]
return tax_id, tax_name, bool(is_primary) | Return tax_id and primary tax_name corresponding to tax_name. | entailment |
def _get_merged(self, tax_id):
"""Returns tax_id into which `tax_id` has been merged or `tax_id` of
not obsolete.
"""
cmd = """
SELECT COALESCE(
(SELECT new_tax_id FROM {merged}
WHERE old_tax_id = {x}), {x})
""".format(x=self.placeholder, merged=self.merged)
with self.engine.connect() as con:
result = con.execute(cmd, (tax_id, tax_id))
return result.fetchone()[0] | Returns tax_id into which `tax_id` has been merged or `tax_id` of
not obsolete. | entailment |
def _get_lineage(self, tax_id, merge_obsolete=True):
"""Return a list of [(rank, tax_id)] describing the lineage of
tax_id. If ``merge_obsolete`` is True and ``tax_id`` has been
replaced, use the corresponding value in table merged.
"""
# Be sure we aren't working with an obsolete tax_id
if merge_obsolete:
tax_id = self._get_merged(tax_id)
# Note: joining with ranks seems like a no-op, but for some
# reason it results in a faster query using sqlite, as well as
# an ordering from leaf --> root. Might be a better idea to
# sort explicitly if this is the expected behavior, but it
# seems like for the most part, the lineage is converted to a
# dict and the order is irrelevant.
cmd = """
WITH RECURSIVE a AS (
SELECT tax_id, parent_id, rank
FROM {nodes}
WHERE tax_id = {}
UNION ALL
SELECT p.tax_id, p.parent_id, p.rank
FROM a JOIN {nodes} p ON a.parent_id = p.tax_id
)
SELECT a.rank, a.tax_id FROM a
JOIN {ranks} using(rank)
""".format(self.placeholder, nodes=self.nodes, ranks=self.ranks_table)
# with some versions of sqlite3, an error is raised when no
# rows are returned; with others, an empty list is returned.
try:
with self.engine.connect() as con:
result = con.execute(cmd, (tax_id,))
# reorder so that root is first
lineage = result.fetchall()[::-1]
except sqlalchemy.exc.ResourceClosedError:
lineage = []
if not lineage:
raise ValueError('tax id "{}" not found'.format(tax_id))
return lineage | Return a list of [(rank, tax_id)] describing the lineage of
tax_id. If ``merge_obsolete`` is True and ``tax_id`` has been
replaced, use the corresponding value in table merged. | entailment |
def _get_lineage_table(self, tax_ids, merge_obsolete=True):
"""Return a list of [(rank, tax_id, tax_name)] describing the lineage
of tax_id. If ``merge_obsolete`` is True and ``tax_id`` has
been replaced, use the corresponding value in table merged.
"""
try:
with self.engine.connect() as con:
# insert tax_ids into a temporary table
temptab = self.prepend_schema(random_name(12))
cmd = 'CREATE TEMPORARY TABLE "{tab}" (old_tax_id text)'.format(
tab=temptab)
con.execute(cmd)
log.info('inserting tax_ids into temporary table')
# TODO: couldn't find an equivalent of "executemany" - does one exist?
cmd = 'INSERT INTO "{tab}" VALUES ({x})'.format(
tab=temptab, x=self.placeholder)
for tax_id in tax_ids:
con.execute(cmd, tax_id)
log.info('executing recursive CTE')
cmd = Template("""
WITH RECURSIVE a AS (
SELECT tax_id as tid, 1 AS ord, tax_id, parent_id, rank
FROM {{ nodes }}
WHERE tax_id in (
{% if merge_obsolete %}
SELECT COALESCE(m.new_tax_id, "{{ temptab }}".old_tax_id)
FROM "{{ temptab }}" LEFT JOIN {{ merged }} m USING(old_tax_id)
{% else %}
SELECT * from "{{ temptab }}"
{% endif %}
)
UNION ALL
SELECT a.tid, a.ord + 1, p.tax_id, p.parent_id, p.rank
FROM a JOIN {{ nodes }} p ON a.parent_id = p.tax_id
)
SELECT a.tid, a.tax_id, a.parent_id, a.rank, tax_name FROM a
JOIN {{ names }} using(tax_id)
WHERE names.is_primary
ORDER BY tid, ord desc
""").render(
temptab=temptab,
merge_obsolete=merge_obsolete,
merged=self.merged,
nodes=self.nodes,
names=self.names,
)
result = con.execute(cmd)
rows = result.fetchall()
con.execute('DROP TABLE "{}"'.format(temptab))
log.info('returning lineages')
if not rows:
raise ValueError('no tax_ids were found')
else:
returned = {row[0] for row in rows}
# TODO: compare set membership, not lengths
if len(returned) < len(tax_ids):
msg = ('{} tax_ids were provided '
'but only {} were returned').format(
len(tax_ids), len(returned))
log.error('Input tax_ids not represented in output:')
log.error(sorted(set(tax_ids) - returned))
raise ValueError(msg)
return rows
except sqlalchemy.exc.ResourceClosedError:
raise ValueError('tax id "{}" not found'.format(tax_id)) | Return a list of [(rank, tax_id, tax_name)] describing the lineage
of tax_id. If ``merge_obsolete`` is True and ``tax_id`` has
been replaced, use the corresponding value in table merged. | entailment |
def lineage(self, tax_id=None, tax_name=None):
"""Public method for returning a lineage; includes tax_name and rank
"""
if not bool(tax_id) ^ bool(tax_name):
msg = 'Exactly one of tax_id and tax_name may be provided.'
raise ValueError(msg)
if tax_name:
tax_id, primary_name, is_primary = self.primary_from_name(tax_name)
else:
primary_name = None
# assumes stable ordering of lineage from root --> leaf
lintups = self._get_lineage(tax_id)
ldict = dict(lintups)
ldict['tax_id'] = tax_id
try:
# parent is second to last element, except for root
__, ldict['parent_id'] = lintups[-2]
except IndexError:
ldict['parent_id'] = None
ldict['rank'], __ = lintups[-1] # this taxon is last element in lineage
ldict['tax_name'] = primary_name or self.primary_from_id(tax_id)
return ldict | Public method for returning a lineage; includes tax_name and rank | entailment |
def add_source(self, source_name, description=None):
"""Adds a row to table "source" if "name" does not
exist. Returns (source_id, True) if a new row is created,
(source_id, False) otherwise.
"""
# TODO: shoud be able to do this inside a transaction
if not source_name:
raise ValueError('"source_name" may not be None or an empty string')
sel = select([self.source.c.id], self.source.c.name == source_name).execute()
result = sel.fetchone()
if result:
return result[0], False
else:
ins = self.source.insert().execute(
name=source_name, description=description)
return ins.inserted_primary_key[0], True | Adds a row to table "source" if "name" does not
exist. Returns (source_id, True) if a new row is created,
(source_id, False) otherwise. | entailment |
def get_source(self, source_id=None, source_name=None):
"""Returns a dict with keys ['id', 'name', 'description'] or None if
no match. The ``id`` field is guaranteed to be an int that
exists in table source. Requires exactly one of ``source_id``
or ``source_name``. A new source corresponding to
``source_name`` is created if necessary.
"""
if not (bool(source_id) ^ bool(source_name)):
raise ValueError('exactly one of source_id or source_name is required')
if source_id:
try:
source_id = int(source_id)
except (ValueError, TypeError):
raise ValueError(
'source_id must be an int or a string representing one')
sel = select([self.source], self.source.c.id == source_id).execute()
else:
sel = select([self.source], self.source.c.name == source_name).execute()
result = sel.fetchone()
if not result:
raise ValueError(
'there is no source with id {} or name {}'.format(
source_id, source_name))
return dict(list(zip(list(sel.keys()), result))) | Returns a dict with keys ['id', 'name', 'description'] or None if
no match. The ``id`` field is guaranteed to be an int that
exists in table source. Requires exactly one of ``source_id``
or ``source_name``. A new source corresponding to
``source_name`` is created if necessary. | entailment |
def verify_rank_integrity(self, tax_id, rank, parent_id, children):
"""Confirm that for each node the parent ranks and children ranks are
coherent
"""
def _lower(n1, n2):
return self.ranks.index(n1) < self.ranks.index(n2)
if rank not in self.ranks:
raise TaxonIntegrityError('rank "{}" is undefined'.format(rank))
parent_rank = self.rank(parent_id)
# undefined ranks can be placed anywhere in a lineage
if not _lower(rank, parent_rank) and rank != self.NO_RANK:
msg = ('New node "{}", rank "{}" has same or '
'higher rank than parent node "{}", rank "{}"')
msg = msg.format(tax_id, rank, parent_id, parent_rank)
raise TaxonIntegrityError(msg)
for child in children:
if not _lower(self.rank(child), rank):
msg = 'Child node {} has same or lower rank as new node {}'
msg = msg.format(tax_id, child)
raise TaxonIntegrityError(msg)
return True | Confirm that for each node the parent ranks and children ranks are
coherent | entailment |
def add_node(self, tax_id, parent_id, rank, names, source_name, children=None,
is_valid=True, execute=True, **ignored):
"""Add a node to the taxonomy.
``source_name`` is added to table "source" if necessary.
"""
if ignored:
log.info('some arguments were ignored: {} '.format(str(ignored)))
children = children or []
self.verify_rank_integrity(tax_id, rank, parent_id, children)
source_id, __ = self.add_source(source_name)
assert isinstance(is_valid, bool)
statements = []
# add node
statements.append(
self.nodes.insert().values(
tax_id=tax_id,
parent_id=parent_id,
rank=rank,
source_id=source_id))
# add names. Since this is a new node, at least one name must
# be provided; if only one is provided, it is the primary
# name. If more than one is primary, an error will be raised
# from add_names()
if len(names) == 1:
names[0]['is_primary'] = True
else:
primary_names = [n['tax_name'] for n in names if n.get('is_primary')]
if len(primary_names) != 1:
raise ValueError(
'`is_primary` must be True for exactly one name in `names`')
for namedict in names:
namedict['source_id'] = source_id
if 'source_name' in namedict:
del namedict['source_name']
statements.extend(self.add_names(tax_id, names, execute=False))
# add children and update source_id
for child in children:
statements.append(self.nodes.update(
whereclause=self.nodes.c.tax_id == child,
values={'parent_id': tax_id, 'source_id': source_id}))
if execute:
self.execute(statements)
else:
return statements | Add a node to the taxonomy.
``source_name`` is added to table "source" if necessary. | entailment |
def add_name(self, tax_id, tax_name, source_name=None, source_id=None,
name_class='synonym', is_primary=False, is_classified=None,
execute=True, **ignored):
"""Add a record to the names table corresponding to
``tax_id``. Arguments are as follows:
- tax_id (string, required)
- tax_name (string, required)
*one* of the following are required:
- source_id (int or string coercable to int)
- source_name (string)
``source_id`` or ``source_name`` must identify an existing
record in table "source".
The following are optional:
- name_class (string, default 'synonym')
- is_primary (bool, see below)
- is_classified (bool or None, default None)
``is_primary`` is optional and defaults to True if only one
name is provided; otherwise is_primary must be True for
exactly one name (and is optional in others).
"""
assert isinstance(is_primary, bool)
assert is_classified in {None, True, False}
if ignored:
log.info('some arguments were ignored: {} '.format(str(ignored)))
source_id = self.get_source(source_id, source_name)['id']
statements = []
if is_primary:
statements.append(self.names.update(
whereclause=self.names.c.tax_id == tax_id,
values={'is_primary': False}))
statements.append(self.names.insert().values(
tax_id=tax_id,
tax_name=tax_name,
source_id=source_id,
is_primary=is_primary,
name_class=name_class,
is_classified=is_classified))
if execute:
self.execute(statements)
else:
return statements | Add a record to the names table corresponding to
``tax_id``. Arguments are as follows:
- tax_id (string, required)
- tax_name (string, required)
*one* of the following are required:
- source_id (int or string coercable to int)
- source_name (string)
``source_id`` or ``source_name`` must identify an existing
record in table "source".
The following are optional:
- name_class (string, default 'synonym')
- is_primary (bool, see below)
- is_classified (bool or None, default None)
``is_primary`` is optional and defaults to True if only one
name is provided; otherwise is_primary must be True for
exactly one name (and is optional in others). | entailment |
def add_names(self, tax_id, names, execute=True):
"""Associate one or more names with ``tax_id``.
``names`` is a list of one or more dicts, with keys
corresponding to the signature of ``self.add_name()``
(excluding ``execute``).
"""
primary_names = [n['tax_name'] for n in names if n.get('is_primary')]
if len(primary_names) > 1:
raise ValueError(
'`is_primary` may be True for no more than one name in `names`')
statements = []
for namevals in names:
if 'tax_id' in namevals:
del namevals['tax_id']
statements.extend(
self.add_name(tax_id=tax_id, execute=False, **namevals))
if execute:
self.execute(statements)
else:
return statements | Associate one or more names with ``tax_id``.
``names`` is a list of one or more dicts, with keys
corresponding to the signature of ``self.add_name()``
(excluding ``execute``). | entailment |
def sibling_of(self, tax_id):
"""Return None or a tax_id of a sibling of *tax_id*.
If ``tax_id`` is None, then always returns None. Otherwise,
returns None if there is no sibling.
"""
if tax_id is None:
return None
parent_id, rank = self._node(tax_id)
s = select([self.nodes.c.tax_id],
and_(self.nodes.c.parent_id == parent_id,
self.nodes.c.tax_id != tax_id,
self.nodes.c.rank == rank))
res = s.execute()
output = res.fetchone()
if not output:
msg = 'No sibling of tax_id {} with rank {} found in taxonomy'
msg = msg.format(tax_id, rank)
log.warning(msg)
return None
else:
return output[0] | Return None or a tax_id of a sibling of *tax_id*.
If ``tax_id`` is None, then always returns None. Otherwise,
returns None if there is no sibling. | entailment |
def tax_ids(self):
'''
Return all tax_ids in node table
'''
fetch = select([self.nodes.c.tax_id]).execute().fetchall()
ids = [t[0] for t in fetch]
return ids | Return all tax_ids in node table | entailment |
def child_of(self, tax_id):
"""Return None or a tax id of a child of *tax_id*.
If *tax_id* is None, then always returns None. Otherwise
returns a child if one exists, else None. The child must have
a proper rank below that of tax_id (i.e., genus, species, but
not no_rank or below_below_kingdom).
"""
if tax_id is None:
return None
parent_id, rank = self._node(tax_id)
s = select([self.nodes.c.tax_id],
and_(self.nodes.c.parent_id == tax_id,
or_(*[self.nodes.c.rank == r
for r in self.ranks_below(rank)])))
res = s.execute()
output = res.fetchone()
if not output:
msg = ('No children of tax_id {} with '
'rank below {} found in database')
msg = msg.format(tax_id, rank)
log.warning(msg)
return None
else:
r = output[0]
assert self.is_ancestor_of(r, tax_id)
return r | Return None or a tax id of a child of *tax_id*.
If *tax_id* is None, then always returns None. Otherwise
returns a child if one exists, else None. The child must have
a proper rank below that of tax_id (i.e., genus, species, but
not no_rank or below_below_kingdom). | entailment |
def nary_subtree(self, tax_id, n=2):
"""Return a list of species tax_ids under *tax_id* such that
node under *tax_id* and above the species has two children.
"""
if tax_id is None:
return None
parent_id, rank = self._node(tax_id)
if rank == 'species':
return [tax_id]
else:
children = self.children_of(tax_id, 2)
species_taxids = []
for t in children:
species_taxids.extend(self.nary_subtree(t, n))
return species_taxids | Return a list of species tax_ids under *tax_id* such that
node under *tax_id* and above the species has two children. | entailment |
def validate_db(sqlalchemy_bind, is_enabled=ENABLE_DB):
""" Checks if a DB is authorized and responding before executing the function """
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
def is_db_responsive():
try:
sqlalchemy_bind.session.query('1').first_or_404()
except:
return False
else:
return True
if is_enabled and is_db_responsive():
return func(*args, **kwargs)
else:
abort(HTTP_CODES.UNAUTHORIZED)
return wrapper
return decorator | Checks if a DB is authorized and responding before executing the function | entailment |
def substitute(template, mapping=None):
"""
Render the template *template*. *mapping* is a :class:`dict` with
values to add to the template.
"""
if mapping is None:
mapping = {}
templ = Template(template)
return templ.substitute(mapping) | Render the template *template*. *mapping* is a :class:`dict` with
values to add to the template. | entailment |
def dirtree(path):
"""
Find recursively and return all files and directories from
the path *path*.
"""
results = []
for name in glob.glob(os.path.join(path, '*')):
results.append(name)
if os.path.isdir(name):
results.extend(dirtree(name))
return results | Find recursively and return all files and directories from
the path *path*. | entailment |
def get_tornado_apps(context, debug=False):
"""
Create Tornado's application for all interfaces which are defined
in the configuration. *context* is instance of the
:class:`shelter.core.context.Context`. If *debug* is :const:`True`,
server will be run in **DEBUG** mode. Return :class:`list` of the
:class:`tornado.web.Application` instances.
"""
if context.config.app_settings_handler:
app_settings_handler = import_object(
context.config.app_settings_handler)
settings = app_settings_handler(context)
else:
settings = {}
apps = []
for interface in context.config.interfaces:
urls = interface.urls
if not urls:
urls = [tornado.web.URLSpec('/', NullHandler)]
apps.append(
tornado.web.Application(
urls, debug=debug, context=context,
interface=interface, **settings
)
)
return apps | Create Tornado's application for all interfaces which are defined
in the configuration. *context* is instance of the
:class:`shelter.core.context.Context`. If *debug* is :const:`True`,
server will be run in **DEBUG** mode. Return :class:`list` of the
:class:`tornado.web.Application` instances. | entailment |
def run(self):
"""
Child process. Repeatedly call :meth:`loop` method every
:attribute:`interval` seconds.
"""
setproctitle.setproctitle("{:s}: {:s}".format(
self.context.config.name, self.__class__.__name__))
self.logger.info(
"Worker '%s' has been started with pid %d",
self.__class__.__name__, os.getpid())
# Register SIGINT handler which will exit service process
def sigint_handler(unused_signum, unused_frame):
"""
Exit service process when SIGINT is reached.
"""
self.stop()
signal.signal(signal.SIGINT, sigint_handler)
# Initialize logging
self.context.config.configure_logging()
# Initialize child
self.context.initialize_child(SERVICE_PROCESS, process=self)
next_loop_time = 0
while 1:
# Exit if pid of the parent process has changed (parent process
# has exited and init is new parent) or if stop flag is set.
if os.getppid() != self._parent_pid or self._stop_event.is_set():
break
# Repeatedly call loop method. After first call set ready flag.
if time.time() >= next_loop_time:
try:
self.loop()
except Exception:
self.logger.exception(
"Worker '%s' failed", self.__class__.__name__)
else:
if not next_loop_time and not self.ready:
self._ready.value = True
next_loop_time = time.time() + self.interval
else:
time.sleep(0.25) | Child process. Repeatedly call :meth:`loop` method every
:attribute:`interval` seconds. | entailment |
def get_new_nodes(fname):
"""
Return an iterator of dicts given a .csv-format file.
"""
with open(fname, 'rU') as infile:
infile = (line for line in infile if not line.startswith('#'))
reader = list(csv.DictReader(infile))
rows = (d for d in reader if d['tax_id'])
# for now, children are provided as a semicolon-delimited list
# within a cell (yes, yuck). We need to convert thit into a list
# if present.
for d in rows:
if 'children' in d:
if d['children']:
d['children'] = [x.strip() for x in d['children'].split(';')]
else:
del d['children']
yield d | Return an iterator of dicts given a .csv-format file. | entailment |
def getlines(fname):
"""
Returns iterator of whitespace-stripped lines in file, omitting
blank lines, lines beginning with '#', and line contents following
the first '#' character.
"""
with open(fname, 'rU') as f:
for line in f:
if line.strip() and not line.startswith('#'):
yield line.split('#', 1)[0].strip() | Returns iterator of whitespace-stripped lines in file, omitting
blank lines, lines beginning with '#', and line contents following
the first '#' character. | entailment |
def parse_raxml(handle):
"""Parse RAxML's summary output.
*handle* should be an open file handle containing the RAxML
output. It is parsed and a dictionary returned.
"""
s = ''.join(handle.readlines())
result = {}
try_set_fields(result, r'(?P<program>RAxML version [0-9.]+)', s)
try_set_fields(result, r'(?P<datatype>DNA|RNA|AA)', s)
result['empirical_frequencies'] = (
result['datatype'] != 'AA' or
re.search('empirical base frequencies', s, re.IGNORECASE) is not None)
try_set_fields(result, r'Substitution Matrix: (?P<subs_model>\w+)', s)
rates = {}
if result['datatype'] != 'AA':
try_set_fields(rates,
(r"rates\[0\] ac ag at cg ct gt: "
r"(?P<ac>[0-9.]+) (?P<ag>[0-9.]+) (?P<at>[0-9.]+) "
r"(?P<cg>[0-9.]+) (?P<ct>[0-9.]+) (?P<gt>[0-9.]+)"),
s, hook=float)
try_set_fields(rates, r'rate A <-> C: (?P<ac>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate A <-> G: (?P<ag>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate A <-> T: (?P<at>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate C <-> G: (?P<cg>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate C <-> T: (?P<ct>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate G <-> T: (?P<gt>[0-9.]+)', s, hook=float)
if len(rates) > 0:
result['subs_rates'] = rates
result['gamma'] = {'n_cats': 4}
try_set_fields(result['gamma'],
r"alpha[\[\]0-9]*: (?P<alpha>[0-9.]+)", s, hook=float)
result['ras_model'] = 'gamma'
return result | Parse RAxML's summary output.
*handle* should be an open file handle containing the RAxML
output. It is parsed and a dictionary returned. | entailment |
def parse_stockholm(fobj):
"""Return a list of names from an Stockholm-format sequence alignment
file. ``fobj`` is an open file or another object representing a
sequence of lines.
"""
names = OrderedDict()
found_eof = False
for line in fobj:
line = line.strip()
if line == '//':
found_eof = True
elif line.startswith('#') or not line.strip():
continue
else:
name, __ = line.split(None, 1)
names[name] = None
if not found_eof:
raise ValueError('Invalid Stockholm format: no file terminator')
return list(names.keys()) | Return a list of names from an Stockholm-format sequence alignment
file. ``fobj`` is an open file or another object representing a
sequence of lines. | entailment |
def has_rppr(rppr_name='rppr'):
"""
Check for rppr binary in path
"""
with open(os.devnull) as dn:
try:
subprocess.check_call([rppr_name], stdout=dn, stderr=dn)
except OSError as e:
if e.errno == os.errno.ENOENT:
return False
else:
raise
except subprocess.CalledProcessError as e:
# rppr returns non-zero exit status with no arguments
pass
return True | Check for rppr binary in path | entailment |
def add_database_args(parser):
'''
Add a standard set of database arguments for argparse
'''
parser.add_argument(
'url',
nargs='?',
default='sqlite:///ncbi_taxonomy.db',
type=sqlite_default(),
help=('Database string URI or filename. If no database scheme '
'specified \"sqlite:///\" will be prepended. [%(default)s]'))
db_parser = parser.add_argument_group(title='database options')
# TODO: better description of what --schema does
db_parser.add_argument(
'--schema',
help=('Name of SQL schema in database to query '
'(if database flavor supports this).'))
return parser | Add a standard set of database arguments for argparse | entailment |
def sqlite_default():
'''
Prepend default scheme if none is specified. This helps provides backwards
compatibility with old versions of taxtastic where sqlite was the automatic
default database.
'''
def parse_url(url):
# TODO: need separate option for a config file
if url.endswith('.db') or url.endswith('.sqlite'):
if not url.startswith('sqlite:///'):
url = 'sqlite:///' + url
elif url.endswith('.cfg') or url.endswith('.conf'):
conf = configparser.SafeConfigParser(allow_no_value=True)
conf.optionxform = str # options are case-sensitive
conf.read(url)
url = conf.get('sqlalchemy', 'url')
return url
return parse_url | Prepend default scheme if none is specified. This helps provides backwards
compatibility with old versions of taxtastic where sqlite was the automatic
default database. | entailment |
def action(args):
"""Strips non-current files and rollback information from a refpkg.
*args* should be an argparse object with fields refpkg (giving the
path to the refpkg to operate on).
"""
log.info('loading reference package')
refpkg.Refpkg(args.refpkg, create=False).strip() | Strips non-current files and rollback information from a refpkg.
*args* should be an argparse object with fields refpkg (giving the
path to the refpkg to operate on). | entailment |
def get_conf_d_files(path):
"""
Return alphabetical ordered :class:`list` of the *.conf* files
placed in the path. *path* is a directory path.
::
>>> get_conf_d_files('conf/conf.d/')
['conf/conf.d/10-base.conf', 'conf/conf.d/99-dev.conf']
"""
if not os.path.isdir(path):
raise ValueError("'%s' is not a directory" % path)
files_mask = os.path.join(path, "*.conf")
return [f for f in sorted(glob.glob(files_mask)) if os.path.isfile(f)] | Return alphabetical ordered :class:`list` of the *.conf* files
placed in the path. *path* is a directory path.
::
>>> get_conf_d_files('conf/conf.d/')
['conf/conf.d/10-base.conf', 'conf/conf.d/99-dev.conf'] | entailment |
def get_conf_files(filename):
"""
Return :class:`list` of the all configuration files. *filename* is a
path of the main configuration file.
::
>>> get_conf_files('exampleapp.conf')
['exampleapp.conf', 'exampleapp.conf.d/10-database.conf']
"""
if not os.path.isfile(filename):
raise ValueError("'%s' is not a file" % filename)
conf_d_path = "%s.d" % filename
if not os.path.exists(conf_d_path):
return [filename]
return [filename] + get_conf_d_files(conf_d_path) | Return :class:`list` of the all configuration files. *filename* is a
path of the main configuration file.
::
>>> get_conf_files('exampleapp.conf')
['exampleapp.conf', 'exampleapp.conf.d/10-database.conf'] | entailment |
def get_configparser(filename=''):
"""
Read main configuration file and all files from *conf.d* subdirectory
and return parsed configuration as a **configparser.RawConfigParser**
instance.
"""
filename = filename or os.environ.get('SHELTER_CONFIG_FILENAME', '')
if not filename:
raise ImproperlyConfiguredError(_(
"Configuration file is not defined. You must either "
"set 'SHELTER_CONFIG_FILENAME' environment variable or "
"'-f/--config-file' command line argument."
))
parser = six.moves.configparser.RawConfigParser()
for conf_file in get_conf_files(filename):
logger.info("Found config '%s'", conf_file)
if not parser.read(conf_file):
logger.warning("Error while parsing config '%s'", conf_file)
return parser | Read main configuration file and all files from *conf.d* subdirectory
and return parsed configuration as a **configparser.RawConfigParser**
instance. | entailment |
def name(self):
"""
Application name. It's used as a process name.
"""
try:
return self.config_parser.get('application', 'name')
except CONFIGPARSER_EXC:
return super(IniConfig, self).name | Application name. It's used as a process name. | entailment |
def interfaces(self):
"""
Interfaces as a :class:`list`of the
:class:`shelter.core.config.Config.Interface` instances.
"""
if 'interfaces' not in self._cached_values:
self._cached_values['interfaces'] = []
for name, interface in six.iteritems(self.settings.INTERFACES):
interface_name = 'interface_%s' % name
# Hostname:port + unix socket
try:
listen = self.config_parser.get(interface_name, 'Listen')
except CONFIGPARSER_EXC:
listen = interface.get('LISTEN')
try:
unix_socket = self.config_parser.get(
interface_name, 'UnixSocket')
except CONFIGPARSER_EXC:
unix_socket = interface.get('UNIX_SOCKET')
if not listen and not unix_socket:
raise ValueError(
'Interface MUST listen either on TCP '
'or UNIX socket or both')
host, port = parse_host(listen) if listen else (None, None)
# Processes
try:
processes = self.config_parser.getint(
interface_name, 'Processes')
except CONFIGPARSER_EXC:
processes = int(interface.get('PROCESSES', 1))
# Urls
try:
urls_obj_name = self.config_parser.get(
interface_name, 'Urls')
except CONFIGPARSER_EXC:
urls_obj_name = interface.get('URLS', '')
if urls_obj_name:
urls = import_object(urls_obj_name)
else:
urls = ()
self._cached_values['interfaces'].append(
self.Interface(
name, host, port, unix_socket, processes, urls)
)
return self._cached_values['interfaces'] | Interfaces as a :class:`list`of the
:class:`shelter.core.config.Config.Interface` instances. | entailment |
def tz_file(name):
"""
Open a timezone file from the zoneinfo subdir for reading.
:param name: The name of the timezone.
:type name: str
:rtype: file
"""
try:
filepath = tz_path(name)
return open(filepath, 'rb')
except TimezoneNotFound:
# http://bugs.launchpad.net/bugs/383171 - we avoid using this
# unless absolutely necessary to help when a broken version of
# pkg_resources is installed.
try:
from pkg_resources import resource_stream
except ImportError:
resource_stream = None
if resource_stream is not None:
try:
return resource_stream(__name__, 'zoneinfo/' + name)
except FileNotFoundError:
return tz_path(name)
raise | Open a timezone file from the zoneinfo subdir for reading.
:param name: The name of the timezone.
:type name: str
:rtype: file | entailment |
def tz_path(name):
"""
Return the path to a timezone file.
:param name: The name of the timezone.
:type name: str
:rtype: str
"""
if not name:
raise ValueError('Invalid timezone')
name_parts = name.lstrip('/').split('/')
for part in name_parts:
if part == os.path.pardir or os.path.sep in part:
raise ValueError('Bad path segment: %r' % part)
filepath = os.path.join(_DIRECTORY, *name_parts)
if not os.path.exists(filepath):
raise TimezoneNotFound('Timezone {} not found at {}'.format(name, filepath))
return filepath | Return the path to a timezone file.
:param name: The name of the timezone.
:type name: str
:rtype: str | entailment |
def get_timezones():
"""
Get the supported timezones.
The list will be cached unless you set the "fresh" attribute to True.
:param fresh: Whether to get a fresh list or not
:type fresh: bool
:rtype: tuple
"""
base_dir = _DIRECTORY
zones = ()
for root, dirs, files in os.walk(base_dir):
for basename in files:
zone = os.path.join(root, basename)
if os.path.isdir(zone):
continue
zone = os.path.relpath(zone, base_dir)
with open(os.path.join(root, basename), 'rb') as fd:
if fd.read(4) == b'TZif' and zone not in INVALID_ZONES:
zones = zones + (zone,)
return tuple(sorted(zones)) | Get the supported timezones.
The list will be cached unless you set the "fresh" attribute to True.
:param fresh: Whether to get a fresh list or not
:type fresh: bool
:rtype: tuple | entailment |
def award_points(target, key, reason="", source=None):
"""
Awards target the point value for key. If key is an integer then it's a
one off assignment and should be interpreted as the actual point value.
"""
point_value, points = get_points(key)
if not ALLOW_NEGATIVE_TOTALS:
total = points_awarded(target)
if total + points < 0:
reason = reason + "(floored from {0} to 0)".format(points)
points = -total
apv = AwardedPointValue(points=points, value=point_value, reason=reason)
if isinstance(target, get_user_model()):
apv.target_user = target
lookup_params = {
"target_user": target
}
else:
apv.target_object = target
lookup_params = {
"target_content_type": apv.target_content_type,
"target_object_id": apv.target_object_id,
}
if source is not None:
if isinstance(source, get_user_model()):
apv.source_user = source
else:
apv.source_object = source
apv.save()
if not TargetStat.update_points(points, lookup_params):
try:
sid = transaction.savepoint()
TargetStat._default_manager.create(
**dict(lookup_params, points=points)
)
transaction.savepoint_commit(sid)
except IntegrityError:
transaction.savepoint_rollback(sid)
TargetStat.update_points(points, lookup_params)
signals.points_awarded.send(
sender=target.__class__,
target=target,
key=key,
points=points,
source=source
)
new_points = points_awarded(target)
old_points = new_points - points
TargetStat.update_positions((old_points, new_points))
return apv | Awards target the point value for key. If key is an integer then it's a
one off assignment and should be interpreted as the actual point value. | entailment |
def points_awarded(target=None, source=None, since=None):
"""
Determine out how many points the given target has received.
"""
lookup_params = {}
if target is not None:
if isinstance(target, get_user_model()):
lookup_params["target_user"] = target
else:
lookup_params.update({
"target_content_type": ContentType.objects.get_for_model(target),
"target_object_id": target.pk,
})
if source is not None:
if isinstance(source, get_user_model()):
lookup_params["source_user"] = source
else:
lookup_params.update({
"source_content_type": ContentType.objects.get_for_model(source),
"source_object_id": source.pk,
})
if since is None:
if target is not None and source is None:
try:
return TargetStat.objects.get(**lookup_params).points
except TargetStat.DoesNotExist:
return 0
else:
return AwardedPointValue.points_awarded(**lookup_params)
else:
lookup_params["timestamp__gte"] = since
return AwardedPointValue.points_awarded(**lookup_params) | Determine out how many points the given target has received. | entailment |
def __validate_dates(start_date, end_date):
"""Validate if a date string.
Validate if a string is a date on yyyy-mm-dd format and it the
period between them is less than a year.
"""
try:
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
except ValueError:
raise ValueError("Incorrect data format, should be yyyy-mm-dd")
if (end_date - start_date).days > 366:
raise ValueError("The difference between start and end date " +
"should be less than or equal to 366 days.")
if (end_date - start_date).days < 0:
raise ValueError("End date cannot be before start date.") | Validate if a date string.
Validate if a string is a date on yyyy-mm-dd format and it the
period between them is less than a year. | entailment |
def __yahoo_request(query):
"""Request Yahoo Finance information.
Request information from YQL.
`Check <http://goo.gl/8AROUD>`_ for more information on YQL.
"""
query = quote(query)
url = 'https://query.yahooapis.com/v1/public/yql?q=' + query + \
'&format=json&env=store://datatables.org/alltableswithkeys'
response = urlopen(url).read()
return json.loads(response.decode('utf-8'))['query']['results'] | Request Yahoo Finance information.
Request information from YQL.
`Check <http://goo.gl/8AROUD>`_ for more information on YQL. | entailment |
def request_quotes(tickers_list, selected_columns=['*']):
"""Request Yahoo Finance recent quotes.
Returns quotes information from YQL. The columns to be requested are
listed at selected_columns. Check `here <http://goo.gl/8AROUD>`_ for more
information on YQL.
>>> request_quotes(['AAPL'], ['Name', 'PreviousClose'])
{
'PreviousClose': '95.60',
'Name': 'Apple Inc.'
}
:param table: Table name.
:type table: string
:param tickers_list: List of tickers that will be returned.
:type tickers_list: list of strings
:param selected_columns: List of columns to be returned, defaults to ['*']
:type selected_columns: list of strings, optional
:returns: Requested quotes.
:rtype: json
:raises: TypeError, TypeError
"""
__validate_list(tickers_list)
__validate_list(selected_columns)
query = 'select {cols} from yahoo.finance.quotes where symbol in ({vals})'
query = query.format(
cols=', '.join(selected_columns),
vals=', '.join('"{0}"'.format(s) for s in tickers_list)
)
response = __yahoo_request(query)
if not response:
raise RequestError('Unable to process the request. Check if the ' +
'columns selected are valid.')
if not type(response['quote']) is list:
return [response['quote']]
return response['quote'] | Request Yahoo Finance recent quotes.
Returns quotes information from YQL. The columns to be requested are
listed at selected_columns. Check `here <http://goo.gl/8AROUD>`_ for more
information on YQL.
>>> request_quotes(['AAPL'], ['Name', 'PreviousClose'])
{
'PreviousClose': '95.60',
'Name': 'Apple Inc.'
}
:param table: Table name.
:type table: string
:param tickers_list: List of tickers that will be returned.
:type tickers_list: list of strings
:param selected_columns: List of columns to be returned, defaults to ['*']
:type selected_columns: list of strings, optional
:returns: Requested quotes.
:rtype: json
:raises: TypeError, TypeError | entailment |
def request_historical(ticker, start_date, end_date):
"""Get stock's daily historical information.
Returns a dictionary with Adj Close, Close, High, Low, Open and
Volume, between the start_date and the end_date. Is start_date and
end_date were not provided all the available information will be
retrieved. Information provided by YQL platform.
Check `here <http://goo.gl/8AROUD>`_ for more information on YQL.
.. warning:: Request limited to a period not greater than 366 days.
Use download_historical() to download the full historical data.
>>> request_historical('AAPL', '2016-03-01', '2016-03-02')
[
{
'Close': '100.75',
'Low': '99.639999',
'High': '100.889999',
'Adj_Close': '100.140301',
'Date': '2016-03-02',
'Open': '100.510002',
'Volume': '33169600'
},
{
'Close': '100.529999',
'Low': '97.419998',
'High': '100.769997',
'Adj_Close': '99.921631',
'Date': '2016-03-01',
'Open': '97.650002',
'Volume': '50407100'
}
]
:param start_date: Start date
:type start_date: string on the format of "yyyy-mm-dd"
:param end_date: End date
:type end_date: string on the format of "yyyy-mm-dd"
:returns: Daily historical information.
:rtype: list of dictionaries
"""
__validate_dates(start_date, end_date)
cols = ['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Adj_Close']
query = 'select {cols} from yahoo.finance.historicaldata ' + \
'where symbol in ("{ticker}") and startDate = "{start_date}" ' + \
'and endDate = "{end_date}"'
query = query.format(
cols=', '.join(cols),
ticker=ticker,
start_date=start_date,
end_date=end_date
)
response = __yahoo_request(query)
if not response:
raise RequestError('Unable to process the request. Check if the ' +
'stock ticker used is a valid one.')
if not type(response['quote']) is list:
return [response['quote']]
return response['quote'] | Get stock's daily historical information.
Returns a dictionary with Adj Close, Close, High, Low, Open and
Volume, between the start_date and the end_date. Is start_date and
end_date were not provided all the available information will be
retrieved. Information provided by YQL platform.
Check `here <http://goo.gl/8AROUD>`_ for more information on YQL.
.. warning:: Request limited to a period not greater than 366 days.
Use download_historical() to download the full historical data.
>>> request_historical('AAPL', '2016-03-01', '2016-03-02')
[
{
'Close': '100.75',
'Low': '99.639999',
'High': '100.889999',
'Adj_Close': '100.140301',
'Date': '2016-03-02',
'Open': '100.510002',
'Volume': '33169600'
},
{
'Close': '100.529999',
'Low': '97.419998',
'High': '100.769997',
'Adj_Close': '99.921631',
'Date': '2016-03-01',
'Open': '97.650002',
'Volume': '50407100'
}
]
:param start_date: Start date
:type start_date: string on the format of "yyyy-mm-dd"
:param end_date: End date
:type end_date: string on the format of "yyyy-mm-dd"
:returns: Daily historical information.
:rtype: list of dictionaries | entailment |
def download_historical(tickers_list, output_folder):
"""Download historical data from Yahoo Finance.
Downloads full historical data from Yahoo Finance as CSV. The following
fields are available: Adj Close, Close, High, Low, Open and Volume. Files
will be saved to output_folder as <ticker>.csv.
:param tickers_list: List of tickers that will be returned.
:type tickers_list: list of strings
:param output_folder: Output folder path
:type output_folder: string
"""
__validate_list(tickers_list)
for ticker in tickers_list:
file_name = os.path.join(output_folder, ticker + '.csv')
with open(file_name, 'wb') as f:
base_url = 'http://real-chart.finance.yahoo.com/table.csv?s='
try:
urlopen(base_url + ticker)
urlretrieve(base_url + ticker, f.name)
except:
os.remove(file_name)
raise RequestError('Unable to process the request. Check if ' +
ticker + ' is a valid stock ticker') | Download historical data from Yahoo Finance.
Downloads full historical data from Yahoo Finance as CSV. The following
fields are available: Adj Close, Close, High, Low, Open and Volume. Files
will be saved to output_folder as <ticker>.csv.
:param tickers_list: List of tickers that will be returned.
:type tickers_list: list of strings
:param output_folder: Output folder path
:type output_folder: string | entailment |
def setup_logging(default_json_path=None, default_level=None, env_key='LOG_CFG',
custom_log_dir=None):
"""Setup logging configuration
"""
if not default_json_path:
default_json_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "logging.json")
path = default_json_path
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, 'rt') as f:
config = json.load(f)
log_dir = os.path.abspath(prms.Paths["filelogdir"])
if custom_log_dir:
log_dir = custom_log_dir
if not os.path.isdir(log_dir):
warning_txt = ("\nCould not set custom log-dir - "
"non-existing directory"
f"\nDir: {log_dir}"
"\nUsing current directory instead: "
f"{os.getcwd()}")
logging.warning(warning_txt)
log_dir = os.getcwd()
for file_handler in ["error_file_handler", "info_file_handler",
"debug_file_handler"]:
try:
file_name = config["handlers"][file_handler]["filename"]
logging.debug("Setting file handlers for logging.")
logging.debug(f"Filename: {file_name}")
logging.debug(f"Full path: {os.path.join(log_dir,file_name)}")
# print(f"Filename: {file_name}")
# print(f"Full path: {os.path.join(log_dir,file_name)}")
config["handlers"][file_handler][
"filename"] = os.path.join(log_dir,
file_name)
except Exception as e:
warnings.warn("\nCould not set custom log-dir" + str(e))
if default_level:
w_txt = "\nCould not set custom default level for logger"
if default_level not in [
"INFO", "DEBUG", logging.INFO, logging.DEBUG
]:
_txt = "\nonly 'INFO' and 'DEBUG' is supported"
_txt += " as default_level"
warnings.warn(w_txt + _txt)
else:
try:
config["handlers"]["console"]["level"] = default_level
if default_level in ["DEBUG", logging.DEBUG]:
config["handlers"]["console"]["formatter"] = "stamped"
except Exception as e:
warnings.warn(w_txt + "\n" + str(e))
logging.config.dictConfig(config)
else:
if not default_level:
default_level = logging.INFO
logging.basicConfig(level=default_level) | Setup logging configuration | entailment |
def search_for_files(run_name, raw_extension=None, cellpy_file_extension=None,
raw_file_dir=None, cellpy_file_dir=None, prm_filename=None,
file_name_format=None, cache=None):
"""Searches for files (raw-data files and cellpy-files).
Args:
run_name(str): run-file identification.
raw_extension(str): optional, extension of run-files (without the '.').
cellpy_file_extension(str): optional, extension for cellpy files
(without the '.').
raw_file_dir(path): optional, directory where to look for run-files
(default: read prm-file)
cellpy_file_dir(path): optional, directory where to look for
cellpy-files (default: read prm-file)
prm_filename(path): optional parameter file can be given.
file_name_format(str): format of raw-file names or a glob pattern
(default: YYYYMMDD_[name]EEE_CC_TT_RR).
cache(list): list of cached file names to search through
Returns:
run-file names (list) and cellpy-file-name (path).
"""
time_00 = time.time()
cellpy_file_extension = "h5"
res_extension = "res"
version = 0.1
# might include searching and removing "." in extensions
# should include extension definitions in prm file (version 0.6)
logger.debug(f"searching for {run_name}")
if raw_extension is None:
raw_extension = res_extension
if cellpy_file_extension is None:
cellpy_file_extension = cellpy_file_extension
if prm_filename is not None:
logging.debug("reading prm file disabled")
if not all([raw_file_dir, cellpy_file_dir, file_name_format]):
# import cellpy.parameters.prms as prms
# prms = prmreader.read()
logger.debug("using prms already set")
if raw_file_dir is None:
raw_file_dir = prms.Paths["rawdatadir"]
if cellpy_file_dir is None:
cellpy_file_dir = prms.Paths["cellpydatadir"]
if file_name_format is None:
try:
# To be implemented in version 0.5:
file_name_format = prms.file_name_format
except AttributeError:
file_name_format = "YYYYMMDD_[name]EEE_CC_TT_RR"
if version >= 0.5:
print("Could not read file_name_format "
"from _cellpy_prms_xxx.conf.")
print("Using:")
print("file_name_format:", file_name_format)
file_format_explanation = "YYYYMMDD is date,"
file_format_explanation += " EEE is electrode number"
file_format_explanation += " CC is cell number,"
file_format_explanation += " TT is cell_type, RR is run number."
print(file_format_explanation)
# check if raw_file_dir exists
if not os.path.isdir(raw_file_dir):
warnings.warn("your raw file directory cannot be accessed!")
if file_name_format.upper() == "YYYYMMDD_[NAME]EEE_CC_TT_RR":
glob_text_raw = "%s_*.%s" % (os.path.basename(run_name), raw_extension)
reg_exp_raw = "xxx"
else:
glob_text_raw = file_name_format
cellpy_file = "{0}.{1}".format(run_name, cellpy_file_extension)
cellpy_file = os.path.join(cellpy_file_dir, cellpy_file)
# TODO: @jepe - use pathlib
if cache is None:
use_pathlib_path = False
return_as_str_list = True
if use_pathlib_path:
logger.debug("using pathlib.Path")
if os.path.isdir(raw_file_dir):
run_files = pathlib.Path(raw_file_dir).glob(glob_text_raw)
if return_as_str_list:
run_files = [str(f.resolve()) for f in run_files]
run_files.sort()
else:
run_files = []
else:
if os.path.isdir(raw_file_dir):
glob_text_raw_full = os.path.join(raw_file_dir, glob_text_raw)
run_files = glob.glob(glob_text_raw_full)
run_files.sort()
else:
run_files = []
logger.debug(f"(dt: {(time.time() - time_00):4.2f}s)")
return run_files, cellpy_file
else:
logger.debug("using cache in filefinder")
if os.path.isdir(raw_file_dir):
if len(cache) == 0:
cache = os.listdir(raw_file_dir)
run_files = [
os.path.join(
raw_file_dir, x
) for x in cache if fnmatch.fnmatch(
x, glob_text_raw
)
]
run_files.sort()
else:
run_files = []
logger.debug(f"(dt: {(time.time() - time_00):4.2f}s)")
return run_files, cellpy_file, cache | Searches for files (raw-data files and cellpy-files).
Args:
run_name(str): run-file identification.
raw_extension(str): optional, extension of run-files (without the '.').
cellpy_file_extension(str): optional, extension for cellpy files
(without the '.').
raw_file_dir(path): optional, directory where to look for run-files
(default: read prm-file)
cellpy_file_dir(path): optional, directory where to look for
cellpy-files (default: read prm-file)
prm_filename(path): optional parameter file can be given.
file_name_format(str): format of raw-file names or a glob pattern
(default: YYYYMMDD_[name]EEE_CC_TT_RR).
cache(list): list of cached file names to search through
Returns:
run-file names (list) and cellpy-file-name (path). | entailment |
def squash_xml_to_text(elm, remove_namespaces=False):
"""Squash the given XML element (as `elm`) to a text containing XML.
The outer most element/tag will be removed, but inner elements will
remain. If `remove_namespaces` is specified, XML namespace declarations
will be removed from the text.
:param elm: XML element
:type elm: :class:`xml.etree.ElementTree`
:param remove_namespaces: flag to indicate the removal of XML namespaces
:type remove_namespaces: bool
:return: the inner text and elements of the given XML element
:rtype: str
"""
leading_text = elm.text and elm.text or ''
result = [leading_text]
for child in elm.getchildren():
# Encoding is set to utf-8 because otherwise `ó` would
# become `ó`
child_value = etree.tostring(child, encoding='utf-8')
# Decode to a string for later regexp and whitespace stripping
child_value = child_value.decode('utf-8')
result.append(child_value)
if remove_namespaces:
# Best way to remove the namespaces without having the parser complain
# about producing invalid XML.
result = [re.sub(' xmlns:?[^=]*="[^"]*"', '', v) for v in result]
# Join the results and strip any surrounding whitespace
result = u''.join(result).strip()
return result | Squash the given XML element (as `elm`) to a text containing XML.
The outer most element/tag will be removed, but inner elements will
remain. If `remove_namespaces` is specified, XML namespace declarations
will be removed from the text.
:param elm: XML element
:type elm: :class:`xml.etree.ElementTree`
:param remove_namespaces: flag to indicate the removal of XML namespaces
:type remove_namespaces: bool
:return: the inner text and elements of the given XML element
:rtype: str | entailment |
def load(self, file_name):
"""Load a raw data-file
Args:
file_name (path)
Returns:
loaded test
"""
new_rundata = self.loader(file_name)
new_rundata = self.inspect(new_rundata)
return new_rundata | Load a raw data-file
Args:
file_name (path)
Returns:
loaded test | entailment |
def datetime2ole(dt):
"""converts from datetime object to ole datetime float"""
delta = dt - OLE_TIME_ZERO
delta_float = delta / datetime.timedelta(days=1) # trick from SO
return delta_float | converts from datetime object to ole datetime float | entailment |
def get_raw_limits():
"""Include the settings for how to decide what kind of
step you are examining here.
The raw limits are 'epsilons' used to check if the current
and/or voltage is stable (for example
for galvanostatic steps, one would expect that the current
is stable (constant) and non-zero).
It is expected that different instruments (with different
resolution etc.) have different
'epsilons'.
Returns: the raw limits (dict)
"""
raw_limits = dict()
raw_limits["current_hard"] = 0.0000000000001
raw_limits["current_soft"] = 0.00001
raw_limits["stable_current_hard"] = 2.0
raw_limits["stable_current_soft"] = 4.0
raw_limits["stable_voltage_hard"] = 2.0
raw_limits["stable_voltage_soft"] = 4.0
raw_limits["stable_charge_hard"] = 2.0
raw_limits["stable_charge_soft"] = 5.0
raw_limits["ir_change"] = 0.00001
return raw_limits | Include the settings for how to decide what kind of
step you are examining here.
The raw limits are 'epsilons' used to check if the current
and/or voltage is stable (for example
for galvanostatic steps, one would expect that the current
is stable (constant) and non-zero).
It is expected that different instruments (with different
resolution etc.) have different
'epsilons'.
Returns: the raw limits (dict) | entailment |
def loader(self, file_name, bad_steps=None, **kwargs):
"""Loads data from biologics .mpr files.
Args:
file_name (str): path to .res file.
bad_steps (list of tuples): (c, s) tuples of steps s
(in cycle c) to skip loading.
Returns:
new_tests (list of data objects)
"""
new_tests = []
if not os.path.isfile(file_name):
self.logger.info("Missing file_\n %s" % file_name)
return None
filesize = os.path.getsize(file_name)
hfilesize = humanize_bytes(filesize)
txt = "Filesize: %i (%s)" % (filesize, hfilesize)
self.logger.debug(txt)
# creating temporary file and connection
temp_dir = tempfile.gettempdir()
temp_filename = os.path.join(temp_dir, os.path.basename(file_name))
shutil.copy2(file_name, temp_dir)
self.logger.debug("tmp file: %s" % temp_filename)
self.logger.debug("HERE WE LOAD THE DATA")
data = DataSet()
fid = FileID(file_name)
# div parameters and information (probably load this last)
test_no = 1
data.test_no = test_no
data.loaded_from = file_name
# some overall prms
data.channel_index = None
data.channel_number = None
data.creator = None
data.item_ID = None
data.schedule_file_name = None
data.start_datetime = None
data.test_ID = None
data.test_name = None
data.raw_data_files.append(fid)
# --------- read raw-data (normal-data) -------------------------
self.logger.debug("reading raw-data")
self.mpr_data = None
self.mpr_log = None
self.mpr_settings = None
self._load_mpr_data(temp_filename, bad_steps)
length_of_test = self.mpr_data.shape[0]
self.logger.debug(f"length of test: {length_of_test}")
self.logger.debug("renaming columns")
self._rename_headers()
# --------- stats-data (summary-data) -------------------------
summary_df = self._create_summary_data()
if summary_df.empty:
txt = "\nCould not find any summary (stats-file)!"
txt += " (summary_df.empty = True)"
txt += "\n -> issue make_summary(use_cellpy_stat_file=False)"
warnings.warn(txt)
data.dfsummary = summary_df
data.dfdata = self.mpr_data
data.raw_data_files_length.append(length_of_test)
new_tests.append(data)
self._clean_up(temp_filename)
return new_tests | Loads data from biologics .mpr files.
Args:
file_name (str): path to .res file.
bad_steps (list of tuples): (c, s) tuples of steps s
(in cycle c) to skip loading.
Returns:
new_tests (list of data objects) | entailment |
def csv_dumper(**kwargs):
"""dump data to csv"""
logging.info("dumping to csv")
barn = kwargs["barn"]
farms = kwargs["farms"]
experiments = kwargs["experiments"]
for experiment, farm in zip(experiments, farms):
name = experiment.journal.name
project = experiment.journal.project
project_dir, batch_dir, raw_dir = \
experiment.journal.paginate()
if batch_dir is None:
logging.info("have to generate folder-name on the fly")
out_data_dir, project_dir, batch_dir, raw_dir = \
generate_folder_names(name, project)
if barn == "batch_dir":
out_dir = batch_dir
elif barn == "project_dir":
out_dir = project_dir
elif barn == "raw_dir":
out_dir = raw_dir
else:
out_dir = barn
for animal in farm:
file_name = os.path.join(
out_dir, "summary_%s_%s.csv" % (
animal.name,
name
)
)
logging.info(f"> {file_name}")
animal.to_csv(file_name, sep=prms.Reader.sep) | dump data to csv | entailment |
def ram_dumper(**kwargs):
"""Dump data to 'memory' for later usage."""
logging.debug("trying to save stuff in memory")
farms = kwargs["farms"]
experiments = kwargs["experiments"]
engine = kwargs["engine"]
try:
engine_name = engine.__name__
except AttributeError:
engine_name = engine.__dict__.__name__
accepted_engines = ["summary_engine",]
if engine_name in accepted_engines:
logging.debug("found the engine that I will try to dump from: "
f"{engine_name}")
for experiment, farm in zip(experiments, farms):
name = experiment.journal.name
project = experiment.journal.project
experiment.memory_dumped[engine_name] = farm
logging.debug(f"farm put into memory_dumped ({project}::{name})") | Dump data to 'memory' for later usage. | entailment |
def screen_dumper(**kwargs):
"""Dump data to screen."""
farms = kwargs["farms"]
engine = kwargs["engine"]
logging.info("dumping to screen")
print(f"\n[Screen dumper] ({engine})")
try:
if len(farms) == 1:
print(f"You have one farm with little pandas.")
else:
print(f"You have {len(farms)} farms with little pandas.")
except TypeError:
print(" - your farm has burned to the ground.")
else:
for number, farm in enumerate(farms):
print(f"[#{number+1}]You have {len(farm)} "
f"little pandas in this farm.")
for animal in farm:
print(80*"=")
try:
print(animal.name)
except AttributeError:
print("no-name")
print(80*"-")
print(animal.head(5))
print() | Dump data to screen. | entailment |
def create_legend(info, c, option="clean", use_index=False):
"""creating more informative legends"""
logging.debug(" - creating legends")
mass, loading, label = info.loc[c, ["masses", "loadings", "labels"]]
if use_index or not label:
label = c.split("_")
label = "_".join(label[1:])
if option == "clean":
return label
if option == "mass":
label = f"{label} ({mass:.2f} mg)"
elif option == "loading":
label = f"{label} ({loading:.2f} mg/cm2)"
elif option == "all":
label = f"{label} ({mass:.2f} mg) ({loading:.2f} mg/cm2)"
return label | creating more informative legends | entailment |
def create_plot_option_dicts(info, marker_types=None, colors=None,
line_dash=None, size=None):
"""Create two dictionaries with plot-options.
The first iterates colors (based on group-number), the second iterates
through marker types.
Returns: group_styles (dict), sub_group_styles (dict)
"""
logging.debug(" - creating plot-options-dict (for bokeh)")
# Current only works for bokeh
if marker_types is None:
marker_types = ["circle", "square", "triangle", "invertedtriangle",
"diamond", "cross", "asterix"]
if line_dash is None:
line_dash = [0, 0]
if size is None:
size = 10
groups = info.groups.unique()
number_of_groups = len(groups)
if colors is None:
if number_of_groups < 4:
# print("using 3")
colors = bokeh.palettes.brewer['YlGnBu'][3]
else:
# print(f"using {min(9, number_of_groups)}")
colors = bokeh.palettes.brewer['YlGnBu'][min(9, number_of_groups)]
sub_groups = info.sub_groups.unique()
marker_it = itertools.cycle(marker_types)
colors_it = itertools.cycle(colors)
group_styles = dict()
sub_group_styles = dict()
for j in groups:
color = next(colors_it)
marker_options = {
"line_color": color,
"fill_color": color,
}
line_options = {
"line_color": color,
}
group_styles[j] = {
"marker": marker_options,
"line": line_options,
}
for j in sub_groups:
marker_type = next(marker_it)
marker_options = {
"marker": marker_type,
"size": size,
}
line_options = {
"line_dash": line_dash,
}
sub_group_styles[j] = {
"marker": marker_options,
"line": line_options,
}
return group_styles, sub_group_styles | Create two dictionaries with plot-options.
The first iterates colors (based on group-number), the second iterates
through marker types.
Returns: group_styles (dict), sub_group_styles (dict) | entailment |
def summary_plotting_engine(**kwargs):
"""creates plots of summary data."""
logging.debug(f"Using {prms.Batch.backend} for plotting")
experiments = kwargs["experiments"]
farms = kwargs["farms"]
barn = None
logging.debug(" - summary_plot_engine")
farms = _preparing_data_and_plotting(
experiments=experiments,
farms=farms
)
return farms, barn | creates plots of summary data. | entailment |
def run_dumper(self, dumper):
"""run dumber (once pr. engine)
Args:
dumper: dumper to run (function or method).
The dumper takes the attributes experiments, farms, and barn as input.
It does not return anything. But can, if the dumper designer feels in
a bad and nasty mood, modify the input objects
(for example experiments).
"""
logging.debug("start dumper::")
dumper(
experiments=self.experiments,
farms=self.farms,
barn=self.barn,
engine=self.current_engine,
)
logging.debug("::dumper ended") | run dumber (once pr. engine)
Args:
dumper: dumper to run (function or method).
The dumper takes the attributes experiments, farms, and barn as input.
It does not return anything. But can, if the dumper designer feels in
a bad and nasty mood, modify the input objects
(for example experiments). | entailment |
def _comports():
'''
Returns
-------
pandas.DataFrame
Table containing descriptor, and hardware ID of each available COM
port, indexed by port (e.g., "COM4").
'''
return (pd.DataFrame(list(map(list, serial.tools.list_ports.comports())),
columns=['port', 'descriptor', 'hardware_id'])
.set_index('port')) | Returns
-------
pandas.DataFrame
Table containing descriptor, and hardware ID of each available COM
port, indexed by port (e.g., "COM4"). | entailment |
def comports(vid_pid=None, include_all=False, check_available=True,
only_available=False):
'''
.. versionchanged:: 0.9
Add :data:`check_available` keyword argument to optionally check if
each port is actually available by attempting to open a temporary
connection.
Add :data:`only_available` keyword argument to only include ports that
are actually available for connection.
Parameters
----------
vid_pid : str or list, optional
One or more USB vendor/product IDs to match.
Each USB vendor/product must be in the form ``'<vid>:<pid>'``.
For example, ``'2341:0010'``.
include_all : bool, optional
If ``True``, include all available serial ports, but sort rows such
that ports matching specified USB vendor/product IDs come first.
If ``False``, only include ports that match specified USB
vendor/product IDs.
check_available : bool, optional
If ``True``, check if each port is actually available by attempting to
open a temporary connection.
only_available : bool, optional
If ``True``, only include ports that are available.
Returns
-------
pandas.DataFrame
Table containing descriptor and hardware ID of each COM port, indexed
by port (e.g., "COM4").
.. versionchanged:: 0.9
If :data:`check_available` is ``True``, add an ``available`` column
to the table indicating whether each port accepted a connection.
'''
df_comports = _comports()
# Extract USB product and vendor IDs from `hwid` entries of the form:
#
# FTDIBUS\VID_0403+PID_6001+A60081GEA\0000
df_hwid = (df_comports.hardware_id.str.lower().str
.extract('vid_(?P<vid>[0-9a-f]+)\+pid_(?P<pid>[0-9a-f]+)',
expand=True))
# Extract USB product and vendor IDs from `hwid` entries of the form:
#
# USB VID:PID=16C0:0483 SNR=2145930
no_id_mask = df_hwid.vid.isnull()
df_hwid.loc[no_id_mask] = (df_comports.loc[no_id_mask, 'hardware_id']
.str.lower().str
.extract('vid:pid=(?P<vid>[0-9a-f]+):'
'(?P<pid>[0-9a-f]+)', expand=True))
df_comports = df_comports.join(df_hwid)
if vid_pid is not None:
if isinstance(vid_pid, six.string_types):
# Single USB vendor/product ID specified.
vid_pid = [vid_pid]
# Mark ports that match specified USB vendor/product IDs.
df_comports['include'] = (df_comports.vid + ':' +
df_comports.pid).isin(map(str.lower,
vid_pid))
if include_all:
# All ports should be included, but sort rows such that ports
# matching specified USB vendor/product IDs come first.
df_comports = (df_comports.sort_values('include', ascending=False)
.drop('include', axis=1))
else:
# Only include ports that match specified USB vendor/product IDs.
df_comports = (df_comports.loc[df_comports.include]
.drop('include', axis=1))
if check_available or only_available:
# Add `available` column indicating whether each port accepted a
# connection. A port may not, for example, accept a connection if the
# port is already open.
available = []
for name_i, port_info_i in df_comports.iterrows():
try:
connection = serial.Serial(port=name_i)
connection.close()
available.append(True)
except serial.SerialException:
available.append(False)
df_comports['available'] = available
if only_available:
df_comports = df_comports.loc[df_comports.available]
if not check_available:
del df_comports['available']
return df_comports | .. versionchanged:: 0.9
Add :data:`check_available` keyword argument to optionally check if
each port is actually available by attempting to open a temporary
connection.
Add :data:`only_available` keyword argument to only include ports that
are actually available for connection.
Parameters
----------
vid_pid : str or list, optional
One or more USB vendor/product IDs to match.
Each USB vendor/product must be in the form ``'<vid>:<pid>'``.
For example, ``'2341:0010'``.
include_all : bool, optional
If ``True``, include all available serial ports, but sort rows such
that ports matching specified USB vendor/product IDs come first.
If ``False``, only include ports that match specified USB
vendor/product IDs.
check_available : bool, optional
If ``True``, check if each port is actually available by attempting to
open a temporary connection.
only_available : bool, optional
If ``True``, only include ports that are available.
Returns
-------
pandas.DataFrame
Table containing descriptor and hardware ID of each COM port, indexed
by port (e.g., "COM4").
.. versionchanged:: 0.9
If :data:`check_available` is ``True``, add an ``available`` column
to the table indicating whether each port accepted a connection. | entailment |
def _get_serial_ports_windows():
'''
Uses the Win32 registry to return a iterator of serial (COM) ports existing
on this computer.
See http://stackoverflow.com/questions/1205383/listing-serial-com-ports-on-windows
'''
import six.moves.winreg as winreg
reg_path = 'HARDWARE\\DEVICEMAP\\SERIALCOMM'
try:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, reg_path)
except WindowsError:
# No serial ports. Return empty generator.
return
for i in itertools.count():
try:
val = winreg.EnumValue(key, i)
yield str(val[1])
except EnvironmentError:
break | Uses the Win32 registry to return a iterator of serial (COM) ports existing
on this computer.
See http://stackoverflow.com/questions/1205383/listing-serial-com-ports-on-windows | entailment |
def get_port(self, baud_rate):
'''
Using the specified baud-rate, attempt to connect to each available
serial port. If the `test_connection()` method returns `True` for a
port, update the `port` attribute and return the port.
In the case where the `test_connection()` does not return `True` for
any of the evaluated ports, raise a `ConnectionError`.
'''
self.port = None
for test_port in get_serial_ports():
if self.test_connection(test_port, baud_rate):
self.port = test_port
break
sleep(0.1)
if self.port is None:
raise ConnectionError('Could not connect to serial device.')
return self.port | Using the specified baud-rate, attempt to connect to each available
serial port. If the `test_connection()` method returns `True` for a
port, update the `port` attribute and return the port.
In the case where the `test_connection()` does not return `True` for
any of the evaluated ports, raise a `ConnectionError`. | entailment |
def _read(name):
"""read the yml file"""
logging.debug("Reading config-file: %s" % name)
try:
with open(name, "r") as config_file:
prm_dict = yaml.load(config_file)
except yaml.YAMLError:
raise yaml.YAMLErrorr
else:
return prm_dict | read the yml file | entailment |
def cycles_engine(**kwargs):
"""engine to extract cycles"""
logging.info("cycles_engine:")
logging.info("Not ready for production")
# raise NotImplementedError
experiments = kwargs["experiments"]
farms = []
barn = "raw_dir" # Its a murder in the red barn - murder in the red barn
for experiment in experiments:
farms.append([])
if experiment.all_in_memory:
logging.debug("all in memory")
for key in experiment.cell_data_frames:
logging.debug(f"extracting cycles from {key}")
else:
logging.debug("dont have it in memory - need to lookup in the files")
for key in experiment.cell_data_frames:
logging.debug(f"looking up cellpyfile for {key}")
return farms, barn | engine to extract cycles | entailment |
def raw_data_engine(**kwargs):
"""engine to extract raw data"""
logger.debug("cycles_engine")
raise NotImplementedError
experiments = kwargs["experiments"]
farms = []
barn = "raw_dir"
for experiment in experiments:
farms.append([])
return farms, barn | engine to extract raw data | entailment |
def summary_engine(**kwargs):
"""engine to extract summary data"""
logger.debug("summary_engine")
# farms = kwargs["farms"]
farms = []
experiments = kwargs["experiments"]
for experiment in experiments:
if experiment.selected_summaries is None:
selected_summaries = [
"discharge_capacity", "charge_capacity",
"coulombic_efficiency",
"cumulated_coulombic_efficiency",
"ir_discharge", "ir_charge",
"end_voltage_discharge", "end_voltage_charge",
]
else:
selected_summaries = experiment.selected_summaries
farm = helper.join_summaries(
experiment.summary_frames,
selected_summaries
)
farms.append(farm)
barn = "batch_dir"
return farms, barn | engine to extract summary data | entailment |
def simple_db_engine(reader=None, srnos=None):
"""engine that gets values from the simple excel 'db'"""
if reader is None:
reader = dbreader.Reader()
logger.debug("No reader provided. Creating one myself.")
info_dict = dict()
info_dict["filenames"] = [reader.get_cell_name(srno) for srno in srnos]
info_dict["masses"] = [reader.get_mass(srno) for srno in srnos]
info_dict["total_masses"] = [reader.get_total_mass(srno) for srno in srnos]
info_dict["loadings"] = [reader.get_loading(srno) for srno in srnos]
info_dict["fixed"] = [reader.inspect_hd5f_fixed(srno) for srno in srnos]
info_dict["labels"] = [reader.get_label(srno) for srno in srnos]
info_dict["cell_type"] = [reader.get_cell_type(srno) for srno in srnos]
info_dict["raw_file_names"] = []
info_dict["cellpy_file_names"] = []
logger.debug("created info-dict")
for key in list(info_dict.keys()):
logger.debug("%s: %s" % (key, str(info_dict[key])))
_groups = [reader.get_group(srno) for srno in srnos]
logger.debug(">\ngroups: %s" % str(_groups))
groups = helper.fix_groups(_groups)
info_dict["groups"] = groups
my_timer_start = time.time()
filename_cache = []
info_dict = helper.find_files(info_dict, filename_cache)
my_timer_end = time.time()
if (my_timer_end - my_timer_start) > 5.0:
logger.info(
"The function _find_files was very slow. "
"Save your info_df so you don't have to run it again!"
)
info_df = pd.DataFrame(info_dict)
info_df = info_df.sort_values(["groups", "filenames"])
info_df = helper.make_unique_groups(info_df)
info_df["labels"] = info_df["filenames"].apply(helper.create_labels)
info_df.set_index("filenames", inplace=True)
return info_df | engine that gets values from the simple excel 'db | entailment |
def orify(event, changed_callback):
'''
Override ``set`` and ``clear`` methods on event to call specified callback
function after performing default behaviour.
Parameters
----------
'''
event.changed = changed_callback
if not hasattr(event, '_set'):
# `set`/`clear` methods have not been overridden on event yet.
# Override methods to call `changed_callback` after performing default
# action.
event._set = event.set
event._clear = event.clear
event.set = lambda: or_set(event)
event.clear = lambda: or_clear(event) | Override ``set`` and ``clear`` methods on event to call specified callback
function after performing default behaviour.
Parameters
---------- | entailment |
def OrEvent(*events):
'''
Parameters
----------
events : list(threading.Event)
List of events.
Returns
-------
threading.Event
Event that is set when **at least one** of the events in :data:`events`
is set.
'''
or_event = threading.Event()
def changed():
'''
Set ``or_event`` if any of the specified events have been set.
'''
bools = [event_i.is_set() for event_i in events]
if any(bools):
or_event.set()
else:
or_event.clear()
for event_i in events:
# Override ``set`` and ``clear`` methods on event to update state of
# `or_event` after performing default behaviour.
orify(event_i, changed)
# Set initial state of `or_event`.
changed()
return or_event | Parameters
----------
events : list(threading.Event)
List of events.
Returns
-------
threading.Event
Event that is set when **at least one** of the events in :data:`events`
is set. | entailment |
def request(device, response_queue, payload, timeout_s=None, poll=POLL_QUEUES):
'''
Send payload to serial device and wait for response.
Parameters
----------
device : serial.Serial
Serial instance.
response_queue : Queue.Queue
Queue to wait for response on.
payload : str or bytes
Payload to send.
timeout_s : float, optional
Maximum time to wait (in seconds) for response.
By default, block until response is ready.
poll : bool, optional
If ``True``, poll response queue in a busy loop until response is
ready (or timeout occurs).
Polling is much more processor intensive, but (at least on Windows)
results in faster response processing. On Windows, polling is
enabled by default.
'''
device.write(payload)
if poll:
# Polling enabled. Wait for response in busy loop.
start = dt.datetime.now()
while not response_queue.qsize():
if (dt.datetime.now() - start).total_seconds() > timeout_s:
raise queue.Empty('No response received.')
return response_queue.get()
else:
# Polling disabled. Use blocking `Queue.get()` method to wait for
# response.
return response_queue.get(timeout=timeout_s) | Send payload to serial device and wait for response.
Parameters
----------
device : serial.Serial
Serial instance.
response_queue : Queue.Queue
Queue to wait for response on.
payload : str or bytes
Payload to send.
timeout_s : float, optional
Maximum time to wait (in seconds) for response.
By default, block until response is ready.
poll : bool, optional
If ``True``, poll response queue in a busy loop until response is
ready (or timeout occurs).
Polling is much more processor intensive, but (at least on Windows)
results in faster response processing. On Windows, polling is
enabled by default. | entailment |
def connection_made(self, transport):
"""Called when reader thread is started"""
self.port = transport.serial.port
logger.debug('connection_made: `%s` `%s`', self.port, transport)
self.transport = transport
self.connected.set()
self.disconnected.clear() | Called when reader thread is started | entailment |
def connection_lost(self, exception):
"""\
Called when the serial port is closed or the reader loop terminated
otherwise.
"""
if isinstance(exception, Exception):
logger.debug('Connection to port `%s` lost: %s', self.port,
exception)
else:
logger.debug('Connection to port `%s` closed', self.port)
self.connected.clear()
self.disconnected.set() | \
Called when the serial port is closed or the reader loop terminated
otherwise. | entailment |
def write(self, data, timeout_s=None):
'''
Write to serial port.
Waits for serial connection to be established before writing.
Parameters
----------
data : str or bytes
Data to write to serial port.
timeout_s : float, optional
Maximum number of seconds to wait for serial connection to be
established.
By default, block until serial connection is ready.
'''
self.connected.wait(timeout_s)
self.protocol.transport.write(data) | Write to serial port.
Waits for serial connection to be established before writing.
Parameters
----------
data : str or bytes
Data to write to serial port.
timeout_s : float, optional
Maximum number of seconds to wait for serial connection to be
established.
By default, block until serial connection is ready. | entailment |
def request(self, response_queue, payload, timeout_s=None,
poll=POLL_QUEUES):
'''
Send
Parameters
----------
device : serial.Serial
Serial instance.
response_queue : Queue.Queue
Queue to wait for response on.
payload : str or bytes
Payload to send.
timeout_s : float, optional
Maximum time to wait (in seconds) for response.
By default, block until response is ready.
poll : bool, optional
If ``True``, poll response queue in a busy loop until response is
ready (or timeout occurs).
Polling is much more processor intensive, but (at least on Windows)
results in faster response processing. On Windows, polling is
enabled by default.
'''
self.connected.wait(timeout_s)
return request(self, response_queue, payload, timeout_s=timeout_s,
poll=poll) | Send
Parameters
----------
device : serial.Serial
Serial instance.
response_queue : Queue.Queue
Queue to wait for response on.
payload : str or bytes
Payload to send.
timeout_s : float, optional
Maximum time to wait (in seconds) for response.
By default, block until response is ready.
poll : bool, optional
If ``True``, poll response queue in a busy loop until response is
ready (or timeout occurs).
Polling is much more processor intensive, but (at least on Windows)
results in faster response processing. On Windows, polling is
enabled by default. | entailment |
def fieldname_to_dtype(fieldname):
"""Converts a column header from the MPT file into a tuple of
canonical name and appropriate numpy dtype"""
if fieldname == 'mode':
return ('mode', np.uint8)
elif fieldname in ("ox/red", "error", "control changes", "Ns changes",
"counter inc."):
return (fieldname, np.bool_)
elif fieldname in ("time/s", "P/W", "(Q-Qo)/mA.h", "x", "control/V",
"control/V/mA", "(Q-Qo)/C", "dQ/C", "freq/Hz",
"|Ewe|/V", "|I|/A", "Phase(Z)/deg", "|Z|/Ohm",
"Re(Z)/Ohm", "-Im(Z)/Ohm"):
return (fieldname, np.float_)
elif fieldname in ("cycle number", "I Range", "Ns", "half cycle"):
return (fieldname, np.int_)
elif fieldname in ("dq/mA.h", "dQ/mA.h"):
return ("dQ/mA.h", np.float_)
elif fieldname in ("I/mA", "<I>/mA"):
return ("I/mA", np.float_)
elif fieldname in ("Ewe/V", "<Ewe>/V"):
return ("Ewe/V", np.float_)
else:
raise ValueError("Invalid column header: %s" % fieldname) | Converts a column header from the MPT file into a tuple of
canonical name and appropriate numpy dtype | entailment |
def comma_converter(float_string):
"""Convert numbers to floats whether the decimal point is '.' or ','"""
trans_table = maketrans(b',', b'.')
return float(float_string.translate(trans_table)) | Convert numbers to floats whether the decimal point is '.' or ', | entailment |
def MPTfile(file_or_path):
"""Opens .mpt files as numpy record arrays
Checks for the correct headings, skips any comments and returns a
numpy record array object and a list of comments
"""
if isinstance(file_or_path, str):
mpt_file = open(file_or_path, 'rb')
else:
mpt_file = file_or_path
magic = next(mpt_file)
if magic != b'EC-Lab ASCII FILE\r\n':
raise ValueError("Bad first line for EC-Lab file: '%s'" % magic)
nb_headers_match = re.match(b'Nb header lines : (\d+)\s*$', next(mpt_file))
nb_headers = int(nb_headers_match.group(1))
if nb_headers < 3:
raise ValueError("Too few header lines: %d" % nb_headers)
## The 'magic number' line, the 'Nb headers' line and the column headers
## make three lines. Every additional line is a comment line.
comments = [next(mpt_file) for i in range(nb_headers - 3)]
fieldnames = str3(next(mpt_file)).strip().split('\t')
record_type = np.dtype(list(map(fieldname_to_dtype, fieldnames)))
## Must be able to parse files where commas are used for decimal points
converter_dict = dict(((i, comma_converter)
for i in range(len(fieldnames))))
mpt_array = np.loadtxt(mpt_file, dtype=record_type,
converters=converter_dict)
return mpt_array, comments | Opens .mpt files as numpy record arrays
Checks for the correct headings, skips any comments and returns a
numpy record array object and a list of comments | entailment |
def MPTfileCSV(file_or_path):
"""Simple function to open MPT files as csv.DictReader objects
Checks for the correct headings, skips any comments and returns a
csv.DictReader object and a list of comments
"""
if isinstance(file_or_path, str):
mpt_file = open(file_or_path, 'r')
else:
mpt_file = file_or_path
magic = next(mpt_file)
if magic.rstrip() != 'EC-Lab ASCII FILE':
raise ValueError("Bad first line for EC-Lab file: '%s'" % magic)
nb_headers_match = re.match('Nb header lines : (\d+)\s*$', next(mpt_file))
nb_headers = int(nb_headers_match.group(1))
if nb_headers < 3:
raise ValueError("Too few header lines: %d" % nb_headers)
## The 'magic number' line, the 'Nb headers' line and the column headers
## make three lines. Every additional line is a comment line.
comments = [next(mpt_file) for i in range(nb_headers - 3)]
mpt_csv = csv.DictReader(mpt_file, dialect='excel-tab')
expected_fieldnames = (
["mode", "ox/red", "error", "control changes", "Ns changes",
"counter inc.", "time/s", "control/V/mA", "Ewe/V", "dq/mA.h",
"P/W", "<I>/mA", "(Q-Qo)/mA.h", "x"],
['mode', 'ox/red', 'error', 'control changes', 'Ns changes',
'counter inc.', 'time/s', 'control/V', 'Ewe/V', 'dq/mA.h',
'<I>/mA', '(Q-Qo)/mA.h', 'x'],
["mode", "ox/red", "error", "control changes", "Ns changes",
"counter inc.", "time/s", "control/V", "Ewe/V", "I/mA",
"dQ/mA.h", "P/W"],
["mode", "ox/red", "error", "control changes", "Ns changes",
"counter inc.", "time/s", "control/V", "Ewe/V", "<I>/mA",
"dQ/mA.h", "P/W"])
if mpt_csv.fieldnames not in expected_fieldnames:
raise ValueError("Unrecognised headers for MPT file format")
return mpt_csv, comments | Simple function to open MPT files as csv.DictReader objects
Checks for the correct headings, skips any comments and returns a
csv.DictReader object and a list of comments | entailment |
def read_VMP_modules(fileobj, read_module_data=True):
"""Reads in module headers in the VMPmodule_hdr format. Yields a dict with
the headers and offset for each module.
N.B. the offset yielded is the offset to the start of the data i.e. after
the end of the header. The data runs from (offset) to (offset+length)"""
while True:
module_magic = fileobj.read(len(b'MODULE'))
if len(module_magic) == 0: # end of file
raise StopIteration
elif module_magic != b'MODULE':
raise ValueError(
"Found %r, expecting start of new VMP MODULE" % module_magic)
hdr_bytes = fileobj.read(VMPmodule_hdr.itemsize)
if len(hdr_bytes) < VMPmodule_hdr.itemsize:
raise IOError("Unexpected end of file while reading module header")
hdr = np.fromstring(hdr_bytes, dtype=VMPmodule_hdr, count=1)
hdr_dict = dict(((n, hdr[n][0]) for n in VMPmodule_hdr.names))
print("---hdr-dict---")
pprint(hdr_dict)
hdr_dict['offset'] = fileobj.tell()
if read_module_data:
hdr_dict['data'] = fileobj.read(hdr_dict['length'])
if len(hdr_dict['data']) != hdr_dict['length']:
raise IOError("""Unexpected end of file while reading data
current module: %s
length read: %d
length expected: %d""" % (hdr_dict['longname'],
len(hdr_dict['data']),
hdr_dict['length']))
yield hdr_dict
else:
yield hdr_dict
fileobj.seek(hdr_dict['offset'] + hdr_dict['length'], SEEK_SET) | Reads in module headers in the VMPmodule_hdr format. Yields a dict with
the headers and offset for each module.
N.B. the offset yielded is the offset to the start of the data i.e. after
the end of the header. The data runs from (offset) to (offset+length) | entailment |
def get_headers_global():
"""Defines the so-called global column headings for Arbin .res-files"""
headers = dict()
# - global column headings (specific for Arbin)
headers["applications_path_txt"] = 'Applications_Path'
headers["channel_index_txt"] = 'Channel_Index'
headers["channel_number_txt"] = 'Channel_Number'
headers["channel_type_txt"] = 'Channel_Type'
headers["comments_txt"] = 'Comments'
headers["creator_txt"] = 'Creator'
headers["daq_index_txt"] = 'DAQ_Index'
headers["item_id_txt"] = 'Item_ID'
headers["log_aux_data_flag_txt"] = 'Log_Aux_Data_Flag'
headers["log_chanstat_data_flag_txt"] = 'Log_ChanStat_Data_Flag'
headers["log_event_data_flag_txt"] = 'Log_Event_Data_Flag'
headers["log_smart_battery_data_flag_txt"] = 'Log_Smart_Battery_Data_Flag'
headers["mapped_aux_conc_cnumber_txt"] = 'Mapped_Aux_Conc_CNumber'
headers["mapped_aux_di_cnumber_txt"] = 'Mapped_Aux_DI_CNumber'
headers["mapped_aux_do_cnumber_txt"] = 'Mapped_Aux_DO_CNumber'
headers["mapped_aux_flow_rate_cnumber_txt"] = 'Mapped_Aux_Flow_Rate_CNumber'
headers["mapped_aux_ph_number_txt"] = 'Mapped_Aux_PH_Number'
headers["mapped_aux_pressure_number_txt"] = 'Mapped_Aux_Pressure_Number'
headers["mapped_aux_temperature_number_txt"] = 'Mapped_Aux_Temperature_Number'
headers["mapped_aux_voltage_number_txt"] = 'Mapped_Aux_Voltage_Number'
headers["schedule_file_name_txt"] = 'Schedule_File_Name' # KEEP FOR CELLPY FILE FORMAT
headers["start_datetime_txt"] = 'Start_DateTime'
headers["test_id_txt"] = 'Test_ID' # KEEP FOR CELLPY FILE FORMAT
headers["test_name_txt"] = 'Test_Name' # KEEP FOR CELLPY FILE FORMAT
return headers | Defines the so-called global column headings for Arbin .res-files | entailment |
def inspect(self, run_data):
"""Inspect the file -> reports to log (debug)"""
# TODO: type checking
if DEBUG_MODE:
checked_rundata = []
for data in run_data:
new_cols = data.dfdata.columns
for col in self.headers_normal:
if col not in new_cols:
logging.debug(f"Missing col: {col}")
# data.dfdata[col] = np.nan
checked_rundata.append(data)
else:
checked_rundata = run_data
return checked_rundata | Inspect the file -> reports to log (debug) | entailment |
def _iterdump(self, file_name, headers=None):
"""
Function for dumping values from a file.
Should only be used by developers.
Args:
file_name: name of the file
headers: list of headers to pick
default:
["Discharge_Capacity", "Charge_Capacity"]
Returns: pandas.DataFrame
"""
if headers is None:
headers = ["Discharge_Capacity", "Charge_Capacity"]
step_txt = self.headers_normal['step_index_txt']
point_txt = self.headers_normal['data_point_txt']
cycle_txt = self.headers_normal['cycle_index_txt']
self.logger.debug("iterating through file: %s" % file_name)
if not os.path.isfile(file_name):
print("Missing file_\n %s" % file_name)
filesize = os.path.getsize(file_name)
hfilesize = humanize_bytes(filesize)
txt = "Filesize: %i (%s)" % (filesize, hfilesize)
self.logger.info(txt)
table_name_global = TABLE_NAMES["global"]
table_name_stats = TABLE_NAMES["statistic"]
table_name_normal = TABLE_NAMES["normal"]
# creating temporary file and connection
temp_dir = tempfile.gettempdir()
temp_filename = os.path.join(temp_dir, os.path.basename(file_name))
shutil.copy2(file_name, temp_dir)
constr = self.__get_res_connector(temp_filename)
if use_ado:
conn = dbloader.connect(constr)
else:
conn = dbloader.connect(constr, autocommit=True)
self.logger.debug("tmp file: %s" % temp_filename)
self.logger.debug("constr str: %s" % constr)
# --------- read global-data ------------------------------------
self.logger.debug("reading global data table")
sql = "select * from %s" % table_name_global
global_data_df = pd.read_sql_query(sql, conn)
# col_names = list(global_data_df.columns.values)
self.logger.debug("sql statement: %s" % sql)
tests = global_data_df[self.headers_normal['test_id_txt']]
number_of_sets = len(tests)
self.logger.debug("number of datasets: %i" % number_of_sets)
self.logger.debug("only selecting first test")
test_no = 0
self.logger.debug("setting data for test number %i" % test_no)
loaded_from = file_name
# fid = FileID(file_name)
start_datetime = global_data_df[self.headers_global['start_datetime_txt']][test_no]
test_ID = int(global_data_df[self.headers_normal['test_id_txt']][test_no]) # OBS
test_name = global_data_df[self.headers_global['test_name_txt']][test_no]
# --------- read raw-data (normal-data) -------------------------
self.logger.debug("reading raw-data")
columns = ["Data_Point", "Step_Index", "Cycle_Index"]
columns.extend(headers)
columns_txt = ", ".join(["%s"] * len(columns)) % tuple(columns)
sql_1 = "select %s " % columns_txt
sql_2 = "from %s " % table_name_normal
sql_3 = "where %s=%s " % (self.headers_normal['test_id_txt'], test_ID)
sql_5 = "order by %s" % self.headers_normal['data_point_txt']
import time
info_list = []
info_header = ["cycle", "row_count", "start_point", "end_point"]
info_header.extend(headers)
self.logger.info(" ".join(info_header))
self.logger.info("-------------------------------------------------")
for cycle_number in range(1, 2000):
t1 = time.time()
self.logger.debug("picking cycle %i" % cycle_number)
sql_4 = "AND %s=%i " % (cycle_txt, cycle_number)
sql = sql_1 + sql_2 + sql_3 + sql_4 + sql_5
self.logger.debug("sql statement: %s" % sql)
normal_df = pd.read_sql_query(sql, conn)
t2 = time.time()
dt = t2 - t1
self.logger.debug("time: %f" % dt)
if normal_df.empty:
self.logger.debug("reached the end")
break
row_count, _ = normal_df.shape
start_point = normal_df[point_txt].min()
end_point = normal_df[point_txt].max()
last = normal_df.iloc[-1, :]
step_list = [cycle_number, row_count, start_point, end_point]
step_list.extend([last[x] for x in headers])
info_list.append(step_list)
self._clean_up_loadres(None, conn, temp_filename)
info_dict = pd.DataFrame(info_list, columns=info_header)
return info_dict | Function for dumping values from a file.
Should only be used by developers.
Args:
file_name: name of the file
headers: list of headers to pick
default:
["Discharge_Capacity", "Charge_Capacity"]
Returns: pandas.DataFrame | entailment |
def loader(self, file_name, bad_steps=None, **kwargs):
"""Loads data from arbin .res files.
Args:
file_name (str): path to .res file.
bad_steps (list of tuples): (c, s) tuples of steps s (in cycle c) to skip loading.
Returns:
new_tests (list of data objects)
"""
# TODO: @jepe - insert kwargs - current chunk, only normal data, etc
new_tests = []
if not os.path.isfile(file_name):
self.logger.info("Missing file_\n %s" % file_name)
return None
self.logger.debug("in loader")
self.logger.debug("filename: %s" % file_name)
filesize = os.path.getsize(file_name)
hfilesize = humanize_bytes(filesize)
txt = "Filesize: %i (%s)" % (filesize, hfilesize)
self.logger.debug(txt)
if filesize > prms.Instruments["max_res_filesize"] and not prms.Reader["load_only_summary"]:
error_message = "\nERROR (loader):\n"
error_message += "%s > %s - File is too big!\n" % (
hfilesize, humanize_bytes(prms.Instruments["max_res_filesize"]))
error_message += "(edit prms.Instruments['max_res_filesize'])\n"
print(error_message)
return None
table_name_global = TABLE_NAMES["global"]
table_name_stats = TABLE_NAMES["statistic"]
table_name_normal = TABLE_NAMES["normal"]
# creating temporary file and connection
temp_dir = tempfile.gettempdir()
temp_filename = os.path.join(temp_dir, os.path.basename(file_name))
shutil.copy2(file_name, temp_dir)
self.logger.debug("tmp file: %s" % temp_filename)
use_mdbtools = False
if use_subprocess:
use_mdbtools = True
if is_posix:
use_mdbtools = True
# windows with same python bit as windows bit (the ideal case)
if not use_mdbtools:
constr = self.__get_res_connector(temp_filename)
if use_ado:
conn = dbloader.connect(constr)
else:
conn = dbloader.connect(constr, autocommit=True)
self.logger.debug("constr str: %s" % constr)
self.logger.debug("reading global data table")
sql = "select * from %s" % table_name_global
self.logger.debug("sql statement: %s" % sql)
global_data_df = pd.read_sql_query(sql, conn)
# col_names = list(global_data_df.columns.values)
else:
import subprocess
if is_posix:
if is_macos:
self.logger.debug("\nMAC OSX USING MDBTOOLS")
else:
self.logger.debug("\nPOSIX USING MDBTOOLS")
else:
self.logger.debug("\nWINDOWS USING MDBTOOLS-WIN")
# creating tmp-filenames
temp_csv_filename_global = os.path.join(temp_dir, "global_tmp.csv")
temp_csv_filename_normal = os.path.join(temp_dir, "normal_tmp.csv")
temp_csv_filename_stats = os.path.join(temp_dir, "stats_tmp.csv")
# making the cmds
mdb_prms = [(table_name_global, temp_csv_filename_global), (table_name_normal, temp_csv_filename_normal),
(table_name_stats, temp_csv_filename_stats)]
# executing cmds
for table_name, tmp_file in mdb_prms:
with open(tmp_file, "w") as f:
subprocess.call([sub_process_path, temp_filename, table_name], stdout=f)
self.logger.debug(f"ran mdb-export {str(f)} {table_name}")
# use pandas to load in the data
global_data_df = pd.read_csv(temp_csv_filename_global)
tests = global_data_df[self.headers_normal['test_id_txt']] # OBS
number_of_sets = len(tests)
self.logger.debug("number of datasets: %i" % number_of_sets)
for counter, test_no in enumerate(range(number_of_sets)):
if counter > 0:
self.logger.warning("***MULTITEST-FILE (not recommended)")
if not ALLOW_MULTI_TEST_FILE:
break
data = DataSet()
data.test_no = test_no
data.loaded_from = file_name
fid = FileID(file_name)
# data.parent_filename = os.path.basename(file_name)# name of the .res file it is loaded from
data.channel_index = int(global_data_df[self.headers_global['channel_index_txt']][test_no])
data.channel_number = int(global_data_df[self.headers_global['channel_number_txt']][test_no])
data.creator = global_data_df[self.headers_global['creator_txt']][test_no]
data.item_ID = global_data_df[self.headers_global['item_id_txt']][test_no]
data.schedule_file_name = global_data_df[self.headers_global['schedule_file_name_txt']][test_no]
data.start_datetime = global_data_df[self.headers_global['start_datetime_txt']][test_no]
data.test_ID = int(global_data_df[self.headers_normal['test_id_txt']][test_no]) # OBS
data.test_name = global_data_df[self.headers_global['test_name_txt']][test_no]
data.raw_data_files.append(fid)
self.logger.debug("reading raw-data")
if not use_mdbtools:
# --------- read raw-data (normal-data) -------------------------
length_of_test, normal_df = self._load_res_normal_table(conn, data.test_ID, bad_steps)
# --------- read stats-data (summary-data) ----------------------
sql = "select * from %s where %s=%s order by %s" % (table_name_stats,
self.headers_normal['test_id_txt'],
data.test_ID,
self.headers_normal['data_point_txt'])
summary_df = pd.read_sql_query(sql, conn)
if counter > number_of_sets:
self._clean_up_loadres(None, conn, temp_filename)
else:
normal_df = pd.read_csv(temp_csv_filename_normal)
# filter on test ID
normal_df = normal_df[normal_df[self.headers_normal['test_id_txt']] == data.test_ID]
# sort on data point
if prms._sort_if_subprocess:
normal_df = normal_df.sort_values(self.headers_normal['data_point_txt'])
length_of_test = normal_df.shape[0]
summary_df = pd.read_csv(temp_csv_filename_stats)
# clean up
for f in [temp_filename, temp_csv_filename_stats, temp_csv_filename_normal,
temp_csv_filename_global]:
if os.path.isfile(f):
try:
os.remove(f)
except WindowsError as e:
self.logger.warning(
f"could not remove tmp-file\n{f} {e}"
)
if summary_df.empty and prms.Reader.use_cellpy_stat_file:
txt = "\nCould not find any summary (stats-file)!"
txt += "\n -> issue make_summary(use_cellpy_stat_file=False)"
logging.debug(txt)
# normal_df = normal_df.set_index("Data_Point")
data.dfsummary = summary_df
data.dfdata = normal_df
data.raw_data_files_length.append(length_of_test)
new_tests.append(data)
return new_tests | Loads data from arbin .res files.
Args:
file_name (str): path to .res file.
bad_steps (list of tuples): (c, s) tuples of steps s (in cycle c) to skip loading.
Returns:
new_tests (list of data objects) | entailment |
def _save_multi(data, file_name, sep=";"):
"""convenience function for storing data column-wise in a csv-file."""
logger.debug("saving multi")
with open(file_name, "w", newline='') as f:
logger.debug(f"{file_name} opened")
writer = csv.writer(f, delimiter=sep)
try:
writer.writerows(itertools.zip_longest(*data))
except Exception as e:
logger.info(f"Exception encountered in batch._save_multi: {e}")
raise ExportFailed
logger.debug("wrote rows using itertools in _save_multi") | convenience function for storing data column-wise in a csv-file. | entailment |
def _extract_dqdv(cell_data, extract_func, last_cycle):
"""Simple wrapper around the cellpy.utils.ica.dqdv function."""
from cellpy.utils.ica import dqdv
list_of_cycles = cell_data.get_cycle_numbers()
if last_cycle is not None:
list_of_cycles = [c for c in list_of_cycles if c <= int(last_cycle)]
logger.debug(f"only processing up to cycle {last_cycle}")
logger.debug(f"you have {len(list_of_cycles)} cycles to process")
out_data = []
for cycle in list_of_cycles:
try:
c, v = extract_func(cycle)
v, dq = dqdv(v, c)
v = v.tolist()
dq = dq.tolist()
except NullData as e:
v = list()
dq = list()
logger.info(" Ups! Could not process this (cycle %i)" % cycle)
logger.info(" %s" % e)
header_x = "dQ cycle_no %i" % cycle
header_y = "voltage cycle_no %i" % cycle
dq.insert(0, header_x)
v.insert(0, header_y)
out_data.append(v)
out_data.append(dq)
return out_data | Simple wrapper around the cellpy.utils.ica.dqdv function. | entailment |
def make_df_from_batch(batch_name, batch_col="b01", reader=None, reader_label=None):
"""Create a pandas DataFrame with the info needed for ``cellpy`` to load
the runs.
Args:
batch_name (str): Name of the batch.
batch_col (str): The column where the batch name is in the db.
reader (method): the db-loader method.
reader_label (str): the label for the db-loader (if db-loader method is
not given)
Returns: info_df (pandas DataFrame)
"""
batch_name = batch_name
batch_col = batch_col
logger.debug(f"batch_name, batch_col: {batch_name}, {batch_col}")
if reader is None:
reader_obj = get_db_reader(reader_label)
reader = reader_obj()
srnos = reader.select_batch(batch_name, batch_col)
logger.debug("srnos:" + str(srnos))
info_dict = _create_info_dict(reader, srnos)
info_df = pd.DataFrame(info_dict)
info_df = info_df.sort_values(["groups", "filenames"])
info_df = _make_unique_groups(info_df)
info_df["labels"] = info_df["filenames"].apply(create_labels)
info_df.set_index("filenames", inplace=True)
return info_df | Create a pandas DataFrame with the info needed for ``cellpy`` to load
the runs.
Args:
batch_name (str): Name of the batch.
batch_col (str): The column where the batch name is in the db.
reader (method): the db-loader method.
reader_label (str): the label for the db-loader (if db-loader method is
not given)
Returns: info_df (pandas DataFrame) | entailment |
def create_folder_structure(project_name, batch_name):
"""This function creates a folder structure for the batch project.
The folder structure consists of main working folder ``project_name`
located in the ``outdatadir`` (as defined in the cellpy configuration file)
with a sub-folder named ``batch_name``. It also creates a folder
inside the ``batch_name`` folder for storing the raw data.
If the folders does not exist, they will be made. The function also returns
the name of the info-df.
Args:
project_name: name of the project
batch_name: name of the batch
Returns: (info_file, (project_dir, batch_dir, raw_dir))
"""
out_data_dir = prms.Paths["outdatadir"]
project_dir = os.path.join(out_data_dir, project_name)
batch_dir = os.path.join(project_dir, batch_name)
raw_dir = os.path.join(batch_dir, "raw_data")
# create folders
if not os.path.isdir(project_dir):
os.mkdir(project_dir)
if not os.path.isdir(batch_dir):
os.mkdir(batch_dir)
if not os.path.isdir(raw_dir):
os.mkdir(raw_dir)
# create file-name for the info_df (json)
info_file = "cellpy_batch_%s.json" % batch_name
info_file = os.path.join(project_dir, info_file)
return info_file, (project_dir, batch_dir, raw_dir) | This function creates a folder structure for the batch project.
The folder structure consists of main working folder ``project_name`
located in the ``outdatadir`` (as defined in the cellpy configuration file)
with a sub-folder named ``batch_name``. It also creates a folder
inside the ``batch_name`` folder for storing the raw data.
If the folders does not exist, they will be made. The function also returns
the name of the info-df.
Args:
project_name: name of the project
batch_name: name of the batch
Returns: (info_file, (project_dir, batch_dir, raw_dir)) | entailment |
def read_and_save_data(info_df, raw_dir, sep=";", force_raw=False,
force_cellpy=False,
export_cycles=False, shifted_cycles=False,
export_raw=True,
export_ica=False, save=True, use_cellpy_stat_file=False,
parent_level="CellpyData",
last_cycle=None,
):
"""Reads and saves cell data defined by the info-DataFrame.
The function iterates through the ``info_df`` and loads data from the runs.
It saves individual data for each run (if selected), as well as returns a
list of ``cellpy`` summary DataFrames, a list of the indexes (one for each
run; same as used as index in the ``info_df``), as well as a list with
indexes of runs (cells) where an error was encountered during loading.
Args:
use_cellpy_stat_file: use the stat file to perform the calculations.
info_df: pandas.DataFrame with information about the runs.
raw_dir: path to location where you want to save raw data.
sep: delimiter to use when exporting to csv.
force_raw: load raw data even-though cellpy-file is up-to-date.
force_cellpy: load cellpy files even-though cellpy-file is not
up-to-date.
export_cycles: set to True for exporting cycles to csv.
shifted_cycles: set to True for exporting the cycles with a cumulated
shift.
export_raw: set to True for exporting raw data to csv.
export_ica: set to True for calculating and exporting dQ/dV to csv.
save: set to False to prevent saving a cellpy-file.
parent_level: optional, should use "cellpydata" for older hdf5-files and
default for newer ones.
Returns: frames (list of cellpy summary DataFrames), keys (list of indexes),
errors (list of indexes that encountered errors).
"""
no_export = False
do_export_dqdv = export_ica
keys = []
frames = []
number_of_runs = len(info_df)
counter = 0
errors = []
for indx, row in info_df.iterrows():
counter += 1
h_txt = "[" + counter * "|" + (number_of_runs - counter) * "." + "]"
l_txt = "starting to process file # %i (index=%s)" % (counter, indx)
logger.debug(l_txt)
print(h_txt)
if not row.raw_file_names and not force_cellpy:
logger.info("File(s) not found!")
logger.info(indx)
logger.debug("File(s) not found for index=%s" % indx)
errors.append(indx)
continue
else:
logger.info(f"Processing {indx}")
cell_data = cellreader.CellpyData()
if not force_cellpy:
logger.info("setting cycle mode (%s)..." % row.cell_type)
cell_data.set_cycle_mode(row.cell_type)
logger.info("loading cell")
if not force_cellpy:
logger.info("not forcing")
try:
cell_data.loadcell(raw_files=row.raw_file_names,
cellpy_file=row.cellpy_file_names,
mass=row.masses, summary_on_raw=True,
force_raw=force_raw,
use_cellpy_stat_file=use_cellpy_stat_file)
except Exception as e:
logger.debug('Failed to load: ' + str(e))
errors.append("loadcell:" + str(indx))
continue
else:
logger.info("forcing")
try:
cell_data.load(row.cellpy_file_names, parent_level=parent_level)
except Exception as e:
logger.info(f"Critical exception encountered {type(e)} "
"- skipping this file")
logger.debug('Failed to load. Error-message: ' + str(e))
errors.append("load:" + str(indx))
continue
if not cell_data.check():
logger.info("...not loaded...")
logger.debug("Did not pass check(). Could not load cell!")
errors.append("check:" + str(indx))
continue
logger.info("...loaded successfully...")
keys.append(indx)
summary_tmp = cell_data.dataset.dfsummary
logger.info("Trying to get summary_data")
if summary_tmp is None:
logger.info("No existing summary made - running make_summary")
cell_data.make_summary(find_end_voltage=True, find_ir=True)
if summary_tmp.index.name == b"Cycle_Index":
logger.debug("Strange: 'Cycle_Index' is a byte-string")
summary_tmp.index.name = 'Cycle_Index'
if not summary_tmp.index.name == "Cycle_Index":
logger.debug("Setting index to Cycle_Index")
# check if it is a byte-string
if b"Cycle_Index" in summary_tmp.columns:
logger.debug("Seems to be a byte-string in the column-headers")
summary_tmp.rename(columns={b"Cycle_Index": 'Cycle_Index'},
inplace=True)
summary_tmp.set_index("Cycle_Index", inplace=True)
frames.append(summary_tmp)
if save:
if not row.fixed:
logger.info("saving cell to %s" % row.cellpy_file_names)
cell_data.ensure_step_table = True
cell_data.save(row.cellpy_file_names)
else:
logger.debug("saving cell skipped (set to 'fixed' in info_df)")
if no_export:
continue
if export_raw:
logger.info("exporting csv")
cell_data.to_csv(raw_dir, sep=sep, cycles=export_cycles,
shifted=shifted_cycles, raw=export_raw,
last_cycle=last_cycle)
if do_export_dqdv:
logger.info("exporting dqdv")
try:
export_dqdv(cell_data, savedir=raw_dir, sep=sep,
last_cycle=last_cycle)
except Exception as e:
logging.error("Could not make/export dq/dv data")
logger.debug("Failed to make/export "
"dq/dv data (%s): %s" % (indx, str(e)))
errors.append("ica:" + str(indx))
if len(errors) > 0:
logger.error("Finished with errors!")
logger.debug(errors)
else:
logger.info("Finished")
return frames, keys, errors | Reads and saves cell data defined by the info-DataFrame.
The function iterates through the ``info_df`` and loads data from the runs.
It saves individual data for each run (if selected), as well as returns a
list of ``cellpy`` summary DataFrames, a list of the indexes (one for each
run; same as used as index in the ``info_df``), as well as a list with
indexes of runs (cells) where an error was encountered during loading.
Args:
use_cellpy_stat_file: use the stat file to perform the calculations.
info_df: pandas.DataFrame with information about the runs.
raw_dir: path to location where you want to save raw data.
sep: delimiter to use when exporting to csv.
force_raw: load raw data even-though cellpy-file is up-to-date.
force_cellpy: load cellpy files even-though cellpy-file is not
up-to-date.
export_cycles: set to True for exporting cycles to csv.
shifted_cycles: set to True for exporting the cycles with a cumulated
shift.
export_raw: set to True for exporting raw data to csv.
export_ica: set to True for calculating and exporting dQ/dV to csv.
save: set to False to prevent saving a cellpy-file.
parent_level: optional, should use "cellpydata" for older hdf5-files and
default for newer ones.
Returns: frames (list of cellpy summary DataFrames), keys (list of indexes),
errors (list of indexes that encountered errors). | entailment |
def save_summaries(frames, keys, selected_summaries, batch_dir, batch_name):
"""Writes the summaries to csv-files
Args:
frames: list of ``cellpy`` summary DataFrames
keys: list of indexes (typically run-names) for the different runs
selected_summaries: list defining which summary data to save
batch_dir: directory to save to
batch_name: the batch name (will be used for making the file-name(s))
Returns: a pandas DataFrame with your selected summaries.
"""
if not frames:
logger.info("Could save summaries - no summaries to save!")
logger.info("You have no frames - aborting")
return None
if not keys:
logger.info("Could save summaries - no summaries to save!")
logger.info("You have no keys - aborting")
return None
selected_summaries_dict = create_selected_summaries_dict(selected_summaries)
summary_df = pd.concat(frames, keys=keys, axis=1)
# saving the selected summaries
for key, value in selected_summaries_dict.items():
_summary_file_name = os.path.join(batch_dir, "summary_%s_%s.csv" % (
key, batch_name))
_summary_df = summary_df.iloc[:,
summary_df.columns.get_level_values(1) == value]
# include function to tweak headers here (need to learn MultiIndex)
_header = _summary_df.columns
_summary_df.to_csv(_summary_file_name, sep=";")
logger.info(
"saved summary (%s) to:\n %s" % (key, _summary_file_name))
logger.info("finished saving summaries")
return summary_df | Writes the summaries to csv-files
Args:
frames: list of ``cellpy`` summary DataFrames
keys: list of indexes (typically run-names) for the different runs
selected_summaries: list defining which summary data to save
batch_dir: directory to save to
batch_name: the batch name (will be used for making the file-name(s))
Returns: a pandas DataFrame with your selected summaries. | entailment |
def pick_summary_data(key, summary_df, selected_summaries):
"""picks the selected pandas.DataFrame"""
selected_summaries_dict = create_selected_summaries_dict(selected_summaries)
value = selected_summaries_dict[key]
return summary_df.iloc[:, summary_df.columns.get_level_values(1) == value] | picks the selected pandas.DataFrame | entailment |
def plot_summary_data(ax, df, info_df, color_list, symbol_list, is_charge=False,
plot_style=None):
"""creates a plot of the selected df-data in the given axes.
Typical usage:
standard_fig, (ce_ax, cap_ax, ir_ax) = plt.subplots(nrows=3, ncols=1,
sharex=True)
list_of_lines, plot_style = plot_summary_data(ce_ax, ce_df,
info_df=info_df,
color_list=color_list,
symbol_list=symbol_list,
is_charge=False,
plot_style=plot_style)
the ce_df is a pandas.DataFrame with ce-values for all your selected
cells. the color_list and the symbol_list are both list with colors and
symbols to use when plotting to ensure that if you have several subplots
(axes), then the lines and symbols match up for each given cell.
Args:
ax: the matplotlib axes to plot on
df: DataFrame with the data to plot
info_df: DataFrame with info for the data
color_list: List of colors to use
symbol_list: List of symbols to use
is_charge: plots open symbols if True
plot_style: selected style of the plot
Returns: list of the matplotlib lines (convenient to have if you are adding
a custom legend) the plot style (dictionary with matplotlib plotstyles)
"""
logger.debug("trying to plot summary data")
if plot_style is None:
logger.debug("no plot_style given, using default")
plot_style = DEFAULT_PLOT_STYLE
else:
logger.debug("plot_style given")
list_of_lines = list()
for datacol in df.columns:
group = info_df.get_value(datacol[0], "groups")
sub_group = info_df.get_value(datacol[0], "sub_groups")
color = color_list[group - 1]
marker = symbol_list[sub_group - 1]
plot_style["marker"] = marker
plot_style["markeredgecolor"] = color
plot_style["color"] = color
plot_style["markerfacecolor"] = 'none'
logger.debug("selecting color for group: " + str(color))
if not is_charge:
plot_style["markerfacecolor"] = color
lines = ax.plot(df[datacol], **plot_style)
list_of_lines.extend(lines)
return list_of_lines, plot_style | creates a plot of the selected df-data in the given axes.
Typical usage:
standard_fig, (ce_ax, cap_ax, ir_ax) = plt.subplots(nrows=3, ncols=1,
sharex=True)
list_of_lines, plot_style = plot_summary_data(ce_ax, ce_df,
info_df=info_df,
color_list=color_list,
symbol_list=symbol_list,
is_charge=False,
plot_style=plot_style)
the ce_df is a pandas.DataFrame with ce-values for all your selected
cells. the color_list and the symbol_list are both list with colors and
symbols to use when plotting to ensure that if you have several subplots
(axes), then the lines and symbols match up for each given cell.
Args:
ax: the matplotlib axes to plot on
df: DataFrame with the data to plot
info_df: DataFrame with info for the data
color_list: List of colors to use
symbol_list: List of symbols to use
is_charge: plots open symbols if True
plot_style: selected style of the plot
Returns: list of the matplotlib lines (convenient to have if you are adding
a custom legend) the plot style (dictionary with matplotlib plotstyles) | entailment |
def plot_summary_figure(info_df, summary_df, color_list, symbol_list,
selected_summaries,
batch_dir, batch_name, plot_style=None, show=False,
save=True,
figure_type=None):
"""Create a figure with summary graphs.
Args:
info_df: the pandas DataFrame with info about the runs.
summary_df: a pandas DataFrame with the summary data.
color_list: a list of colors to use (one pr. group)
symbol_list: a list of symbols to use (one pr. cell in largest group)
selected_summaries: a list of the selected summaries to plot
batch_dir: path to the folder where the figure should be saved.
batch_name: the batch name.
plot_style: the matplotlib plot-style to use.
show: show the figure if True.
save: save the figure if True.
figure_type: a string for selecting type of figure to make.
"""
figure_type_object = figure_types[figure_type]
logger.debug("creating figure ({})".format(figure_type))
standard_fig, ax = plt.subplots(nrows=figure_type_object.rows,
ncols=figure_type_object.columns,
sharex=True)
ce_ax = figure_type_object.retrieve_axis("ce_ax", ax)
cap_ax = figure_type_object.retrieve_axis("cap_ax", ax)
ir_ax = figure_type_object.retrieve_axis("ir_ax", ax)
ev_ax = figure_type_object.retrieve_axis("ev_ax", ax)
# pick data (common for all plot types)
# could include a if cd_ax: pick_summary_data...
ce_df = pick_summary_data("coulombic_efficiency", summary_df,
selected_summaries)
cc_df = pick_summary_data("charge_capacity", summary_df, selected_summaries)
dc_df = pick_summary_data("discharge_capacity", summary_df,
selected_summaries)
# generate labels
ce_labels = [info_df.get_value(filename, "labels") for filename in
ce_df.columns.get_level_values(0)]
# adding charge/discharge label
ce_labels.extend(["", "discharge", "charge"])
# plot ce
list_of_lines, plot_style = plot_summary_data(ce_ax, ce_df, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list,
is_charge=False,
plot_style=plot_style)
ce_ax.set_ylabel("Coulombic\nefficiency\n(%)")
ce_ax.locator_params(nbins=5)
# adding charge/discharge label
color = plot_style["color"]
markersize = plot_style["markersize"]
open_label = mpl.lines.Line2D([], [], color=color, marker='s',
markeredgecolor=color, markerfacecolor='none',
markersize=markersize)
closed_label = mpl.lines.Line2D([], [], color=color, marker='s',
markeredgecolor=color,
markerfacecolor=color,
markersize=markersize)
no_label = mpl.lines.Line2D([], [], color='none', marker='s', markersize=0)
list_of_lines.extend([no_label, closed_label, open_label])
# plotting capacity (common)
plot_summary_data(cap_ax, cc_df, is_charge=True, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list, plot_style=plot_style)
plot_summary_data(cap_ax, dc_df, is_charge=False, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list, plot_style=plot_style)
cap_ax.set_ylabel("Capacity\n(mAh/g)")
cap_ax.locator_params(nbins=4)
# plotting ir data (common)
plot_ir_charge, plot_ir_discharge = figure_type_object.ir_selector
if plot_ir_charge:
irc_df = pick_summary_data("ir_charge", summary_df, selected_summaries)
plot_summary_data(ir_ax, irc_df, is_charge=True, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list, plot_style=plot_style)
if plot_ir_discharge:
ird_df = pick_summary_data("ir_discharge", summary_df,
selected_summaries)
plot_summary_data(ir_ax, ird_df, is_charge=False, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list, plot_style=plot_style)
ir_ax.set_ylabel("Internal\nresistance\n(Ohms)")
ir_ax.set_xlabel("Cycle number")
ir_ax.locator_params(axis="y", nbins=4)
ir_ax.locator_params(axis="x", nbins=10)
# should use MaxNLocator here instead
# pick data (not common for all plot types)
if ev_ax is not None:
plot_ev_charge, plot_ev_discharge = figure_type_object\
.end_voltage_selector
if plot_ev_charge:
evc_df = pick_summary_data("end_voltage_charge", summary_df,
selected_summaries)
plot_summary_data(ev_ax, evc_df, is_charge=True, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list, plot_style=plot_style)
if plot_ev_discharge:
evd_df = pick_summary_data("end_voltage_discharge", summary_df,
selected_summaries)
plot_summary_data(ev_ax, evd_df, is_charge=False, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list, plot_style=plot_style)
ev_ax.set_ylabel("End\nvoltage\n(V)")
ev_ax.locator_params(axis="y", nbins=4)
# tweaking
plt.subplots_adjust(left=0.07, right=0.93, top=0.9, wspace=0.25,
hspace=0.15)
# adding legend
logger.debug("trying to add legends " + str(ce_labels))
standard_fig.legend(handles=list_of_lines, labels=ce_labels,
bbox_to_anchor=(1.02, 1.1), loc=2,
# bbox_transform=plt.gcf().transFigure,
bbox_transform=ce_ax.transAxes,
numpoints=1,
ncol=1, labelspacing=0.,
prop={"size": 10})
# plt.tight_layout()
if save:
extension = prms.Batch["fig_extension"]
dpi = prms.Batch["dpi"]
figure_file_name = os.path.join(batch_dir, "%splot_%s.%s" % (
figure_type, batch_name, extension))
standard_fig.savefig(figure_file_name, dpi=dpi, bbox_inches='tight')
if show:
plt.show()
return standard_fig, (ce_ax, cap_ax, ir_ax) | Create a figure with summary graphs.
Args:
info_df: the pandas DataFrame with info about the runs.
summary_df: a pandas DataFrame with the summary data.
color_list: a list of colors to use (one pr. group)
symbol_list: a list of symbols to use (one pr. cell in largest group)
selected_summaries: a list of the selected summaries to plot
batch_dir: path to the folder where the figure should be saved.
batch_name: the batch name.
plot_style: the matplotlib plot-style to use.
show: show the figure if True.
save: save the figure if True.
figure_type: a string for selecting type of figure to make. | entailment |
def export_dqdv(cell_data, savedir, sep, last_cycle=None):
"""Exports dQ/dV data from a CellpyData instance.
Args:
cell_data: CellpyData instance
savedir: path to the folder where the files should be saved
sep: separator for the .csv-files.
last_cycle: only export up to this cycle (if not None)
"""
logger.debug("exporting dqdv")
filename = cell_data.dataset.loaded_from
no_merged_sets = ""
firstname, extension = os.path.splitext(filename)
firstname += no_merged_sets
if savedir:
firstname = os.path.join(savedir, os.path.basename(firstname))
logger.debug(f"savedir is true: {firstname}")
outname_charge = firstname + "_dqdv_charge.csv"
outname_discharge = firstname + "_dqdv_discharge.csv"
list_of_cycles = cell_data.get_cycle_numbers()
number_of_cycles = len(list_of_cycles)
logger.debug("%s: you have %i cycles" % (filename, number_of_cycles))
# extracting charge
out_data = _extract_dqdv(cell_data, cell_data.get_ccap, last_cycle)
logger.debug("extracted ica for charge")
try:
_save_multi(data=out_data, file_name=outname_charge, sep=sep)
except ExportFailed:
logger.info("could not export ica for charge")
else:
logger.debug("saved ica for charge")
# extracting discharge
out_data = _extract_dqdv(cell_data, cell_data.get_dcap, last_cycle)
logger.debug("extracxted ica for discharge")
try:
_save_multi(data=out_data, file_name=outname_discharge, sep=sep)
except ExportFailed:
logger.info("could not export ica for discharge")
else:
logger.debug("saved ica for discharge") | Exports dQ/dV data from a CellpyData instance.
Args:
cell_data: CellpyData instance
savedir: path to the folder where the files should be saved
sep: separator for the .csv-files.
last_cycle: only export up to this cycle (if not None) | entailment |
def init(*args, **kwargs):
"""Returns an initialized instance of the Batch class"""
# set up cellpy logger
default_log_level = kwargs.pop("default_log_level", None)
import cellpy.log as log
log.setup_logging(custom_log_dir=prms.Paths["filelogdir"],
default_level=default_log_level)
b = Batch(*args, **kwargs)
return b | Returns an initialized instance of the Batch class | entailment |
def debugging():
"""This one I use for debugging..."""
print("In debugging")
json_file = r"C:\Scripting\Processing\Cell" \
r"data\outdata\SiBEC\cellpy_batch_bec_exp02.json"
b = init(default_log_level="DEBUG")
b.load_info_df(json_file)
print(b.info_df.head())
# setting some variables
b.export_raw = False
b.export_cycles = False
b.export_ica = False
b.save_cellpy_file = True
b.force_raw_file = False
b.force_cellpy_file = True
b.load_and_save_raw(parent_level="cellpydata") | This one I use for debugging... | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.