code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
if not bool(tax_id) ^ bool(tax_name):
msg = 'Exactly one of tax_id and tax_name may be provided.'
raise ValueError(msg)
if tax_name:
tax_id, primary_name, is_primary = self.primary_from_name(tax_name)
else:
primary_name = None
# assumes stable ordering of lineage from root --> leaf
lintups = self._get_lineage(tax_id)
ldict = dict(lintups)
ldict['tax_id'] = tax_id
try:
# parent is second to last element, except for root
__, ldict['parent_id'] = lintups[-2]
except IndexError:
ldict['parent_id'] = None
ldict['rank'], __ = lintups[-1] # this taxon is last element in lineage
ldict['tax_name'] = primary_name or self.primary_from_id(tax_id)
return ldict | def lineage(self, tax_id=None, tax_name=None) | Public method for returning a lineage; includes tax_name and rank | 4.439726 | 4.432248 | 1.001687 |
# TODO: shoud be able to do this inside a transaction
if not source_name:
raise ValueError('"source_name" may not be None or an empty string')
sel = select([self.source.c.id], self.source.c.name == source_name).execute()
result = sel.fetchone()
if result:
return result[0], False
else:
ins = self.source.insert().execute(
name=source_name, description=description)
return ins.inserted_primary_key[0], True | def add_source(self, source_name, description=None) | Adds a row to table "source" if "name" does not
exist. Returns (source_id, True) if a new row is created,
(source_id, False) otherwise. | 3.327351 | 2.809675 | 1.184248 |
if not (bool(source_id) ^ bool(source_name)):
raise ValueError('exactly one of source_id or source_name is required')
if source_id:
try:
source_id = int(source_id)
except (ValueError, TypeError):
raise ValueError(
'source_id must be an int or a string representing one')
sel = select([self.source], self.source.c.id == source_id).execute()
else:
sel = select([self.source], self.source.c.name == source_name).execute()
result = sel.fetchone()
if not result:
raise ValueError(
'there is no source with id {} or name {}'.format(
source_id, source_name))
return dict(list(zip(list(sel.keys()), result))) | def get_source(self, source_id=None, source_name=None) | Returns a dict with keys ['id', 'name', 'description'] or None if
no match. The ``id`` field is guaranteed to be an int that
exists in table source. Requires exactly one of ``source_id``
or ``source_name``. A new source corresponding to
``source_name`` is created if necessary. | 2.263047 | 2.07993 | 1.08804 |
def _lower(n1, n2):
return self.ranks.index(n1) < self.ranks.index(n2)
if rank not in self.ranks:
raise TaxonIntegrityError('rank "{}" is undefined'.format(rank))
parent_rank = self.rank(parent_id)
# undefined ranks can be placed anywhere in a lineage
if not _lower(rank, parent_rank) and rank != self.NO_RANK:
msg = ('New node "{}", rank "{}" has same or '
'higher rank than parent node "{}", rank "{}"')
msg = msg.format(tax_id, rank, parent_id, parent_rank)
raise TaxonIntegrityError(msg)
for child in children:
if not _lower(self.rank(child), rank):
msg = 'Child node {} has same or lower rank as new node {}'
msg = msg.format(tax_id, child)
raise TaxonIntegrityError(msg)
return True | def verify_rank_integrity(self, tax_id, rank, parent_id, children) | Confirm that for each node the parent ranks and children ranks are
coherent | 3.254389 | 3.205714 | 1.015184 |
if ignored:
log.info('some arguments were ignored: {} '.format(str(ignored)))
children = children or []
self.verify_rank_integrity(tax_id, rank, parent_id, children)
source_id, __ = self.add_source(source_name)
assert isinstance(is_valid, bool)
statements = []
# add node
statements.append(
self.nodes.insert().values(
tax_id=tax_id,
parent_id=parent_id,
rank=rank,
source_id=source_id))
# add names. Since this is a new node, at least one name must
# be provided; if only one is provided, it is the primary
# name. If more than one is primary, an error will be raised
# from add_names()
if len(names) == 1:
names[0]['is_primary'] = True
else:
primary_names = [n['tax_name'] for n in names if n.get('is_primary')]
if len(primary_names) != 1:
raise ValueError(
'`is_primary` must be True for exactly one name in `names`')
for namedict in names:
namedict['source_id'] = source_id
if 'source_name' in namedict:
del namedict['source_name']
statements.extend(self.add_names(tax_id, names, execute=False))
# add children and update source_id
for child in children:
statements.append(self.nodes.update(
whereclause=self.nodes.c.tax_id == child,
values={'parent_id': tax_id, 'source_id': source_id}))
if execute:
self.execute(statements)
else:
return statements | def add_node(self, tax_id, parent_id, rank, names, source_name, children=None,
is_valid=True, execute=True, **ignored) | Add a node to the taxonomy.
``source_name`` is added to table "source" if necessary. | 3.008952 | 3.05727 | 0.984196 |
assert isinstance(is_primary, bool)
assert is_classified in {None, True, False}
if ignored:
log.info('some arguments were ignored: {} '.format(str(ignored)))
source_id = self.get_source(source_id, source_name)['id']
statements = []
if is_primary:
statements.append(self.names.update(
whereclause=self.names.c.tax_id == tax_id,
values={'is_primary': False}))
statements.append(self.names.insert().values(
tax_id=tax_id,
tax_name=tax_name,
source_id=source_id,
is_primary=is_primary,
name_class=name_class,
is_classified=is_classified))
if execute:
self.execute(statements)
else:
return statements | def add_name(self, tax_id, tax_name, source_name=None, source_id=None,
name_class='synonym', is_primary=False, is_classified=None,
execute=True, **ignored) | Add a record to the names table corresponding to
``tax_id``. Arguments are as follows:
- tax_id (string, required)
- tax_name (string, required)
*one* of the following are required:
- source_id (int or string coercable to int)
- source_name (string)
``source_id`` or ``source_name`` must identify an existing
record in table "source".
The following are optional:
- name_class (string, default 'synonym')
- is_primary (bool, see below)
- is_classified (bool or None, default None)
``is_primary`` is optional and defaults to True if only one
name is provided; otherwise is_primary must be True for
exactly one name (and is optional in others). | 2.567515 | 2.655636 | 0.966817 |
primary_names = [n['tax_name'] for n in names if n.get('is_primary')]
if len(primary_names) > 1:
raise ValueError(
'`is_primary` may be True for no more than one name in `names`')
statements = []
for namevals in names:
if 'tax_id' in namevals:
del namevals['tax_id']
statements.extend(
self.add_name(tax_id=tax_id, execute=False, **namevals))
if execute:
self.execute(statements)
else:
return statements | def add_names(self, tax_id, names, execute=True) | Associate one or more names with ``tax_id``.
``names`` is a list of one or more dicts, with keys
corresponding to the signature of ``self.add_name()``
(excluding ``execute``). | 3.534782 | 3.428338 | 1.031048 |
if tax_id is None:
return None
parent_id, rank = self._node(tax_id)
s = select([self.nodes.c.tax_id],
and_(self.nodes.c.parent_id == parent_id,
self.nodes.c.tax_id != tax_id,
self.nodes.c.rank == rank))
res = s.execute()
output = res.fetchone()
if not output:
msg = 'No sibling of tax_id {} with rank {} found in taxonomy'
msg = msg.format(tax_id, rank)
log.warning(msg)
return None
else:
return output[0] | def sibling_of(self, tax_id) | Return None or a tax_id of a sibling of *tax_id*.
If ``tax_id`` is None, then always returns None. Otherwise,
returns None if there is no sibling. | 2.743953 | 2.704902 | 1.014437 |
'''
Return all tax_ids in node table
'''
fetch = select([self.nodes.c.tax_id]).execute().fetchall()
ids = [t[0] for t in fetch]
return ids | def tax_ids(self) | Return all tax_ids in node table | 6.724245 | 3.557796 | 1.890003 |
if tax_id is None:
return None
parent_id, rank = self._node(tax_id)
s = select([self.nodes.c.tax_id],
and_(self.nodes.c.parent_id == tax_id,
or_(*[self.nodes.c.rank == r
for r in self.ranks_below(rank)])))
res = s.execute()
output = res.fetchone()
if not output:
msg = ('No children of tax_id {} with '
'rank below {} found in database')
msg = msg.format(tax_id, rank)
log.warning(msg)
return None
else:
r = output[0]
assert self.is_ancestor_of(r, tax_id)
return r | def child_of(self, tax_id) | Return None or a tax id of a child of *tax_id*.
If *tax_id* is None, then always returns None. Otherwise
returns a child if one exists, else None. The child must have
a proper rank below that of tax_id (i.e., genus, species, but
not no_rank or below_below_kingdom). | 3.510083 | 3.291975 | 1.066254 |
if tax_id is None:
return None
parent_id, rank = self._node(tax_id)
if rank == 'species':
return [tax_id]
else:
children = self.children_of(tax_id, 2)
species_taxids = []
for t in children:
species_taxids.extend(self.nary_subtree(t, n))
return species_taxids | def nary_subtree(self, tax_id, n=2) | Return a list of species tax_ids under *tax_id* such that
node under *tax_id* and above the species has two children. | 2.801554 | 2.734218 | 1.024627 |
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
def is_db_responsive():
try:
sqlalchemy_bind.session.query('1').first_or_404()
except:
return False
else:
return True
if is_enabled and is_db_responsive():
return func(*args, **kwargs)
else:
abort(HTTP_CODES.UNAUTHORIZED)
return wrapper
return decorator | def validate_db(sqlalchemy_bind, is_enabled=ENABLE_DB) | Checks if a DB is authorized and responding before executing the function | 2.696867 | 2.508055 | 1.075282 |
if mapping is None:
mapping = {}
templ = Template(template)
return templ.substitute(mapping) | def substitute(template, mapping=None) | Render the template *template*. *mapping* is a :class:`dict` with
values to add to the template. | 2.853098 | 3.384417 | 0.84301 |
results = []
for name in glob.glob(os.path.join(path, '*')):
results.append(name)
if os.path.isdir(name):
results.extend(dirtree(name))
return results | def dirtree(path) | Find recursively and return all files and directories from
the path *path*. | 2.093127 | 2.088319 | 1.002302 |
if context.config.app_settings_handler:
app_settings_handler = import_object(
context.config.app_settings_handler)
settings = app_settings_handler(context)
else:
settings = {}
apps = []
for interface in context.config.interfaces:
urls = interface.urls
if not urls:
urls = [tornado.web.URLSpec('/', NullHandler)]
apps.append(
tornado.web.Application(
urls, debug=debug, context=context,
interface=interface, **settings
)
)
return apps | def get_tornado_apps(context, debug=False) | Create Tornado's application for all interfaces which are defined
in the configuration. *context* is instance of the
:class:`shelter.core.context.Context`. If *debug* is :const:`True`,
server will be run in **DEBUG** mode. Return :class:`list` of the
:class:`tornado.web.Application` instances. | 3.123907 | 2.907508 | 1.074428 |
setproctitle.setproctitle("{:s}: {:s}".format(
self.context.config.name, self.__class__.__name__))
self.logger.info(
"Worker '%s' has been started with pid %d",
self.__class__.__name__, os.getpid())
# Register SIGINT handler which will exit service process
def sigint_handler(unused_signum, unused_frame):
self.stop()
signal.signal(signal.SIGINT, sigint_handler)
# Initialize logging
self.context.config.configure_logging()
# Initialize child
self.context.initialize_child(SERVICE_PROCESS, process=self)
next_loop_time = 0
while 1:
# Exit if pid of the parent process has changed (parent process
# has exited and init is new parent) or if stop flag is set.
if os.getppid() != self._parent_pid or self._stop_event.is_set():
break
# Repeatedly call loop method. After first call set ready flag.
if time.time() >= next_loop_time:
try:
self.loop()
except Exception:
self.logger.exception(
"Worker '%s' failed", self.__class__.__name__)
else:
if not next_loop_time and not self.ready:
self._ready.value = True
next_loop_time = time.time() + self.interval
else:
time.sleep(0.25) | def run(self) | Child process. Repeatedly call :meth:`loop` method every
:attribute:`interval` seconds. | 4.144557 | 3.799224 | 1.090896 |
with open(fname, 'rU') as infile:
infile = (line for line in infile if not line.startswith('#'))
reader = list(csv.DictReader(infile))
rows = (d for d in reader if d['tax_id'])
# for now, children are provided as a semicolon-delimited list
# within a cell (yes, yuck). We need to convert thit into a list
# if present.
for d in rows:
if 'children' in d:
if d['children']:
d['children'] = [x.strip() for x in d['children'].split(';')]
else:
del d['children']
yield d | def get_new_nodes(fname) | Return an iterator of dicts given a .csv-format file. | 4.666676 | 4.395922 | 1.061592 |
with open(fname, 'rU') as f:
for line in f:
if line.strip() and not line.startswith('#'):
yield line.split('#', 1)[0].strip() | def getlines(fname) | Returns iterator of whitespace-stripped lines in file, omitting
blank lines, lines beginning with '#', and line contents following
the first '#' character. | 2.650857 | 2.279712 | 1.162803 |
s = ''.join(handle.readlines())
result = {}
try_set_fields(result, r'(?P<program>RAxML version [0-9.]+)', s)
try_set_fields(result, r'(?P<datatype>DNA|RNA|AA)', s)
result['empirical_frequencies'] = (
result['datatype'] != 'AA' or
re.search('empirical base frequencies', s, re.IGNORECASE) is not None)
try_set_fields(result, r'Substitution Matrix: (?P<subs_model>\w+)', s)
rates = {}
if result['datatype'] != 'AA':
try_set_fields(rates,
(r"rates\[0\] ac ag at cg ct gt: "
r"(?P<ac>[0-9.]+) (?P<ag>[0-9.]+) (?P<at>[0-9.]+) "
r"(?P<cg>[0-9.]+) (?P<ct>[0-9.]+) (?P<gt>[0-9.]+)"),
s, hook=float)
try_set_fields(rates, r'rate A <-> C: (?P<ac>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate A <-> G: (?P<ag>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate A <-> T: (?P<at>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate C <-> G: (?P<cg>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate C <-> T: (?P<ct>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate G <-> T: (?P<gt>[0-9.]+)', s, hook=float)
if len(rates) > 0:
result['subs_rates'] = rates
result['gamma'] = {'n_cats': 4}
try_set_fields(result['gamma'],
r"alpha[\[\]0-9]*: (?P<alpha>[0-9.]+)", s, hook=float)
result['ras_model'] = 'gamma'
return result | def parse_raxml(handle) | Parse RAxML's summary output.
*handle* should be an open file handle containing the RAxML
output. It is parsed and a dictionary returned. | 2.57992 | 2.58028 | 0.999861 |
names = OrderedDict()
found_eof = False
for line in fobj:
line = line.strip()
if line == '//':
found_eof = True
elif line.startswith('#') or not line.strip():
continue
else:
name, __ = line.split(None, 1)
names[name] = None
if not found_eof:
raise ValueError('Invalid Stockholm format: no file terminator')
return list(names.keys()) | def parse_stockholm(fobj) | Return a list of names from an Stockholm-format sequence alignment
file. ``fobj`` is an open file or another object representing a
sequence of lines. | 3.307767 | 3.020091 | 1.095254 |
with open(os.devnull) as dn:
try:
subprocess.check_call([rppr_name], stdout=dn, stderr=dn)
except OSError as e:
if e.errno == os.errno.ENOENT:
return False
else:
raise
except subprocess.CalledProcessError as e:
# rppr returns non-zero exit status with no arguments
pass
return True | def has_rppr(rppr_name='rppr') | Check for rppr binary in path | 2.720119 | 2.556864 | 1.06385 |
'''
Add a standard set of database arguments for argparse
'''
parser.add_argument(
'url',
nargs='?',
default='sqlite:///ncbi_taxonomy.db',
type=sqlite_default(),
help=('Database string URI or filename. If no database scheme '
'specified \"sqlite:///\" will be prepended. [%(default)s]'))
db_parser = parser.add_argument_group(title='database options')
# TODO: better description of what --schema does
db_parser.add_argument(
'--schema',
help=('Name of SQL schema in database to query '
'(if database flavor supports this).'))
return parser | def add_database_args(parser) | Add a standard set of database arguments for argparse | 7.054004 | 6.222788 | 1.133576 |
'''
Prepend default scheme if none is specified. This helps provides backwards
compatibility with old versions of taxtastic where sqlite was the automatic
default database.
'''
def parse_url(url):
# TODO: need separate option for a config file
if url.endswith('.db') or url.endswith('.sqlite'):
if not url.startswith('sqlite:///'):
url = 'sqlite:///' + url
elif url.endswith('.cfg') or url.endswith('.conf'):
conf = configparser.SafeConfigParser(allow_no_value=True)
conf.optionxform = str # options are case-sensitive
conf.read(url)
url = conf.get('sqlalchemy', 'url')
return url
return parse_url | def sqlite_default() | Prepend default scheme if none is specified. This helps provides backwards
compatibility with old versions of taxtastic where sqlite was the automatic
default database. | 5.68999 | 3.013024 | 1.888465 |
log.info('loading reference package')
refpkg.Refpkg(args.refpkg, create=False).strip() | def action(args) | Strips non-current files and rollback information from a refpkg.
*args* should be an argparse object with fields refpkg (giving the
path to the refpkg to operate on). | 38.065212 | 17.264826 | 2.204784 |
if not os.path.isdir(path):
raise ValueError("'%s' is not a directory" % path)
files_mask = os.path.join(path, "*.conf")
return [f for f in sorted(glob.glob(files_mask)) if os.path.isfile(f)] | def get_conf_d_files(path) | Return alphabetical ordered :class:`list` of the *.conf* files
placed in the path. *path* is a directory path.
::
>>> get_conf_d_files('conf/conf.d/')
['conf/conf.d/10-base.conf', 'conf/conf.d/99-dev.conf'] | 2.29857 | 2.972438 | 0.773295 |
if not os.path.isfile(filename):
raise ValueError("'%s' is not a file" % filename)
conf_d_path = "%s.d" % filename
if not os.path.exists(conf_d_path):
return [filename]
return [filename] + get_conf_d_files(conf_d_path) | def get_conf_files(filename) | Return :class:`list` of the all configuration files. *filename* is a
path of the main configuration file.
::
>>> get_conf_files('exampleapp.conf')
['exampleapp.conf', 'exampleapp.conf.d/10-database.conf'] | 2.7706 | 3.043616 | 0.910299 |
filename = filename or os.environ.get('SHELTER_CONFIG_FILENAME', '')
if not filename:
raise ImproperlyConfiguredError(_(
"Configuration file is not defined. You must either "
"set 'SHELTER_CONFIG_FILENAME' environment variable or "
"'-f/--config-file' command line argument."
))
parser = six.moves.configparser.RawConfigParser()
for conf_file in get_conf_files(filename):
logger.info("Found config '%s'", conf_file)
if not parser.read(conf_file):
logger.warning("Error while parsing config '%s'", conf_file)
return parser | def get_configparser(filename='') | Read main configuration file and all files from *conf.d* subdirectory
and return parsed configuration as a **configparser.RawConfigParser**
instance. | 3.697288 | 3.522109 | 1.049737 |
try:
return self.config_parser.get('application', 'name')
except CONFIGPARSER_EXC:
return super(IniConfig, self).name | def name(self) | Application name. It's used as a process name. | 9.614746 | 8.114573 | 1.184874 |
if 'interfaces' not in self._cached_values:
self._cached_values['interfaces'] = []
for name, interface in six.iteritems(self.settings.INTERFACES):
interface_name = 'interface_%s' % name
# Hostname:port + unix socket
try:
listen = self.config_parser.get(interface_name, 'Listen')
except CONFIGPARSER_EXC:
listen = interface.get('LISTEN')
try:
unix_socket = self.config_parser.get(
interface_name, 'UnixSocket')
except CONFIGPARSER_EXC:
unix_socket = interface.get('UNIX_SOCKET')
if not listen and not unix_socket:
raise ValueError(
'Interface MUST listen either on TCP '
'or UNIX socket or both')
host, port = parse_host(listen) if listen else (None, None)
# Processes
try:
processes = self.config_parser.getint(
interface_name, 'Processes')
except CONFIGPARSER_EXC:
processes = int(interface.get('PROCESSES', 1))
# Urls
try:
urls_obj_name = self.config_parser.get(
interface_name, 'Urls')
except CONFIGPARSER_EXC:
urls_obj_name = interface.get('URLS', '')
if urls_obj_name:
urls = import_object(urls_obj_name)
else:
urls = ()
self._cached_values['interfaces'].append(
self.Interface(
name, host, port, unix_socket, processes, urls)
)
return self._cached_values['interfaces'] | def interfaces(self) | Interfaces as a :class:`list`of the
:class:`shelter.core.config.Config.Interface` instances. | 2.761042 | 2.675205 | 1.032086 |
try:
filepath = tz_path(name)
return open(filepath, 'rb')
except TimezoneNotFound:
# http://bugs.launchpad.net/bugs/383171 - we avoid using this
# unless absolutely necessary to help when a broken version of
# pkg_resources is installed.
try:
from pkg_resources import resource_stream
except ImportError:
resource_stream = None
if resource_stream is not None:
try:
return resource_stream(__name__, 'zoneinfo/' + name)
except FileNotFoundError:
return tz_path(name)
raise | def tz_file(name) | Open a timezone file from the zoneinfo subdir for reading.
:param name: The name of the timezone.
:type name: str
:rtype: file | 4.628322 | 4.749579 | 0.97447 |
if not name:
raise ValueError('Invalid timezone')
name_parts = name.lstrip('/').split('/')
for part in name_parts:
if part == os.path.pardir or os.path.sep in part:
raise ValueError('Bad path segment: %r' % part)
filepath = os.path.join(_DIRECTORY, *name_parts)
if not os.path.exists(filepath):
raise TimezoneNotFound('Timezone {} not found at {}'.format(name, filepath))
return filepath | def tz_path(name) | Return the path to a timezone file.
:param name: The name of the timezone.
:type name: str
:rtype: str | 3.611897 | 3.701154 | 0.975884 |
base_dir = _DIRECTORY
zones = ()
for root, dirs, files in os.walk(base_dir):
for basename in files:
zone = os.path.join(root, basename)
if os.path.isdir(zone):
continue
zone = os.path.relpath(zone, base_dir)
with open(os.path.join(root, basename), 'rb') as fd:
if fd.read(4) == b'TZif' and zone not in INVALID_ZONES:
zones = zones + (zone,)
return tuple(sorted(zones)) | def get_timezones() | Get the supported timezones.
The list will be cached unless you set the "fresh" attribute to True.
:param fresh: Whether to get a fresh list or not
:type fresh: bool
:rtype: tuple | 3.237659 | 3.725891 | 0.868962 |
point_value, points = get_points(key)
if not ALLOW_NEGATIVE_TOTALS:
total = points_awarded(target)
if total + points < 0:
reason = reason + "(floored from {0} to 0)".format(points)
points = -total
apv = AwardedPointValue(points=points, value=point_value, reason=reason)
if isinstance(target, get_user_model()):
apv.target_user = target
lookup_params = {
"target_user": target
}
else:
apv.target_object = target
lookup_params = {
"target_content_type": apv.target_content_type,
"target_object_id": apv.target_object_id,
}
if source is not None:
if isinstance(source, get_user_model()):
apv.source_user = source
else:
apv.source_object = source
apv.save()
if not TargetStat.update_points(points, lookup_params):
try:
sid = transaction.savepoint()
TargetStat._default_manager.create(
**dict(lookup_params, points=points)
)
transaction.savepoint_commit(sid)
except IntegrityError:
transaction.savepoint_rollback(sid)
TargetStat.update_points(points, lookup_params)
signals.points_awarded.send(
sender=target.__class__,
target=target,
key=key,
points=points,
source=source
)
new_points = points_awarded(target)
old_points = new_points - points
TargetStat.update_positions((old_points, new_points))
return apv | def award_points(target, key, reason="", source=None) | Awards target the point value for key. If key is an integer then it's a
one off assignment and should be interpreted as the actual point value. | 3.033003 | 3.058646 | 0.991616 |
lookup_params = {}
if target is not None:
if isinstance(target, get_user_model()):
lookup_params["target_user"] = target
else:
lookup_params.update({
"target_content_type": ContentType.objects.get_for_model(target),
"target_object_id": target.pk,
})
if source is not None:
if isinstance(source, get_user_model()):
lookup_params["source_user"] = source
else:
lookup_params.update({
"source_content_type": ContentType.objects.get_for_model(source),
"source_object_id": source.pk,
})
if since is None:
if target is not None and source is None:
try:
return TargetStat.objects.get(**lookup_params).points
except TargetStat.DoesNotExist:
return 0
else:
return AwardedPointValue.points_awarded(**lookup_params)
else:
lookup_params["timestamp__gte"] = since
return AwardedPointValue.points_awarded(**lookup_params) | def points_awarded(target=None, source=None, since=None) | Determine out how many points the given target has received. | 1.853679 | 1.86726 | 0.992727 |
try:
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
except ValueError:
raise ValueError("Incorrect data format, should be yyyy-mm-dd")
if (end_date - start_date).days > 366:
raise ValueError("The difference between start and end date " +
"should be less than or equal to 366 days.")
if (end_date - start_date).days < 0:
raise ValueError("End date cannot be before start date.") | def __validate_dates(start_date, end_date) | Validate if a date string.
Validate if a string is a date on yyyy-mm-dd format and it the
period between them is less than a year. | 1.799169 | 1.73398 | 1.037595 |
query = quote(query)
url = 'https://query.yahooapis.com/v1/public/yql?q=' + query + \
'&format=json&env=store://datatables.org/alltableswithkeys'
response = urlopen(url).read()
return json.loads(response.decode('utf-8'))['query']['results'] | def __yahoo_request(query) | Request Yahoo Finance information.
Request information from YQL.
`Check <http://goo.gl/8AROUD>`_ for more information on YQL. | 1.911765 | 2.097017 | 0.911659 |
__validate_list(tickers_list)
__validate_list(selected_columns)
query = 'select {cols} from yahoo.finance.quotes where symbol in ({vals})'
query = query.format(
cols=', '.join(selected_columns),
vals=', '.join('"{0}"'.format(s) for s in tickers_list)
)
response = __yahoo_request(query)
if not response:
raise RequestError('Unable to process the request. Check if the ' +
'columns selected are valid.')
if not type(response['quote']) is list:
return [response['quote']]
return response['quote'] | def request_quotes(tickers_list, selected_columns=['*']) | Request Yahoo Finance recent quotes.
Returns quotes information from YQL. The columns to be requested are
listed at selected_columns. Check `here <http://goo.gl/8AROUD>`_ for more
information on YQL.
>>> request_quotes(['AAPL'], ['Name', 'PreviousClose'])
{
'PreviousClose': '95.60',
'Name': 'Apple Inc.'
}
:param table: Table name.
:type table: string
:param tickers_list: List of tickers that will be returned.
:type tickers_list: list of strings
:param selected_columns: List of columns to be returned, defaults to ['*']
:type selected_columns: list of strings, optional
:returns: Requested quotes.
:rtype: json
:raises: TypeError, TypeError | 3.223595 | 3.437938 | 0.937653 |
__validate_dates(start_date, end_date)
cols = ['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Adj_Close']
query = 'select {cols} from yahoo.finance.historicaldata ' + \
'where symbol in ("{ticker}") and startDate = "{start_date}" ' + \
'and endDate = "{end_date}"'
query = query.format(
cols=', '.join(cols),
ticker=ticker,
start_date=start_date,
end_date=end_date
)
response = __yahoo_request(query)
if not response:
raise RequestError('Unable to process the request. Check if the ' +
'stock ticker used is a valid one.')
if not type(response['quote']) is list:
return [response['quote']]
return response['quote'] | def request_historical(ticker, start_date, end_date) | Get stock's daily historical information.
Returns a dictionary with Adj Close, Close, High, Low, Open and
Volume, between the start_date and the end_date. Is start_date and
end_date were not provided all the available information will be
retrieved. Information provided by YQL platform.
Check `here <http://goo.gl/8AROUD>`_ for more information on YQL.
.. warning:: Request limited to a period not greater than 366 days.
Use download_historical() to download the full historical data.
>>> request_historical('AAPL', '2016-03-01', '2016-03-02')
[
{
'Close': '100.75',
'Low': '99.639999',
'High': '100.889999',
'Adj_Close': '100.140301',
'Date': '2016-03-02',
'Open': '100.510002',
'Volume': '33169600'
},
{
'Close': '100.529999',
'Low': '97.419998',
'High': '100.769997',
'Adj_Close': '99.921631',
'Date': '2016-03-01',
'Open': '97.650002',
'Volume': '50407100'
}
]
:param start_date: Start date
:type start_date: string on the format of "yyyy-mm-dd"
:param end_date: End date
:type end_date: string on the format of "yyyy-mm-dd"
:returns: Daily historical information.
:rtype: list of dictionaries | 2.724534 | 2.81562 | 0.96765 |
__validate_list(tickers_list)
for ticker in tickers_list:
file_name = os.path.join(output_folder, ticker + '.csv')
with open(file_name, 'wb') as f:
base_url = 'http://real-chart.finance.yahoo.com/table.csv?s='
try:
urlopen(base_url + ticker)
urlretrieve(base_url + ticker, f.name)
except:
os.remove(file_name)
raise RequestError('Unable to process the request. Check if ' +
ticker + ' is a valid stock ticker') | def download_historical(tickers_list, output_folder) | Download historical data from Yahoo Finance.
Downloads full historical data from Yahoo Finance as CSV. The following
fields are available: Adj Close, Close, High, Low, Open and Volume. Files
will be saved to output_folder as <ticker>.csv.
:param tickers_list: List of tickers that will be returned.
:type tickers_list: list of strings
:param output_folder: Output folder path
:type output_folder: string | 2.838162 | 3.139013 | 0.904158 |
if not default_json_path:
default_json_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "logging.json")
path = default_json_path
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, 'rt') as f:
config = json.load(f)
log_dir = os.path.abspath(prms.Paths["filelogdir"])
if custom_log_dir:
log_dir = custom_log_dir
if not os.path.isdir(log_dir):
warning_txt = ("\nCould not set custom log-dir - "
"non-existing directory"
f"\nDir: {log_dir}"
"\nUsing current directory instead: "
f"{os.getcwd()}")
logging.warning(warning_txt)
log_dir = os.getcwd()
for file_handler in ["error_file_handler", "info_file_handler",
"debug_file_handler"]:
try:
file_name = config["handlers"][file_handler]["filename"]
logging.debug("Setting file handlers for logging.")
logging.debug(f"Filename: {file_name}")
logging.debug(f"Full path: {os.path.join(log_dir,file_name)}")
# print(f"Filename: {file_name}")
# print(f"Full path: {os.path.join(log_dir,file_name)}")
config["handlers"][file_handler][
"filename"] = os.path.join(log_dir,
file_name)
except Exception as e:
warnings.warn("\nCould not set custom log-dir" + str(e))
if default_level:
w_txt = "\nCould not set custom default level for logger"
if default_level not in [
"INFO", "DEBUG", logging.INFO, logging.DEBUG
]:
_txt = "\nonly 'INFO' and 'DEBUG' is supported"
_txt += " as default_level"
warnings.warn(w_txt + _txt)
else:
try:
config["handlers"]["console"]["level"] = default_level
if default_level in ["DEBUG", logging.DEBUG]:
config["handlers"]["console"]["formatter"] = "stamped"
except Exception as e:
warnings.warn(w_txt + "\n" + str(e))
logging.config.dictConfig(config)
else:
if not default_level:
default_level = logging.INFO
logging.basicConfig(level=default_level) | def setup_logging(default_json_path=None, default_level=None, env_key='LOG_CFG',
custom_log_dir=None) | Setup logging configuration | 2.396673 | 2.374428 | 1.009368 |
def squash_xml_to_text(elm, remove_namespaces=False):
    """Squash the given XML element (as `elm`) to a text containing XML.

    The outer most element/tag will be removed, but inner elements will
    remain. If `remove_namespaces` is specified, XML namespace declarations
    will be removed from the text.

    :param elm: XML element
    :type elm: :class:`xml.etree.ElementTree`
    :param remove_namespaces: flag to indicate the removal of XML namespaces
    :type remove_namespaces: bool
    :return: the inner text and elements of the given XML element
    :rtype: str
    """
    # Leading text of the element itself (``.text`` may be None).
    result = [elm.text or '']
    # Iterating the element directly replaces the deprecated
    # ``getchildren()`` (removed from xml.etree in Python 3.9).
    for child in elm:
        # Encoding is set to utf-8 because otherwise a character such as
        # `ó` would come out as a numeric character reference.
        child_value = etree.tostring(child, encoding='utf-8')
        # Decode to a string for later regexp and whitespace stripping
        result.append(child_value.decode('utf-8'))
    if remove_namespaces:
        # Best way to remove the namespaces without having the parser
        # complain about producing invalid XML.
        result = [re.sub(' xmlns:?[^=]*="[^"]*"', '', v) for v in result]
    # Join the results and strip any surrounding whitespace
    return ''.join(result).strip()
def load(self, file_name):
    """Load a raw data-file.

    Args:
        file_name (path)

    Returns:
        loaded test
    """
    # Read the raw file, then run the sanity inspection on the result.
    raw = self.loader(file_name)
    return self.inspect(raw)
def datetime2ole(dt):
    """Convert a ``datetime`` object to an OLE automation date (float).

    OLE dates count (fractional) days since the OLE epoch
    (``OLE_TIME_ZERO``); dividing the timedelta by one day yields the
    float directly.
    """
    return (dt - OLE_TIME_ZERO) / datetime.timedelta(days=1)
def get_raw_limits():
    """Include the settings for how to decide what kind of
    step you are examining here.

    The raw limits are 'epsilons' used to check if the current
    and/or voltage is stable (for example for galvanostatic steps,
    one would expect that the current is stable (constant) and
    non-zero). It is expected that different instruments (with
    different resolution etc.) have different 'epsilons'.

    Returns: the raw limits (dict)
    """
    return {
        "current_hard": 0.0000000000001,
        "current_soft": 0.00001,
        "stable_current_hard": 2.0,
        "stable_current_soft": 4.0,
        "stable_voltage_hard": 2.0,
        "stable_voltage_soft": 4.0,
        "stable_charge_hard": 2.0,
        "stable_charge_soft": 5.0,
        "ir_change": 0.00001,
    }
def loader(self, file_name, bad_steps=None, **kwargs):
    """Loads data from biologics .mpr files.

    Args:
        file_name (str): path to .res file.
        bad_steps (list of tuples): (c, s) tuples of steps s
            (in cycle c) to skip loading.

    Returns:
        new_tests (list of data objects)
    """
    new_tests = []
    if not os.path.isfile(file_name):
        self.logger.info("Missing file_\n %s" % file_name)
        return None
    # Log the size of the file (human readable) for debugging.
    filesize = os.path.getsize(file_name)
    hfilesize = humanize_bytes(filesize)
    txt = "Filesize: %i (%s)" % (filesize, hfilesize)
    self.logger.debug(txt)
    # creating temporary file and connection
    # Work on a copy in the temp dir so the original file is not touched.
    temp_dir = tempfile.gettempdir()
    temp_filename = os.path.join(temp_dir, os.path.basename(file_name))
    shutil.copy2(file_name, temp_dir)
    self.logger.debug("tmp file: %s" % temp_filename)
    self.logger.debug("HERE WE LOAD THE DATA")
    data = DataSet()
    fid = FileID(file_name)
    # div parameters and information (probably load this last)
    test_no = 1
    data.test_no = test_no
    data.loaded_from = file_name
    # some overall prms
    # NOTE(review): these channel/test attributes are not read from the
    # .mpr file here and are left as None — confirm they are filled in
    # elsewhere if needed.
    data.channel_index = None
    data.channel_number = None
    data.creator = None
    data.item_ID = None
    data.schedule_file_name = None
    data.start_datetime = None
    data.test_ID = None
    data.test_name = None
    data.raw_data_files.append(fid)
    # --------- read raw-data (normal-data) -------------------------
    self.logger.debug("reading raw-data")
    # _load_mpr_data populates self.mpr_data / self.mpr_log /
    # self.mpr_settings as a side effect.
    self.mpr_data = None
    self.mpr_log = None
    self.mpr_settings = None
    self._load_mpr_data(temp_filename, bad_steps)
    length_of_test = self.mpr_data.shape[0]
    self.logger.debug(f"length of test: {length_of_test}")
    self.logger.debug("renaming columns")
    self._rename_headers()
    # --------- stats-data (summary-data) -------------------------
    summary_df = self._create_summary_data()
    if summary_df.empty:
        txt = "\nCould not find any summary (stats-file)!"
        txt += " (summary_df.empty = True)"
        txt += "\n -> issue make_summary(use_cellpy_stat_file=False)"
        warnings.warn(txt)
    data.dfsummary = summary_df
    data.dfdata = self.mpr_data
    data.raw_data_files_length.append(length_of_test)
    new_tests.append(data)
    # Remove the temporary copy created above.
    self._clean_up(temp_filename)
    return new_tests
def csv_dumper(**kwargs):
    """dump data to csv"""
    logging.info("dumping to csv")
    barn = kwargs["barn"]
    farms = kwargs["farms"]
    experiments = kwargs["experiments"]
    for experiment, farm in zip(experiments, farms):
        name = experiment.journal.name
        project = experiment.journal.project
        project_dir, batch_dir, raw_dir = experiment.journal.paginate()
        if batch_dir is None:
            logging.info("have to generate folder-name on the fly")
            (out_data_dir, project_dir,
             batch_dir, raw_dir) = generate_folder_names(name, project)
        # ``barn`` either names one of the known folders or is itself a
        # directory path.
        known_dirs = {
            "batch_dir": batch_dir,
            "project_dir": project_dir,
            "raw_dir": raw_dir,
        }
        out_dir = known_dirs.get(barn, barn)
        for animal in farm:
            file_name = os.path.join(
                out_dir, "summary_%s_%s.csv" % (animal.name, name))
            logging.info(f"> {file_name}")
            animal.to_csv(file_name, sep=prms.Reader.sep)
def ram_dumper(**kwargs):
    """Dump data to 'memory' for later usage."""
    logging.debug("trying to save stuff in memory")
    farms = kwargs["farms"]
    experiments = kwargs["experiments"]
    engine = kwargs["engine"]
    try:
        engine_name = engine.__name__
    except AttributeError:
        engine_name = engine.__dict__.__name__
    # Only engines on this list can be dumped to memory.
    if engine_name not in ("summary_engine",):
        return
    logging.debug("found the engine that I will try to dump from: "
                  f"{engine_name}")
    for experiment, farm in zip(experiments, farms):
        name = experiment.journal.name
        project = experiment.journal.project
        experiment.memory_dumped[engine_name] = farm
        logging.debug(f"farm put into memory_dumped ({project}::{name})")
def screen_dumper(**kwargs):
    """Dump data to screen."""
    farms = kwargs["farms"]
    engine = kwargs["engine"]
    logging.info("dumping to screen")
    print(f"\n[Screen dumper] ({engine})")
    try:
        number_of_farms = len(farms)
    except TypeError:
        # farms is not sized (e.g. None) - nothing to show.
        print(" - your farm has burned to the ground.")
        return
    if number_of_farms == 1:
        print("You have one farm with little pandas.")
    else:
        print(f"You have {number_of_farms} farms with little pandas.")
    for number, farm in enumerate(farms):
        print(f"[#{number + 1}]You have {len(farm)} "
              f"little pandas in this farm.")
        for animal in farm:
            print("=" * 80)
            try:
                print(animal.name)
            except AttributeError:
                print("no-name")
            print("-" * 80)
            print(animal.head(5))
            print()
def create_legend(info, c, option="clean", use_index=False):
    """creating more informative legends"""
    logging.debug(" - creating legends")
    mass, loading, label = info.loc[c, ["masses", "loadings", "labels"]]
    if use_index or not label:
        # Fall back to the cell name with its first "_"-separated part
        # stripped off.
        parts = c.split("_")
        label = "_".join(parts[1:])
    if option == "clean":
        return label
    if option == "mass":
        return f"{label} ({mass:.2f} mg)"
    if option == "loading":
        return f"{label} ({loading:.2f} mg/cm2)"
    if option == "all":
        return f"{label} ({mass:.2f} mg) ({loading:.2f} mg/cm2)"
    return label
def create_plot_option_dicts(info, marker_types=None, colors=None,
                             line_dash=None, size=None):
    """Create two dictionaries with plot-options.

    The first iterates colors (based on group-number), the second iterates
    through marker types.

    Returns: group_styles (dict), sub_group_styles (dict)
    """
    logging.debug(" - creating plot-options-dict (for bokeh)")
    # Currently only works for bokeh.
    if marker_types is None:
        marker_types = ["circle", "square", "triangle", "invertedtriangle",
                        "diamond", "cross", "asterix"]
    if line_dash is None:
        line_dash = [0, 0]
    if size is None:
        size = 10
    groups = info.groups.unique()
    if colors is None:
        # brewer palettes require at least 3 and at most 9 colors
        n_colors = min(9, max(3, len(groups)))
        colors = bokeh.palettes.brewer['YlGnBu'][n_colors]
    group_styles = {}
    color_cycle = itertools.cycle(colors)
    for group in groups:
        color = next(color_cycle)
        group_styles[group] = {
            "marker": {"line_color": color, "fill_color": color},
            "line": {"line_color": color},
        }
    sub_group_styles = {}
    marker_cycle = itertools.cycle(marker_types)
    for sub_group in info.sub_groups.unique():
        sub_group_styles[sub_group] = {
            "marker": {"marker": next(marker_cycle), "size": size},
            "line": {"line_dash": line_dash},
        }
    return group_styles, sub_group_styles
def summary_plotting_engine(**kwargs):
    """creates plots of summary data."""
    logging.debug(f"Using {prms.Batch.backend} for plotting")
    experiments = kwargs["experiments"]
    farms = kwargs["farms"]
    logging.debug(" - summary_plotting_engine")
    # The heavy lifting (data prep + actual plotting) happens here.
    farms = _preparing_data_and_plotting(
        experiments=experiments,
        farms=farms,
    )
    # No barn is used for plots.
    return farms, None
def run_dumper(self, dumper):
    """run dumper (once pr. engine)

    Args:
        dumper: dumper to run (function or method).

    The dumper takes the attributes experiments, farms, and barn as input.
    It does not return anything. But can, if the dumper designer feels in
    a bad and nasty mood, modify the input objects
    (for example experiments).
    """
    logging.debug("start dumper::")
    dumper_kwargs = {
        "experiments": self.experiments,
        "farms": self.farms,
        "barn": self.barn,
        "engine": self.current_engine,
    }
    dumper(**dumper_kwargs)
    logging.debug("::dumper ended")
def _comports():
    '''
    Returns
    -------

        pandas.DataFrame :
            Table containing descriptor, and hardware ID of each available
            COM port, indexed by port (e.g., "COM4").
    '''
    rows = [list(port_info)
            for port_info in serial.tools.list_ports.comports()]
    table = pd.DataFrame(rows, columns=['port', 'descriptor', 'hardware_id'])
    return table.set_index('port')
def _get_serial_ports_windows():
    '''
    Uses the Win32 registry to return a iterator of serial (COM) ports
    existing on this computer.

    See http://stackoverflow.com/questions/1205383/listing-serial-com-ports-on-windows
    '''
    import six.moves.winreg as winreg

    reg_path = 'HARDWARE\\DEVICEMAP\\SERIALCOMM'
    try:
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, reg_path)
    except WindowsError:
        # No serial ports. Return empty generator.
        return
    index = 0
    while True:
        try:
            # EnumValue raises (a subclass of) EnvironmentError once the
            # index runs past the last registry value.
            yield str(winreg.EnumValue(key, index)[1])
        except EnvironmentError:
            break
        index += 1
def get_port(self, baud_rate):
    '''
    Using the specified baud-rate, attempt to connect to each available
    serial port.  If the `test_connection()` method returns `True` for a
    port, update the `port` attribute and return the port.

    In the case where the `test_connection()` does not return `True` for
    any of the evaluated ports, raise a `ConnectionError`.
    '''
    self.port = None
    for candidate in get_serial_ports():
        if self.test_connection(candidate, baud_rate):
            self.port = candidate
            break
        # Give the device a moment before probing the next port.
        sleep(0.1)
    if self.port is None:
        raise ConnectionError('Could not connect to serial device.')
    return self.port
def _read(name):
    """Read the yml file and return its content as a dict.

    Raises:
        yaml.YAMLError: if the file cannot be parsed as YAML.
    """
    logging.debug("Reading config-file: %s" % name)
    try:
        with open(name, "r") as config_file:
            # SafeLoader: a prm/config file should never construct
            # arbitrary python objects (PyYAML >= 6 also requires an
            # explicit loader).
            prm_dict = yaml.load(config_file, Loader=yaml.SafeLoader)
    except yaml.YAMLError:
        # The old code did ``raise yaml.YAMLErrorr`` (a typo), which
        # masked the parse error with an AttributeError.  Re-raise the
        # original exception instead.
        raise
    return prm_dict
def cycles_engine(**kwargs):
    """engine to extract cycles"""
    logging.info("cycles_engine:")
    logging.info("Not ready for production")
    # raise NotImplementedError
    experiments = kwargs["experiments"]
    barn = "raw_dir"  # Its a murder in the red barn - murder in the red barn
    farms = []
    for experiment in experiments:
        farms.append([])
        if experiment.all_in_memory:
            logging.debug("all in memory")
            for key in experiment.cell_data_frames:
                logging.debug(f"extracting cycles from {key}")
        else:
            logging.debug("dont have it in memory - need to lookup in the files")
            for key in experiment.cell_data_frames:
                logging.debug(f"looking up cellpyfile for {key}")
    return farms, barn
def raw_data_engine(**kwargs):
    """engine to extract raw data (not implemented yet)"""
    # The old implementation logged "cycles_engine" here (copy-paste
    # error) and carried unreachable code after the raise; both fixed.
    logger.debug("raw_data_engine")
    raise NotImplementedError
def summary_engine(**kwargs):
    """engine to extract summary data"""
    logger.debug("summary_engine")
    farms = []
    for experiment in kwargs["experiments"]:
        selected = experiment.selected_summaries
        if selected is None:
            # Default set of summary columns.
            selected = [
                "discharge_capacity", "charge_capacity",
                "coulombic_efficiency",
                "cumulated_coulombic_efficiency",
                "ir_discharge", "ir_charge",
                "end_voltage_discharge", "end_voltage_charge",
            ]
        farms.append(
            helper.join_summaries(experiment.summary_frames, selected)
        )
    return farms, "batch_dir"
def simple_db_engine(reader=None, srnos=None):
    """engine that gets values from the simple excel 'db'"""
    if reader is None:
        reader = dbreader.Reader()
        logger.debug("No reader provided. Creating one myself.")

    # One db-lookup per column; keeps the key/getter pairing explicit.
    getters = [
        ("filenames", reader.get_cell_name),
        ("masses", reader.get_mass),
        ("total_masses", reader.get_total_mass),
        ("loadings", reader.get_loading),
        ("fixed", reader.inspect_hd5f_fixed),
        ("labels", reader.get_label),
        ("cell_type", reader.get_cell_type),
    ]
    info_dict = {key: [getter(srno) for srno in srnos]
                 for key, getter in getters}
    info_dict["raw_file_names"] = []
    info_dict["cellpy_file_names"] = []
    logger.debug("created info-dict")
    for key, value in info_dict.items():
        logger.debug("%s: %s" % (key, str(value)))

    _groups = [reader.get_group(srno) for srno in srnos]
    logger.debug(">\ngroups: %s" % str(_groups))
    info_dict["groups"] = helper.fix_groups(_groups)

    my_timer_start = time.time()
    info_dict = helper.find_files(info_dict, [])
    my_timer_end = time.time()
    if (my_timer_end - my_timer_start) > 5.0:
        logger.info(
            "The function _find_files was very slow. "
            "Save your info_df so you don't have to run it again!"
        )

    info_df = pd.DataFrame(info_dict)
    info_df = info_df.sort_values(["groups", "filenames"])
    info_df = helper.make_unique_groups(info_df)
    info_df["labels"] = info_df["filenames"].apply(helper.create_labels)
    info_df.set_index("filenames", inplace=True)
    return info_df
def orify(event, changed_callback):
    '''
    Override ``set`` and ``clear`` methods on event to call specified
    callback function after performing default behaviour.

    Parameters
    ----------
    '''
    event.changed = changed_callback
    if hasattr(event, '_set'):
        # Already orified: `set`/`clear` were overridden earlier; only the
        # callback needed updating.
        return
    # Keep references to the original methods, then wrap them so the
    # callback fires after the default behaviour.
    event._set = event.set
    event._clear = event.clear
    event.set = lambda: or_set(event)
    event.clear = lambda: or_clear(event)
def OrEvent(*events):
    '''
    Parameters
    ----------
    events : list(threading.Event)
        List of events.

    Returns
    -------
    threading.Event
        Event that is set when **at least one** of the events in
        :data:`events` is set.
    '''
    or_event = threading.Event()

    def _sync_state():
        '''
        Mirror the OR of all source events onto ``or_event``.
        '''
        if any(event_i.is_set() for event_i in events):
            or_event.set()
        else:
            or_event.clear()

    for event_i in events:
        # Override ``set`` and ``clear`` methods on event to update state
        # of `or_event` after performing default behaviour.
        orify(event_i, _sync_state)
    # Set initial state of `or_event`.
    _sync_state()
    return or_event
def request(device, response_queue, payload, timeout_s=None, poll=POLL_QUEUES):
    '''
    Send payload to serial device and wait for response.

    Parameters
    ----------
    device : serial.Serial
        Serial instance.
    response_queue : Queue.Queue
        Queue to wait for response on.
    payload : str or bytes
        Payload to send.
    timeout_s : float, optional
        Maximum time to wait (in seconds) for response.
        By default, block until response is ready.
    poll : bool, optional
        If ``True``, poll response queue in a busy loop until response is
        ready (or timeout occurs).  Polling is much more processor
        intensive, but (at least on Windows) results in faster response
        processing.  On Windows, polling is enabled by default.
    '''
    device.write(payload)
    if not poll:
        # Polling disabled.  Use the blocking `Queue.get()` method to
        # wait for the response.
        return response_queue.get(timeout=timeout_s)
    # Polling enabled.  Wait for response in busy loop.
    started_at = dt.datetime.now()
    while not response_queue.qsize():
        elapsed_s = (dt.datetime.now() - started_at).total_seconds()
        if elapsed_s > timeout_s:
            raise queue.Empty('No response received.')
    return response_queue.get()
def connection_made(self, transport):
    """Called when reader thread is started"""
    self.port = transport.serial.port
    logger.debug('connection_made: `%s` `%s`', self.port, transport)
    self.transport = transport
    # Signal waiters (e.g. `write`) that the connection is now up.
    self.connected.set()
    self.disconnected.clear()
def connection_lost(self, exception):
    """\
    Called when the serial port is closed or the reader loop terminated
    otherwise.
    """
    # `exception` is an Exception on error, or None on a clean close.
    if isinstance(exception, Exception):
        logger.debug('Connection to port `%s` lost: %s', self.port,
                     exception)
    else:
        logger.debug('Connection to port `%s` closed', self.port)
    # Flip the events so waiters see the connection as down.
    self.connected.clear()
    self.disconnected.set()
def write(self, data, timeout_s=None):
    '''
    Write to serial port.

    Waits for serial connection to be established before writing.

    Parameters
    ----------
    data : str or bytes
        Data to write to serial port.
    timeout_s : float, optional
        Maximum number of seconds to wait for serial connection to be
        established.  By default, block until serial connection is ready.
    '''
    # Block until the reader thread reports an open connection.
    self.connected.wait(timeout_s)
    transport = self.protocol.transport
    transport.write(data)
def request(self, response_queue, payload, timeout_s=None,
            poll=POLL_QUEUES):
    '''
    Send payload and wait for the response.

    Parameters
    ----------
    response_queue : Queue.Queue
        Queue to wait for response on.
    payload : str or bytes
        Payload to send.
    timeout_s : float, optional
        Maximum time to wait (in seconds) for response.
        By default, block until response is ready.
    poll : bool, optional
        If ``True``, poll response queue in a busy loop until response is
        ready (or timeout occurs).  Polling is much more processor
        intensive, but (at least on Windows) results in faster response
        processing.  On Windows, polling is enabled by default.
    '''
    # Wait until the connection is established, then delegate to the
    # module-level ``request`` helper with this device as the serial
    # instance.
    self.connected.wait(timeout_s)
    return request(self, response_queue, payload, timeout_s=timeout_s,
                   poll=poll)
def fieldname_to_dtype(fieldname):
    """Converts a column header from the MPT file into a tuple of
    canonical name and appropriate numpy dtype"""
    # np.float64 is used instead of the alias np.float_ (removed in
    # numpy 2.0); it is the same dtype.
    if fieldname == 'mode':
        return ('mode', np.uint8)
    if fieldname in ("ox/red", "error", "control changes", "Ns changes",
                     "counter inc."):
        return (fieldname, np.bool_)
    if fieldname in ("time/s", "P/W", "(Q-Qo)/mA.h", "x", "control/V",
                     "control/V/mA", "(Q-Qo)/C", "dQ/C", "freq/Hz",
                     "|Ewe|/V", "|I|/A", "Phase(Z)/deg", "|Z|/Ohm",
                     "Re(Z)/Ohm", "-Im(Z)/Ohm"):
        return (fieldname, np.float64)
    if fieldname in ("cycle number", "I Range", "Ns", "half cycle"):
        return (fieldname, np.int_)
    # Columns below are renamed to a canonical header:
    if fieldname in ("dq/mA.h", "dQ/mA.h"):
        return ("dQ/mA.h", np.float64)
    if fieldname in ("I/mA", "<I>/mA"):
        return ("I/mA", np.float64)
    if fieldname in ("Ewe/V", "<Ewe>/V"):
        return ("Ewe/V", np.float64)
    raise ValueError("Invalid column header: %s" % fieldname)
def comma_converter(float_string):
    """Convert bytes to float whether the decimal point is '.' or ','."""
    # The old implementation rebuilt a translation table on every call via
    # the py2-only ``string.maketrans`` name; a plain bytes replace is
    # equivalent and python-3 safe.
    return float(float_string.replace(b',', b'.'))
def MPTfile(file_or_path):
    """Opens .mpt files as numpy record arrays

    Checks for the correct headings, skips any comments and returns a
    numpy record array object and a list of comments
    """
    opened_here = isinstance(file_or_path, str)
    mpt_file = open(file_or_path, 'rb') if opened_here else file_or_path
    try:
        magic = next(mpt_file)
        if magic != b'EC-Lab ASCII FILE\r\n':
            raise ValueError("Bad first line for EC-Lab file: '%s'" % magic)

        # rb'' (raw bytes) avoids the invalid-escape warning for \d.
        match = re.match(rb'Nb header lines : (\d+)\s*$', next(mpt_file))
        if match is None:
            raise ValueError("Missing 'Nb header lines' in EC-Lab file")
        nb_headers = int(match.group(1))
        if nb_headers < 3:
            raise ValueError("Too few header lines: %d" % nb_headers)

        # The 'magic number' line, the 'Nb headers' line and the column
        # headers make three lines. Every additional line is a comment.
        comments = [next(mpt_file) for i in range(nb_headers - 3)]

        fieldnames = str3(next(mpt_file)).strip().split('\t')
        record_type = np.dtype(list(map(fieldname_to_dtype, fieldnames)))

        # Must be able to parse files where commas are used for decimal
        # points.
        converter_dict = dict((i, comma_converter)
                              for i in range(len(fieldnames)))
        mpt_array = np.loadtxt(mpt_file, dtype=record_type,
                               converters=converter_dict)
    finally:
        # Close the handle only if we opened it here (loadtxt has fully
        # consumed the file by now); the old code leaked it.
        if opened_here:
            mpt_file.close()
    return mpt_array, comments
def MPTfileCSV(file_or_path):
    """Simple function to open MPT files as csv.DictReader objects

    Checks for the correct headings, skips any comments and returns a
    csv.DictReader object and a list of comments
    """
    if isinstance(file_or_path, str):
        # NOTE: the file must stay open because the returned DictReader
        # reads from it lazily -- the caller owns the handle's lifetime.
        mpt_file = open(file_or_path, 'r')
    else:
        mpt_file = file_or_path

    magic = next(mpt_file)
    if magic.rstrip() != 'EC-Lab ASCII FILE':
        raise ValueError("Bad first line for EC-Lab file: '%s'" % magic)

    # Raw string avoids the invalid-escape warning for \d; the explicit
    # None-check replaces an AttributeError on malformed files.
    match = re.match(r'Nb header lines : (\d+)\s*$', next(mpt_file))
    if match is None:
        raise ValueError("Missing 'Nb header lines' in EC-Lab file")
    nb_headers = int(match.group(1))
    if nb_headers < 3:
        raise ValueError("Too few header lines: %d" % nb_headers)

    # The 'magic number' line, the 'Nb headers' line and the column
    # headers make three lines. Every additional line is a comment line.
    comments = [next(mpt_file) for i in range(nb_headers - 3)]

    mpt_csv = csv.DictReader(mpt_file, dialect='excel-tab')

    expected_fieldnames = (
        ["mode", "ox/red", "error", "control changes", "Ns changes",
         "counter inc.", "time/s", "control/V/mA", "Ewe/V", "dq/mA.h",
         "P/W", "<I>/mA", "(Q-Qo)/mA.h", "x"],
        ['mode', 'ox/red', 'error', 'control changes', 'Ns changes',
         'counter inc.', 'time/s', 'control/V', 'Ewe/V', 'dq/mA.h',
         '<I>/mA', '(Q-Qo)/mA.h', 'x'],
        ["mode", "ox/red", "error", "control changes", "Ns changes",
         "counter inc.", "time/s", "control/V", "Ewe/V", "I/mA",
         "dQ/mA.h", "P/W"],
        ["mode", "ox/red", "error", "control changes", "Ns changes",
         "counter inc.", "time/s", "control/V", "Ewe/V", "<I>/mA",
         "dQ/mA.h", "P/W"])
    if mpt_csv.fieldnames not in expected_fieldnames:
        raise ValueError("Unrecognised headers for MPT file format")

    return mpt_csv, comments
def read_VMP_modules(fileobj, read_module_data=True):
    """Reads in module headers in the VMPmodule_hdr format. Yields a dict
    with the headers and offset for each module.

    N.B. the offset yielded is the offset to the start of the data i.e.
    after the end of the header. The data runs from (offset) to
    (offset+length)
    """
    while True:
        module_magic = fileobj.read(len(b'MODULE'))
        if len(module_magic) == 0:  # end of file
            # ``raise StopIteration`` inside a generator became a
            # RuntimeError with PEP 479 (python 3.7); a plain return
            # ends the iteration correctly.
            return
        elif module_magic != b'MODULE':
            raise ValueError(
                "Found %r, expecting start of new VMP MODULE" % module_magic)

        hdr_bytes = fileobj.read(VMPmodule_hdr.itemsize)
        if len(hdr_bytes) < VMPmodule_hdr.itemsize:
            raise IOError("Unexpected end of file while reading module header")

        # np.frombuffer replaces the deprecated np.fromstring.
        hdr = np.frombuffer(hdr_bytes, dtype=VMPmodule_hdr, count=1)
        hdr_dict = dict(((n, hdr[n][0]) for n in VMPmodule_hdr.names))
        hdr_dict['offset'] = fileobj.tell()
        if read_module_data:
            hdr_dict['data'] = fileobj.read(hdr_dict['length'])
            if len(hdr_dict['data']) != hdr_dict['length']:
                # The old code raised IOError with a missing format
                # string here (a syntax error); supply a real message.
                raise IOError(
                    "Unexpected end of file while reading data of module"
                    " '%s': got %d bytes, expected %d"
                    % (hdr_dict['longname'], len(hdr_dict['data']),
                       hdr_dict['length']))
            yield hdr_dict
        else:
            yield hdr_dict
            fileobj.seek(hdr_dict['offset'] + hdr_dict['length'], SEEK_SET)
def get_headers_global():
    """Defines the so-called global column headings for Arbin .res-files"""
    # - global column headings (specific for Arbin)
    return {
        "applications_path_txt": 'Applications_Path',
        "channel_index_txt": 'Channel_Index',
        "channel_number_txt": 'Channel_Number',
        "channel_type_txt": 'Channel_Type',
        "comments_txt": 'Comments',
        "creator_txt": 'Creator',
        "daq_index_txt": 'DAQ_Index',
        "item_id_txt": 'Item_ID',
        "log_aux_data_flag_txt": 'Log_Aux_Data_Flag',
        "log_chanstat_data_flag_txt": 'Log_ChanStat_Data_Flag',
        "log_event_data_flag_txt": 'Log_Event_Data_Flag',
        "log_smart_battery_data_flag_txt": 'Log_Smart_Battery_Data_Flag',
        "mapped_aux_conc_cnumber_txt": 'Mapped_Aux_Conc_CNumber',
        "mapped_aux_di_cnumber_txt": 'Mapped_Aux_DI_CNumber',
        "mapped_aux_do_cnumber_txt": 'Mapped_Aux_DO_CNumber',
        "mapped_aux_flow_rate_cnumber_txt": 'Mapped_Aux_Flow_Rate_CNumber',
        "mapped_aux_ph_number_txt": 'Mapped_Aux_PH_Number',
        "mapped_aux_pressure_number_txt": 'Mapped_Aux_Pressure_Number',
        "mapped_aux_temperature_number_txt": 'Mapped_Aux_Temperature_Number',
        "mapped_aux_voltage_number_txt": 'Mapped_Aux_Voltage_Number',
        "schedule_file_name_txt": 'Schedule_File_Name',  # KEEP FOR CELLPY FILE FORMAT
        "start_datetime_txt": 'Start_DateTime',
        "test_id_txt": 'Test_ID',  # KEEP FOR CELLPY FILE FORMAT
        "test_name_txt": 'Test_Name',  # KEEP FOR CELLPY FILE FORMAT
    }
def inspect(self, run_data):
    """Inspect the file -> reports to log (debug)"""
    # TODO: type checking
    if not DEBUG_MODE:
        return run_data
    checked_rundata = []
    for data in run_data:
        available_cols = data.dfdata.columns
        for col in self.headers_normal:
            if col not in available_cols:
                logging.debug(f"Missing col: {col}")
                # data.dfdata[col] = np.nan
        checked_rundata.append(data)
    return checked_rundata
def _save_multi(data, file_name, sep=";"):
    """convenience function for storing data column-wise in a csv-file."""
    logger.debug("saving multi")
    with open(file_name, "w", newline='') as f:
        logger.debug(f"{file_name} opened")
        writer = csv.writer(f, delimiter=sep)
        try:
            # zip_longest transposes the list of columns into rows.
            rows = itertools.zip_longest(*data)
            writer.writerows(rows)
        except Exception as e:
            logger.info(f"Exception encountered in batch._save_multi: {e}")
            raise ExportFailed
        logger.debug("wrote rows using itertools in _save_multi")
from cellpy.utils.ica import dqdv
list_of_cycles = cell_data.get_cycle_numbers()
if last_cycle is not None:
list_of_cycles = [c for c in list_of_cycles if c <= int(last_cycle)]
logger.debug(f"only processing up to cycle {last_cycle}")
logger.debug(f"you have {len(list_of_cycles)} cycles to process")
out_data = []
for cycle in list_of_cycles:
try:
c, v = extract_func(cycle)
v, dq = dqdv(v, c)
v = v.tolist()
dq = dq.tolist()
except NullData as e:
v = list()
dq = list()
logger.info(" Ups! Could not process this (cycle %i)" % cycle)
logger.info(" %s" % e)
header_x = "dQ cycle_no %i" % cycle
header_y = "voltage cycle_no %i" % cycle
dq.insert(0, header_x)
v.insert(0, header_y)
out_data.append(v)
out_data.append(dq)
return out_data | def _extract_dqdv(cell_data, extract_func, last_cycle) | Simple wrapper around the cellpy.utils.ica.dqdv function. | 3.257844 | 2.907577 | 1.120467 |
batch_name = batch_name
batch_col = batch_col
logger.debug(f"batch_name, batch_col: {batch_name}, {batch_col}")
if reader is None:
reader_obj = get_db_reader(reader_label)
reader = reader_obj()
srnos = reader.select_batch(batch_name, batch_col)
logger.debug("srnos:" + str(srnos))
info_dict = _create_info_dict(reader, srnos)
info_df = pd.DataFrame(info_dict)
info_df = info_df.sort_values(["groups", "filenames"])
info_df = _make_unique_groups(info_df)
info_df["labels"] = info_df["filenames"].apply(create_labels)
info_df.set_index("filenames", inplace=True)
return info_df | def make_df_from_batch(batch_name, batch_col="b01", reader=None, reader_label=None) | Create a pandas DataFrame with the info needed for ``cellpy`` to load
the runs.
Args:
batch_name (str): Name of the batch.
batch_col (str): The column where the batch name is in the db.
reader (method): the db-loader method.
reader_label (str): the label for the db-loader (if db-loader method is
not given)
Returns: info_df (pandas DataFrame) | 2.981219 | 3.041009 | 0.980339 |
out_data_dir = prms.Paths["outdatadir"]
project_dir = os.path.join(out_data_dir, project_name)
batch_dir = os.path.join(project_dir, batch_name)
raw_dir = os.path.join(batch_dir, "raw_data")
# create folders
if not os.path.isdir(project_dir):
os.mkdir(project_dir)
if not os.path.isdir(batch_dir):
os.mkdir(batch_dir)
if not os.path.isdir(raw_dir):
os.mkdir(raw_dir)
# create file-name for the info_df (json)
info_file = "cellpy_batch_%s.json" % batch_name
info_file = os.path.join(project_dir, info_file)
return info_file, (project_dir, batch_dir, raw_dir) | def create_folder_structure(project_name, batch_name) | This function creates a folder structure for the batch project.
The folder structure consists of main working folder ``project_name`
located in the ``outdatadir`` (as defined in the cellpy configuration file)
with a sub-folder named ``batch_name``. It also creates a folder
inside the ``batch_name`` folder for storing the raw data.
If the folders do not exist, they will be made. The function also returns
the name of the info-df.
Args:
project_name: name of the project
batch_name: name of the batch
Returns: (info_file, (project_dir, batch_dir, raw_dir)) | 2.310587 | 1.749324 | 1.320846 |
if not frames:
logger.info("Could save summaries - no summaries to save!")
logger.info("You have no frames - aborting")
return None
if not keys:
logger.info("Could save summaries - no summaries to save!")
logger.info("You have no keys - aborting")
return None
selected_summaries_dict = create_selected_summaries_dict(selected_summaries)
summary_df = pd.concat(frames, keys=keys, axis=1)
# saving the selected summaries
for key, value in selected_summaries_dict.items():
_summary_file_name = os.path.join(batch_dir, "summary_%s_%s.csv" % (
key, batch_name))
_summary_df = summary_df.iloc[:,
summary_df.columns.get_level_values(1) == value]
# include function to tweak headers here (need to learn MultiIndex)
_header = _summary_df.columns
_summary_df.to_csv(_summary_file_name, sep=";")
logger.info(
"saved summary (%s) to:\n %s" % (key, _summary_file_name))
logger.info("finished saving summaries")
return summary_df | def save_summaries(frames, keys, selected_summaries, batch_dir, batch_name) | Writes the summaries to csv-files
Args:
frames: list of ``cellpy`` summary DataFrames
keys: list of indexes (typically run-names) for the different runs
selected_summaries: list defining which summary data to save
batch_dir: directory to save to
batch_name: the batch name (will be used for making the file-name(s))
Returns: a pandas DataFrame with your selected summaries. | 3.457001 | 3.301373 | 1.04714 |
selected_summaries_dict = create_selected_summaries_dict(selected_summaries)
value = selected_summaries_dict[key]
return summary_df.iloc[:, summary_df.columns.get_level_values(1) == value] | def pick_summary_data(key, summary_df, selected_summaries) | picks the selected pandas.DataFrame | 2.887296 | 2.886495 | 1.000278 |
logger.debug("trying to plot summary data")
if plot_style is None:
logger.debug("no plot_style given, using default")
plot_style = DEFAULT_PLOT_STYLE
else:
logger.debug("plot_style given")
list_of_lines = list()
for datacol in df.columns:
group = info_df.get_value(datacol[0], "groups")
sub_group = info_df.get_value(datacol[0], "sub_groups")
color = color_list[group - 1]
marker = symbol_list[sub_group - 1]
plot_style["marker"] = marker
plot_style["markeredgecolor"] = color
plot_style["color"] = color
plot_style["markerfacecolor"] = 'none'
logger.debug("selecting color for group: " + str(color))
if not is_charge:
plot_style["markerfacecolor"] = color
lines = ax.plot(df[datacol], **plot_style)
list_of_lines.extend(lines)
return list_of_lines, plot_style | def plot_summary_data(ax, df, info_df, color_list, symbol_list, is_charge=False,
plot_style=None) | creates a plot of the selected df-data in the given axes.
Typical usage:
standard_fig, (ce_ax, cap_ax, ir_ax) = plt.subplots(nrows=3, ncols=1,
sharex=True)
list_of_lines, plot_style = plot_summary_data(ce_ax, ce_df,
info_df=info_df,
color_list=color_list,
symbol_list=symbol_list,
is_charge=False,
plot_style=plot_style)
the ce_df is a pandas.DataFrame with ce-values for all your selected
cells. the color_list and the symbol_list are both list with colors and
symbols to use when plotting to ensure that if you have several subplots
(axes), then the lines and symbols match up for each given cell.
Args:
ax: the matplotlib axes to plot on
df: DataFrame with the data to plot
info_df: DataFrame with info for the data
color_list: List of colors to use
symbol_list: List of symbols to use
is_charge: plots open symbols if True
plot_style: selected style of the plot
Returns: list of the matplotlib lines (convenient to have if you are adding
a custom legend) the plot style (dictionary with matplotlib plotstyles) | 2.59047 | 2.451043 | 1.056885 |
logger.debug("exporting dqdv")
filename = cell_data.dataset.loaded_from
no_merged_sets = ""
firstname, extension = os.path.splitext(filename)
firstname += no_merged_sets
if savedir:
    firstname = os.path.join(savedir, os.path.basename(firstname))
    logger.debug(f"savedir is true: {firstname}")
list_of_cycles = cell_data.get_cycle_numbers()
number_of_cycles = len(list_of_cycles)
logger.debug("%s: you have %i cycles" % (filename, number_of_cycles))

def _extract_and_save(extract_func, outname, tag):
    # extract ica for one direction (charge/discharge) and save it as csv
    out_data = _extract_dqdv(cell_data, extract_func, last_cycle)
    logger.debug(f"extracted ica for {tag}")
    try:
        _save_multi(data=out_data, file_name=outname, sep=sep)
    except ExportFailed:
        logger.info(f"could not export ica for {tag}")
    else:
        logger.debug(f"saved ica for {tag}")

_extract_and_save(cell_data.get_ccap,
                  firstname + "_dqdv_charge.csv", "charge")
_extract_and_save(cell_data.get_dcap,
                  firstname + "_dqdv_discharge.csv", "discharge")
Args:
cell_data: CellpyData instance
savedir: path to the folder where the files should be saved
sep: separator for the .csv-files.
last_cycle: only export up to this cycle (if not None) | 3.089112 | 3.05875 | 1.009926 |
# set up cellpy logger
default_log_level = kwargs.pop("default_log_level", None)
import cellpy.log as log
log.setup_logging(custom_log_dir=prms.Paths["filelogdir"],
default_level=default_log_level)
b = Batch(*args, **kwargs)
return b | def init(*args, **kwargs) | Returns an initialized instance of the Batch class | 6.812766 | 6.322292 | 1.077578 |
# Ad-hoc debugging helper: load a saved batch info-file and re-run the
# raw-loading step with forced cellpy-file reading.
print("In debugging")
# NOTE(review): hard-coded, machine-specific Windows path - only works on
# the original developer's machine.
json_file = r"C:\Scripting\Processing\Cell" \
            r"data\outdata\SiBEC\cellpy_batch_bec_exp02.json"
b = init(default_log_level="DEBUG")
b.load_info_df(json_file)
print(b.info_df.head())
# setting some variables
b.export_raw = False
b.export_cycles = False
b.export_ica = False
b.save_cellpy_file = True
b.force_raw_file = False
# force loading from the cellpy (hdf5) files rather than the raw files
b.force_cellpy_file = True
b.load_and_save_raw(parent_level="cellpydata")
logger.debug("running create_info_df")
# instantiate the db reader and build the info DataFrame from the batch
db_reader = self.reader()
self.info_df = make_df_from_batch(
    self.name, batch_col=self.batch_col, reader=db_reader)
logger.debug(str(self.info_df.head(5)))
logger.debug("running save_info_df")
# bundle the info DataFrame together with the packed prms (metadata)
payload = {'info_df': self.info_df, 'metadata': self._prm_packer()}
# DataFrames are not json-serializable directly; round-trip them
# through their own to_json representation
serialized = json.dumps(
    payload,
    default=lambda frame: json.loads(frame.to_json()),
)
with open(self.info_file, 'w') as outfile:
    outfile.write(serialized)
logger.info("Saved file to {}".format(self.info_file))
# fall back to the instance's own info-file when none is given
if file_name is None:
    file_name = self.info_file
with open(file_name, 'r') as infile:
    top_level_dict = json.load(infile)
self.info_df = pd.DataFrame(top_level_dict['info_df'])
# restore the packed prms (metadata)
self._prm_packer(top_level_dict['metadata'])
self.info_file = file_name
logger.debug("loaded info_df")
logger.debug(" info_file: %s" % self.info_file)
(JSON file) | 3.10202 | 3.080471 | 1.006995 |
# delegate to the module-level helper and record the resulting paths
info_file, dirs = create_folder_structure(self.project, self.name)
self.info_file = info_file
self.project_dir, self.batch_dir, self.raw_dir = dirs
logger.debug("create folders:" + str(dirs))
Project - Batch-name - Raw-data-dir
The info_df JSON-file will be stored in the Project folder.
The summary-files will be saved in the Batch-name folder.
The raw data (including exported cycles and ica-data) will be saved to
the Raw-data-dir. | 7.872791 | 5.645789 | 1.394454 |
sep = prms.Reader["sep"]
# instance setting wins over the global prms default
use_cellpy_stat_file = (
    prms.Reader.use_cellpy_stat_file
    if self.use_cellpy_stat_file is None
    else self.use_cellpy_stat_file
)
logger.debug(f"b.load_and_save_raw: "
             f"use_cellpy_stat_file = {use_cellpy_stat_file}")
self.frames, self.keys, errors = read_and_save_data(
    self.info_df,
    self.raw_dir,
    sep=sep,
    force_raw=self.force_raw_file,
    force_cellpy=self.force_cellpy_file,
    export_cycles=self.export_cycles,
    shifted_cycles=self.shifted_cycles,
    export_raw=self.export_raw,
    export_ica=self.export_ica,
    save=self.save_cellpy_file,
    use_cellpy_stat_file=use_cellpy_stat_file,
    parent_level=parent_level,
    last_cycle=self.last_cycle,
)
logger.debug("loaded and saved data. errors:" + str(errors))
# write one csv per selected summary and keep the joined frame
self.summary_df = save_summaries(
    self.frames,
    self.keys,
    self.selected_summaries,
    self.batch_dir,
    self.name,
)
logger.debug("made and saved summaries")
each containing values from all cells | 10.032148 | 8.659361 | 1.158532 |
# fall back to the default when no (or an unknown) figure type is given
if not figure_type:
    figure_type = self.default_figure_type
if figure_type not in self.default_figure_types:
    logger.debug("unknown figure type selected")
    figure_type = self.default_figure_type
color_list, symbol_list = self._create_colors_markers_list()
summary_df = self.summary_df
selected_summaries = self.selected_summaries
batch_dir = self.batch_dir
batch_name = self.name
fig, ax = plot_summary_figure(self.info_df, summary_df, color_list,
                              symbol_list, selected_summaries,
                              batch_dir, batch_name, show=show,
                              save=save, figure_type=figure_type)
# keep references to the created figure/axes, keyed by figure type
self.figure[figure_type] = fig
self.axes[figure_type] = ax
Args:
show: shows the figure if True.
save: saves the figure if True.
figure_type: optional, figure type to create. | 2.967981 | 3.075691 | 0.96498 |
logging.info("[establishing links]")
logging.debug("checking and establishing link to data")
cell_data_frames = dict()
counter = 0
errors = []
try:
    for indx, row in self.journal.pages.iterrows():
        counter += 1
        l_txt = "starting to process file # %i (index=%s)" % (counter, indx)
        logging.debug(l_txt)
        logging.info(f"linking cellpy-file: {row.cellpy_file_names}")
        if not os.path.isfile(row.cellpy_file_names):
            logging.error("File does not exist")
            raise IOError
        # create a bare CellpyData object holding only the step table
        cell_data_frames[indx] = cellreader.CellpyData(initialize=True)
        step_table = helper.look_up_and_get(
            row.cellpy_file_names,
            "step_table"
        )
        cell_data_frames[indx].dataset.step_table = step_table
    # only publish the links when every file was found
    self.cell_data_frames = cell_data_frames
except IOError as e:
    # a single missing file aborts the whole linking step
    logging.warning(e)
    e_txt = "links not established - try update"
    logging.warning(e_txt)
    errors.append(e_txt)
self.errors["link"] = errors
each cell.
The experiment will then contain a CellpyData object for each cell
(in the cell_data_frames attribute) with only the step-table stored.
Remark that running update persists the summary frames instead (or
everything in case you specify all_in_memory=True).
This might be considered "a strange and unexpected behaviour". Sorry
for that (but the authors of this package are also a bit strange...). | 5.478053 | 4.612666 | 1.187611 |
prm_dir = get_package_prm_dir()
if not init_filename:
init_filename = DEFAULT_FILENAME
src = os.path.join(prm_dir, init_filename)
return src | def get_default_config_file_path(init_filename=None) | gets the path to the default config-file | 3.887202 | 3.665866 | 1.060377 |
user_dir = get_user_dir()
dst_file = os.path.join(user_dir, init_filename)
return user_dir, dst_file | def get_user_dir_and_dst(init_filename) | gets the name of the user directory and full prm filepath | 1.951708 | 1.86002 | 1.049294 |
click.echo("[cellpy] (setup)")
# generate variables
init_filename = create_custom_init_filename()
userdir, dst_file = get_user_dir_and_dst(init_filename)
if testuser:
    # dev/test mode: write the config under root_dir (cwd by default)
    # with a per-testuser filename instead of the real user dir
    if not root_dir:
        root_dir = os.getcwd()
    click.echo(f"[cellpy] (setup) DEV-MODE testuser: {testuser}")
    init_filename = create_custom_init_filename(testuser)
    userdir = root_dir
    dst_file = get_dst_file(userdir, init_filename)
    click.echo(f"[cellpy] (setup) DEV-MODE userdir: {userdir}")
    click.echo(f"[cellpy] (setup) DEV-MODE dst_file: {dst_file}")
if not pathlib.Path(dst_file).is_file():
    # no config file yet -> force a full (re)write regardless of the flag
    reset = True
if interactive:
    click.echo(" interactive mode ".center(80, "-"))
    # let the user review/adjust paths before the file is written
    _update_paths(root_dir, not not_relative, dry_run=dry_run, reset=reset)
    _write_config_file(
        userdir, dst_file,
        init_filename, dry_run,
    )
    _check()
else:
    _write_config_file(userdir, dst_file, init_filename, dry_run)
    _check()
for f in repo.get_contents(gdirpath):
if f.type == "dir":
for sf in repo.get_contents(f.path):
yield sf
else:
yield f | def _parse_g_dir(repo, gdirpath) | parses a repo directory two-levels deep | 3.116143 | 3.06847 | 1.015537 |
# infoname = '/CellpyData/info'
# dataname = '/CellpyData/dfdata'
# summaryname = '/CellpyData/dfsummary'
# fidname = '/CellpyData/fidtable'
# stepname = '/CellpyData/step_table'
root = '/CellpyData'
table_path = '/'.join([root, table_name])
logging.debug(f"look_up_and_get({cellpy_file_name}, {table_name}")
store = pd.HDFStore(cellpy_file_name)
table = store.select(table_path)
store.close()
return table | def look_up_and_get(cellpy_file_name, table_name) | Extracts table from cellpy hdf5-file. | 3.699271 | 3.582183 | 1.032686 |
_groups = []
for g in groups:
try:
if not float(g) > 0:
_groups.append(1000)
else:
_groups.append(int(g))
except TypeError as e:
logging.info("Error in reading group number (check your db)")
logging.debug(g)
logging.debug(e)
_groups.append(1000)
return _groups | def fix_groups(groups) | Takes care of strange group numbers. | 3.352087 | 3.329778 | 1.0067 |
headers_summary = cellpy.parameters.internal_settings.get_headers_summary()
selected_summaries = dict()
for h in summaries_list:
selected_summaries[h] = headers_summary[h]
return selected_summaries | def create_selected_summaries_dict(summaries_list) | Creates a dictionary with summary column headers.
Examples:
>>> summaries_to_output = ["discharge_capacity", "charge_capacity"]
>>> summaries_to_output_dict = create_selected_summaries_dict(
>>> summaries_to_output
>>> )
>>> print(summaries_to_output_dict)
{'discharge_capacity': "Discharge_Capacity(mAh/g)",
'charge_capacity': "Charge_Capacity(mAh/g)"}
Args:
summaries_list: list containing cellpy summary column id names
Returns: dictionary of the form {cellpy id name: cellpy summary
header name,} | 5.358166 | 5.077538 | 1.055268 |
selected_summaries_dict = create_selected_summaries_dict(selected_summaries)
frames = []
keys = []
for key in summary_frames:
keys.append(key)
if summary_frames[key].empty:
logging.debug("Empty summary_frame encountered")
frames.append(summary_frames[key])
out = []
summary_df = pd.concat(frames, keys=keys, axis=1)
for key, value in selected_summaries_dict.items():
_summary_df = summary_df.iloc[
:, summary_df.columns.get_level_values(1) == value
]
_summary_df.name = key
if not keep_old_header:
try:
_summary_df.columns = _summary_df.columns.droplevel(-1)
except AttributeError as e:
logging.debug("could not drop level from frame")
logging.debug(e)
out.append(_summary_df)
logger.debug("finished joining summaries")
return out | def join_summaries(summary_frames, selected_summaries, keep_old_header=False) | parse the summaries and combine based on column (selected_summaries) | 2.795867 | 2.758943 | 1.013383 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.