code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
def generate_folder_names(name, project):
    """Create sensible folder names for a batch run.

    Args:
        name: batch name.
        project: project name.

    Returns:
        tuple: (out_data_dir, project_dir, batch_dir, raw_dir) paths.
    """
    base_dir = prms.Paths.outdatadir
    project_path = os.path.join(base_dir, project)
    batch_path = os.path.join(project_path, name)
    raw_path = os.path.join(batch_path, "raw_data")
    return base_dir, project_path, batch_path, raw_path
def group_by_interpolate(df, x=None, y=None, group_by=None,
                         number_of_points=100, tidy=False,
                         individual_x_cols=False, header_name="Unit",
                         dx=10.0, generate_new_x=True):
    """Use this for generating wide format from long (tidy) data.

    Groups the frame, interpolates y as a function of x within each group,
    and concatenates the results.

    Args:
        df: long-format DataFrame with the data.
        x: column name for the x-values (defaults to the step-time column).
        y: column name for the y-values (defaults to the voltage column).
        group_by: column name(s) to group by (defaults to the cycle index).
        number_of_points: number of interpolation points (overrides dx).
        tidy (bool): return long (tidy) format if True.
        individual_x_cols (bool): keep one x-column per group (wide only).
        header_name (str): name for the extra column level in wide format.
        dx (float): step size used when number_of_points is falsy.
        generate_new_x (bool): generate one common x-axis for all groups.

    Returns:
        pandas.DataFrame with the interpolated y-values.
    """
    time_00 = time.time()
    if x is None:
        x = HEADERS_NORMAL.step_time_txt
    if y is None:
        y = HEADERS_NORMAL.voltage_txt
    if group_by is None:
        group_by = [HEADERS_NORMAL.cycle_index_txt]
    if not isinstance(group_by, (list, tuple)):
        group_by = [group_by]
    # bug fix: work on a copy so appending header_name below does not
    # mutate the caller's list
    group_by = list(group_by)
    if not generate_new_x:
        # check if it makes sense
        if (not tidy) and (not individual_x_cols):
            logging.warning("Unlogical condition")
            generate_new_x = True
    new_x = None
    if generate_new_x:
        x_max = df[x].max()
        x_min = df[x].min()
        # NOTE(review): the common axis is generated from max to min
        # (decreasing) - confirm this ordering is intended
        if number_of_points:
            new_x = np.linspace(x_max, x_min, number_of_points)
        else:
            new_x = np.arange(x_max, x_min, dx)
    new_dfs = []
    keys = []
    for name, group in df.groupby(group_by):
        keys.append(name)
        if not isinstance(name, (list, tuple)):
            name = [name]
        new_group = _interpolate_df_col(
            group, x=x, y=y, new_x=new_x,
            number_of_points=number_of_points,
            dx=dx,
        )
        if tidy or (not tidy and not individual_x_cols):
            # tag the interpolated rows with their group labels
            for i, j in zip(group_by, name):
                new_group[i] = j
        new_dfs.append(new_group)
    if tidy:
        new_df = pd.concat(new_dfs)
    else:
        if individual_x_cols:
            new_df = pd.concat(new_dfs, axis=1, keys=keys)
            group_by.append(header_name)
            new_df.columns.names = group_by
        else:
            new_df = pd.concat(new_dfs)
            new_df = new_df.pivot(index=x, columns=group_by[0], values=y)
    # bug fix: this is a module-level function, not a method - the original
    # called self.logger.debug which raised NameError on every call
    logging.debug(f"(dt: {(time.time() - time_00):4.2f}s)")
    return new_df
if x is None:
x = df.columns[0]
if y is None:
y = df.columns[1]
xs = df[x].values
ys = df[y].values
if direction > 0:
x_min = xs.min()
x_max = xs.max()
else:
x_max = xs.min()
x_min = xs.max()
dx = -dx
bounds_error = kwargs.pop("bounds_error", False)
f = interpolate.interp1d(xs, ys, bounds_error=bounds_error, **kwargs)
if new_x is None:
if number_of_points:
new_x = np.linspace(x_min, x_max, number_of_points)
else:
new_x = np.arange(x_min, x_max, dx)
new_y = f(new_x)
new_df = pd.DataFrame(
{x: new_x, y: new_y}
)
return new_df | def _interpolate_df_col(df, x=None, y=None, new_x=None, dx=10.0,
number_of_points=None, direction=1, **kwargs) | Interpolate a column based on another column.
Args:
df: DataFrame with the (cycle) data.
x: Column name for the x-value (defaults to the step-time column).
y: Column name for the y-value (defaults to the voltage column).
new_x (numpy array or None): Interpolate using these new x-values
instead of generating x-values based on dx or number_of_points.
dx: step-value (defaults to 10.0)
number_of_points: number of points for interpolated values (use
instead of dx and overrides dx if given).
direction (-1,1): if direction is negetive, then invert the
x-values before interpolating.
**kwargs: arguments passed to scipy.interpolate.interp1d
Returns: DataFrame with interpolated y-values based on given or
generated x-values. | 1.709252 | 1.741789 | 0.98132 |
def _collect_capacity_curves(data, direction="charge"):
    """Create a list of pandas.DataFrames, one for each charge step.

    The DataFrames are named by their cycle number.

    Args:
        data: CellpyData object.
        direction (str): "charge" (uses get_ccap) or anything else
            (uses get_dcap).

    Returns:
        tuple: (list of DataFrames with columns "q" and "v",
                cycle numbers,
                minimum voltage value,
                maximum voltage value)
    """
    # fix: np.Inf was removed in numpy 2.0 - use np.inf
    minimum_v_value = np.inf
    maximum_v_value = -np.inf
    charge_list = []
    cycles = data.get_cycle_numbers()
    for cycle in cycles:
        try:
            if direction == "charge":
                q, v = data.get_ccap(cycle)
            else:
                q, v = data.get_dcap(cycle)
        except NullData as e:
            logging.warning(e)
            # NOTE(review): break (not continue) - assumes all cycles after
            # the first empty one are also empty; confirm this is intended
            break
        else:
            d = pd.DataFrame({"q": q, "v": v})
            d.name = cycle
            charge_list.append(d)
            v_min = v.min()
            v_max = v.max()
            if v_min < minimum_v_value:
                minimum_v_value = v_min
            if v_max > maximum_v_value:
                maximum_v_value = v_max
    return charge_list, cycles, minimum_v_value, maximum_v_value
def cell(filename=None, mass=None, instrument=None, logging_mode="INFO",
         cycle_mode=None, auto_summary=True):
    """Create a CellpyData object.

    Args:
        filename (str or path): cellpy-file or raw-file to load (optional).
        mass (float): active material mass (optional).
        instrument (str): instrument/file-type to use (optional).
        logging_mode (str): logging level (defaults to "INFO").
        cycle_mode (str): cycle mode (optional).
        auto_summary (bool): create step table and summary after loading.

    Returns:
        CellpyData object.
    """
    from cellpy import log
    log.setup_logging(default_level=logging_mode)
    cellpy_instance = setup_cellpy_instance()
    if instrument is not None:
        cellpy_instance.set_instrument(instrument=instrument)
    if cycle_mode is not None:
        cellpy_instance.cycle_mode = cycle_mode
    if filename is not None:
        filename = Path(filename)
        if filename.suffix in [".h5", ".hdf5", ".cellpy", ".cpy"]:
            # bug fix: the log messages did not interpolate the filename
            logging.info(f"Loading cellpy-file: {filename}")
            cellpy_instance.load(filename)
        else:
            logging.info(f"Loading raw-file: {filename}")
            cellpy_instance.from_raw(filename)
    if mass is not None:
        logging.info("Setting mass")
        cellpy_instance.set_mass(mass)
    if auto_summary:
        logging.info("Creating step table")
        cellpy_instance.make_step_table()
        logging.info("Creating summary data")
        cellpy_instance.make_summary()
    logging.info("Created CellpyData object")
    return cellpy_instance
def just_load_srno(srno, prm_filename=None):
    """Simply load an dataset based on serial number (srno).

    This convenience function reads a dataset based on a serial number. This
    serial number (srno) must then be defined in your database. It is mainly
    used to check that things are set up correctly.

    Args:
        srno (int): serial number
        prm_filename: name of parameter file (optional, currently unused).

    Returns:
        True on completion.

    Example:
        >>> srno = 918
        >>> just_load_srno(srno)
        srno: 918
        read prms
        ....
    """
    from cellpy import dbreader, filefinder
    print("just_load_srno: srno: %i" % srno)
    # ------------reading parameters--------------------------------------------
    # print "just_load_srno: read prms"
    # prm = prmreader.read(prm_filename)
    #
    # print prm
    print("just_load_srno: making class and setting prms")
    d = CellpyData()
    # ------------reading db----------------------------------------------------
    print()
    print("just_load_srno: starting to load reader")
    # reader = dbreader.reader(prm_filename)
    reader = dbreader.Reader()
    print("------ok------")
    # look up run name and active mass for this serial number in the database
    run_name = reader.get_cell_name(srno)
    print("just_load_srno: run_name:")
    print(run_name)
    m = reader.get_mass(srno)
    print("just_load_srno: mass: %f" % m)
    print()
    # ------------loadcell------------------------------------------------------
    print("just_load_srno: getting file_names")
    raw_files, cellpy_file = filefinder.search_for_files(run_name)
    print("raw_files:", raw_files)
    print("cellpy_file:", cellpy_file)
    print("just_load_srno: running loadcell")
    d.loadcell(raw_files, cellpy_file, mass=m)
    print("------ok------")
    # ------------do stuff------------------------------------------------------
    print("just_load_srno: getting step_numbers for charge")
    v = d.get_step_numbers("charge")
    print(v)
    print()
    print("just_load_srno: finding C-rates")
    d.find_C_rates(v, silent=False)
    print()
    print("just_load_srno: OK")
    return True
def load_and_save_resfile(filename, outfile=None, outdir=None, mass=1.00):
    """Load a raw data file and save it as cellpy-file.

    Args:
        filename (str): name of the resfile.
        outfile (str): optional, name of hdf5-file.
        outdir (path): optional, path to directory for saving the hdf5-file.
        mass (float): active material mass [mg].

    Returns:
        out_file_name (str): name of saved file.
    """
    d = CellpyData()
    if not outdir:
        outdir = prms.Paths["cellpydatadir"]
    if not outfile:
        # bug fix: use splitext so filenames containing extra dots keep
        # their full stem (basename.split(".")[0] truncated "a.b.res" to "a")
        outfile = os.path.splitext(os.path.basename(filename))[0] + ".h5"
    outfile = os.path.join(outdir, outfile)
    print("filename:", filename)
    print("outfile:", outfile)
    print("outdir:", outdir)
    print("mass:", mass, "mg")
    d.from_raw(filename)
    d.set_mass(mass)
    d.make_step_table()
    d.make_summary()
    d.save(filename=outfile)
    d.to_csv(datadir=outdir, cycles=True, raw=True, summary=True)
    return outfile
def load_and_print_resfile(filename, info_dict=None):
    """Load a raw data file and print information.

    Args:
        filename (str): name of the resfile.
        info_dict (dict): dictionary with the keys "mass", "nom_cap" and
            "tot_mass"; a default example dict is used if None.

    Returns:
        info_dict (dict): the dictionary that was used.
    """
    # NEXT: include nom_cap, tot_mass and parameters table in save/load hdf5
    # (the large block of commented-out legacy attribute assignments that
    # used to live here has been removed)
    if info_dict is None:
        info_dict = dict()
        info_dict["mass"] = 1.23  # mg
        info_dict["nom_cap"] = 3600  # mAh/g (active material)
        # fix: the old comment said "mAh/g" which is the wrong unit for a mass
        info_dict["tot_mass"] = 2.33  # mg (total mass of material)
    d = CellpyData()
    print("filename:", filename)
    print("info_dict in:", end=' ')
    print(info_dict)
    d.from_raw(filename)
    d.set_mass(info_dict["mass"])
    d.make_step_table()
    d.make_summary()
    for test in d.datasets:
        print("newtest")
        print(test)
    return info_dict
def set_instrument(self, instrument=None):
    """Set the instrument (i.e. tell cellpy the file-type you use).

    Args:
        instrument: (str) in ["arbin", "arbin_res", "arbin_sql",
            "arbin_experimental", "pec", "pec_csv", "biologics",
            "biologics_mpr", "custom"]. Uses self.tester if None.

    Raises:
        Exception: if the instrument name is not recognised.
    """
    if instrument is None:
        instrument = self.tester
    # map each recognised name to its (setter, tester-label) pair
    dispatch = {
        "arbin": (self._set_arbin, "arbin"),
        "arbin_res": (self._set_arbin, "arbin"),
        "arbin_sql": (self._set_arbin_sql, "arbin"),
        "arbin_experimental": (self._set_arbin_experimental, "arbin"),
        "pec": (self._set_pec, "pec"),
        "pec_csv": (self._set_pec, "pec"),
        "biologics": (self._set_biologic, "biologic"),
        "biologics_mpr": (self._set_biologic, "biologic"),
        "custom": (self._set_custom, "custom"),
    }
    if instrument not in dispatch:
        raise Exception(f"option does not exist: '{instrument}'")
    setter, tester_label = dispatch[instrument]
    setter()
    self.tester = tester_label
def set_raw_datadir(self, directory=None):
    """Set the directory containing .res-files.

    Used for setting directory for looking for res-files.
    A valid directory name is required.

    Args:
        directory (str): path to res-directory

    Example:
        >>> d = CellpyData()
        >>> directory = "MyData/Arbindata"
        >>> d.set_raw_datadir(directory)
    """
    if directory is None:
        self.logger.info("no directory name given")
        return
    if os.path.isdir(directory):
        self.raw_datadir = directory
    else:
        self.logger.info(directory)
        self.logger.info("directory does not exist")
def set_cellpy_datadir(self, directory=None):
    """Set the directory containing .hdf5-files.

    Used for setting directory for looking for hdf5-files.
    A valid directory name is required.

    Args:
        directory (str): path to hdf5-directory

    Example:
        >>> d = CellpyData()
        >>> directory = "MyData/HDF5"
        >>> d.set_cellpy_datadir(directory)
    """
    # docstring fix: the example used to (incorrectly) call set_raw_datadir
    if directory is None:
        self.logger.info("no directory name given")
        return
    if not os.path.isdir(directory):
        self.logger.info("directory does not exist")
        return
    self.cellpy_datadir = directory
def check_file_ids(self, rawfiles, cellpyfile):
    """Check the stats for the files (raw-data and cellpy hdf5).

    This function checks if the hdf5 file and the res-files have the same
    timestamps etc to find out if we need to bother to load .res -files.

    Args:
        rawfiles (list of str): name(s) of raw-data file(s).
        cellpyfile (str): filename of the cellpy hdf5-file.

    Returns:
        False if the raw files are newer than the cellpy hdf5-file
        (update needed), True otherwise.
    """
    self.logger.info(
        "checking file ids - using '%s'" % self.filestatuschecker
    )
    ids_cellpy_file = self._check_cellpy_file(cellpyfile)
    self.logger.debug(f"cellpyfile ids: {ids_cellpy_file}")
    if not ids_cellpy_file:
        # no (valid) cellpy-file -> must (re)load from raw
        return False
    ids_raw = self._check_raw(rawfiles)
    # up to date only when the id-sets agree
    return bool(self._compare_ids(ids_raw, ids_cellpy_file))
def _check_raw(self, file_names, abort_on_missing=False):
    """Get the file-ids for the res_files.

    Args:
        file_names: one file name or a list of file names.
        abort_on_missing (bool): exit the process if a file is missing.

    Returns:
        dict mapping basename -> id (size, modification time or access
        time, depending on self.filestatuschecker).
    """
    check_on = self.filestatuschecker
    if not self._is_listtype(file_names):
        file_names = [file_names, ]
    ids = dict()
    for file_name in file_names:
        self.logger.debug(f"checking res file {file_name}")
        fid = FileID(file_name)
        if fid.name is None:
            warnings.warn(f"file does not exist: {file_name}")
            if abort_on_missing:
                sys.exit(-1)
            continue
        # keys are always stripped to the basename
        key = os.path.basename(file_name)
        if check_on == "size":
            ids[key] = int(fid.size)
        elif check_on == "modified":
            ids[key] = int(fid.last_modified)
        else:
            ids[key] = int(fid.last_accessed)
    return ids
def _check_cellpy_file(self, filename):
    """Get the file-ids for the cellpy_file.

    Opens the hdf5-store and reads its stored fid-table (the record of
    which raw files the cellpy-file was built from).

    Args:
        filename: path to the cellpy (hdf5) file.

    Returns:
        dict mapping (base)name -> id (size, modification time or access
        time, depending on self.filestatuschecker), or None if the file
        does not exist, cannot be opened, or has no fid-table.
    """
    strip_filenames = True
    check_on = self.filestatuschecker
    self.logger.debug("checking cellpy-file")
    self.logger.debug(filename)
    if not os.path.isfile(filename):
        self.logger.debug("cellpy-file does not exist")
        return None
    try:
        store = pd.HDFStore(filename)
    except Exception as e:
        self.logger.debug(f"could not open cellpy-file ({e})")
        return None
    try:
        fidtable = store.select("CellpyData/fidtable")
    except KeyError:
        # old files may lack the fid-table entirely
        fidtable = None
        self.logger.warning("no fidtable -"
                            " you should update your hdf5-file")
    finally:
        # always release the hdf5-store handle
        store.close()
    if fidtable is not None:
        raw_data_files, raw_data_files_length = \
            self._convert2fid_list(fidtable)
        txt = "contains %i res-files" % (len(raw_data_files))
        self.logger.debug(txt)
        ids = dict()
        for fid in raw_data_files:
            full_name = fid.full_name
            size = fid.size
            mod = fid.last_modified
            self.logger.debug(f"fileID information for: {full_name}")
            self.logger.debug(f"  modified: {mod}")
            self.logger.debug(f"  size: {size}")
            if strip_filenames:
                name = os.path.basename(full_name)
            else:
                name = full_name
            if check_on == "size":
                ids[name] = int(fid.size)
            elif check_on == "modified":
                ids[name] = int(fid.last_modified)
            else:
                ids[name] = int(fid.last_accessed)
        return ids
    else:
        return None
def loadcell(self, raw_files, cellpy_file=None, mass=None,
             summary_on_raw=False, summary_ir=True, summary_ocv=False,
             summary_end_v=True, only_summary=False, only_first=False,
             force_raw=False,
             use_cellpy_stat_file=None):
    """Loads data for given cells.

    Args:
        raw_files (list): name of res-files
        cellpy_file (path): name of cellpy-file
        mass (float): mass of electrode or active material
        summary_on_raw (bool): use raw-file for summary
        summary_ir (bool): summarize ir
        summary_ocv (bool): summarize ocv steps
        summary_end_v (bool): summarize end voltage
        only_summary (bool): get only the summary of the runs
        only_first (bool): only use the first file fitting search criteria
        force_raw (bool): only use raw-files
        use_cellpy_stat_file (bool): use stat file if creating summary
            from raw

    Example:
        >>> srnos = my_dbreader.select_batch("testing_new_solvent")
        >>> cell_datas = []
        >>> for srno in srnos:
        >>> ... my_run_name = my_dbreader.get_cell_name(srno)
        >>> ... mass = my_dbreader.get_mass(srno)
        >>> ... rawfiles, cellpyfiles = \
        >>> ...     filefinder.search_for_files(my_run_name)
        >>> ... cell_data = cellreader.CellpyData()
        >>> ... cell_data.loadcell(raw_files=rawfiles,
        >>> ...                    cellpy_file=cellpyfiles)
        >>> ... cell_data.set_mass(mass)
        >>> ... if not cell_data.summary_exists:
        >>> ...     cell_data.make_summary() # etc. etc.
        >>> ... cell_datas.append(cell_data)
        >>>
    """
    # This is a part of a dramatic API change. It will not be possible to
    # load more than one set of datasets (i.e. one single cellpy-file or
    # several raw-files that will be automatically merged)
    self.logger.info("started loadcell")
    # decide whether the cellpy-file is up to date; if not, reload from raw
    if cellpy_file is None:
        similar = False
    elif force_raw:
        similar = False
    else:
        similar = self.check_file_ids(raw_files, cellpy_file)
    self.logger.debug("checked if the files were similar")
    if only_summary:
        self.load_only_summary = True
    else:
        self.load_only_summary = False
    if not similar:
        self.logger.info("cellpy file(s) needs updating - loading raw")
        self.logger.debug(raw_files)
        self.from_raw(raw_files)
        self.logger.debug("loaded files")
        # Check if the run was loaded ([] if empty)
        if self.status_datasets:
            if mass:
                self.set_mass(mass)
            if summary_on_raw:
                self.make_summary(all_tests=False, find_ocv=summary_ocv,
                                  find_ir=summary_ir,
                                  find_end_voltage=summary_end_v,
                                  use_cellpy_stat_file=use_cellpy_stat_file)
        else:
            self.logger.warning("Empty run!")
    else:
        # the cellpy-file is up to date - load it directly
        self.load(cellpy_file)
    return self
def from_raw(self, file_names=None, **kwargs):
    """Load a raw data-file.

    Args:
        file_names (list of raw-file names): uses CellpyData.file_names if
            None. If the list contains more than one file name, then the
            runs will be merged together.
        **kwargs: passed on to the raw-file loader.

    Returns:
        self
    """
    # This function only loads one test at a time (but could contain several
    # files). The function from_res() also implements loading several
    # datasets (using list of lists as input).
    if file_names:
        self.file_names = file_names
    if not isinstance(file_names, (list, tuple)):
        self.file_names = [file_names, ]
    raw_file_loader = self.loader
    set_number = 0
    test = None
    counter = 0
    self.logger.debug("start iterating through file(s)")
    for f in self.file_names:
        self.logger.debug("loading raw file:")
        self.logger.debug(f"{f}")
        new_tests = raw_file_loader(f, **kwargs)
        if new_tests:
            if test is not None:
                self.logger.debug("continuing reading files...")
                _test = self._append(test[set_number], new_tests[set_number])
                if not _test:
                    self.logger.warning(f"EMPTY TEST: {f}")
                    continue
                test[set_number] = _test
                self.logger.debug("added this test - started merging")
                for j in range(len(new_tests[set_number].raw_data_files)):
                    raw_data_file = new_tests[set_number].raw_data_files[j]
                    file_size = new_tests[set_number].raw_data_files_length[j]
                    test[set_number].raw_data_files.append(raw_data_file)
                    test[set_number].raw_data_files_length.append(file_size)
                counter += 1
                if counter > 10:
                    self.logger.debug("ERROR? Too many files to merge")
                    raise ValueError("Too many files to merge - "
                                     "could be a p2-p3 zip thing")
            else:
                self.logger.debug("getting data from first file")
                if new_tests[set_number].no_data:
                    self.logger.debug("NO DATA")
                else:
                    test = new_tests
        else:
            self.logger.debug("NOTHING LOADED")
    self.logger.debug("finished loading the raw-files")
    test_exists = False
    if test:
        if test[0].no_data:
            # bug fix: was self.logging.debug, which raised AttributeError
            # whenever the first/only dataset turned out to be empty
            self.logger.debug(
                "the first dataset (or only dataset) loaded from the raw data file is empty"
            )
        else:
            test_exists = True
    if test_exists:
        if not prms.Reader.sorted_data:
            self.logger.debug("sorting data")
            test[set_number] = self._sort_data(test[set_number])
        self.datasets.append(test[set_number])
    else:
        self.logger.warning("No new datasets added!")
    self.number_of_datasets = len(self.datasets)
    self.status_datasets = self._validate_datasets()
    self._invent_a_name()
    return self
def check(self):
    """Return False if no datasets exist or if one or more of the
    datasets are empty; True otherwise."""
    # all([]) would be True, so guard against the empty case first
    return bool(self.status_datasets) and all(self.status_datasets)
def load(self, cellpy_file, parent_level="CellpyData"):
    """Loads a cellpy file.

    Args:
        cellpy_file (path, str): Full path to the cellpy file.
        parent_level (str, optional): Parent level

    Returns:
        self
    """
    try:
        self.logger.debug("loading cellpy-file (hdf5):")
        self.logger.debug(cellpy_file)
        new_datasets = self._load_hdf5(cellpy_file, parent_level)
        self.logger.debug("cellpy-file loaded")
    except AttributeError:
        # raised by the reader when the file version is unsupported
        new_datasets = []
        self.logger.warning("This cellpy-file version is not supported by"
                            "current reader (try to update cellpy).")
    if new_datasets:
        self.datasets.extend(new_datasets)
    else:
        # raise LoadError
        self.logger.warning("Could not load")
        self.logger.warning(str(cellpy_file))
    self.number_of_datasets = len(self.datasets)
    self.status_datasets = self._validate_datasets()
    self._invent_a_name(cellpy_file)
    return self
def _load_hdf5(self, filename, parent_level="CellpyData"):
    """Load a cellpy-file.

    Args:
        filename (str): Name of the cellpy file.
        parent_level (str) (optional): name of the parent level
            (defaults to "CellpyData")

    Raises:
        IOError: if the file does not exist.
        Exception: if a required key is missing from the hdf5-store.
        WrongFileVersion: if the file version is outside the supported range.

    Returns:
        loaded datasets (list with one DataSet-object)
    """
    if not os.path.isfile(filename):
        # bug fix: the log message did not include the filename
        self.logger.info(f"file does not exist: {filename}")
        raise IOError
    store = pd.HDFStore(filename)
    # required_keys = ['dfdata', 'dfsummary', 'fidtable', 'info']
    required_keys = ['dfdata', 'dfsummary', 'info']
    required_keys = ["/" + parent_level + "/" + _ for _ in required_keys]
    for key in required_keys:
        if key not in store.keys():
            self.logger.info(f"This hdf-file is not good enough - "
                             f"at least one key is missing: {key}")
            raise Exception(f"OH MY GOD! At least one crucial key"
                            f"is missing {key}!")
    # NOTE(review): the store is not closed on the raise-paths above/below -
    # consider wrapping in try/finally
    self.logger.debug(f"Keys in current hdf5-file: {store.keys()}")
    data = DataSet()
    if parent_level != "CellpyData":
        self.logger.debug("Using non-default parent label for the "
                          "hdf-store: {}".format(parent_level))
    # checking file version
    infotable = store.select(parent_level + "/info")
    try:
        data.cellpy_file_version = \
            self._extract_from_dict(infotable, "cellpy_file_version")
    except Exception as e:
        data.cellpy_file_version = 0
        warnings.warn(f"Unhandled exception raised: {e}")
    if data.cellpy_file_version < MINIMUM_CELLPY_FILE_VERSION:
        raise WrongFileVersion
    if data.cellpy_file_version > CELLPY_FILE_VERSION:
        raise WrongFileVersion
    data.dfsummary = store.select(parent_level + "/dfsummary")
    data.dfdata = store.select(parent_level + "/dfdata")
    try:
        data.step_table = store.select(parent_level + "/step_table")
    except Exception as e:
        # bug fix: was self.logging.debug (AttributeError on this path)
        self.logger.debug("could not get step_table from cellpy-file")
        data.step_table = pd.DataFrame()
        warnings.warn(f"Unhandled exception raised: {e}")
    try:
        fidtable = store.select(
            parent_level + "/fidtable")  # remark! changed spelling from
        # lower letter to camel-case!
        fidtable_selected = True
    except Exception as e:
        # bug fix: was self.logging.debug (AttributeError on this path)
        self.logger.debug("could not get fid-table from cellpy-file")
        fidtable = []
        warnings.warn("no fidtable - you should update your hdf5-file")
        fidtable_selected = False
    self.logger.debug("  h5")
    # this does not yet allow multiple sets
    newtests = []  # but this is ready when that time comes
    # The infotable stores "meta-data". The following statement loads the
    # content of infotable and updates div. DataSet attributes.
    data = self._load_infotable(data, infotable, filename)
    if fidtable_selected:
        data.raw_data_files, data.raw_data_files_length = \
            self._convert2fid_list(fidtable)
    else:
        data.raw_data_files = None
        data.raw_data_files_length = None
    newtests.append(data)
    store.close()
    return newtests
def merge(self, datasets=None, separate_datasets=False):
    """This function merges datasets into one set.

    Args:
        datasets (list of ints): indices of the datasets to merge
            (defaults to all datasets).
        separate_datasets (bool): not implemented yet - a warning is
            issued and the option is ignored.

    Returns:
        self
    """
    self.logger.info("merging")
    if separate_datasets:
        warnings.warn("The option seperate_datasets=True is"
                      "not implemented yet. Performing merging, but"
                      "neglecting the option.")
    else:
        if datasets is None:
            datasets = list(range(len(self.datasets)))
        if not datasets:
            # bug fix: an empty selection previously raised
            # UnboundLocalError when assigning self.datasets = [dataset]
            self.logger.warning("no datasets to merge")
            return self
        first = True
        for dataset_number in datasets:
            if first:
                dataset = self.datasets[dataset_number]
                first = False
            else:
                dataset = self._append(dataset, self.datasets[dataset_number])
                # carry over the raw-file bookkeeping from the merged set
                for raw_data_file, file_size in zip(
                        self.datasets[dataset_number].raw_data_files,
                        self.datasets[dataset_number].raw_data_files_length):
                    dataset.raw_data_files.append(raw_data_file)
                    dataset.raw_data_files_length.append(file_size)
        self.datasets = [dataset]
        self.number_of_datasets = 1
    return self
def print_step_table(self, dataset_number=None):
    """Print the step table for the given dataset.

    Args:
        dataset_number: dataset to use (auto-selected if None).
    """
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    print(self.datasets[dataset_number].step_table)
def load_step_specifications(self, file_name, short=False,
                             dataset_number=None):
    """Load a table that contains step-type definitions.

    This function loads a file containing a specification for each step or
    for each (cycle_number, step_number) combinations if short==False. The
    step_cycle specifications that are allowed are stored in the variable
    cellreader.list_of_step_types.

    Args:
        file_name: csv-file with the step specifications.
        short (bool): the table consists of steps only (not (cycle, step)
            pairs) if True.
        dataset_number: dataset to use (auto-selected if None).

    Raises:
        IOError: if a required column is missing from the file.
    """
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    step_specs = pd.read_csv(file_name, sep=prms.Reader.sep)
    # "cycle" is only required when the table has (cycle, step) pairs
    required_cols = ("step", "type") if short else ("step", "type", "cycle")
    for col in required_cols:
        if col not in step_specs.columns:
            self.logger.info(f"{col} col is missing")
            raise IOError
    self.make_step_table(custom_step_definition=True,
                         step_specifications=step_specs,
                         short=short)
def to_csv(self, datadir=None, sep=None, cycles=False, raw=True,
           summary=True, shifted=False,
           method=None, shift=0.0,
           last_cycle=None):
    """Saves the data as .csv file(s).

    Args:
        datadir: folder where to save the data (uses current folder if not
            given).
        sep: the separator to use in the csv file
            (defaults to CellpyData.sep).
        cycles: (bool) export voltage-capacity curves if True.
        raw: (bool) export raw-data if True.
        summary: (bool) export summary if True.
        shifted (bool): export with cumulated shift.
        method (string): how the curves are given
            "back-and-forth" - standard back and forth; discharge
                (or charge) reversed from where charge (or
                discharge) ends.
            "forth" - discharge (or charge) continues along x-axis.
            "forth-and-forth" - discharge (or charge) also starts at 0 (or
                shift if not shift=0.0)
        shift: start-value for charge (or discharge)
        last_cycle: process only up to this cycle (if not None).

    Returns: Nothing
    """
    if sep is None:
        sep = self.sep
    self.logger.debug("saving to csv")
    dataset_number = -1
    for data in self.datasets:
        dataset_number += 1
        if not self._is_not_empty_dataset(data):
            # empty dataset - nothing to export
            self.logger.info("to_csv -")
            self.logger.info("empty test [%i]" % dataset_number)
            self.logger.info("not saved!")
        else:
            if isinstance(data.loaded_from, (list, tuple)):
                # merged run: base the output names on the first raw file
                # and tag them with the number of merged sets
                txt = "merged file"
                txt += "using first file as basename"
                self.logger.debug(txt)
                no_merged_sets = len(data.loaded_from)
                no_merged_sets = "_merged_" + str(no_merged_sets).zfill(3)
                filename = data.loaded_from[0]
            else:
                filename = data.loaded_from
                no_merged_sets = ""
            firstname, extension = os.path.splitext(filename)
            firstname += no_merged_sets
            if datadir:
                firstname = os.path.join(datadir,
                                         os.path.basename(firstname))
            if raw:
                outname_normal = firstname + "_normal.csv"
                self._export_normal(data, outname=outname_normal, sep=sep)
                # the steps-export requires that the step table was made
                if data.step_table_made is True:
                    outname_steps = firstname + "_steps.csv"
                    self._export_steptable(data, outname=outname_steps,
                                           sep=sep)
                else:
                    self.logger.debug("step_table_made is not True")
            if summary:
                outname_stats = firstname + "_stats.csv"
                self._export_stats(data, outname=outname_stats, sep=sep)
            if cycles:
                outname_cycles = firstname + "_cycles.csv"
                self._export_cycles(outname=outname_cycles,
                                    dataset_number=dataset_number,
                                    sep=sep, shifted=shifted,
                                    method=method, shift=shift,
                                    last_cycle=last_cycle)
def sget_voltage(self, cycle, step, set_number=None):
    """Returns voltage for cycle, step.

    Convinience function; same as issuing
    dfdata[(dfdata[cycle_index_header] == cycle) &
           (dfdata[step_index_header] == step)][voltage_header]

    Args:
        cycle: cycle number
        step: step number
        set_number: the dataset number (automatic selection if None)

    Returns:
        pandas.Series or None if empty
    """
    time_00 = time.time()
    set_number = self._validate_dataset_number(set_number)
    if set_number is None:
        self._report_empty_dataset()
        return
    headers = self.headers_normal
    frame = self.datasets[set_number].dfdata
    if isinstance(step, (list, tuple)):
        warnings.warn(f"The varialbe step is a list."
                      f"Should be an integer."
                      f"{step}")
        step = step[0]
    mask = ((frame[headers.cycle_index_txt] == cycle)
            & (frame[headers.step_index_txt] == step))
    selection = frame[mask]
    self.logger.debug(f"(dt: {(time.time() - time_00):4.2f}s)")
    if self.is_empty(selection):
        return None
    return selection[headers.voltage_txt]
def get_voltage(self, cycle=None, dataset_number=None, full=True):
    """Returns voltage (in V).

    Args:
        cycle: cycle number (all cycles if None)
        dataset_number: first dataset if None
        full: valid only for cycle=None (i.e. all cycles); returns the full
            pandas.Series if True, else a list of pandas.Series

    Returns:
        pandas.Series (or list of pandas.Series if cycle=None and
        full=False)
    """
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    headers = self.headers_normal
    cycle_col = headers.cycle_index_txt
    voltage_col = headers.voltage_txt
    frame = self.datasets[dataset_number].dfdata
    if cycle:
        self.logger.debug("getting voltage curve for cycle")
        selection = frame[frame[cycle_col] == cycle]
        if not self.is_empty(selection):
            return selection[voltage_col]
        return None
    if full:
        self.logger.debug("getting frame of all voltage-curves")
        return frame[voltage_col]
    self.logger.debug("getting list of voltage-curves for all cycles")
    curves = []
    last_cycle = np.amax(frame[cycle_col])
    for cycle_number in range(1, last_cycle + 1):
        self.logger.debug("Cycle %i: " % cycle_number)
        sub = frame[frame[cycle_col] == cycle_number]
        curves.append(sub[voltage_col])
    return curves
def get_current(self, cycle=None, dataset_number=None, full=True):
    """Returns current (in mA).

    Args:
        cycle: cycle number (all cycles if None)
        dataset_number: first dataset if None
        full: valid only for cycle=None (i.e. all cycles); returns the full
            pandas.Series if True, else a list of pandas.Series

    Returns:
        pandas.Series (or list of pandas.Series if cycle=None and
        full=False)
    """
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    headers = self.headers_normal
    cycle_col = headers.cycle_index_txt
    current_col = headers.current_txt
    frame = self.datasets[dataset_number].dfdata
    if cycle:
        self.logger.debug(f"getting current for cycle {cycle}")
        selection = frame[frame[cycle_col] == cycle]
        if not self.is_empty(selection):
            return selection[current_col]
        return None
    if full:
        self.logger.debug("getting all current-curves ")
        return frame[current_col]
    self.logger.debug("getting a list of current-curves for all cycles")
    curves = []
    last_cycle = np.amax(frame[cycle_col])
    for cycle_number in range(1, last_cycle + 1):
        self.logger.debug("Cycle %i: " % cycle_number)
        sub = frame[frame[cycle_col] == cycle_number]
        curves.append(sub[current_col])
    return curves
def sget_steptime(self, cycle, step, dataset_number=None):
    """Returns step time for cycle, step.

    Convenience function; same as issuing
        dfdata[(dfdata[cycle_index_header] == cycle) &
               (dfdata[step_index_header] == step)][step_time_header]

    Args:
        cycle: cycle number
        step: step number
        dataset_number: the dataset number (automatic selection if None)

    Returns:
        pandas.Series or None if empty
    """
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    cycle_index_header = self.headers_normal.cycle_index_txt
    step_time_header = self.headers_normal.step_time_txt
    step_index_header = self.headers_normal.step_index_txt
    test = self.datasets[dataset_number].dfdata
    if isinstance(step, (list, tuple)):
        # Only a single step is supported; fall back to the first element.
        # FIX: corrected the spelling ("varialbe") and the missing spaces
        # in the concatenated message parts.
        warnings.warn(f"The variable step is a list. "
                      f"Should be an integer. "
                      f"Using the first element of {step}.")
        step = step[0]
    c = test.loc[
        (test[cycle_index_header] == cycle) &
        (test[step_index_header] == step), :
    ]
    if not self.is_empty(c):
        t = c[step_time_header]
        return t
    else:
        return None
Convenience function; same as issuing
dfdata[(dfdata[cycle_index_header] == cycle) &
(dfdata[step_index_header] == step)][step_time_header]
Args:
cycle: cycle number
step: step number
dataset_number: the dataset number (automatic selection if None)
Returns:
pandas.Series or None if empty | 3.758993 | 3.1454 | 1.195076 |
def sget_timestamp(self, cycle, step, dataset_number=None):
    """Returns timestamp for cycle, step.

    Convenience function; same as issuing
        dfdata[(dfdata[cycle_index_header] == cycle) &
               (dfdata[step_index_header] == step)][timestamp_header]

    Args:
        cycle: cycle number
        step: step number
        dataset_number: the dataset number (automatic selection if None)

    Returns:
        pandas.Series (empty if no match)
    """
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    cycle_index_header = self.headers_normal.cycle_index_txt
    timestamp_header = self.headers_normal.test_time_txt
    step_index_header = self.headers_normal.step_index_txt
    test = self.datasets[dataset_number].dfdata
    if isinstance(step, (list, tuple)):
        # Only a single step is supported; fall back to the first element.
        # FIX: corrected the spelling ("varialbe") and the missing spaces
        # in the concatenated message parts.
        warnings.warn(f"The variable step is a list. "
                      f"Should be an integer. "
                      f"Using the first element of {step}.")
        step = step[0]
    c = test[(test[cycle_index_header] == cycle) &
             (test[step_index_header] == step)]
    if not self.is_empty(c):
        t = c[timestamp_header]
        return t
    else:
        # Explicit dtype avoids the pandas warning for empty Series and
        # matches the numeric timestamps returned on the non-empty path.
        return pd.Series(dtype="float64")
Convenience function; same as issuing
dfdata[(dfdata[cycle_index_header] == cycle) &
(dfdata[step_index_header] == step)][timestamp_header]
Args:
cycle: cycle number
step: step number
dataset_number: the dataset number (automatic selection if None)
Returns:
pandas.Series | 3.997557 | 3.438064 | 1.162735 |
def get_timestamp(self, cycle=None, dataset_number=None,
                  in_minutes=False, full=True):
    """Returns timestamps (in sec or minutes (if in_minutes==True)).

    Args:
        cycle: cycle number (all if None)
        dataset_number: first dataset if None
        in_minutes: return values in minutes instead of seconds if True
        full: valid only for cycle=None (i.e. all cycles), returns the full
            pandas.Series if True, else a list of pandas.Series

    Returns:
        pandas.Series (or list of pandas.Series if cycle=None and full=False)
    """
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    cycle_index_header = self.headers_normal.cycle_index_txt
    timestamp_header = self.headers_normal.test_time_txt
    v = pd.Series()
    test = self.datasets[dataset_number].dfdata
    if cycle:
        c = test[(test[cycle_index_header] == cycle)]
        if not self.is_empty(c):
            v = c[timestamp_header]
    else:
        if not full:
            self.logger.debug("getting timestamp for all cycles")
            v = []
            no_cycles = np.amax(test[cycle_index_header])
            for j in range(1, no_cycles + 1):
                self.logger.debug("Cycle %i: " % j)
                c = test[(test[cycle_index_header] == j)]
                v.append(c[timestamp_header])
        else:
            self.logger.debug("returning full timestamp col")
            v = test[timestamp_header]
    # BUG FIX: the seconds->minutes conversion block was duplicated, so
    # the values were divided by 60 twice (yielding hours, not minutes).
    # Convert exactly once, and also handle the list-of-Series case
    # (cycle=None, full=False), where in-place division would fail.
    if in_minutes and v is not None:
        if isinstance(v, list):
            v = [series / 60.0 for series in v]
        else:
            v /= 60.0
    return v
in_minutes=False, full=True) | Returns timestamps (in sec or minutes (if in_minutes==True)).
Args:
cycle: cycle number (all if None)
dataset_number: first dataset if None
in_minutes: return values in minutes instead of seconds if True
full: valid only for cycle=None (i.e. all cycles), returns the full
pandas.Series if True, else a list of pandas.Series
Returns:
pandas.Series (or list of pandas.Series if cycle=None and full=False)
def get_dcap(self, cycle=None, dataset_number=None):
    """Returns discharge_capacity (in mAh/g), and voltage."""
    # TODO: should return a DataFrame as default
    #  (but that requires updating e.g. batch_helpers.py first)
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    # _get_cap returns a (capacity, voltage) pair.
    return self._get_cap(cycle, dataset_number, "discharge")
def get_ccap(self, cycle=None, dataset_number=None):
    """Returns charge_capacity (in mAh/g), and voltage."""
    # TODO: should return a DataFrame as default
    #  (but that requires updating e.g. batch_helpers.py first)
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    # _get_cap returns a (capacity, voltage) pair.
    return self._get_cap(cycle, dataset_number, "charge")
# Normalize the cycle selection.
if cycles is None:
    cycles = self.get_cycle_numbers()
else:
    if not isinstance(cycles, (list, tuple)):
        cycles = [cycles, ]
    else:
        # NOTE(review): when an explicit list of cycles is given,
        # remove_first is forcibly disabled here. Looks deliberate (the
        # initial rest step only matters when all cycles are selected),
        # but confirm against callers.
        remove_first = False
# Build the step-type prefix to match; for direction="both" (or any
# other value) the bare "ocvrlx" prefix matches both up and down.
ocv_rlx_id = "ocvrlx"
if direction == "up":
    ocv_rlx_id += "_up"
elif direction == "down":
    ocv_rlx_id += "_down"
step_table = self.dataset.step_table
dfdata = self.dataset.dfdata
# Keep only the step-table rows for the wanted cycles ...
ocv_steps = step_table.loc[
    step_table["cycle"].isin(cycles), :
]
# ... and among those, only the OCV relaxation steps.
ocv_steps = ocv_steps.loc[
    ocv_steps.type.str.startswith(ocv_rlx_id), :
]
if remove_first:
    ocv_steps = ocv_steps.iloc[1:, :]
step_time_label = self.headers_normal.step_time_txt
voltage_label = self.headers_normal.voltage_txt
cycle_label = self.headers_normal.cycle_index_txt
step_label = self.headers_normal.step_index_txt
# Select the raw-data rows belonging to the chosen (cycle, step) pairs
# and keep only the columns documented in the return value.
selected_df = dfdata.where(
    dfdata[cycle_label].isin(ocv_steps.cycle) &
    dfdata[step_label].isin(ocv_steps.step)
).dropna()
selected_df = selected_df.loc[
    :, [cycle_label, step_label, step_time_label, voltage_label]
]
if interpolated:
    # Default interpolation step from the configuration if neither dx
    # nor number_of_points is given (number_of_points over-rides dx).
    if dx is None and number_of_points is None:
        dx = prms.Reader.time_interpolation_step
    new_dfs = list()
    groupby_list = [cycle_label, step_label]
    # Interpolate voltage vs step-time per (cycle, step) group, then
    # re-attach the group keys as columns.
    for name, group in selected_df.groupby(groupby_list):
        new_group = _interpolate_df_col(
            group,
            x=step_time_label,
            y=voltage_label,
            dx=dx,
            number_of_points=number_of_points,
        )
        for i, j in zip(groupby_list, name):
            new_group[i] = j
        new_dfs.append(new_group)
    selected_df = pd.concat(new_dfs)
return selected_df
remove_first=False,
interpolated=False,
dx=None,
number_of_points=None) | get the open curcuit voltage relaxation curves.
Args:
cycles (list of ints or None): the cycles to extract from
(selects all if not given).
direction ("up", "down", or "both"): extract only relaxations that
is performed during discharge for "up" (because then the
voltage relaxes upwards) etc.
remove_first: remove the first relaxation curve (typically,
the first curve is from the initial rest period between
assembling the cell to the actual testing/cycling starts)
interpolated (bool): set to True if you want the data to be
interpolated (e.g. for creating smaller files)
dx (float): the step used when interpolating.
number_of_points (int): number of points to use (over-rides dx)
for interpolation (i.e. the length of the interpolated data).
Returns:
A pandas.DataFrame with cycle-number, step-number, step-time, and
voltage columns. | 2.554695 | 2.516237 | 1.015284 |
# function for getting ocv curves
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
    self._report_empty_dataset()
    return
if ocv_type in ['ocvrlx_up', 'ocvrlx_down']:
    # A specific relaxation direction was requested.
    # NOTE(review): dataset_number=None is forwarded even though a
    # validated dataset_number is available — presumably _get_ocv
    # re-validates internally; confirm.
    ocv = self._get_ocv(dataset_number=None,
                        ocv_type=ocv_type,
                        select_last=True,
                        select_columns=True,
                        cycle_number=cycle_number,
                        )
    return ocv
else:
    # Default ("ocv"): return both the up and the down relaxations.
    ocv_up = self._get_ocv(dataset_number=None,
                           ocv_type='ocvrlx_up',
                           select_last=True,
                           select_columns=True,
                           cycle_number=cycle_number,
                           )
    ocv_down = self._get_ocv(dataset_number=None,
                             ocv_type='ocvrlx_down',
                             select_last=True,
                             select_columns=True,
                             cycle_number=cycle_number,
                             )
    return ocv_up, ocv_down
Args:
cycle_number (int): find for all cycles if None.
ocv_type ("ocv", "ocvrlx_up", "ocvrlx_down"):
ocv - get up and down (default)
ocvrlx_up - get up
ocvrlx_down - get down
dataset_number (int): test number (default first)
(usually not used).
Returns:
if cycle_number is not None
ocv or [ocv_up, ocv_down]
ocv (and ocv_up and ocv_down) are list
containg [time,voltage] (that are Series)
if cycle_number is None
[ocv1,ocv2,...ocvN,...] N = cycle
ocvN = pandas DataFrame containing the columns
cycle index, step time, step index, data point, datetime,
voltage
(TODO: check if copy or reference of dfdata is returned) | 2.203769 | 1.972384 | 1.117313 |
def get_number_of_cycles(self, dataset_number=None, steptable=None):
    """Get the number of cycles in the test."""
    # A supplied step table takes precedence over the raw data.
    if steptable is not None:
        return np.amax(steptable[self.headers_step_table.cycle])
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    raw = self.datasets[dataset_number].dfdata
    return np.amax(raw[self.headers_normal.cycle_index_txt])
def get_cycle_numbers(self, dataset_number=None, steptable=None):
    """Get a list containing all the cycle numbers in the test."""
    # A supplied step table takes precedence over the raw data.
    if steptable is not None:
        return np.unique(steptable[self.headers_step_table.cycle])
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    raw = self.datasets[dataset_number].dfdata
    return np.unique(raw[self.headers_normal.cycle_index_txt])
# Resolve the dataset (defaults to the currently selected one).
if not dataset:
    dataset_number = self._validate_dataset_number(None)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    dataset = self.datasets[dataset_number]
if not mass:
    mass = dataset.mass
# Output unit (e.g. mAh/g): cellpy charge unit per specific-mass unit.
if not to_unit:
    to_unit_cap = self.cellpy_units["charge"]
    to_unit_mass = self.cellpy_units["specific"]
    to_unit = to_unit_cap / to_unit_mass
# Input unit: raw-data charge unit per raw-data mass unit.
if not from_unit:
    from_unit_cap = self.raw_units["charge"]
    from_unit_mass = self.raw_units["mass"]
    from_unit = from_unit_cap / from_unit_mass
# Multiplier converting raw capacity to specific capacity (per mass).
return from_unit / to_unit / mass
to_unit=None, from_unit=None) | get the convertion values
Args:
dataset: DataSet object
mass: mass of electrode (for example active material in mg)
to_unit: (float) unit of input, f.ex. if unit of charge
is mAh and unit of mass is g, then to_unit for charge/mass
will be 0.001 / 1.0 = 0.001
from_unit: float) unit of output, f.ex. if unit of charge
is mAh and unit of mass is g, then to_unit for charge/mass
will be 1.0 / 0.001 = 1000.0
Returns:
multiplier (float) from_unit/to_unit * mass | 2.775584 | 2.742007 | 1.012246 |
# Delegate to the generic run-attribute setter for "mass".
self._set_run_attribute("mass", masses, dataset_number=dataset_number,
                        validated=validated)
# Delegate to the generic run-attribute setter for the total mass.
self._set_run_attribute("tot_mass", masses,
                        dataset_number=dataset_number,
                        validated=validated)
# NOTE(review): despite the copy-pasted docstring mentioning mass, this
# sets the nominal capacity ("nom_cap") attribute for the dataset(s).
self._set_run_attribute("nom_cap", nom_caps,
                        dataset_number=dataset_number,
                        validated=validated)
def set_col_first(df, col_names):
    """Set selected columns first in a pandas.DataFrame.

    This function sets cols with names given in col_names (a list) first in
    the DataFrame. The last col in col_name will come first (processed last).

    Args:
        df: the DataFrame whose columns should be reordered.
        col_names: list of column names to move to the front.

    Returns:
        A reindexed copy of the DataFrame (the input is not mutated).

    Raises:
        ValueError: if a name in col_names is not a column of df.
    """
    column_headings = df.columns.tolist()
    try:
        for col_name in col_names:
            # Move the column to the front; names later in col_names end
            # up before earlier ones. (FIX: the original looked each name
            # up twice and stored the first index in an unused variable.)
            column_headings.remove(col_name)
            column_headings.insert(0, col_name)
    finally:
        # Reindex even if a name was missing, preserving the historical
        # reorder-then-raise behavior.
        df = df.reindex(columns=column_headings)
    return df
This function sets cols with names given in col_names (a list) first in
the DataFrame. The last col in col_name will come first (processed last) | 2.380863 | 2.373904 | 1.002932 |
def get_summary(self, dataset_number=None, use_dfsummary_made=False):
    """Retrieve summary returned as a pandas DataFrame."""
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return None
    test = self.get_dataset(dataset_number)
    # Historically, the dfsummary_made attribute flagged a successfully
    # created summary. It is most likely never used anymore and will
    # most probably be deleted.
    summary_is_ready = test.dfsummary_made if use_dfsummary_made else True
    if not summary_is_ready:
        warnings.warn("Summary is not made yet")
        return None
    self.logger.info("returning datasets[test_no].dfsummary")
    return test.dfsummary
# first - check if we need some "instrument-specific" prms
if self.tester == "arbin":
    # NOTE(review): this silently overrides the convert_date argument
    # for arbin data — confirm that this is intended.
    convert_date = True
if ensure_step_table is None:
    ensure_step_table = self.ensure_step_table
# Reference: raw arbin columns look like this:
# Cycle_Index Test_Time(s) Test_Time(h) Date_Time Current(A)
# Current(mA) Voltage(V) Charge_Capacity(Ah) Discharge_Capacity(Ah)
# Charge_Energy(Wh) Discharge_Energy(Wh) Internal_Resistance(Ohm)
# AC_Impedance(Ohm) ACI_Phase_Angle(Deg) Charge_Time(s)
# DisCharge_Time(s) Vmax_On_Cycle(V) Coulombic_Efficiency
if use_cellpy_stat_file is None:
    # Fall back to the configured default.
    use_cellpy_stat_file = prms.Reader.use_cellpy_stat_file
    self.logger.debug("using use_cellpy_stat_file from prms")
    self.logger.debug(f"use_cellpy_stat_file: {use_cellpy_stat_file}")
if all_tests is True:
    # Summarize every loaded dataset.
    for j in range(len(self.datasets)):
        txt = "creating summary for file "
        test = self.datasets[j]
        if not self._is_not_empty_dataset(test):
            # NOTE(review): returns (aborting the remaining datasets) on
            # the first empty test — confirm this is the wanted behavior.
            self.logger.info("empty test %i" % j)
            return
        if isinstance(test.loaded_from, (list, tuple)):
            for f in test.loaded_from:
                txt += f
                txt += "\n"
        else:
            txt += str(test.loaded_from)
        if not test.mass_given:
            txt += " mass for test %i is not given" % j
            txt += " setting it to %f mg" % test.mass
        self.logger.debug(txt)
        self._make_summary(j,
                           find_ocv=find_ocv,
                           find_ir=find_ir,
                           find_end_voltage=find_end_voltage,
                           use_cellpy_stat_file=use_cellpy_stat_file,
                           ensure_step_table=ensure_step_table,
                           convert_date=convert_date,
                           )
else:
    # Summarize a single dataset.
    self.logger.debug("creating summary for only one test")
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    self._make_summary(dataset_number,
                       find_ocv=find_ocv,
                       find_ir=find_ir,
                       find_end_voltage=find_end_voltage,
                       use_cellpy_stat_file=use_cellpy_stat_file,
                       ensure_step_table=ensure_step_table,
                       convert_date=convert_date,
                       )
# Return self to allow method chaining.
return self
find_end_voltage=False,
use_cellpy_stat_file=None, all_tests=True,
dataset_number=0, ensure_step_table=True,
convert_date=False) | Convenience function that makes a summary of the cycling data. | 2.980392 | 2.979744 | 1.000217 |
epub = cnxepub.EPUB.from_file(epub_file_path)
if len(epub) != 1:
    raise Exception('Expecting an epub with one book')
package = epub[0]
# Adapt the single OPF package into a binder (tree of documents).
binder = cnxepub.adapt_package(package)
# Reset the module-level part counters (partcount/parts defined
# elsewhere in this module) before counting this book.
partcount.update({}.fromkeys(parts, 0))
partcount['book'] += 1
html = cnxepub.SingleHTMLFormatter(binder, includes=includes)
# Truncate binder to the first N chapters where N = numchapters.
logger.debug('Full binder: {}'.format(cnxepub.model_to_tree(binder)))
if numchapters is not None:
    apply_numchapters(html.get_node_type, binder, numchapters)
    logger.debug('Truncated Binder: {}'.format(
        cnxepub.model_to_tree(binder)))
# Add mathjax to the page.
if mathjax_version:
    etree.SubElement(
        html.head,
        'script',
        src=MATHJAX_URL.format(mathjax_version=mathjax_version))
print(str(html), file=html_out)
if hasattr(html_out, 'name'):
    # html_out is a file, close after writing
    html_out.close()
numchapters=None, includes=None) | Generate complete book HTML. | 4.814253 | 4.923881 | 0.977736 |
def _pack_prms():
    """if you introduce new 'save-able' parameter dictionaries, then you have
    to include them here"""
    # Each section is a box-type attribute on the prms module that knows
    # how to serialize itself via to_dict().
    sections = (
        "Paths",
        "FileNames",
        "Db",
        "DbCols",
        "DataSet",
        "Reader",
        "Instruments",
        "Batch",
    )
    return {section: getattr(prms, section).to_dict() for section in sections}
to include them here | 2.761233 | 2.736414 | 1.00907 |
def _read_prm_file(prm_filename):
    """Read a cellpy prm (YAML) configuration file and apply it.

    Args:
        prm_filename: path to the prm file.

    Raises:
        ConfigFileNotRead: if the file cannot be parsed as YAML.
    """
    logger.debug("Reading config-file: %s" % prm_filename)
    try:
        with open(prm_filename, "r") as config_file:
            # FIX: yaml.load without an explicit Loader is deprecated and
            # unsafe on untrusted input; a prm file only contains plain
            # scalars/maps, so the safe loader is sufficient.
            prm_dict = yaml.safe_load(config_file)
    except yaml.YAMLError:
        raise ConfigFileNotRead
    else:
        _update_prms(prm_dict)
# An explicitly given, existing file wins outright.
if file_name is not None:
    if os.path.isfile(file_name):
        return file_name
    else:
        logger.info("Could not find the prm-file")
default_name = prms._prm_default_name
prm_globtxt = prms._prm_globtxt
script_dir = os.path.abspath(os.path.dirname(__file__))
# Candidate directories to search for a prm file.
search_path = dict()
search_path["curdir"] = os.path.abspath(os.path.dirname(sys.argv[0]))
search_path["filedir"] = script_dir
search_path["userdir"] = os.path.expanduser("~")
if search_order is None:
    search_order = ["userdir", ]  # ["curdir","filedir", "userdir",]
else:
    search_order = search_order
# The default name for the prm file is at the moment in the script-dir,
# while default searching is in the userdir (yes, I know):
prm_default = os.path.join(script_dir, default_name)
# -searching-----------------------
# For each directory, record [default-named file, user-named file].
search_dict = OrderedDict()
for key in search_order:
    search_dict[key] = [None, None]
    prm_directory = search_path[key]
    default_file = os.path.join(prm_directory, default_name)
    if os.path.isfile(default_file):
        # noinspection PyTypeChecker
        search_dict[key][0] = default_file
    prm_globtxt_full = os.path.join(prm_directory, prm_globtxt)
    user_files = glob.glob(prm_globtxt_full)
    # Pick the first glob match that is not the default-named file.
    for f in user_files:
        if os.path.basename(f) != os.path.basename(default_file):
            search_dict[key][1] = f
            break
# -selecting----------------------
# Prefer a user-named file (break on first hit); otherwise remember the
# first default-named file found along the search order.
prm_file = None
for key, file_list in search_dict.items():
    if file_list[-1]:
        prm_file = file_list[-1]
        break
    else:
        if not prm_file:
            prm_file = file_list[0]
if prm_file:
    prm_filename = prm_file
else:
    # Nothing found anywhere: fall back to the script-dir default.
    prm_filename = prm_default
return prm_filename
def info():
    """this function will show only the 'box'-type
    attributes and their content in the cellpy.prms module"""
    print("convenience function for listing prms")
    print(type(prms))
    print(prms.__name__)
    print(f"prm file: {_get_prm_file()}")
    # Dump every box-type section and its key/value pairs.
    for attr_name, attr in prms.__dict__.items():
        if not isinstance(attr, box.Box):
            continue
        print()
        print(80 * "=")
        print(f"prms.{attr_name}:")
        print(80 * "-")
        for sub_name in attr:
            print(
                f"prms.{attr_name}.{sub_name} = ",
                f"{attr[sub_name]}"
            )
        print(80 * "=")
attributes and their content in the cellpy.prms module | 3.793133 | 3.306737 | 1.147092 |
# The TeX source lives either in the data-math attribute or in the text.
math = node.attrib['data-math'] or node.text
if math is None:
    return None
eq = {}
if mc_client:
    # Memcached lookup keyed on the md5 of the TeX source.
    math_key = hashlib.md5(math.encode('utf-8')).hexdigest()
    eq = json.loads(mc_client.get(math_key) or '{}')
if not eq:
    # Cache miss: ask the mml-api service for a MathML conversion.
    res = requests.post(mml_url, {'math': math.encode('utf-8'),
                                  'mathType': 'TeX',
                                  'mml': 'true'})
    if res:  # Non-error response from requests
        eq = res.json()
        if mc_client:
            mc_client.set(math_key, res.text)
if 'components' in eq and len(eq['components']) > 0:
    for component in eq['components']:
        if component['format'] == 'mml':
            mml = etree.fromstring(component['source'])
            # Preserve inline vs block display based on the source tag.
            if node.tag.endswith('span'):
                mml.set('display', 'inline')
            elif node.tag.endswith('div'):
                mml.set('display', 'block')
            mml.tail = node.tail
            return mml
else:
    # Conversion failed: retry once, then give up.
    logger.warning('Retrying math TeX conversion: '
                   '{}'.format(json.dumps(eq, indent=4)))
    retry += 1
    if retry < 2:
        return _replace_tex_math(node, mml_url, mc_client, retry)
return None
def _replace_exercises(elem):
    # The exercise id is whatever follows the match prefix in the href.
    item_code = elem.get('href')[len(match):]
    url = url_template.format(itemCode=item_code)
    exercise = {}
    if mc_client:
        # Cache key includes the token so authorized and anonymous
        # responses are cached separately.
        mc_key = item_code + (token or '')
        exercise = json.loads(mc_client.get(mc_key) or '{}')
    if not exercise:
        if token:
            headers = {'Authorization': 'Bearer {}'.format(token)}
            res = requests.get(url, headers=headers)
        else:
            res = requests.get(url)
        if res:
            # grab the json exercise, run it through Jinja2 template,
            # replace element w/ it
            exercise = res.json()
            if mc_client:
                mc_client.set(mc_key, res.text)
    if exercise['total_count'] == 0:
        # Exercise not found: insert a visible placeholder div.
        logger.warning('MISSING EXERCISE: {}'.format(url))
        XHTML = '{{{}}}'.format(HTML_DOCUMENT_NAMESPACES['xhtml'])
        missing = etree.Element(XHTML + 'div',
                                {'class': 'missing-exercise'},
                                nsmap=HTML_DOCUMENT_NAMESPACES)
        missing.text = 'MISSING EXERCISE: tag:{}'.format(item_code)
        nodes = [missing]
    else:
        html = EXERCISE_TEMPLATE.render(data=exercise)
        try:
            nodes = etree.fromstring('<div>{}</div>'.format(html))
        except etree.XMLSyntaxError:  # Probably HTML
            nodes = etree.HTML(html)[0]  # body node
        if mml_url:
            # Convert any TeX math in the rendered exercise to MathML.
            for node in nodes.xpath('//*[@data-math]'):
                mathml = _replace_tex_math(node, mml_url, mc_client)
                if mathml is not None:
                    mparent = node.getparent()
                    mparent.replace(node, mathml)
                else:
                    mathtext = node.get('data-math') or node.text or ''
                    logger.warning('BAD TEX CONVERSION: "%s" URL: %s'
                                   % (mathtext.encode('utf-8'), url))
    # If the link is the sole content of a paragraph, replace the whole
    # paragraph with the exercise nodes.
    parent = elem.getparent()
    if etree.QName(parent.tag).localname == 'p':
        elem = parent
        parent = elem.getparent()
    parent.remove(elem)  # Special case - assumes single wrapper elem
    for child in nodes:
        parent.append(child)

# The returned xpath selects all anchors whose href contains the match
# prefix; the callback rewrites each of them in place.
xpath = '//xhtml:a[contains(@href, "{}")]'.format(match)
return (xpath, _replace_exercises)
mc_client=None, token=None, mml_url=None) | Create a callback function to replace an exercise by fetching from
a server. | 3.965661 | 3.932964 | 1.008314 |
for node in tree:
    li_elm = etree.SubElement(root_xl_element, 'li')
    if node['id'] not in extensions:  # no extension, no associated file
        # Render as a plain span (no file to link to).
        span_elm = lxml.html.fragment_fromstring(
            node['title'], create_parent='span')
        li_elm.append(span_elm)
    else:
        # Render as a link to the reassembled file: <id><extension>.
        a_elm = lxml.html.fragment_fromstring(
            node['title'], create_parent='a')
        a_elm.set('href', ''.join([node['id'], extensions[node['id']]]))
        li_elm.append(a_elm)
    # Annotate the li with archive ids ('subcol' is a pseudo id).
    if node['id'] is not None and node['id'] != 'subcol':
        li_elm.set('cnx-archive-uri', node['id'])
        if node['shortId'] is not None:
            li_elm.set('cnx-archive-shortid', node['shortId'])
    if 'contents' in node:
        # Recurse into the sub-tree as a nested list.
        elm = etree.SubElement(li_elm, list_type)
        html_listify(node['contents'], elm, extensions)
This will create 'li' elements under the root_xl_element,
additional sublists of the type passed as list_type. The contents
of each li depends on the extensions dictonary: the keys of this
dictionary are the ids of tree elements that are repesented by files
in the epub, with associated filename extensions as the value. Those
nodes will be rendered as links to the reassembled filename: i.e.
id='abc-2345-54e4' {'abc-2345-54e4': 'xhtml'} -> abc-2345-54e4.xhtml
Other nodes will render as spans. If the node has id or short id values,
the associated li will be populated with cnx-archive-uri and
cnx-archive-shortid attributes, respectively | 2.966494 | 2.367736 | 1.252882 |
existing_ids = content.xpath('//*/@id')
# Element tags and data-type values whose nodes should get linkable ids.
elements = [
    'p', 'dl', 'dt', 'dd', 'table', 'div', 'section', 'figure',
    'blockquote', 'q', 'code', 'pre', 'object', 'img', 'audio',
    'video',
]
elements_xpath = '|'.join(['.//{}|.//xhtml:{}'.format(elem, elem)
                           for elem in elements])
data_types = [
    'equation', 'list', 'exercise', 'rule', 'example', 'note',
    'footnote-number', 'footnote-ref', 'problem', 'solution', 'media',
    'proof', 'statement', 'commentary'
]
data_types_xpath = '|'.join(['.//*[@data-type="{}"]'.format(data_type)
                             for data_type in data_types])
xpath = '|'.join([elements_xpath, data_types_xpath])
mapping = {}  # old id -> new id
for node in content.xpath(xpath, namespaces=HTML_DOCUMENT_NAMESPACES):
    old_id = node.attrib.get('id')
    document_id = document.id.replace('_', '')
    # New ids are namespaced by document so they stay unique across the
    # assembled book; nodes without an id get a random suffix.
    if old_id:
        new_id = 'auto_{}_{}'.format(document_id, old_id)
    else:
        random_number = random.randint(0, 100000)
        new_id = 'auto_{}_{}'.format(document_id, random_number)
    # On collision, fall back to random suffixes until unique.
    while new_id in existing_ids:
        random_number = random.randint(0, 100000)
        new_id = 'auto_{}_{}'.format(document_id, random_number)
    node.attrib['id'] = new_id
    if old_id:
        mapping[old_id] = new_id
    existing_ids.append(new_id)
# Rewrite intra-document fragment links to the new ids.
for a in content.xpath('//a[@href]|//xhtml:a[@href]',
                       namespaces=HTML_DOCUMENT_NAMESPACES):
    href = a.attrib['href']
    if href.startswith('#') and href[1:] in mapping:
        a.attrib['href'] = '#{}'.format(mapping[href[1:]])
possible to link to them. | 2.748579 | 2.712003 | 1.013487 |
def get_node_type(self, node, parent=None):
    """If node is a document, the type is page.
    If node is a binder with no parent, the type is book.
    If node is a translucent binder, the type is either chapter (only
    contains pages) or unit (contains at least one translucent binder).
    """
    if isinstance(node, CompositeDocument):
        return 'composite-page'
    if isinstance(node, (Document, DocumentPointer)):
        return 'page'
    if parent is None and isinstance(node, Binder):
        return 'book'
    # A translucent binder holding another translucent binder is a unit;
    # one holding only pages is a chapter.
    if any(isinstance(child, TranslucentBinder) for child in node):
        return 'unit'
    return 'chapter'
If node is a binder with no parent, the type is book.
If node is a translucent binder, the type is either chapters (only
contain pages) or unit (contains at least one translucent binder). | 8.110513 | 3.590647 | 2.258789 |
def pack_epub(directory, file):
    """Pack the given ``directory`` into an epub (i.e. zip) archive
    given as ``file``, which can be a file-path or file-like object.
    """
    base_path = os.path.abspath(directory)
    with zipfile.ZipFile(file, 'w', zipfile.ZIP_DEFLATED) as zippy:
        for root, dirs, filenames in os.walk(directory):
            # Store entries relative to the directory being packed.
            archive_root = os.path.relpath(root, base_path)
            for filename in filenames:
                zippy.write(
                    os.path.join(root, filename),
                    os.path.join(archive_root, filename),
                )
given as ``file``, which can be a file-path or file-like object. | 2.034235 | 2.161 | 0.941339 |
def unpack_epub(file, directory):
    """Unpack the given ``file`` (a file-path or file-like object)
    to the given ``directory``.
    """
    # Non-zip input is silently ignored (mirrors historical behavior).
    if not zipfile.is_zipfile(file):
        return
    with zipfile.ZipFile(file, 'r') as archive:
        archive.extractall(path=directory)
to the given ``directory``. | 2.491881 | 2.838733 | 0.877814 |
root = None
if zipfile.is_zipfile(file):
    # Archive given: extract it to a temporary working directory.
    unpack_dir = tempfile.mkdtemp('-epub')
    # Extract the epub to the current working directory.
    with zipfile.ZipFile(file, 'r') as zf:
        zf.extractall(path=unpack_dir)
    root = unpack_dir
elif os.path.isdir(file):
    # Already-unpacked epub directory: use it in place.
    root = file
else:
    raise TypeError("Can't decipher what should be done "
                    "with the given file.")
# NOTE We ignore the mimetype file, as it's not extremely important
# to anything done here.
# Build a blank epub object then parse the packages.
container_xml_filepath = os.path.join(root,
                                      EPUB_CONTAINER_XML_RELATIVE_PATH)
container_xml = etree.parse(container_xml_filepath)
packages = []
# Each rootfile entry in container.xml points at one OPF package.
for pkg_filepath in container_xml.xpath(
        '//ns:rootfile/@full-path',
        namespaces=EPUB_CONTAINER_XML_NAMESPACES):
    filepath = os.path.join(root, pkg_filepath)
    packages.append(Package.from_file(filepath))
return cls(packages=packages, root=root)
The file can point to an ``.epub`` file or a directory
(the contents of which reflect
the internal struture of an ``.epub`` archive).
If given an non-archive file,
this structure will be used when reading in and parsing the epub.
If an archive file is given,
it will be extracted to the temporal filesystem. | 3.768784 | 3.570724 | 1.055468 |
# Stage the epub layout in a temporary directory, then zip it up.
directory = tempfile.mkdtemp('-epub')
# Write out the contents to the filesystem.
package_filenames = []
for package in epub:
    opf_filepath = Package.to_file(package, directory)
    opf_filename = os.path.basename(opf_filepath)
    package_filenames.append(opf_filename)
# Create the container.xml
container_xml_filepath = os.path.join(directory,
                                      EPUB_CONTAINER_XML_RELATIVE_PATH)
template = jinja2.Template(CONTAINER_XML_TEMPLATE,
                           trim_blocks=True, lstrip_blocks=True)
os.makedirs(os.path.dirname(container_xml_filepath))  # FIXME PY3
with open(container_xml_filepath, 'w') as fb:
    xml = template.render(package_filenames=package_filenames)
    fb.write(xml)
# Write the mimetype file.
with open(os.path.join(directory, 'mimetype'), 'w') as fb:
    fb.write("application/epub+zip")
# Pack everything up
pack_epub(directory, file=file)
opf_xml = etree.parse(file)
# Check if ``file`` is file-like.
if hasattr(file, 'read'):
    name = os.path.basename(file.name)
    root = os.path.abspath(os.path.dirname(file.name))
else:  # ...a filepath
    name = os.path.basename(file)
    root = os.path.abspath(os.path.dirname(file))
parser = OPFParser(opf_xml)
# Roll through the item entries
manifest = opf_xml.xpath('/opf:package/opf:manifest/opf:item',
                         namespaces=EPUB_OPF_NAMESPACES)
pkg_items = []
for item in manifest:
    # Manifest hrefs are relative to the OPF file's directory.
    absolute_filepath = os.path.join(root, item.get('href'))
    properties = item.get('properties', '').split()
    # The navigation document is marked with the 'nav' property.
    is_navigation = 'nav' in properties
    media_type = item.get('media-type')
    pkg_items.append(Item.from_file(absolute_filepath,
                                    media_type=media_type,
                                    is_navigation=is_navigation,
                                    properties=properties))
# Ignore spine ordering, because it is not important
# for our use cases.
return cls(name, pkg_items, parser.metadata)
opf_filepath = os.path.join(directory, package.name)
# Create the directory structure
for name in ('contents', 'resources',):
    path = os.path.join(directory, name)
    if not os.path.exists(path):
        os.mkdir(path)
# Write the items to the filesystem
locations = {}  # Used when rendering
for item in package:
    # XHTML documents go under contents/, everything else under
    # resources/.
    if item.media_type == 'application/xhtml+xml':
        base = os.path.join(directory, 'contents')
    else:
        base = os.path.join(directory, 'resources')
    filename = item.name
    filepath = os.path.join(base, filename)
    # Remember each item's location relative to the OPF directory so the
    # template can emit correct hrefs.
    locations[item] = os.path.relpath(filepath, directory)
    with open(filepath, 'wb') as item_file:
        item_file.write(item.data.read())
# Write the OPF
template = jinja2.Template(OPF_TEMPLATE,
                           trim_blocks=True, lstrip_blocks=True)
with open(opf_filepath, 'wb') as opf_file:
    opf = template.render(package=package, locations=locations)
    # Render may produce str; the file is opened in binary mode.
    if not isinstance(opf, bytes):
        opf = opf.encode('utf-8')
    opf_file.write(opf)
return opf_filepath
Returns the OPF filename. | 2.399264 | 2.266692 | 1.058487 |
file_name = self._check_file_name(file_name)
with open(file_name, 'r') as infile:
    contents = json.load(infile)
# Rebuild the pages DataFrame from its dict (JSON) representation.
self.pages = pd.DataFrame(contents['info_df'])
self.file_name = file_name
self._prm_packer(contents['metadata'])
self.generate_folder_names()
self.paginate()
file_name = self._check_file_name(file_name)
top_level_dict = {
    'info_df': self.pages,
    'metadata': self._prm_packer()
}
# DataFrames are not directly JSON serializable; round-trip them
# through pandas' own to_json so they embed as plain dicts.
json_string = json.dumps(
    top_level_dict,
    default=lambda df: json.loads(
        df.to_json()
    )
)
self.paginate()
with open(file_name, 'w') as outfile:
    outfile.write(json_string)
self.file_name = file_name
# Lazy %-style args: message only formatted if INFO is enabled.
logging.info("Saved file to %s", file_name)
# Folder hierarchy: <outdatadir>/<project>/<name>/raw_data.
self.project_dir = os.path.join(prms.Paths.outdatadir, self.project)
self.batch_dir = os.path.join(self.project_dir, self.name)
self.raw_dir = os.path.join(self.batch_dir, "raw_data")
project_dir = self.project_dir
raw_dir = self.raw_dir
batch_dir = self.batch_dir
if project_dir is None:
raise UnderDefined("no project directory defined")
if raw_dir is None:
raise UnderDefined("no raw directory defined")
if batch_dir is None:
raise UnderDefined("no batcb directory defined")
# create the folders
if not os.path.isdir(project_dir):
os.mkdir(project_dir)
logging.info(f"created folder {project_dir}")
if not os.path.isdir(batch_dir):
os.mkdir(batch_dir)
logging.info(f"created folder {batch_dir}")
if not os.path.isdir(raw_dir):
os.mkdir(raw_dir)
logging.info(f"created folder {raw_dir}")
return project_dir, batch_dir, raw_dir | def paginate(self) | Make folders where we would like to put results etc. | 2.127976 | 1.975252 | 1.077319 |
# A project name is mandatory for building the output path.
if not self.project:
    raise UnderDefined("project name not given")
project_dir = os.path.join(prms.Paths.outdatadir, self.project)
file_name = "cellpy_batch_%s.json" % self.name
self.file_name = os.path.join(project_dir, file_name)
print("Sorry, but I don't have much to share.")
print("This is me:")
print(self)
print("And these are the experiments assigned to me:")
print(self.experiments) | def info(self) | Delivers some info to you about the class. | 13.80759 | 10.683798 | 1.292386 |
# Register the experiment and give it a matching farm slot.
self.experiments.append(experiment)
# NOTE(review): ``empty_farm`` is defined elsewhere in the module —
# if it is a shared mutable object, appending it here would alias the
# same farm across experiments; confirm it is a fresh/immutable value.
self.farms.append(empty_farm)
id = model.ident_hash
if id is None and isinstance(model, TranslucentBinder):
id = lucent_id
md = model.metadata
shortid = md.get('shortId', md.get('cnx-archive-shortid'))
title = title is not None and title or md.get('title')
tree = {'id': id, 'title': title, 'shortId': shortid}
if hasattr(model, '__iter__'):
contents = tree['contents'] = []
for node in model:
item = model_to_tree(node, model.get_title_for_node(node),
lucent_id=lucent_id)
contents.append(item)
return tree | def model_to_tree(model, title=None, lucent_id=TRANSLUCENT_BINDER_ID) | Given an model, build the tree::
{'id': <id>|'subcol', 'title': <title>, 'contents': [<tree>, ...]} | 3.747213 | 3.765065 | 0.995258 |
if 'contents' in item_or_tree:
tree = item_or_tree
if tree['id'] != lucent_id:
yield tree['id']
for i in tree['contents']:
# yield from flatten_tree_to_ident_hashs(i, lucent_id)
for x in flatten_tree_to_ident_hashes(i, lucent_id):
yield x
else:
item = item_or_tree
yield item['id'] | def flatten_tree_to_ident_hashes(item_or_tree,
lucent_id=TRANSLUCENT_BINDER_ID) | Flatten a tree to id and version values (ident_hash). | 2.400734 | 2.414311 | 0.994377 |
yield model
if isinstance(model, (TranslucentBinder, Binder,)):
for m in model:
# yield from flatten_model(m)
for x in flatten_model(m):
yield x | def flatten_model(model) | Flatten a model to a list of models.
This is used to flatten a ``Binder``'ish model down to a list
of contained models. | 7.049991 | 5.577969 | 1.263899 |
types = [Document]
if include_pointers:
types.append(DocumentPointer)
types = tuple(types)
def _filter(m):
return isinstance(m, types)
return flatten_to(model, _filter) | def flatten_to_documents(model, include_pointers=False) | Flatten the model to a list of documents (aka ``Document`` objects).
This is to flatten a ``Binder``'ish model down to a list of documents.
If ``include_pointers`` has been set to ``True``, ``DocumentPointers``
will also be included in the results. | 4.655803 | 4.721338 | 0.986119 |
parsed_uri = urlparse(uri)
if not parsed_uri.netloc:
if parsed_uri.scheme == 'data':
type_ = INLINE_REFERENCE_TYPE
else:
type_ = INTERNAL_REFERENCE_TYPE
else:
type_ = EXTERNAL_REFERENCE_TYPE
return type_ | def _discover_uri_type(uri) | Given a ``uri``, determine if it is internal or external. | 3.040192 | 2.742891 | 1.10839 |
references = []
ref_finder = HTMLReferenceFinder(xml)
for elm, uri_attr in ref_finder:
type_ = _discover_uri_type(elm.get(uri_attr))
references.append(Reference(elm, type_, uri_attr))
return references | def _parse_references(xml) | Parse the references to ``Reference`` instances. | 5.504357 | 5.288282 | 1.040859 |
# Render the uri from the stored template and the bound model's id,
# then write it onto the element's uri attribute.
value = self._uri_template.format(self._bound_model.id)
self.elm.set(self._uri_attr, value)
self._bound_model = model
self._uri_template = template
# Refresh the element's uri attribute immediately so the reference
# is consistent with the newly bound model.
self._set_uri_from_bound_model()
``id`` attribute and the given ``template`` to
dynamically produce a uri when accessed. | 6.558189 | 5.502678 | 1.191818 |
if isinstance(x, (pd.DataFrame, pd.Series)):
return x.iloc[0], x.iloc[-1]
else:
return x[0], x[-1] | def index_bounds(x) | returns tuple with first and last item | 2.404696 | 2.348835 | 1.023782 |
c_first = cycle.loc[cycle["direction"] == -1]
c_last = cycle.loc[cycle["direction"] == 1]
converter = Converter(**kwargs)
converter.set_data(c_first["capacity"], c_first["voltage"])
converter.inspect_data()
converter.pre_process_data()
converter.increment_data()
converter.post_process_data()
voltage_first = converter.voltage_processed
incremental_capacity_first = converter.incremental_capacity
if splitter:
voltage_first = np.append(voltage_first, np.NaN)
incremental_capacity_first = np.append(incremental_capacity_first,
np.NaN)
converter = Converter(**kwargs)
converter.set_data(c_last["capacity"], c_last["voltage"])
converter.inspect_data()
converter.pre_process_data()
converter.increment_data()
converter.post_process_data()
voltage_last = converter.voltage_processed[::-1]
incremental_capacity_last = converter.incremental_capacity[::-1]
voltage = np.concatenate((voltage_first,
voltage_last))
incremental_capacity = np.concatenate((incremental_capacity_first,
incremental_capacity_last))
return voltage, incremental_capacity | def dqdv_cycle(cycle, splitter=True, **kwargs) | Convenience functions for creating dq-dv data from given capacity and
voltage cycle.
Returns a tuple of two numpy arrays: the voltage values and the
corresponding incremental capacity (dq/dv) values.
Args:
cycle (pandas.DataFrame): the cycle data ('voltage', 'capacity',
'direction' (1 or -1)).
splitter (bool): insert a np.NaN row between charge and discharge.
Returns:
(voltage, incremental_capacity): numpy arrays covering the full
cycle, with the charge branch reversed and appended after the
discharge branch.
Example:
>>> cycle_df = my_data.get_cap(
>>> ... 1,
>>> ... categorical_column=True,
>>> ... method = "forth-and-forth"
>>> ... )
>>> voltage, incremental = ica.dqdv_cycle(cycle_df) | 2.351969 | 2.039438 | 1.153244 |
# TODO: should add option for normalising based on first cycle capacity
# this is e.g. done by first finding the first cycle capacity (nom_cap)
# (or use nominal capacity given as input) and then propagating this to
# Converter using the key-word arguments
# normalize=True, normalization_factor=1.0, normalization_roof=nom_cap
ica_dfs = list()
cycle_group = cycles.groupby("cycle")
for cycle_number, cycle in cycle_group:
v, dq = dqdv_cycle(cycle, splitter=True, **kwargs)
_ica_df = pd.DataFrame(
{
"voltage": v,
"dq": dq,
}
)
_ica_df["cycle"] = cycle_number
_ica_df = _ica_df[['cycle', 'voltage', 'dq']]
ica_dfs.append(_ica_df)
ica_df = pd.concat(ica_dfs)
return ica_df | def dqdv_cycles(cycles, **kwargs) | Convenience functions for creating dq-dv data from given capacity and
voltage cycles.
Returns a DataFrame with a 'voltage' and a 'incremental_capacity'
column.
Args:
cycles (pandas.DataFrame): the cycle data ('cycle', 'voltage',
'capacity', 'direction' (1 or -1)).
Returns:
pandas.DataFrame with columns 'cycle', 'voltage', 'dq'.
Example:
>>> cycles_df = my_data.get_cap(
>>> ... categorical_column=True,
>>> ... method = "forth-and-forth",
>>> ... label_cycle_number=True,
>>> ... )
>>> ica_df = ica.dqdv_cycles(cycles_df) | 6.223299 | 5.427543 | 1.146614 |
cycles = cell.get_cap(
method="forth-and-forth",
categorical_column=True,
label_cycle_number=True,
)
ica_df = dqdv_cycles(cycles, **kwargs)
assert isinstance(ica_df, pd.DataFrame)
return ica_df | def _dqdv_combinded_frame(cell, **kwargs) | Returns full cycle dqdv data for all cycles as one pd.DataFrame.
Args:
cell: CellpyData-object
Returns:
pandas.DataFrame with the following columns:
cycle: cycle number
voltage: voltage
dq: the incremental capacity | 9.467773 | 9.184346 | 1.03086 |
# TODO: should add option for normalising based on first cycle capacity
# this is e.g. done by first finding the first cycle capacity (nom_cap)
# (or use nominal capacity given as input) and then propagating this to
# Converter using the key-word arguments
# normalize=True, normalization_factor=1.0, normalization_roof=nom_cap
if split:
return _dqdv_split_frames(cell, tidy=True, **kwargs)
else:
return _dqdv_combinded_frame(cell, **kwargs) | def dqdv_frames(cell, split=False, **kwargs) | Returns dqdv data as pandas.DataFrame(s) for all cycles.
Args:
cell (CellpyData-object).
split (bool): return one frame for charge and one for
discharge if True (defaults to False).
Returns:
pandas.DataFrame(s) with the following columns:
cycle: cycle number (if split is set to True).
voltage: voltage
dq: the incremental capacity
Example:
>>> from cellpy.utils import ica
>>> charge_df, dcharge_df = ica.dqdv_frames(my_cell, split=True)
>>> charge_df.plot(x=("voltage", "v")) | 13.768374 | 15.814966 | 0.870591 |
charge_dfs, cycles, minimum_v, maximum_v = _collect_capacity_curves(
cell,
direction="charge"
)
# charge_df = pd.concat(
# charge_dfs, axis=1, keys=[k.name for k in charge_dfs])
ica_charge_dfs = _make_ica_charge_curves(
charge_dfs, cycles, minimum_v, maximum_v,
**kwargs,
)
ica_charge_df = pd.concat(
ica_charge_dfs,
axis=1,
keys=[k.name for k in ica_charge_dfs]
)
dcharge_dfs, cycles, minimum_v, maximum_v = _collect_capacity_curves(
cell,
direction="discharge"
)
ica_dcharge_dfs = _make_ica_charge_curves(
dcharge_dfs, cycles, minimum_v, maximum_v,
**kwargs,
)
ica_discharge_df = pd.concat(
ica_dcharge_dfs,
axis=1,
keys=[k.name for k in ica_dcharge_dfs]
)
ica_charge_df.columns.names = ["cycle", "value"]
ica_discharge_df.columns.names = ["cycle", "value"]
if tidy:
ica_charge_df = ica_charge_df.melt(
"voltage",
var_name="cycle",
value_name="dq",
col_level=0
)
ica_discharge_df = ica_discharge_df.melt(
"voltage",
var_name="cycle",
value_name="dq",
col_level=0
)
return ica_charge_df, ica_discharge_df | def _dqdv_split_frames(cell, tidy=False, **kwargs) | Returns dqdv data as pandas.DataFrames for all cycles.
Args:
cell (CellpyData-object).
tidy (bool): return in wide format if False (default),
long (tidy) format if True.
Returns:
(charge_ica_frame, discharge_ica_frame) where the frames are
pandas.DataFrames where the first column is voltage ('v') and
the following columns are the incremental capcaity for each
cycle (multi-indexed, where cycle number is on the top level).
Example:
>>> from cellpy.utils import ica
>>> charge_ica_df, dcharge_ica_df = ica.dqdv_frames(my_cell, split=True)
>>> charge_ica_df.plot(x=("voltage", "v")) | 2.131069 | 2.011936 | 1.059213 |
logging.debug("setting data (capacity and voltage)")
if isinstance(capacity, pd.DataFrame):
logging.debug("recieved a pandas.DataFrame")
self.capacity = capacity[capacity_label]
self.voltage = capacity[voltage_label]
else:
assert len(capacity) == len(voltage)
self.capacity = capacity
self.voltage = voltage | def set_data(self, capacity, voltage=None,
capacity_label="q", voltage_label="v"
) | Set the data | 2.8768 | 3.055854 | 0.941406 |
logging.debug("inspecting the data")
if capacity is None:
capacity = self.capacity
if voltage is None:
voltage = self.voltage
if capacity is None or voltage is None:
raise NullData
self.len_capacity = len(capacity)
self.len_voltage = len(voltage)
if self.len_capacity <= 1:
raise NullData
if self.len_voltage <= 1:
raise NullData
self.min_capacity, self.max_capacity = value_bounds(capacity)
self.start_capacity, self.end_capacity = index_bounds(capacity)
self.number_of_points = len(capacity)
if diff_est:
d_capacity = np.diff(capacity)
d_voltage = np.diff(voltage)
self.d_capacity_mean = np.mean(d_capacity)
self.d_voltage_mean = np.mean(d_voltage)
if err_est:
splits = int(self.number_of_points / self.points_pr_split)
rest = self.number_of_points % self.points_pr_split
if splits < self.minimum_splits:
txt = "no point in splitting, too little data"
logging.debug(txt)
self.errors.append("splitting: to few points")
else:
if rest > 0:
_cap = capacity[:-rest]
_vol = voltage[:-rest]
else:
_cap = capacity
_vol = voltage
c_pieces = np.split(_cap, splits)
v_pieces = np.split(_vol, splits)
# c_middle = int(np.amax(c_pieces) / 2)
std_err = []
c_pieces_avg = []
for c, v in zip(c_pieces, v_pieces):
_slope, _intercept, _r_value, _p_value, _std_err = stats.linregress(c, v)
std_err.append(_std_err)
c_pieces_avg.append(np.mean(c))
self.std_err_median = np.median(std_err)
self.std_err_mean = np.mean(std_err)
if not self.start_capacity == self.min_capacity:
self.errors.append("capacity: start<>min")
if not self.end_capacity == self.max_capacity:
self.errors.append("capacity: end<>max")
if self.normalizing_factor is None:
self.normalizing_factor = self.end_capacity
if self.normalizing_roof is not None:
self.normalizing_factor = self.normalizing_factor * \
self.end_capacity / self.normalizing_roof | def inspect_data(self, capacity=None, voltage=None,
err_est=False, diff_est=False) | check and inspect the data | 2.536237 | 2.535849 | 1.000153 |
logging.debug("pre-processing the data")
capacity = self.capacity
voltage = self.voltage
# performing an interpolation in v(q) space
logging.debug(" - interpolating voltage(capacity)")
c1, c2 = index_bounds(capacity)
if self.max_points is not None:
len_capacity = min(self.max_points, self.len_capacity)
elif self.capacity_resolution is not None:
len_capacity = round(abs(c2-c1) / self.capacity_resolution, 0)
else:
len_capacity = self.len_capacity
f = interp1d(capacity, voltage, kind=self.interpolation_method)
self.capacity_preprocessed = np.linspace(c1, c2, len_capacity)
self.voltage_preprocessed = f(self.capacity_preprocessed)
if self.pre_smoothing:
logging.debug(" - pre-smoothing (savgol filter window)")
savgol_filter_window_divisor = np.amin(
(self.savgol_filter_window_divisor_default, len_capacity / 5)
)
savgol_filter_window_length = int(
len_capacity / savgol_filter_window_divisor
)
if savgol_filter_window_length % 2 == 0:
savgol_filter_window_length -= 1
savgol_filter_window_length = np.amax(
[3, savgol_filter_window_length]
)
self.voltage_preprocessed = savgol_filter(
self.voltage_preprocessed,
savgol_filter_window_length,
self.savgol_filter_window_order
) | def pre_process_data(self) | perform some pre-processing of the data (i.e. interpolation) | 3.109693 | 3.02714 | 1.027271 |
# NOTE TO ASBJOERN: Probably insert method for "binning" instead of
# differentiating here
# (use self.increment_method as the variable for selecting method for)
logging.debug("incrementing data")
# ---- shifting to y-x ----------------------------------------
v1, v2 = value_bounds(self.voltage_preprocessed)
if self.voltage_resolution is not None:
    # int() is required: round(x, 0) returns a float, while
    # np.linspace expects an integer number of samples.
    len_voltage = int(round(abs(v2 - v1) / self.voltage_resolution))
else:
    len_voltage = len(self.voltage_preprocessed)
# ---- interpolating ------------------------------------------
logging.debug(" - interpolating capacity(voltage)")
f = interp1d(
    self.voltage_preprocessed,
    self.capacity_preprocessed,
    kind=self.interpolation_method
)
self.voltage_inverted = np.linspace(v1, v2, len_voltage)
self.voltage_inverted_step = (v2 - v1) / (len_voltage - 1)
self.capacity_inverted = f(self.voltage_inverted)
if self.smoothing:
    logging.debug(" - smoothing (savgol filter window)")
    savgol_filter_window_divisor = np.amin(
        (self.savgol_filter_window_divisor_default, len_voltage / 5)
    )
    savgol_filter_window_length = int(
        len(self.voltage_inverted) / savgol_filter_window_divisor
    )
    if savgol_filter_window_length % 2 == 0:
        # Savitzky-Golay requires an odd window length.
        savgol_filter_window_length -= 1
    self.capacity_inverted = savgol_filter(
        self.capacity_inverted,
        np.amax([3, savgol_filter_window_length]),
        self.savgol_filter_window_order
    )
# --- diff --------------------
if self.increment_method == "diff":
    logging.debug(" - diff using DIFF")
    # dQ/dV by finite differences on the evenly spaced voltage grid.
    self.incremental_capacity = np.ediff1d(self.capacity_inverted) / self.voltage_inverted_step
    self._incremental_capacity = self.incremental_capacity
    # --- need to adjust voltage ---
    self._voltage_processed = self.voltage_inverted[1:]
    self.voltage_processed = self.voltage_inverted[1:] - 0.5 * self.voltage_inverted_step  # centering
elif self.increment_method == "hist":
    logging.debug(" - diff using HIST")
    # TODO: Asbjoern, maybe you can put your method here?
    raise NotImplementedError
logging.debug("post-processing data")
if voltage is None:
voltage = self.voltage_processed
incremental_capacity = self.incremental_capacity
voltage_step = self.voltage_inverted_step
if self.post_smoothing:
logging.debug(" - post smoothing (gaussian)")
logging.debug(f" * using voltage fwhm: {self.voltage_fwhm}")
points_fwhm = int(self.voltage_fwhm / voltage_step)
sigma = np.amax([1, points_fwhm / 2])
self.incremental_capacity = gaussian_filter1d(
incremental_capacity, sigma=sigma, order=self.gaussian_order,
mode=self.gaussian_mode,
cval=self.gaussian_cval,
truncate=self.gaussian_truncate
)
if self.normalize:
logging.debug(" - normalizing")
area = simps(incremental_capacity, voltage)
self.incremental_capacity = incremental_capacity * self.normalizing_factor / abs(area)
fixed_range = False
if isinstance(self.fixed_voltage_range, np.ndarray):
fixed_range = True
else:
if self.fixed_voltage_range:
fixed_range = True
if fixed_range:
logging.debug(" - using fixed voltage range (interpolating)")
v1, v2, number_of_points = self.fixed_voltage_range
v = np.linspace(v1, v2, number_of_points)
f = interp1d(x=self.voltage_processed, y=self.incremental_capacity,
kind=self.interpolation_method, bounds_error=False,
fill_value=np.NaN)
self.incremental_capacity = f(v)
self.voltage_processed = v | def post_process_data(self, voltage=None, incremental_capacity=None,
voltage_step=None) | perform post-processing (smoothing, normalisation, interpolation) of
the data | 2.893963 | 2.925988 | 0.989055 |
# Parse, bake in place, then serialize back out.
document = etree.parse(in_html)
baker = Oven(ruleset)
baker.bake(document)
out_html.write(etree.tostring(document))
``cnxeasybake.scripts.main.easyback``.
``ruleset`` is a string containing the ruleset CSS
while ``in_html`` and ``out_html`` are file-like objects,
with respective read and write ability. | 3.489961 | 3.947585 | 0.884075 |
try:
htree = etree.parse(html)
except etree.XMLSyntaxError:
html.seek(0)
htree = etree.HTML(html.read())
xhtml = etree.tostring(htree, encoding='utf-8')
return adapt_single_html(xhtml) | def reconstitute(html) | Given a file-like object as ``html``, reconstruct it into models. | 4.148549 | 3.839443 | 1.080508 |
html_formatter = SingleHTMLFormatter(binder, includes)
raw_html = io.BytesIO(bytes(html_formatter))
collated_html = io.BytesIO()
if ruleset is None:
# No ruleset found, so no cooking necessary.
return binder
easybake(ruleset, raw_html, collated_html)
collated_html.seek(0)
collated_binder = reconstitute(collated_html)
return collated_binder | def collate(binder, ruleset=None, includes=None) | Given a ``Binder`` as ``binder``, collate the content into a new set
of models.
Returns the collated binder. | 5.903528 | 5.590542 | 1.055985 |
navigation_item = package.navigation
html = etree.parse(navigation_item.data)
tree = parse_navigation_html_to_tree(html, navigation_item.name)
return _node_to_model(tree, package) | def adapt_package(package) | Adapts ``.epub.Package`` to a ``BinderItem`` and cascades
the adaptation downward to ``DocumentItem``
and ``ResourceItem``.
The results of this process provide the same interface as
``.models.Binder``, ``.models.Document`` and ``.models.Resource``. | 8.160524 | 8.829926 | 0.924189 |
if item.media_type == 'application/xhtml+xml':
try:
html = etree.parse(item.data)
except Exception as exc:
logger.error("failed parsing {}".format(item.name))
raise
metadata = DocumentPointerMetadataParser(
html, raise_value_error=False)()
item.data.seek(0)
if metadata.get('is_document_pointer'):
model = DocumentPointerItem(item, package)
else:
model = DocumentItem(item, package)
else:
model = Resource(item.name, item.data, item.media_type,
filename or item.name)
return model | def adapt_item(item, package, filename=None) | Adapts ``.epub.Item`` to a ``DocumentItem``. | 4.781865 | 4.544037 | 1.052338 |
# Accept a single binder or any collection of binders.
if not isinstance(binders, (list, set, tuple,)):
    binders = [binders]
packages = [_make_package(binder) for binder in binders]
epub = EPUB(packages)
epub.to_file(epub, file)
# Accept a single binder or any collection of binders.
if not isinstance(binders, (list, set, tuple,)):
    binders = [binders]
packages = []
for binder in binders:
    # Temporarily swap in a metadata copy carrying the publication
    # info, build the package, then restore the original metadata.
    original_metadata = binder.metadata
    binder.metadata = deepcopy(original_metadata)
    binder.metadata.update({'publisher': publisher,
                            'publication_message': publication_message})
    packages.append(_make_package(binder))
    binder.metadata = original_metadata
epub = EPUB(packages)
epub.to_file(epub, file)
publication information, meant to be used in a EPUB publication
request. | 3.123334 | 3.182936 | 0.981275 |
package_id = binder.id
# Fall back to a hash when the binder carries no id.
if package_id is None:
    package_id = hash(binder)
package_name = "{}.opf".format(package_id)
extensions = get_model_extensions(binder)
# NOTE(review): ``template_env`` appears unused below — candidate
# for removal.
template_env = jinja2.Environment(trim_blocks=True, lstrip_blocks=True)
# Build the package item list.
items = []
# Build the binder as an item, specifically a navigation item.
navigation_document = bytes(HTMLFormatter(binder, extensions))
navigation_document_name = "{}{}".format(
    package_id,
    mimetypes.guess_extension('application/xhtml+xml', strict=False))
item = Item(str(navigation_document_name),
            io.BytesIO(navigation_document),
            'application/xhtml+xml',
            is_navigation=True, properties=['nav'])
items.append(item)
resources = {}
# Roll through the model list again, making each one an item.
for model in flatten_model(binder):
    # Every resource attached to any model becomes a package item.
    for resource in getattr(model, 'resources', []):
        resources[resource.id] = resource
        with resource.open() as data:
            item = Item(resource.id, data, resource.media_type)
            items.append(item)
    # Binders carry no content of their own; only their children do.
    if isinstance(model, (Binder, TranslucentBinder,)):
        continue
    if isinstance(model, DocumentPointer):
        content = bytes(HTMLFormatter(model))
        item = Item(''.join([model.ident_hash, extensions[model.id]]),
                    io.BytesIO(content),
                    model.media_type)
        items.append(item)
        continue
    # Rewrite the document's references to point at packaged resources.
    for reference in model.references:
        if reference.remote_type == INLINE_REFERENCE_TYPE:
            # has side effects - converts ref type to INTERNAL w/
            # appropriate uri, so need to replicate resource treatment from
            # above
            resource = _make_resource_from_inline(reference)
            model.resources.append(resource)
            resources[resource.id] = resource
            with resource.open() as data:
                item = Item(resource.id, data, resource.media_type)
                items.append(item)
            reference.bind(resource, '../resources/{}')
        elif reference.remote_type == INTERNAL_REFERENCE_TYPE:
            filename = os.path.basename(reference.uri)
            resource = resources.get(filename)
            if resource:
                reference.bind(resource, '../resources/{}')
    complete_content = bytes(HTMLFormatter(model))
    item = Item(''.join([model.ident_hash, extensions[model.id]]),
                io.BytesIO(complete_content),
                model.media_type)
    items.append(item)
# Build the package.
package = Package(package_name, items, binder.metadata)
return package
uri = DataURI(reference.uri)
data = io.BytesIO(uri.data)
mimetype = uri.mimetype
res = Resource('dummy', data, mimetype)
res.id = res.filename
return res | def _make_resource_from_inline(reference) | Makes an ``models.Resource`` from a ``models.Reference``
of type INLINE. That is, a data: uri | 5.195529 | 5.823752 | 0.892127 |
item = Item(model.id, model.content, model.media_type)
return item | def _make_item(model) | Makes an ``.epub.Item`` from
a ``.models.Document`` or ``.models.Resource`` | 6.79926 | 6.677016 | 1.018308 |
if 'contents' in tree_or_item:
    # It is a binder.
    tree = tree_or_item
    # Grab the package metadata, so we have required license info
    metadata = package.metadata.copy()
    if tree['id'] == lucent_id:
        # Anonymous (translucent) node: only the title is kept.
        metadata['title'] = tree['title']
        binder = TranslucentBinder(metadata=metadata)
    else:
        try:
            package_item = package.grab_by_name(tree['id'])
            binder = BinderItem(package_item, package)
        except KeyError: # Translucent w/ id
            # Known id but no matching package item: keep the id and
            # titles on a plain Binder instead.
            metadata.update({
                'title': tree['title'],
                'cnx-archive-uri': tree['id'],
                'cnx-archive-shortid': tree['shortId']})
            binder = Binder(tree['id'], metadata=metadata)
    for item in tree['contents']:
        # Recurse; each child appends itself to ``binder`` via the
        # ``parent`` argument.
        node = _node_to_model(item, package, parent=binder,
                              lucent_id=lucent_id)
        # Preserve a tree-local title override when it differs from
        # the node's own metadata title.
        if node.metadata['title'] != item['title']:
            binder.set_title_for_node(node, item['title'])
    result = binder
else:
    # It is a document.
    item = tree_or_item
    package_item = package.grab_by_name(item['id'])
    result = adapt_item(package_item, package)
if parent is not None:
    parent.append(result)
return result
lucent_id=TRANSLUCENT_BINDER_ID) | Given a tree, parse to a set of models | 3.772787 | 3.791779 | 0.994991 |
# Parse the single-page HTML into an element tree.
html_root = etree.fromstring(html)
# The first metadata block carries the book-level metadata.
metadata = parse_metadata(html_root.xpath('//*[@data-type="metadata"]')[0])
id_ = metadata['cnx-archive-uri'] or 'book'
binder = Binder(id_, metadata=metadata)
# Recover the table-of-contents structure from the navigation markup.
nav_tree = parse_navigation_html_to_tree(html_root, id_)
body = html_root.xpath('//xhtml:body', namespaces=HTML_DOCUMENT_NAMESPACES)
# Populate the binder in place from the document body.
_adapt_single_html_tree(binder, body[0], nav_tree, top_metadata=metadata)
return binder
``.formatters.SingleHTMLFormatter`` to a ``models.Binder`` | 6.147618 | 6.01261 | 1.022454 |
result_dict = dict()
result_dict['ocv'] = [parameters['ocv'] for parameters in
self.best_fit_parameters]
for i in range(self.circuits):
result_dict['t' + str(i)] = [parameters['t' + str(i)] for parameters
in self.best_fit_parameters]
result_dict['w' + str(i)] = [parameters['w' + str(i)] for parameters
in self.best_fit_parameters]
return result_dict | def get_best_fit_parameters_grouped(self) | Returns a dictionary of the best fit. | 2.640758 | 2.544394 | 1.037873 |
result_dict = dict()
result_dict['ocv'] = [parameters['ocv'] for parameters in
self.best_fit_parameters_translated]
result_dict['ir'] = [parameters['ir'] for parameters in
self.best_fit_parameters_translated]
for i in range(self.circuits):
result_dict['r' + str(i)] = [parameters['r' + str(i)] for parameters
in self.best_fit_parameters_translated]
result_dict['c' + str(i)] = [parameters['c' + str(i)] for parameters
in self.best_fit_parameters_translated]
return result_dict | def get_best_fit_parameters_translated_grouped(self) | Returns the parameters as a dictionary of the 'real units' for the best fit. | 2.271131 | 2.24774 | 1.010407 |
# Default to showing only the first fitted cycle.
if cycles is None:
    cycles = [0]
fig1 = plt.figure()
# 2x2 grid: fit overlay, OCV, time constants (log scale), weights.
ax1 = fig1.add_subplot(221)
ax1.set_title('Fit')
ax2 = fig1.add_subplot(222)
ax2.set_title('OCV')
ax3 = fig1.add_subplot(223)
ax3.set_title('Tau')
ax3.set_yscale("log")
ax4 = fig1.add_subplot(224)
ax4.set_title('Voltage Impact')
plot_data = self.get_best_fit_data()
for cycle in cycles:
    # Measured curve and fitted curve for each requested cycle.
    ax1.plot(plot_data[cycle][0], plot_data[cycle][1])
    ax1.plot(plot_data[cycle][0], plot_data[cycle][2])
plot_data = self.get_best_fit_parameters_grouped()
for i in range(self.circuits):
    ax3.plot(self.get_fit_cycles(), plot_data['t' + str(i)])
    ax4.plot(self.get_fit_cycles(), plot_data['w' + str(i)])
ax2.plot(self.get_fit_cycles(), plot_data['ocv'])
fig2 = plt.figure()
# 2x2 grid: OCV, internal resistance, RC resistances, capacitances.
ax1 = fig2.add_subplot(221)
ax1.set_title('OCV (V)')
ax2 = fig2.add_subplot(222)
ax2.set_title('IR (Ohm)')
ax3 = fig2.add_subplot(223)
ax3.set_title('Resistances (Ohm)')
ax4 = fig2.add_subplot(224)
ax4.set_title('Capacitances (F)')
ax4.set_yscale("log")
plot_data = self.get_best_fit_parameters_translated_grouped()
# (Removed leftover debug print() calls that dumped the ocv/ir/r0
# lists to stdout on every invocation.)
ax1.plot(self.get_fit_cycles(), plot_data['ocv'])
ax2.plot(self.get_fit_cycles(), plot_data['ir'])
for i in range(self.circuits):
    ax3.plot(self.get_fit_cycles(), plot_data['r' + str(i)])
    ax4.plot(self.get_fit_cycles(), plot_data['c' + str(i)])
plt.show()
fit (translated) | 2.313043 | 2.240274 | 1.032482 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.