def create_info_df(self): """Creates a DataFrame with info about the runs (loaded from the DB)""" logger.debug("running create_info_df") # initializing the reader reader = self.reader() self.info_df = make_df_from_batch(self.name, batch_col=self.batch_col, reader=reader) logger.debug(str(self.info_df.head(5)))
Creates a DataFrame with info about the runs (loaded from the DB)
entailment
def save_info_df(self): """Saves the DataFrame with info about the runs to a JSON file""" logger.debug("running save_info_df") info_df = self.info_df top_level_dict = {'info_df': info_df, 'metadata': self._prm_packer()} # packing prms json_string = json.dumps(top_level_dict, default=lambda info_df: json.loads( info_df.to_json())) with open(self.info_file, 'w') as outfile: outfile.write(json_string) logger.info("Saved file to {}".format(self.info_file))
Saves the DataFrame with info about the runs to a JSON file
entailment
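The save/load pair above round-trips a pandas DataFrame through JSON by handing json.dumps a default handler. A minimal, self-contained sketch of that packing pattern (the column names and metadata here are invented for illustration):

import json

import pandas as pd

# Pack: the default handler turns the DataFrame into plain JSON-able data.
info_df = pd.DataFrame({"mass": [0.42, 0.39], "label": ["cell_01", "cell_02"]})
top_level_dict = {"info_df": info_df, "metadata": {"version": 1}}
json_string = json.dumps(
    top_level_dict,
    default=lambda df: json.loads(df.to_json()),
)

# Unpack (mirrors load_info_df): the DataFrame is rebuilt from the nested dict.
restored = json.loads(json_string)
new_info_df = pd.DataFrame(restored["info_df"])

Note that to_json() stringifies the index, so a round-trip may change the index dtype.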
def load_info_df(self, file_name=None): """Loads a DataFrame with all the needed info about the run (JSON file)""" if file_name is None: file_name = self.info_file with open(file_name, 'r') as infile: top_level_dict = json.load(infile) new_info_df_dict = top_level_dict['info_df'] new_info_df = pd.DataFrame(new_info_df_dict) self.info_df = new_info_df self._prm_packer(top_level_dict['metadata']) self.info_file = file_name logger.debug("loaded info_df") logger.debug(" info_file: %s" % self.info_file)
Loads a DataFrame with all the needed info about the run (JSON file)
entailment
def create_folder_structure(self): """Creates a folder structure based on the project and batch name. Project - Batch-name - Raw-data-dir The info_df JSON-file will be stored in the Project folder. The summary-files will be saved in the Batch-name folder. The raw data (including exported cycles and ica-data) will be saved to the Raw-data-dir. """ self.info_file, directories = create_folder_structure(self.project, self.name) self.project_dir, self.batch_dir, self.raw_dir = directories logger.debug("create folders:" + str(directories))
Creates a folder structure based on the project and batch name. Project - Batch-name - Raw-data-dir The info_df JSON-file will be stored in the Project folder. The summary-files will be saved in the Batch-name folder. The raw data (including exported cycles and ica-data) will be saved to the Raw-data-dir.
entailment
def load_and_save_raw(self, parent_level="CellpyData"): """Loads the cellpy or raw-data file(s) and saves to csv""" sep = prms.Reader["sep"] if self.use_cellpy_stat_file is None: use_cellpy_stat_file = prms.Reader.use_cellpy_stat_file else: use_cellpy_stat_file = self.use_cellpy_stat_file logger.debug(f"b.load_and_save_raw: " f"use_cellpy_stat_file = {use_cellpy_stat_file}") self.frames, self.keys, errors = read_and_save_data( self.info_df, self.raw_dir, sep=sep, force_raw=self.force_raw_file, force_cellpy=self.force_cellpy_file, export_cycles=self.export_cycles, shifted_cycles=self.shifted_cycles, export_raw=self.export_raw, export_ica=self.export_ica, save=self.save_cellpy_file, use_cellpy_stat_file=use_cellpy_stat_file, parent_level=parent_level, last_cycle=self.last_cycle ) logger.debug("loaded and saved data. errors:" + str(errors))
Loads the cellpy or raw-data file(s) and saves to csv
entailment
def make_summaries(self): """Make and save summary csv files, each containing values from all cells""" self.summary_df = save_summaries(self.frames, self.keys, self.selected_summaries, self.batch_dir, self.name) logger.debug("made and saved summaries")
Make and save summary csv files, each containing values from all cells
entailment
def plot_summaries(self, show=False, save=True, figure_type=None): """Plot summary graphs. Args: show: shows the figure if True. save: saves the figure if True. figure_type: optional, figure type to create. """ if not figure_type: figure_type = self.default_figure_type if figure_type not in self.default_figure_types: logger.debug("unknown figure type selected") figure_type = self.default_figure_type color_list, symbol_list = self._create_colors_markers_list() summary_df = self.summary_df selected_summaries = self.selected_summaries batch_dir = self.batch_dir batch_name = self.name fig, ax = plot_summary_figure(self.info_df, summary_df, color_list, symbol_list, selected_summaries, batch_dir, batch_name, show=show, save=save, figure_type=figure_type) self.figure[figure_type] = fig self.axes[figure_type] = ax
Plot summary graphs. Args: show: shows the figure if True. save: saves the figure if True. figure_type: optional, figure type to create.
entailment
def update(self, all_in_memory=None): """Updates the selected datasets. Args: all_in_memory (bool): store the cellpydata in memory (default False) """ logging.info("[update experiment]") if all_in_memory is not None: self.all_in_memory = all_in_memory pages = self.journal.pages summary_frames = dict() cell_data_frames = dict() number_of_runs = len(pages) counter = 0 errors = [] for indx, row in pages.iterrows(): counter += 1 h_txt = "[" + counter * "|" + ( number_of_runs - counter) * "." + "]" l_txt = "starting to process file # %i (index=%s)" % (counter, indx) logging.debug(l_txt) print(h_txt) if not row.raw_file_names and not self.force_cellpy: logging.info("File(s) not found!") logging.info(indx) logging.debug("File(s) not found for index=%s" % indx) errors.append(indx) continue else: logging.info(f"Processing {indx}") cell_data = cellreader.CellpyData() if not self.force_cellpy or self.force_recalc: logging.info( "setting cycle mode (%s)..." % row.cell_type) cell_data.cycle_mode = row.cell_type logging.info("loading cell") if not self.force_cellpy: logging.debug("not forcing to load cellpy-file instead of raw file.") try: cell_data.loadcell( raw_files=row.raw_file_names, cellpy_file=row.cellpy_file_names, mass=row.masses, summary_on_raw=True, force_raw=self.force_raw_file, use_cellpy_stat_file=prms.Reader.use_cellpy_stat_file ) except Exception as e: logging.info('Failed to load: ' + str(e)) errors.append("loadcell:" + str(indx)) if not self.accept_errors: raise e continue else: logging.info("forcing") try: cell_data.load(row.cellpy_file_names, parent_level=self.parent_level) except Exception as e: logging.info( f"Critical exception encountered {type(e)} " "- skipping this file") logging.debug( 'Failed to load. Error-message: ' + str(e)) errors.append("load:" + str(indx)) if not self.accept_errors: raise e continue if not cell_data.check(): logging.info("...not loaded...") logging.debug( "Did not pass check(). Could not load cell!") errors.append("check:" + str(indx)) continue logging.info("...loaded successfully...") summary_tmp = cell_data.dataset.dfsummary logging.info("Trying to get summary_data") if cell_data.dataset.step_table is None or self.force_recalc: logging.info( "Running make_step_table" ) cell_data.make_step_table() if summary_tmp is None or self.force_recalc: logging.info( "Running make_summary" ) cell_data.make_summary(find_end_voltage=True, find_ir=True) if summary_tmp.index.name == b"Cycle_Index": logging.debug("Strange: 'Cycle_Index' is a byte-string") summary_tmp.index.name = 'Cycle_Index' if not summary_tmp.index.name == "Cycle_Index": logging.debug("Setting index to Cycle_Index") # check if it is a byte-string if b"Cycle_Index" in summary_tmp.columns: logging.debug( "Seems to be a byte-string in the column-headers") summary_tmp.rename( columns={b"Cycle_Index": 'Cycle_Index'}, inplace=True) summary_tmp.set_index("Cycle_Index", inplace=True) summary_frames[indx] = summary_tmp if self.all_in_memory: cell_data_frames[indx] = cell_data else: cell_data_frames[indx] = cellreader.CellpyData(initialize=True) cell_data_frames[indx].dataset.step_table = \ cell_data.dataset.step_table # cell_data_frames[indx].dataset.step_table_made = True if self.save_cellpy: logging.info("saving to cellpy-format") if not row.fixed: logging.info("saving cell to %s" % row.cellpy_file_names) cell_data.ensure_step_table = True cell_data.save(row.cellpy_file_names) else: logging.debug( "saving cell skipped (set to 'fixed' in info_df)") if self.export_raw or self.export_cycles: export_text = "exporting" if self.export_raw: export_text += " [raw]" if self.export_cycles: export_text += " [cycles]" logging.info(export_text) cell_data.to_csv( self.journal.raw_dir, sep=prms.Reader.sep, cycles=self.export_cycles, shifted=self.shifted_cycles, raw=self.export_raw, last_cycle=self.last_cycle ) if self.export_ica: logging.info("exporting [ica]") try: helper.export_dqdv( cell_data, savedir=self.journal.raw_dir, sep=prms.Reader.sep, last_cycle=self.last_cycle ) except Exception as e: logging.error( "Could not make/export dq/dv data" ) logging.debug( "Failed to make/export " "dq/dv data (%s): %s" % (indx, str(e)) ) errors.append("ica:" + str(indx)) self.errors["update"] = errors self.summary_frames = summary_frames self.cell_data_frames = cell_data_frames
Updates the selected datasets. Args: all_in_memory (bool): store the cellpydata in memory (default False)
entailment
def link(self): """Ensure that an appropriate link to the cellpy-files exists for each cell. The experiment will then contain a CellpyData object for each cell (in the cell_data_frames attribute) with only the step-table stored. Remark that running update persists the summary frames instead (or everything in case you specify all_in_memory=True). This might be considered "a strange and unexpected behaviour". Sorry for that (but the authors of this package are also a bit strange...). """ logging.info("[establishing links]") logging.debug("checking and establishing link to data") cell_data_frames = dict() counter = 0 errors = [] try: for indx, row in self.journal.pages.iterrows(): counter += 1 l_txt = "starting to process file # %i (index=%s)" % (counter, indx) logging.debug(l_txt) logging.info(f"linking cellpy-file: {row.cellpy_file_names}") if not os.path.isfile(row.cellpy_file_names): logging.error("File does not exist") raise IOError cell_data_frames[indx] = cellreader.CellpyData(initialize=True) step_table = helper.look_up_and_get( row.cellpy_file_names, "step_table" ) cell_data_frames[indx].dataset.step_table = step_table self.cell_data_frames = cell_data_frames except IOError as e: logging.warning(e) e_txt = "links not established - try update" logging.warning(e_txt) errors.append(e_txt) self.errors["link"] = errors
Ensure that an appropriate link to the cellpy-files exists for each cell. The experiment will then contain a CellpyData object for each cell (in the cell_data_frames attribute) with only the step-table stored. Remark that running update persists the summary frames instead (or everything in case you specify all_in_memory=True). This might be considered "a strange and unexpected behaviour". Sorry for that (but the authors of this package are also a bit strange...).
entailment
def get_default_config_file_path(init_filename=None): """gets the path to the default config-file""" prm_dir = get_package_prm_dir() if not init_filename: init_filename = DEFAULT_FILENAME src = os.path.join(prm_dir, init_filename) return src
gets the path to the default config-file
entailment
def get_user_dir_and_dst(init_filename): """gets the name of the user directory and full prm filepath""" user_dir = get_user_dir() dst_file = os.path.join(user_dir, init_filename) return user_dir, dst_file
gets the name of the user directory and full prm filepath
entailment
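The two path helpers above are naturally combined when installing the packaged default configuration into the user's home area. A hedged sketch of that glue (install_default_config and the example file name are hypothetical, not part of the module):

import os
import shutil

def install_default_config(init_filename):
    # Hypothetical helper: copy the packaged default prm-file to the user dir.
    src = get_default_config_file_path(init_filename)  # packaged copy
    user_dir, dst_file = get_user_dir_and_dst(init_filename)
    os.makedirs(user_dir, exist_ok=True)
    shutil.copyfile(src, dst_file)
    return dst_file

# e.g. install_default_config("_cellpy_prms_some_user.conf")  # name is made up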
def setup(interactive, not_relative, dry_run, reset, root_dir, testuser): """This will help you to setup cellpy.""" click.echo("[cellpy] (setup)") # generate variables init_filename = create_custom_init_filename() userdir, dst_file = get_user_dir_and_dst(init_filename) if testuser: if not root_dir: root_dir = os.getcwd() click.echo(f"[cellpy] (setup) DEV-MODE testuser: {testuser}") init_filename = create_custom_init_filename(testuser) userdir = root_dir dst_file = get_dst_file(userdir, init_filename) click.echo(f"[cellpy] (setup) DEV-MODE userdir: {userdir}") click.echo(f"[cellpy] (setup) DEV-MODE dst_file: {dst_file}") if not pathlib.Path(dst_file).is_file(): reset = True if interactive: click.echo(" interactive mode ".center(80, "-")) _update_paths(root_dir, not not_relative, dry_run=dry_run, reset=reset) _write_config_file( userdir, dst_file, init_filename, dry_run, ) _check() else: _write_config_file(userdir, dst_file, init_filename, dry_run) _check()
This will help you to setup cellpy.
entailment
def _parse_g_dir(repo, gdirpath): """parses a repo directory two-levels deep""" for f in repo.get_contents(gdirpath): if f.type == "dir": for sf in repo.get_contents(f.path): yield sf else: yield f
parses a repo directory two-levels deep
entailment
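_parse_g_dir walks a GitHub repository via PyGithub's get_contents. For comparison, a local-filesystem analogue of the same two-level generator pattern:

import os

def parse_dir_two_levels(dirpath):
    # Yield top-level files directly; for each sub-directory, yield its entries.
    for entry in os.scandir(dirpath):
        if entry.is_dir():
            for sub_entry in os.scandir(entry.path):
                yield sub_entry
        else:
            yield entry

# e.g. names = [entry.name for entry in parse_dir_two_levels(".")]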
def look_up_and_get(cellpy_file_name, table_name): """Extracts table from cellpy hdf5-file.""" # infoname = '/CellpyData/info' # dataname = '/CellpyData/dfdata' # summaryname = '/CellpyData/dfsummary' # fidname = '/CellpyData/fidtable' # stepname = '/CellpyData/step_table' root = '/CellpyData' table_path = '/'.join([root, table_name]) logging.debug(f"look_up_and_get({cellpy_file_name}, {table_name})") store = pd.HDFStore(cellpy_file_name) table = store.select(table_path) store.close() return table
Extracts table from cellpy hdf5-file.
entailment
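A round-trip sketch for look_up_and_get, assuming PyTables is installed: write a small table under the '/CellpyData' root that the helper expects, then read it back (the file name is arbitrary):

import pandas as pd

step_table = pd.DataFrame({"cycle": [1, 1, 2], "step": [1, 2, 1]})
with pd.HDFStore("demo_cell.h5") as store:
    store.put("/CellpyData/step_table", step_table)

table = look_up_and_get("demo_cell.h5", "step_table")
print(table.equals(step_table))  # True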
def fix_groups(groups): """Takes care of strange group numbers.""" _groups = [] for g in groups: try: if not float(g) > 0: _groups.append(1000) else: _groups.append(int(g)) except (TypeError, ValueError) as e: logging.info("Error in reading group number (check your db)") logging.debug(g) logging.debug(e) _groups.append(1000) return _groups
Takes care of strange group numbers.
entailment
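A quick illustration of fix_groups on mixed input: readable positive numbers pass through as ints, while unreadable or non-positive values fall back to group 1000:

groups = fix_groups([1, "2", None, -1])
print(groups)  # [1, 2, 1000, 1000]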
def create_selected_summaries_dict(summaries_list): """Creates a dictionary with summary column headers. Examples: >>> summaries_to_output = ["discharge_capacity", "charge_capacity"] >>> summaries_to_output_dict = create_selected_summaries_dict( >>> summaries_to_output >>> ) >>> print(summaries_to_output_dict) {'discharge_capacity': "Discharge_Capacity(mAh/g)", 'charge_capacity': "Charge_Capacity(mAh/g)"} Args: summaries_list: list containing cellpy summary column id names Returns: dictionary of the form {cellpy id name: cellpy summary header name,} """ headers_summary = cellpy.parameters.internal_settings.get_headers_summary() selected_summaries = dict() for h in summaries_list: selected_summaries[h] = headers_summary[h] return selected_summaries
Creates a dictionary with summary column headers. Examples: >>> summaries_to_output = ["discharge_capacity", "charge_capacity"] >>> summaries_to_output_dict = create_selected_summaries_dict( >>> summaries_to_output >>> ) >>> print(summaries_to_output_dict) {'discharge_capacity': "Discharge_Capacity(mAh/g)", 'charge_capacity': "Charge_Capacity(mAh/g)"} Args: summaries_list: list containing cellpy summary column id names Returns: dictionary of the form {cellpy id name: cellpy summary header name,}
entailment
def join_summaries(summary_frames, selected_summaries, keep_old_header=False): """parse the summaries and combine based on column (selected_summaries)""" selected_summaries_dict = create_selected_summaries_dict(selected_summaries) frames = [] keys = [] for key in summary_frames: keys.append(key) if summary_frames[key].empty: logging.debug("Empty summary_frame encountered") frames.append(summary_frames[key]) out = [] summary_df = pd.concat(frames, keys=keys, axis=1) for key, value in selected_summaries_dict.items(): _summary_df = summary_df.iloc[ :, summary_df.columns.get_level_values(1) == value ] _summary_df.name = key if not keep_old_header: try: _summary_df.columns = _summary_df.columns.droplevel(-1) except AttributeError as e: logging.debug("could not drop level from frame") logging.debug(e) out.append(_summary_df) logger.debug("finished joining summaries") return out
parse the summaries and combine based on column (selected_summaries)
entailment
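join_summaries relies on pandas' concat-with-keys to build a two-level column index (cell name, summary column) and then slices one quantity across all cells. A standalone sketch of that pattern, with invented cell and column names:

import pandas as pd

cell_a = pd.DataFrame({"Discharge_Capacity(mAh/g)": [3000.0, 2900.0]})
cell_b = pd.DataFrame({"Discharge_Capacity(mAh/g)": [3100.0, 3050.0]})

# Top column level = cell name, second level = summary column header.
summary_df = pd.concat([cell_a, cell_b], keys=["cell_a", "cell_b"], axis=1)

# Select one quantity across all cells, then drop the now-redundant level.
mask = summary_df.columns.get_level_values(1) == "Discharge_Capacity(mAh/g)"
discharge = summary_df.iloc[:, mask]
discharge.columns = discharge.columns.droplevel(-1)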
def generate_folder_names(name, project): """Creates sensible folder names.""" out_data_dir = prms.Paths.outdatadir project_dir = os.path.join(out_data_dir, project) batch_dir = os.path.join(project_dir, name) raw_dir = os.path.join(batch_dir, "raw_data") return out_data_dir, project_dir, batch_dir, raw_dir
Creates sensible folder names.
entailment
def group_by_interpolate(df, x=None, y=None, group_by=None, number_of_points=100, tidy=False, individual_x_cols=False, header_name="Unit", dx=10.0, generate_new_x=True): """Use this for generating wide format from long (tidy) data""" time_00 = time.time() if x is None: x = HEADERS_NORMAL.step_time_txt if y is None: y = HEADERS_NORMAL.voltage_txt if group_by is None: group_by = [HEADERS_NORMAL.cycle_index_txt] if not isinstance(group_by, (list, tuple)): group_by = [group_by] if not generate_new_x: # check if it makes sense if (not tidy) and (not individual_x_cols): logging.warning("Illogical condition") generate_new_x = True new_x = None if generate_new_x: x_max = df[x].max() x_min = df[x].min() if number_of_points: new_x = np.linspace(x_max, x_min, number_of_points) else: new_x = np.arange(x_max, x_min, dx) new_dfs = [] keys = [] for name, group in df.groupby(group_by): keys.append(name) if not isinstance(name, (list, tuple)): name = [name] new_group = _interpolate_df_col( group, x=x, y=y, new_x=new_x, number_of_points=number_of_points, dx=dx, ) if tidy or (not tidy and not individual_x_cols): for i, j in zip(group_by, name): new_group[i] = j new_dfs.append(new_group) if tidy: new_df = pd.concat(new_dfs) else: if individual_x_cols: new_df = pd.concat(new_dfs, axis=1, keys=keys) group_by.append(header_name) new_df.columns.names = group_by else: new_df = pd.concat(new_dfs) new_df = new_df.pivot(index=x, columns=group_by[0], values=y, ) logging.debug(f"(dt: {(time.time() - time_00):4.2f}s)") return new_df
Use this for generating wide format from long (tidy) data
entailment
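The core idea of group_by_interpolate is to resample each group onto a common x-grid and then pivot from long to wide. A minimal self-contained version using np.interp (column names are illustrative):

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "cycle": [1, 1, 1, 2, 2, 2],
    "time": [0.0, 1.0, 2.0, 0.0, 0.9, 2.1],
    "voltage": [0.1, 0.5, 0.9, 0.2, 0.6, 1.0],
})

new_x = np.linspace(df["time"].min(), df["time"].max(), 5)
pieces = []
for cycle, group in df.groupby("cycle"):
    new_y = np.interp(new_x, group["time"], group["voltage"])
    pieces.append(pd.DataFrame({"time": new_x, "voltage": new_y, "cycle": cycle}))

# One column per cycle on a shared time index - the wide format.
wide = pd.concat(pieces).pivot(index="time", columns="cycle", values="voltage")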
def _interpolate_df_col(df, x=None, y=None, new_x=None, dx=10.0, number_of_points=None, direction=1, **kwargs): """Interpolate a column based on another column. Args: df: DataFrame with the (cycle) data. x: Column name for the x-value (defaults to the step-time column). y: Column name for the y-value (defaults to the voltage column). new_x (numpy array or None): Interpolate using these new x-values instead of generating x-values based on dx or number_of_points. dx: step-value (defaults to 10.0) number_of_points: number of points for interpolated values (use instead of dx and overrides dx if given). direction (-1,1): if direction is negative, then invert the x-values before interpolating. **kwargs: arguments passed to scipy.interpolate.interp1d Returns: DataFrame with interpolated y-values based on given or generated x-values. """ if x is None: x = df.columns[0] if y is None: y = df.columns[1] xs = df[x].values ys = df[y].values if direction > 0: x_min = xs.min() x_max = xs.max() else: x_max = xs.min() x_min = xs.max() dx = -dx bounds_error = kwargs.pop("bounds_error", False) f = interpolate.interp1d(xs, ys, bounds_error=bounds_error, **kwargs) if new_x is None: if number_of_points: new_x = np.linspace(x_min, x_max, number_of_points) else: new_x = np.arange(x_min, x_max, dx) new_y = f(new_x) new_df = pd.DataFrame( {x: new_x, y: new_y} ) return new_df
Interpolate a column based on another column. Args: df: DataFrame with the (cycle) data. x: Column name for the x-value (defaults to the step-time column). y: Column name for the y-value (defaults to the voltage column). new_x (numpy array or None): Interpolate using these new x-values instead of generating x-values based on dx or number_of_points. dx: step-value (defaults to 10.0) number_of_points: number of points for interpolated values (use instead of dx and overrides dx if given). direction (-1,1): if direction is negative, then invert the x-values before interpolating. **kwargs: arguments passed to scipy.interpolate.interp1d Returns: DataFrame with interpolated y-values based on given or generated x-values.
entailment
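A quick usage check of _interpolate_df_col on a toy curve; with the defaults described in the docstring, ten new points are generated between the x-extremes:

import pandas as pd

df = pd.DataFrame({"t": [0.0, 1.0, 2.0], "v": [0.0, 1.0, 4.0]})
new_df = _interpolate_df_col(df, x="t", y="v", number_of_points=10)
print(len(new_df))  # 10 rows of linearly interpolated (t, v) pairs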
def _collect_capacity_curves(data, direction="charge"): """Create a list of pandas.DataFrames, one for each charge (or discharge) cycle. Each DataFrame is named by its cycle number. Input: CellpyData Returns: list of pandas.DataFrames, list of cycle numbers, minimum voltage value, maximum voltage value""" minimum_v_value = np.Inf maximum_v_value = -np.Inf charge_list = [] cycles = data.get_cycle_numbers() for cycle in cycles: try: if direction == "charge": q, v = data.get_ccap(cycle) else: q, v = data.get_dcap(cycle) except NullData as e: logging.warning(e) break else: d = pd.DataFrame({"q": q, "v": v}) # d.name = f"{cycle}" d.name = cycle charge_list.append(d) v_min = v.min() v_max = v.max() if v_min < minimum_v_value: minimum_v_value = v_min if v_max > maximum_v_value: maximum_v_value = v_max return charge_list, cycles, minimum_v_value, maximum_v_value
Create a list of pandas.DataFrames, one for each charge (or discharge) cycle. Each DataFrame is named by its cycle number. Input: CellpyData Returns: list of pandas.DataFrames, list of cycle numbers, minimum voltage value, maximum voltage value
entailment
def cell(filename=None, mass=None, instrument=None, logging_mode="INFO", cycle_mode=None, auto_summary=True): """Create a CellpyData object""" from cellpy import log log.setup_logging(default_level=logging_mode) cellpy_instance = setup_cellpy_instance() if instrument is not None: cellpy_instance.set_instrument(instrument=instrument) if cycle_mode is not None: cellpy_instance.cycle_mode = cycle_mode if filename is not None: filename = Path(filename) if filename.suffix in [".h5", ".hdf5", ".cellpy", ".cpy"]: logging.info(f"Loading cellpy-file: {filename}") cellpy_instance.load(filename) else: logging.info(f"Loading raw-file: {filename}") cellpy_instance.from_raw(filename) if mass is not None: logging.info("Setting mass") cellpy_instance.set_mass(mass) if auto_summary: logging.info("Creating step table") cellpy_instance.make_step_table() logging.info("Creating summary data") cellpy_instance.make_summary() logging.info("Created CellpyData object") return cellpy_instance
Create a CellpyData object
entailment
def just_load_srno(srno, prm_filename=None): """Simply load a dataset based on serial number (srno). This convenience function reads a dataset based on a serial number. This serial number (srno) must then be defined in your database. It is mainly used to check that things are set up correctly. Args: prm_filename: name of parameter file (optional). srno (int): serial number Example: >>> srno = 918 >>> just_load_srno(srno) srno: 918 read prms .... """ from cellpy import dbreader, filefinder print("just_load_srno: srno: %i" % srno) # ------------reading parameters-------------------------------------------- # print "just_load_srno: read prms" # prm = prmreader.read(prm_filename) # # print prm print("just_load_srno: making class and setting prms") d = CellpyData() # ------------reading db---------------------------------------------------- print() print("just_load_srno: starting to load reader") # reader = dbreader.reader(prm_filename) reader = dbreader.Reader() print("------ok------") run_name = reader.get_cell_name(srno) print("just_load_srno: run_name:") print(run_name) m = reader.get_mass(srno) print("just_load_srno: mass: %f" % m) print() # ------------loadcell------------------------------------------------------ print("just_load_srno: getting file_names") raw_files, cellpy_file = filefinder.search_for_files(run_name) print("raw_files:", raw_files) print("cellpy_file:", cellpy_file) print("just_load_srno: running loadcell") d.loadcell(raw_files, cellpy_file, mass=m) print("------ok------") # ------------do stuff------------------------------------------------------ print("just_load_srno: getting step_numbers for charge") v = d.get_step_numbers("charge") print(v) print() print("just_load_srno: finding C-rates") d.find_C_rates(v, silent=False) print() print("just_load_srno: OK") return True
Simply load a dataset based on serial number (srno). This convenience function reads a dataset based on a serial number. This serial number (srno) must then be defined in your database. It is mainly used to check that things are set up correctly. Args: prm_filename: name of parameter file (optional). srno (int): serial number Example: >>> srno = 918 >>> just_load_srno(srno) srno: 918 read prms ....
entailment
def load_and_save_resfile(filename, outfile=None, outdir=None, mass=1.00): """Load a raw data file and save it as cellpy-file. Args: mass (float): active material mass [mg]. outdir (path): optional, path to directory for saving the hdf5-file. outfile (str): optional, name of hdf5-file. filename (str): name of the resfile. Returns: out_file_name (str): name of saved file. """ d = CellpyData() if not outdir: outdir = prms.Paths["cellpydatadir"] if not outfile: outfile = os.path.basename(filename).split(".")[0] + ".h5" outfile = os.path.join(outdir, outfile) print("filename:", filename) print("outfile:", outfile) print("outdir:", outdir) print("mass:", mass, "mg") d.from_raw(filename) d.set_mass(mass) d.make_step_table() d.make_summary() d.save(filename=outfile) d.to_csv(datadir=outdir, cycles=True, raw=True, summary=True) return outfile
Load a raw data file and save it as cellpy-file. Args: mass (float): active material mass [mg]. outdir (path): optional, path to directory for saving the hdf5-file. outfile (str): optional, name of hdf5-file. filename (str): name of the resfile. Returns: out_file_name (str): name of saved file.
entailment
def load_and_print_resfile(filename, info_dict=None): """Load a raw data file and print information. Args: filename (str): name of the resfile. info_dict (dict): optional dict with default values for mass, nom_cap and tot_mass. Returns: info_dict (dict): the info dictionary (filled with defaults if not given). """ # self.test_no = None # self.mass = 1.0 # mass of (active) material (in mg) # self.no_cycles = 0.0 # self.charge_steps = None # not in use at the moment # self.discharge_steps = None # not in use at the moment # self.ir_steps = None # dict # not in use at the moment # self.ocv_steps = None # dict # not in use at the moment # self.nom_cap = 3579 # mAh/g (used for finding c-rates) # self.mass_given = False # self.c_mode = True # self.starts_with = "discharge" # self.material = "noname" # self.merged = False # self.file_errors = None # not in use at the moment # self.loaded_from = None # name of the .res file it is loaded from # (can be list if merged) # self.raw_data_files = [] # self.raw_data_files_length = [] # # self.parent_filename = None # name of the .res file it is loaded from # (basename) (can be list if merged) # # self.parent_filename = if listtype, for file in etc,,, # os.path.basename(self.loaded_from) # self.channel_index = None # self.channel_number = None # self.creator = None # self.item_ID = None # self.schedule_file_name = None # self.start_datetime = None # self.test_ID = None # self.name = None # NEXT: include nom_cap, tot_mass and parameters table in save/load hdf5 if info_dict is None: info_dict = dict() info_dict["mass"] = 1.23 # mg info_dict["nom_cap"] = 3600 # mAh/g (active material) info_dict["tot_mass"] = 2.33 # mg (total mass of material) d = CellpyData() print("filename:", filename) print("info_dict in:", end=' ') print(info_dict) d.from_raw(filename) d.set_mass(info_dict["mass"]) d.make_step_table() d.make_summary() for test in d.datasets: print("newtest") print(test) return info_dict
Load a raw data file and print information. Args: filename (str): name of the resfile. info_dict (dict): optional dict with default values for mass, nom_cap and tot_mass. Returns: info_dict (dict): the info dictionary (filled with defaults if not given).
entailment
def set_instrument(self, instrument=None): """Set the instrument (i.e. tell cellpy the file-type you use). Args: instrument: (str) in ["arbin", "bio-logic-csv", "bio-logic-bin",...] Sets the instrument used for obtaining the data (i.e. sets fileformat) """ if instrument is None: instrument = self.tester if instrument in ["arbin", "arbin_res"]: self._set_arbin() self.tester = "arbin" elif instrument == "arbin_sql": self._set_arbin_sql() self.tester = "arbin" elif instrument == "arbin_experimental": self._set_arbin_experimental() self.tester = "arbin" elif instrument in ["pec", "pec_csv"]: self._set_pec() self.tester = "pec" elif instrument in ["biologics", "biologics_mpr"]: self._set_biologic() self.tester = "biologic" elif instrument == "custom": self._set_custom() self.tester = "custom" else: raise Exception(f"option does not exist: '{instrument}'")
Set the instrument (i.e. tell cellpy the file-type you use). Args: instrument: (str) in ["arbin", "bio-logic-csv", "bio-logic-bin",...] Sets the instrument used for obtaining the data (i.e. sets fileformat)
entailment
def set_raw_datadir(self, directory=None): """Set the directory containing .res-files. Used for setting the directory for looking for res-files. A valid directory name is required. Args: directory (str): path to res-directory Example: >>> d = CellpyData() >>> directory = "MyData/Arbindata" >>> d.set_raw_datadir(directory) """ if directory is None: self.logger.info("no directory name given") return if not os.path.isdir(directory): self.logger.info(directory) self.logger.info("directory does not exist") return self.raw_datadir = directory
Set the directory containing .res-files. Used for setting the directory for looking for res-files. A valid directory name is required. Args: directory (str): path to res-directory Example: >>> d = CellpyData() >>> directory = "MyData/Arbindata" >>> d.set_raw_datadir(directory)
entailment
def set_cellpy_datadir(self, directory=None): """Set the directory containing .hdf5-files. Used for setting directory for looking for hdf5-files. A valid directory name is required. Args: directory (str): path to hdf5-directory Example: >>> d = CellpyData() >>> directory = "MyData/HDF5" >>> d.set_cellpy_datadir(directory) """ if directory is None: self.logger.info("no directory name given") return if not os.path.isdir(directory): self.logger.info("directory does not exist") return self.cellpy_datadir = directory
Set the directory containing .hdf5-files. Used for setting directory for looking for hdf5-files. A valid directory name is required. Args: directory (str): path to hdf5-directory Example: >>> d = CellpyData() >>> directory = "MyData/HDF5" >>> d.set_cellpy_datadir(directory)
entailment
def check_file_ids(self, rawfiles, cellpyfile): """Check the stats for the files (raw-data and cellpy hdf5). This function checks if the hdf5 file and the res-files have the same timestamps etc to find out if we need to bother to load .res-files. Args: cellpyfile (str): filename of the cellpy hdf5-file. rawfiles (list of str): name(s) of raw-data file(s). Returns: False if the raw files are newer than the cellpy hdf5-file (update needed), True if the cellpy hdf5-file is up to date. """ txt = "checking file ids - using '%s'" % self.filestatuschecker self.logger.info(txt) ids_cellpy_file = self._check_cellpy_file(cellpyfile) self.logger.debug(f"cellpyfile ids: {ids_cellpy_file}") if not ids_cellpy_file: # self.logger.debug("hdf5 file does not exist - needs updating") return False ids_raw = self._check_raw(rawfiles) similar = self._compare_ids(ids_raw, ids_cellpy_file) if not similar: # self.logger.debug("hdf5 file needs updating") return False else: # self.logger.debug("hdf5 file is updated") return True
Check the stats for the files (raw-data and cellpy hdf5). This function checks if the hdf5 file and the res-files have the same timestamps etc to find out if we need to bother to load .res-files. Args: cellpyfile (str): filename of the cellpy hdf5-file. rawfiles (list of str): name(s) of raw-data file(s). Returns: False if the raw files are newer than the cellpy hdf5-file (update needed), True if the cellpy hdf5-file is up to date.
entailment
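The essence of check_file_ids is to fingerprint files by size or modification time and compare the fingerprints. A toy standalone version of that idea (file_ids is a hypothetical helper, not part of cellpy):

import os
import tempfile

def file_ids(paths, check_on="modified"):
    # Fingerprint each file by mtime (or size), keyed by basename.
    stat = os.path.getsize if check_on == "size" else os.path.getmtime
    return {os.path.basename(p): int(stat(p)) for p in paths}

with tempfile.NamedTemporaryFile(delete=False, suffix=".res") as f:
    f.write(b"raw data")

ids_at_save_time = file_ids([f.name])
ids_now = file_ids([f.name])
needs_update = ids_at_save_time != ids_now  # False: the file has not changed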
def _check_raw(self, file_names, abort_on_missing=False): """Get the file-ids for the res_files.""" strip_file_names = True check_on = self.filestatuschecker if not self._is_listtype(file_names): file_names = [file_names, ] ids = dict() for f in file_names: self.logger.debug(f"checking res file {f}") fid = FileID(f) # self.logger.debug(fid) if fid.name is None: warnings.warn(f"file does not exist: {f}") if abort_on_missing: sys.exit(-1) else: if strip_file_names: name = os.path.basename(f) else: name = f if check_on == "size": ids[name] = int(fid.size) elif check_on == "modified": ids[name] = int(fid.last_modified) else: ids[name] = int(fid.last_accessed) return ids
Get the file-ids for the res_files.
entailment
def _check_cellpy_file(self, filename): """Get the file-ids for the cellpy_file.""" strip_filenames = True check_on = self.filestatuschecker self.logger.debug("checking cellpy-file") self.logger.debug(filename) if not os.path.isfile(filename): self.logger.debug("cellpy-file does not exist") return None try: store = pd.HDFStore(filename) except Exception as e: self.logger.debug(f"could not open cellpy-file ({e})") return None try: fidtable = store.select("CellpyData/fidtable") except KeyError: self.logger.warning("no fidtable -" " you should update your hdf5-file") fidtable = None finally: store.close() if fidtable is not None: raw_data_files, raw_data_files_length = \ self._convert2fid_list(fidtable) txt = "contains %i res-files" % (len(raw_data_files)) self.logger.debug(txt) ids = dict() for fid in raw_data_files: full_name = fid.full_name size = fid.size mod = fid.last_modified self.logger.debug(f"fileID information for: {full_name}") self.logger.debug(f" modified: {mod}") self.logger.debug(f" size: {size}") if strip_filenames: name = os.path.basename(full_name) else: name = full_name if check_on == "size": ids[name] = int(fid.size) elif check_on == "modified": ids[name] = int(fid.last_modified) else: ids[name] = int(fid.last_accessed) return ids else: return None
Get the file-ids for the cellpy_file.
entailment
def loadcell(self, raw_files, cellpy_file=None, mass=None, summary_on_raw=False, summary_ir=True, summary_ocv=False, summary_end_v=True, only_summary=False, only_first=False, force_raw=False, use_cellpy_stat_file=None): """Loads data for given cells. Args: raw_files (list): name of res-files cellpy_file (path): name of cellpy-file mass (float): mass of electrode or active material summary_on_raw (bool): use raw-file for summary summary_ir (bool): summarize ir summary_ocv (bool): summarize ocv steps summary_end_v (bool): summarize end voltage only_summary (bool): get only the summary of the runs only_first (bool): only use the first file fitting search criteria force_raw (bool): only use raw-files use_cellpy_stat_file (bool): use stat file if creating summary from raw Example: >>> srnos = my_dbreader.select_batch("testing_new_solvent") >>> cell_datas = [] >>> for srno in srnos: >>> ... my_run_name = my_dbreader.get_cell_name(srno) >>> ... mass = my_dbreader.get_mass(srno) >>> ... rawfiles, cellpyfiles = \ >>> ... filefinder.search_for_files(my_run_name) >>> ... cell_data = cellreader.CellpyData() >>> ... cell_data.loadcell(raw_files=rawfiles, >>> ... cellpy_file=cellpyfiles) >>> ... cell_data.set_mass(mass) >>> ... if not cell_data.summary_exists: >>> ... cell_data.make_summary() # etc. etc. >>> ... cell_datas.append(cell_data) >>> """ # This is a part of a dramatic API change. It will not be possible to # load more than one set of datasets (i.e. one single cellpy-file or # several raw-files that will be automatically merged) self.logger.info("started loadcell") if cellpy_file is None: similar = False elif force_raw: similar = False else: similar = self.check_file_ids(raw_files, cellpy_file) self.logger.debug("checked if the files were similar") if only_summary: self.load_only_summary = True else: self.load_only_summary = False if not similar: self.logger.info("cellpy file(s) needs updating - loading raw") self.logger.debug(raw_files) self.from_raw(raw_files) self.logger.debug("loaded files") # Check if the run was loaded ([] if empty) if self.status_datasets: if mass: self.set_mass(mass) if summary_on_raw: self.make_summary(all_tests=False, find_ocv=summary_ocv, find_ir=summary_ir, find_end_voltage=summary_end_v, use_cellpy_stat_file=use_cellpy_stat_file) else: self.logger.warning("Empty run!") else: self.load(cellpy_file) return self
Loads data for given cells. Args: raw_files (list): name of res-files cellpy_file (path): name of cellpy-file mass (float): mass of electrode or active material summary_on_raw (bool): use raw-file for summary summary_ir (bool): summarize ir summary_ocv (bool): summarize ocv steps summary_end_v (bool): summarize end voltage only_summary (bool): get only the summary of the runs only_first (bool): only use the first file fitting search criteria force_raw (bool): only use raw-files use_cellpy_stat_file (bool): use stat file if creating summary from raw Example: >>> srnos = my_dbreader.select_batch("testing_new_solvent") >>> cell_datas = [] >>> for srno in srnos: >>> ... my_run_name = my_dbreader.get_cell_name(srno) >>> ... mass = my_dbreader.get_mass(srno) >>> ... rawfiles, cellpyfiles = \ >>> ... filefinder.search_for_files(my_run_name) >>> ... cell_data = cellreader.CellpyData() >>> ... cell_data.loadcell(raw_files=rawfiles, >>> ... cellpy_file=cellpyfiles) >>> ... cell_data.set_mass(mass) >>> ... if not cell_data.summary_exists: >>> ... cell_data.make_summary() # etc. etc. >>> ... cell_datas.append(cell_data) >>>
entailment
def from_raw(self, file_names=None, **kwargs): """Load a raw data-file. Args: file_names (list of raw-file names): uses CellpyData.file_names if None. If the list contains more than one file name, then the runs will be merged together. """ # This function only loads one test at a time (but could contain several # files). The function from_res() also implements loading several # datasets (using list of lists as input). if file_names: self.file_names = file_names if not isinstance(file_names, (list, tuple)): self.file_names = [file_names, ] # file_type = self.tester raw_file_loader = self.loader set_number = 0 test = None counter = 0 self.logger.debug("start iterating through file(s)") for f in self.file_names: self.logger.debug("loading raw file:") self.logger.debug(f"{f}") new_tests = raw_file_loader(f, **kwargs) if new_tests: if test is not None: self.logger.debug("continuing reading files...") _test = self._append(test[set_number], new_tests[set_number]) if not _test: self.logger.warning(f"EMPTY TEST: {f}") continue test[set_number] = _test self.logger.debug("added this test - started merging") for j in range(len(new_tests[set_number].raw_data_files)): raw_data_file = new_tests[set_number].raw_data_files[j] file_size = new_tests[set_number].raw_data_files_length[j] test[set_number].raw_data_files.append(raw_data_file) test[set_number].raw_data_files_length.append(file_size) counter += 1 if counter > 10: self.logger.debug("ERROR? Too many files to merge") raise ValueError("Too many files to merge - " "could be a p2-p3 zip thing") else: self.logger.debug("getting data from first file") if new_tests[set_number].no_data: self.logger.debug("NO DATA") else: test = new_tests else: self.logger.debug("NOTHING LOADED") self.logger.debug("finished loading the raw-files") test_exists = False if test: if test[0].no_data: self.logger.debug("the first dataset (or only dataset) loaded from the raw data file is empty") else: test_exists = True if test_exists: if not prms.Reader.sorted_data: self.logger.debug("sorting data") test[set_number] = self._sort_data(test[set_number]) self.datasets.append(test[set_number]) else: self.logger.warning("No new datasets added!") self.number_of_datasets = len(self.datasets) self.status_datasets = self._validate_datasets() self._invent_a_name() return self
Load a raw data-file. Args: file_names (list of raw-file names): uses CellpyData.file_names if None. If the list contains more than one file name, then the runs will be merged together.
entailment
def check(self): """Returns False if no datasets exists or if one or more of the datasets are empty""" if len(self.status_datasets) == 0: return False if all(self.status_datasets): return True return False
Returns False if no datasets exists or if one or more of the datasets are empty
entailment
def load(self, cellpy_file, parent_level="CellpyData"): """Loads a cellpy file. Args: cellpy_file (path, str): Full path to the cellpy file. parent_level (str, optional): Parent level """ try: self.logger.debug("loading cellpy-file (hdf5):") self.logger.debug(cellpy_file) new_datasets = self._load_hdf5(cellpy_file, parent_level) self.logger.debug("cellpy-file loaded") except AttributeError: new_datasets = [] self.logger.warning("This cellpy-file version is not supported by " "current reader (try to update cellpy).") if new_datasets: for dataset in new_datasets: self.datasets.append(dataset) else: # raise LoadError self.logger.warning("Could not load") self.logger.warning(str(cellpy_file)) self.number_of_datasets = len(self.datasets) self.status_datasets = self._validate_datasets() self._invent_a_name(cellpy_file) return self
Loads a cellpy file. Args: cellpy_file (path, str): Full path to the cellpy file. parent_level (str, optional): Parent level
entailment
def _load_hdf5(self, filename, parent_level="CellpyData"): """Load a cellpy-file. Args: filename (str): Name of the cellpy file. parent_level (str) (optional): name of the parent level (defaults to "CellpyData") Returns: loaded datasets (DataSet-object) """ if not os.path.isfile(filename): self.logger.info(f"file does not exist: {filename}") raise IOError store = pd.HDFStore(filename) # required_keys = ['dfdata', 'dfsummary', 'fidtable', 'info'] required_keys = ['dfdata', 'dfsummary', 'info'] required_keys = ["/" + parent_level + "/" + _ for _ in required_keys] for key in required_keys: if key not in store.keys(): self.logger.info(f"This hdf-file is not good enough - " f"at least one key is missing: {key}") raise Exception(f"OH MY GOD! At least one crucial key " f"is missing {key}!") self.logger.debug(f"Keys in current hdf5-file: {store.keys()}") data = DataSet() if parent_level != "CellpyData": self.logger.debug("Using non-default parent label for the " "hdf-store: {}".format(parent_level)) # checking file version infotable = store.select(parent_level + "/info") try: data.cellpy_file_version = \ self._extract_from_dict(infotable, "cellpy_file_version") except Exception as e: data.cellpy_file_version = 0 warnings.warn(f"Unhandled exception raised: {e}") if data.cellpy_file_version < MINIMUM_CELLPY_FILE_VERSION: raise WrongFileVersion if data.cellpy_file_version > CELLPY_FILE_VERSION: raise WrongFileVersion data.dfsummary = store.select(parent_level + "/dfsummary") data.dfdata = store.select(parent_level + "/dfdata") try: data.step_table = store.select(parent_level + "/step_table") except Exception as e: self.logger.debug("could not get step_table from cellpy-file") data.step_table = pd.DataFrame() warnings.warn(f"Unhandled exception raised: {e}") try: fidtable = store.select( parent_level + "/fidtable") # remark! changed spelling from # lower letter to camel-case! fidtable_selected = True except Exception as e: self.logger.debug("could not get fid-table from cellpy-file") fidtable = [] warnings.warn("no fidtable - you should update your hdf5-file") fidtable_selected = False self.logger.debug(" h5") # this does not yet allow multiple sets newtests = [] # but this is ready when that time comes # The infotable stores "meta-data". The following statements loads the # content of infotable and updates div. DataSet attributes. # Maybe better use it as dict? data = self._load_infotable(data, infotable, filename) if fidtable_selected: data.raw_data_files, data.raw_data_files_length = \ self._convert2fid_list(fidtable) else: data.raw_data_files = None data.raw_data_files_length = None newtests.append(data) store.close() # self.datasets.append(data) return newtests
Load a cellpy-file. Args: filename (str): Name of the cellpy file. parent_level (str) (optional): name of the parent level (defaults to "CellpyData") Returns: loaded datasets (DataSet-object)
entailment
def merge(self, datasets=None, separate_datasets=False): """This function merges datasets into one set.""" self.logger.info("merging") if separate_datasets: warnings.warn("The option separate_datasets=True is " "not implemented yet. Performing merging, but " "neglecting the option.") else: if datasets is None: datasets = list(range(len(self.datasets))) first = True for dataset_number in datasets: if first: dataset = self.datasets[dataset_number] first = False else: dataset = self._append(dataset, self.datasets[dataset_number]) for raw_data_file, file_size in zip(self.datasets[dataset_number].raw_data_files, self.datasets[dataset_number].raw_data_files_length): dataset.raw_data_files.append(raw_data_file) dataset.raw_data_files_length.append(file_size) self.datasets = [dataset] self.number_of_datasets = 1 return self
This function merges datasets into one set.
entailment
def print_step_table(self, dataset_number=None): """Print the step table.""" dataset_number = self._validate_dataset_number(dataset_number) if dataset_number is None: self._report_empty_dataset() return st = self.datasets[dataset_number].step_table print(st)
Print the step table.
entailment
def get_step_numbers(self, steptype='charge', allctypes=True, pdtype=False, cycle_number=None, dataset_number=None, steptable=None): # TODO: @jepe - include sub_steps here """Get the step numbers of selected type. Returns the selected step_numbers for the selected type of step(s). Args: steptype (string): string identifying type of step. allctypes (bool): get all types of charge (or discharge). pdtype (bool): return results as pandas.DataFrame cycle_number (int): selected cycle, selects all if not set. dataset_number (int): test number (default first) (usually not used). steptable (pandas.DataFrame): optional steptable Returns: A dictionary containing a list of step numbers corresponding to the selected steptype for the cycle(s). Returns a pandas.DataFrame instead of a dict of lists if pdtype is set to True. The frame is a sub-set of the step-table frame (i.e. all the same columns, only filtered by rows). Example: >>> my_charge_steps = CellpyData.get_step_numbers( >>> "charge", >>> cycle_number = 3 >>> ) >>> print(my_charge_steps) {3: [5,8]} """ # self.logger.debug("Trying to get step-types") if steptable is None: dataset_number = self._validate_dataset_number(dataset_number) if dataset_number is None: self._report_empty_dataset() return if not self.datasets[dataset_number].step_table_made: self.logger.debug("step_table is not made") if self.force_step_table_creation or self.force_all: self.logger.debug("creating step_table for") self.logger.debug(self.datasets[dataset_number].loaded_from) # print("CREATING STEP-TABLE") self.make_step_table(dataset_number=dataset_number) else: self.logger.info("ERROR! Cannot use get_steps: " "create step_table first") self.logger.info(" you could use find_step_numbers" " method instead") self.logger.info(" (but I don't recommend it)") return None # check if steptype is valid steptype = steptype.lower() steptypes = [] helper_step_types = ['ocv', 'charge_discharge'] valid_step_type = True if steptype in self.list_of_step_types: steptypes.append(steptype) else: txt = "%s is not a valid core steptype" % steptype if steptype in helper_step_types: txt = "but a helper steptype" if steptype == 'ocv': steptypes.append('ocvrlx_up') steptypes.append('ocvrlx_down') elif steptype == 'charge_discharge': steptypes.append('charge') steptypes.append('discharge') else: valid_step_type = False self.logger.debug(txt) if not valid_step_type: return None # in case of selection allctypes, then modify charge, discharge if allctypes: add_these = [] for st in steptypes: if st in ['charge', 'discharge']: st1 = st + '_cv' add_these.append(st1) st1 = 'cv_' + st add_these.append(st1) for st in add_these: steptypes.append(st) # self.logger.debug("Your steptypes:") # self.logger.debug(steptypes) if steptable is None: st = self.datasets[dataset_number].step_table else: st = steptable shdr = self.headers_step_table # retrieving cycle numbers if cycle_number is None: cycle_numbers = self.get_cycle_numbers( dataset_number, steptable=steptable ) else: if isinstance(cycle_number, (list, tuple)): cycle_numbers = cycle_number else: cycle_numbers = [cycle_number, ] if pdtype: self.logger.debug("return pandas dataframe") out = st[st[shdr.type].isin(steptypes) & st[shdr.cycle].isin(cycle_numbers)] return out # if not pdtype, return a dict instead # self.logger.debug("out as dict; out[cycle] = [s1,s2,...]") # self.logger.debug("(same behaviour as find_step_numbers)") self.logger.debug("return dict of lists") out = dict() for cycle in cycle_numbers: steplist = [] for s in steptypes: step = st[(st[shdr.type] == s) & (st[shdr.cycle] == cycle)][shdr.step].tolist() for newstep in step: steplist.append(int(newstep)) # int(step.iloc[0]) # self.is_empty(steps) if not steplist: steplist = [0] out[cycle] = steplist return out
Get the step numbers of selected type. Returns the selected step_numbers for the selected type of step(s). Args: steptype (string): string identifying type of step. allctypes (bool): get all types of charge (or discharge). pdtype (bool): return results as pandas.DataFrame cycle_number (int): selected cycle, selects all if not set. dataset_number (int): test number (default first) (usually not used). steptable (pandas.DataFrame): optional steptable Returns: A dictionary containing a list of step numbers corresponding to the selected steptype for the cycle(s). Returns a pandas.DataFrame instead of a dict of lists if pdtype is set to True. The frame is a sub-set of the step-table frame (i.e. all the same columns, only filtered by rows). Example: >>> my_charge_steps = CellpyData.get_step_numbers( >>> "charge", >>> cycle_number = 3 >>> ) >>> print(my_charge_steps) {3: [5,8]}
entailment
def load_step_specifications(self, file_name, short=False, dataset_number=None): """ Load a table that contains step-type definitions. This function loads a file containing a specification for each step or for each (cycle_number, step_number) combinations if short==False. The step_cycle specifications that are allowed are stored in the variable cellreader.list_of_step_types. """ dataset_number = self._validate_dataset_number(dataset_number) if dataset_number is None: self._report_empty_dataset() return # if short: # # the table only consists of steps (not cycle,step pairs) assuming # # that the step numbers uniquely defines step type (this is true # # for arbin at least). # raise NotImplementedError step_specs = pd.read_csv(file_name, sep=prms.Reader.sep) if "step" not in step_specs.columns: self.logger.info("step col is missing") raise IOError if "type" not in step_specs.columns: self.logger.info("type col is missing") raise IOError if not short and "cycle" not in step_specs.columns: self.logger.info("cycle col is missing") raise IOError self.make_step_table(custom_step_definition=True, step_specifications=step_specs, short=short)
Load a table that contains step-type definitions. This function loads a file containing a specification for each step or for each (cycle_number, step_number) combinations if short==False. The step_cycle specifications that are allowed are stored in the variable cellreader.list_of_step_types.
entailment
def make_step_table(self, custom_step_definition=False, step_specifications=None, short=False, dataset_number=None): """ Create a table (v.4) that contains summary information for each step. This function creates a table containing information about the different steps for each cycle and, based on that, decides what type of step it is (e.g. charge) for each cycle. The format of the step_table is: index: cycleno - stepno - sub-step-no Logging info (average, stdev, max, min, start, end, delta) - Current info (average, stdev, max, min, start, end, delta) - Voltage info (average, stdev, max, min, start, end, delta) - Type (from pre-defined list) - SubType - Info """ time_00 = time.time() dataset_number = self._validate_dataset_number(dataset_number) if dataset_number is None: self._report_empty_dataset() return nhdr = self.headers_normal shdr = self.headers_step_table df = self.datasets[dataset_number].dfdata # df[shdr.internal_resistance_change] = \ # df[nhdr.internal_resistance_txt].pct_change() def first(x): return x.iloc[0] def last(x): return x.iloc[-1] def delta(x): if x.iloc[0] == 0.0: # starts from a zero value difference = 100.0 * x.iloc[-1] else: difference = (x.iloc[-1] - x.iloc[0]) * 100 / x.iloc[0] return difference keep = [ nhdr.data_point_txt, nhdr.step_time_txt, nhdr.step_index_txt, nhdr.cycle_index_txt, nhdr.current_txt, nhdr.voltage_txt, nhdr.ref_voltage_txt, nhdr.charge_capacity_txt, nhdr.discharge_capacity_txt, nhdr.internal_resistance_txt, # "ir_pct_change" ] # only use col-names that exist: keep = [col for col in keep if col in df.columns] df = df[keep] df[nhdr.sub_step_index_txt] = 1 rename_dict = { nhdr.cycle_index_txt: shdr.cycle, nhdr.step_index_txt: shdr.step, nhdr.sub_step_index_txt: shdr.sub_step, nhdr.data_point_txt: shdr.point, nhdr.step_time_txt: shdr.step_time, nhdr.current_txt: shdr.current, nhdr.voltage_txt: shdr.voltage, nhdr.charge_capacity_txt: shdr.charge, nhdr.discharge_capacity_txt: shdr.discharge, nhdr.internal_resistance_txt: shdr.internal_resistance, } df = df.rename(columns=rename_dict) by = [shdr.cycle, shdr.step, shdr.sub_step] self.logger.debug(f"groupby: {by}") gf = df.groupby(by=by) df_steps = (gf.agg( [np.mean, np.std, np.amin, np.amax, first, last, delta] ).rename(columns={'amin': 'min', 'amax': 'max', 'mean': 'avr'})) df_steps = df_steps.reset_index() df_steps[shdr.type] = np.nan df_steps[shdr.sub_type] = np.nan df_steps[shdr.info] = np.nan current_limit_value_hard = self.raw_limits["current_hard"] current_limit_value_soft = self.raw_limits["current_soft"] stable_current_limit_hard = self.raw_limits["stable_current_hard"] stable_current_limit_soft = self.raw_limits["stable_current_soft"] stable_voltage_limit_hard = self.raw_limits["stable_voltage_hard"] stable_voltage_limit_soft = self.raw_limits["stable_voltage_soft"] stable_charge_limit_hard = self.raw_limits["stable_charge_hard"] stable_charge_limit_soft = self.raw_limits["stable_charge_soft"] ir_change_limit = self.raw_limits["ir_change"] mask_no_current_hard = ( df_steps.loc[:, (shdr.current, "max")].abs() + df_steps.loc[:, (shdr.current, "min")].abs() ) < current_limit_value_hard mask_voltage_down = df_steps.loc[:, (shdr.voltage, "delta")] < \ - stable_voltage_limit_hard mask_voltage_up = df_steps.loc[:, (shdr.voltage, "delta")] > \ stable_voltage_limit_hard mask_voltage_stable = df_steps.loc[:, (shdr.voltage, "delta")].abs() < \ stable_voltage_limit_hard mask_current_down = df_steps.loc[:, (shdr.current, "delta")] < \ - stable_current_limit_soft mask_current_up = df_steps.loc[:, (shdr.current, "delta")] > \ stable_current_limit_soft mask_current_negative = df_steps.loc[:, (shdr.current, "avr")] < \ - current_limit_value_hard mask_current_positive = df_steps.loc[:, (shdr.current, "avr")] > \ current_limit_value_hard mask_galvanostatic = df_steps.loc[:, (shdr.current, "delta")].abs() < \ stable_current_limit_soft mask_charge_changed = df_steps.loc[:, (shdr.charge, "delta")].abs() > \ stable_charge_limit_hard mask_discharge_changed = df_steps.loc[:, (shdr.discharge, "delta")].abs() > \ stable_charge_limit_hard mask_no_change = (df_steps.loc[:, (shdr.voltage, "delta")] == 0) & \ (df_steps.loc[:, (shdr.current, "delta")] == 0) & \ (df_steps.loc[:, (shdr.charge, "delta")] == 0) & \ (df_steps.loc[:, (shdr.discharge, "delta")] == 0) if custom_step_definition: self.logger.debug("parsing custom step definition") if not short: self.logger.debug("using long format (cycle,step)") for row in step_specifications.itertuples(): # self.logger.debug(f"cycle: {row.cycle} step: {row.step}" # f" type: {row.type}") df_steps.loc[(df_steps[shdr.step] == row.step) & (df_steps[shdr.cycle] == row.cycle), "type"] = row.type df_steps.loc[(df_steps[shdr.step] == row.step) & (df_steps[shdr.cycle] == row.cycle), "info"] = row.info else: self.logger.debug("using short format (step)") for row in step_specifications.itertuples(): # self.logger.debug(f"step: {row.step} " # f"type: {row.type}" # f"info: {row.info}") df_steps.loc[df_steps[shdr.step] == row.step, "type"] = row.type df_steps.loc[df_steps[shdr.step] == row.step, "info"] = row.info else: self.logger.debug("masking and labelling steps") df_steps.loc[mask_no_current_hard & mask_voltage_stable, shdr.type] = 'rest' df_steps.loc[mask_no_current_hard & mask_voltage_up, shdr.type] = 'ocvrlx_up' df_steps.loc[mask_no_current_hard & mask_voltage_down, shdr.type] = 'ocvrlx_down' df_steps.loc[mask_discharge_changed & mask_current_negative, shdr.type] = 'discharge' df_steps.loc[mask_charge_changed & mask_current_positive, shdr.type] = 'charge' df_steps.loc[ mask_voltage_stable & mask_current_negative & mask_current_down, shdr.type ] = 'cv_discharge' df_steps.loc[mask_voltage_stable & mask_current_positive & mask_current_down, shdr.type] = 'cv_charge' # --- internal resistance ---- df_steps.loc[mask_no_change, shdr.type] = 'ir' # assumes that IR is stored in just one row # --- sub-step-txt ----------- df_steps[shdr.sub_type] = None # --- CV steps ---- # "voltametry_charge" # mask_charge_changed # mask_voltage_up # (could also include abs-delta-cumsum current) # "voltametry_discharge" # mask_discharge_changed # mask_voltage_down # check if all the steps got categorized self.logger.debug("looking for un-categorized steps") empty_rows = df_steps.loc[df_steps[shdr.type].isnull()] if not empty_rows.empty: logging.warning( f"found {len(empty_rows)}" f":{len(df_steps)} non-categorized steps " f"(please, check your raw-limits)") # flatten (possibly removed in the future), # (maybe we will implement multiindexed tables) self.logger.debug(f"flatten columns") flat_cols = [] for col in df_steps.columns: if isinstance(col, tuple): if col[-1]: col = "_".join(col) else: col = col[0] flat_cols.append(col) df_steps.columns = flat_cols self.datasets[dataset_number].step_table = df_steps self.logger.debug(f"(dt: {(time.time() - time_00):4.2f}s)") return self
Create a table (v.4) that contains summary information for each step. This function creates a table containing information about the different steps for each cycle and, based on that, decides what type of step it is (e.g. charge) for each cycle. The format of the step_table is: index: cycleno - stepno - sub-step-no Logging info (average, stdev, max, min, start, end, delta) - Current info (average, stdev, max, min, start, end, delta) - Voltage info (average, stdev, max, min, start, end, delta) - Type (from pre-defined list) - SubType - Info
entailment
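For illustration, a minimal sketch of how the custom step definition path above could be driven; the CellpyData instance `cell` and the specification values are assumptions, not taken from the source:

import pandas as pd

# the column names match the attributes read via itertuples()
# in the long format (cycle, step, type, info)
step_specs = pd.DataFrame({
    "cycle": [1, 1],
    "step": [1, 2],
    "type": ["charge", "rest"],
    "info": ["CC charge", "pause"],
})

# cell is assumed to be a CellpyData instance with raw data loaded
cell.make_step_table(custom_step_definition=True,
                     step_specifications=step_specs)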
def to_csv(self, datadir=None, sep=None, cycles=False, raw=True,
           summary=True, shifted=False,
           method=None, shift=0.0,
           last_cycle=None):
    """Saves the data as .csv file(s).

    Args:
        datadir: folder where to save the data (uses current folder if
            not given).
        sep: the separator to use in the csv file
            (defaults to CellpyData.sep).
        cycles: (bool) export voltage-capacity curves if True.
        raw: (bool) export raw-data if True.
        summary: (bool) export summary if True.
        shifted (bool): export with cumulated shift.
        method (string): how the curves are given
            "back-and-forth" - standard back and forth; discharge
                (or charge) reversed from where charge (or discharge)
                ends.
            "forth" - discharge (or charge) continues along x-axis.
            "forth-and-forth" - discharge (or charge) also starts at 0
                (or shift if not shift=0.0)
        shift: start-value for charge (or discharge)
        last_cycle: process only up to this cycle (if not None).

    Returns: Nothing
    """
    if sep is None:
        sep = self.sep

    self.logger.debug("saving to csv")

    dataset_number = -1
    for data in self.datasets:
        dataset_number += 1
        if not self._is_not_empty_dataset(data):
            self.logger.info("to_csv -")
            self.logger.info("empty test [%i]" % dataset_number)
            self.logger.info("not saved!")
        else:
            if isinstance(data.loaded_from, (list, tuple)):
                txt = "merged file - "
                txt += "using first file as basename"
                self.logger.debug(txt)
                no_merged_sets = len(data.loaded_from)
                no_merged_sets = "_merged_" + str(no_merged_sets).zfill(3)
                filename = data.loaded_from[0]
            else:
                filename = data.loaded_from
                no_merged_sets = ""
            firstname, extension = os.path.splitext(filename)
            firstname += no_merged_sets
            if datadir:
                firstname = os.path.join(datadir,
                                         os.path.basename(firstname))
            if raw:
                outname_normal = firstname + "_normal.csv"
                self._export_normal(data, outname=outname_normal, sep=sep)
                if data.step_table_made is True:
                    outname_steps = firstname + "_steps.csv"
                    self._export_steptable(data, outname=outname_steps,
                                           sep=sep)
                else:
                    self.logger.debug("step_table_made is not True")

            if summary:
                outname_stats = firstname + "_stats.csv"
                self._export_stats(data, outname=outname_stats, sep=sep)

            if cycles:
                outname_cycles = firstname + "_cycles.csv"
                self._export_cycles(outname=outname_cycles,
                                    dataset_number=dataset_number,
                                    sep=sep, shifted=shifted,
                                    method=method, shift=shift,
                                    last_cycle=last_cycle)
Saves the data as .csv file(s). Args: datadir: folder where to save the data (uses current folder if not given). sep: the separator to use in the csv file (defaults to CellpyData.sep). cycles: (bool) export voltage-capacity curves if True. raw: (bool) export raw-data if True. summary: (bool) export summary if True. shifted (bool): export with cumulated shift. method (string): how the curves are given "back-and-forth" - standard back and forth; discharge (or charge) reversed from where charge (or discharge) ends. "forth" - discharge (or charge) continues along x-axis. "forth-and-forth" - discharge (or charge) also starts at 0 (or shift if not shift=0.0) shift: start-value for charge (or discharge) last_cycle: process only up to this cycle (if not None). Returns: Nothing
entailment
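A hypothetical call showing the export options described in the docstring; the `cell` instance and the output folder are illustrative:

# export raw data, summaries and shifted voltage-capacity curves
# for the first 10 cycles to a folder named "out"
cell.to_csv(datadir="out", raw=True, summary=True, cycles=True,
            method="forth-and-forth", shift=0.0, last_cycle=10)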
def save(self, filename, dataset_number=None, force=False, overwrite=True,
         extension="h5", ensure_step_table=None):
    """Save the data structure to cellpy-format.

    Args:
        filename: (str) the name you want to give the file
        dataset_number: (int) if you have several datasets, choose the one
            you want (probably leave this untouched)
        force: (bool) save a file even if the summary is not made yet
            (not recommended)
        overwrite: (bool) save the new version of the file even if old one
            exists.
        extension: (str) filename extension.
        ensure_step_table: (bool) make step-table if missing.

    Returns: Nothing at all.
    """
    if ensure_step_table is None:
        ensure_step_table = self.ensure_step_table

    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self.logger.info("Saving test failed!")
        self._report_empty_dataset()
        return

    test = self.get_dataset(dataset_number)
    dfsummary_made = test.dfsummary_made

    if not dfsummary_made and not force:
        self.logger.info(
            "You should not save datasets without making a summary first!"
        )
        self.logger.info(
            "If you really want to do it, use save with force=True"
        )
        return

    step_table_made = test.step_table_made
    if not step_table_made and not force and not ensure_step_table:
        self.logger.info(
            "You should not save datasets without making "
            "a step-table first!"
        )
        self.logger.info(
            "If you really want to do it, use save with force=True"
        )
        return

    if not os.path.splitext(filename)[-1]:
        outfile_all = filename + "." + extension
    else:
        outfile_all = filename

    if os.path.isfile(outfile_all):
        self.logger.debug("Outfile exists")
        if overwrite:
            self.logger.debug("overwrite = True")
            os.remove(outfile_all)
        else:
            self.logger.info(
                "save (hdf5): file exists - did not save: %s" % outfile_all
            )
            return

    if ensure_step_table:
        self.logger.debug("ensure_step_table is on")
        if not test.step_table_made:
            self.logger.debug("save: creating step table")
            self.make_step_table(dataset_number=dataset_number)

    # This method can probably be updated using the pandas transpose trick
    self.logger.debug("trying to make infotable")
    infotbl, fidtbl = self._create_infotable(
        dataset_number=dataset_number
    )

    root = prms._cellpyfile_root

    self.logger.debug("trying to save to hdf5")
    txt = "\nHDF5 file: %s" % outfile_all
    self.logger.debug(txt)

    warnings.simplefilter("ignore", PerformanceWarning)
    try:
        store = pd.HDFStore(
            outfile_all,
            complib=prms._cellpyfile_complib,
            complevel=prms._cellpyfile_complevel,
        )

        self.logger.debug("trying to put dfdata")
        self.logger.debug(" - lets set Data_Point as index")

        hdr_data_point = self.headers_normal.data_point_txt
        test.dfdata = test.dfdata.set_index(hdr_data_point, drop=False)

        store.put(root + "/dfdata", test.dfdata,
                  format=prms._cellpyfile_dfdata_format)
        self.logger.debug(" dfdata -> hdf5 OK")

        self.logger.debug("trying to put dfsummary")
        store.put(root + "/dfsummary", test.dfsummary,
                  format=prms._cellpyfile_dfsummary_format)
        self.logger.debug(" dfsummary -> hdf5 OK")

        self.logger.debug("trying to put infotbl")
        store.put(root + "/info", infotbl,
                  format=prms._cellpyfile_infotable_format)
        self.logger.debug(" infotable -> hdf5 OK")

        self.logger.debug("trying to put fidtable")
        store.put(root + "/fidtable", fidtbl,
                  format=prms._cellpyfile_fidtable_format)
        self.logger.debug(" fidtable -> hdf5 OK")

        self.logger.debug("trying to put step_table")
        try:
            store.put(root + "/step_table", test.step_table,
                      format=prms._cellpyfile_stepdata_format)
            self.logger.debug(" step_table -> hdf5 OK")
        except TypeError:
            test = self._fix_dtype_step_table(test)
            store.put(root +
"/step_table", test.step_table, format=prms._cellpyfile_stepdata_format) self.logger.debug(" fixed step_table -> hdf5 OK") # creating indexes # hdr_data_point = self.headers_normal.data_point_txt # hdr_cycle_steptable = self.headers_step_table.cycle # hdr_cycle_normal = self.headers_normal.cycle_index_txt # store.create_table_index(root + "/dfdata", columns=[hdr_data_point], # optlevel=9, kind='full') finally: store.close() self.logger.debug(" all -> hdf5 OK") warnings.simplefilter("default", PerformanceWarning)
Save the data structure to cellpy-format. Args: filename: (str) the name you want to give the file dataset_number: (int) if you have several datasets, choose the one you want (probably leave this untouched) force: (bool) save a file even if the summary is not made yet (not recommended) overwrite: (bool) save the new version of the file even if old one exists. extension: (str) filename extension. ensure_step_table: (bool) make step-table if missing. Returns: Nothing at all.
entailment
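A minimal usage sketch, assuming a loaded CellpyData instance `cell`; the summary is made first so that save() does not bail out, and the ".h5" extension is appended automatically when missing:

cell.make_summary()
cell.save("experiment_001")   # written as experiment_001.h5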
def sget_voltage(self, cycle, step, set_number=None):
    """Returns voltage for cycle, step.

    Convenience function; same as issuing
        dfdata[(dfdata[cycle_index_header] == cycle) &
               (dfdata[step_index_header] == step)][voltage_header]

    Args:
        cycle: cycle number
        step: step number
        set_number: the dataset number (automatic selection if None)

    Returns:
        pandas.Series or None if empty
    """
    time_00 = time.time()
    set_number = self._validate_dataset_number(set_number)
    if set_number is None:
        self._report_empty_dataset()
        return
    cycle_index_header = self.headers_normal.cycle_index_txt
    voltage_header = self.headers_normal.voltage_txt
    step_index_header = self.headers_normal.step_index_txt
    test = self.datasets[set_number].dfdata

    if isinstance(step, (list, tuple)):
        warnings.warn(f"The variable step is a list "
                      f"(should be an integer): {step}")
        step = step[0]

    c = test[(test[cycle_index_header] == cycle) &
             (test[step_index_header] == step)]
    self.logger.debug(f"(dt: {(time.time() - time_00):4.2f}s)")
    if not self.is_empty(c):
        v = c[voltage_header]
        return v
    else:
        return None
Returns voltage for cycle, step. Convenience function; same as issuing dfdata[(dfdata[cycle_index_header] == cycle) & (dfdata[step_index_header] == step)][voltage_header] Args: cycle: cycle number step: step number set_number: the dataset number (automatic selection if None) Returns: pandas.Series or None if empty
entailment
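To make the "same as issuing" note in the docstring concrete, a sketch comparing the convenience call with the equivalent raw pandas selection; the instance and the cycle/step numbers are illustrative:

v1 = cell.sget_voltage(cycle=2, step=5)

# the same selection done directly on the raw DataFrame
df = cell.datasets[0].dfdata
hdr = cell.headers_normal
v2 = df[(df[hdr.cycle_index_txt] == 2)
        & (df[hdr.step_index_txt] == 5)][hdr.voltage_txt]
# v1 and v2 contain the same voltage values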
def get_voltage(self, cycle=None, dataset_number=None, full=True):
    """Returns voltage (in V).

    Args:
        cycle: cycle number (all cycles if None)
        dataset_number: first dataset if None
        full: valid only for cycle=None (i.e. all cycles), returns the
            full pandas.Series if True, else a list of pandas.Series

    Returns:
        pandas.Series (or list of pandas.Series if cycle=None or
        full=False)
    """
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    cycle_index_header = self.headers_normal.cycle_index_txt
    voltage_header = self.headers_normal.voltage_txt
    # step_index_header = self.headers_normal.step_index_txt

    test = self.datasets[dataset_number].dfdata
    if cycle:
        self.logger.debug("getting voltage curve for cycle")
        c = test[(test[cycle_index_header] == cycle)]
        if not self.is_empty(c):
            v = c[voltage_header]
            return v
    else:
        if not full:
            self.logger.debug(
                "getting list of voltage-curves for all cycles"
            )
            v = []
            no_cycles = np.amax(test[cycle_index_header])
            for j in range(1, no_cycles + 1):
                txt = "Cycle %i: " % j
                self.logger.debug(txt)
                c = test[(test[cycle_index_header] == j)]
                v.append(c[voltage_header])
        else:
            self.logger.debug("getting frame of all voltage-curves")
            v = test[voltage_header]
        return v
Returns voltage (in V). Args: cycle: cycle number (all cycles if None) dataset_number: first dataset if None full: valid only for cycle=None (i.e. all cycles), returns the full pandas.Series if True, else a list of pandas.Series Returns: pandas.Series (or list of pandas.Series if cycle=None or full=False)
entailment
def get_current(self, cycle=None, dataset_number=None, full=True):
    """Returns current (in mA).

    Args:
        cycle: cycle number (all cycles if None)
        dataset_number: first dataset if None
        full: valid only for cycle=None (i.e. all cycles), returns the
            full pandas.Series if True, else a list of pandas.Series

    Returns:
        pandas.Series (or list of pandas.Series if cycle=None or
        full=False)
    """
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    cycle_index_header = self.headers_normal.cycle_index_txt
    current_header = self.headers_normal.current_txt
    # step_index_header = self.headers_normal.step_index_txt

    test = self.datasets[dataset_number].dfdata
    if cycle:
        self.logger.debug(f"getting current for cycle {cycle}")
        c = test[(test[cycle_index_header] == cycle)]
        if not self.is_empty(c):
            v = c[current_header]
            return v
    else:
        if not full:
            self.logger.debug(
                "getting a list of current-curves for all cycles"
            )
            v = []
            no_cycles = np.amax(test[cycle_index_header])
            for j in range(1, no_cycles + 1):
                txt = "Cycle %i: " % j
                self.logger.debug(txt)
                c = test[(test[cycle_index_header] == j)]
                v.append(c[current_header])
        else:
            self.logger.debug("getting all current-curves ")
            v = test[current_header]
        return v
Returns current (in mA). Args: cycle: cycle number (all cycles if None) dataset_number: first dataset if None full: valid only for cycle=None (i.e. all cycles), returns the full pandas.Series if True, else a list of pandas.Series Returns: pandas.Series (or list of pandas.Series if cycle=None or full=False)
entailment
def sget_steptime(self, cycle, step, dataset_number=None):
    """Returns step time for cycle, step.

    Convenience function; same as issuing
        dfdata[(dfdata[cycle_index_header] == cycle) &
               (dfdata[step_index_header] == step)][step_time_header]

    Args:
        cycle: cycle number
        step: step number
        dataset_number: the dataset number (automatic selection if None)

    Returns:
        pandas.Series or None if empty
    """
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    cycle_index_header = self.headers_normal.cycle_index_txt
    step_time_header = self.headers_normal.step_time_txt
    step_index_header = self.headers_normal.step_index_txt
    test = self.datasets[dataset_number].dfdata

    if isinstance(step, (list, tuple)):
        warnings.warn(f"The variable step is a list "
                      f"(should be an integer): {step}")
        step = step[0]

    c = test.loc[
        (test[cycle_index_header] == cycle) &
        (test[step_index_header] == step), :
    ]
    if not self.is_empty(c):
        t = c[step_time_header]
        return t
    else:
        return None
Returns step time for cycle, step. Convenience function; same as issuing dfdata[(dfdata[cycle_index_header] == cycle) & (dfdata[step_index_header] == step)][step_time_header] Args: cycle: cycle number step: step number dataset_number: the dataset number (automatic selection if None) Returns: pandas.Series or None if empty
entailment
def sget_timestamp(self, cycle, step, dataset_number=None):
    """Returns timestamp for cycle, step.

    Convenience function; same as issuing
        dfdata[(dfdata[cycle_index_header] == cycle) &
               (dfdata[step_index_header] == step)][timestamp_header]

    Args:
        cycle: cycle number
        step: step number
        dataset_number: the dataset number (automatic selection if None)

    Returns:
        pandas.Series
    """
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    cycle_index_header = self.headers_normal.cycle_index_txt
    timestamp_header = self.headers_normal.test_time_txt
    step_index_header = self.headers_normal.step_index_txt
    test = self.datasets[dataset_number].dfdata

    if isinstance(step, (list, tuple)):
        warnings.warn(f"The variable step is a list "
                      f"(should be an integer): {step}")
        step = step[0]

    c = test[(test[cycle_index_header] == cycle) &
             (test[step_index_header] == step)]
    if not self.is_empty(c):
        t = c[timestamp_header]
        return t
    else:
        return pd.Series()
Returns timestamp for cycle, step. Convenience function; same as issuing dfdata[(dfdata[cycle_index_header] == cycle) & (dfdata[step_index_header] == step)][timestamp_header] Args: cycle: cycle number step: step number dataset_number: the dataset number (automatic selection if None) Returns: pandas.Series
entailment
def get_timestamp(self, cycle=None, dataset_number=None,
                  in_minutes=False, full=True):
    """Returns timestamps (in sec or minutes (if in_minutes==True)).

    Args:
        cycle: cycle number (all if None)
        dataset_number: first dataset if None
        in_minutes: return values in minutes instead of seconds if True
        full: valid only for cycle=None (i.e. all cycles), returns the
            full pandas.Series if True, else a list of pandas.Series

    Returns:
        pandas.Series (or list of pandas.Series if cycle=None or
        full=False)
    """
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    cycle_index_header = self.headers_normal.cycle_index_txt
    timestamp_header = self.headers_normal.test_time_txt

    v = pd.Series()
    test = self.datasets[dataset_number].dfdata
    if cycle:
        c = test[(test[cycle_index_header] == cycle)]
        if not self.is_empty(c):
            v = c[timestamp_header]
    else:
        if not full:
            self.logger.debug("getting timestamp for all cycles")
            v = []
            no_cycles = np.amax(test[cycle_index_header])
            for j in range(1, no_cycles + 1):
                txt = "Cycle %i: " % j
                self.logger.debug(txt)
                c = test[(test[cycle_index_header] == j)]
                v.append(c[timestamp_header])
        else:
            self.logger.debug("returning full timestamp col")
            v = test[timestamp_header]

    if in_minutes and v is not None:
        v /= 60.0
    return v
Returns timestamps (in sec or minutes (if in_minutes==True)). Args: cycle: cycle number (all if None) dataset_number: first dataset if None in_minutes: return values in minutes instead of seconds if True full: valid only for cycle=None (i.e. all cycles), returns the full pandas.Series if True, else a list of pandas.Series Returns: pandas.Series (or list of pandas.Series if cycle=None or full=False)
entailment
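Illustrative only (the `cell` instance is assumed): retrieving timestamps for one cycle in minutes, and for all cycles as a list of Series:

t = cell.get_timestamp(cycle=1, in_minutes=True)
t_all = cell.get_timestamp(full=False)   # one pandas.Series per cycle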
def get_dcap(self, cycle=None, dataset_number=None): """Returns discharge_capacity (in mAh/g), and voltage.""" # TODO: should return a DataFrame as default # but remark that we then have to update e.g. batch_helpers.py dataset_number = self._validate_dataset_number(dataset_number) if dataset_number is None: self._report_empty_dataset() return dc, v = self._get_cap(cycle, dataset_number, "discharge") return dc, v
Returns discharge_capacity (in mAh/g), and voltage.
entailment
def get_ccap(self, cycle=None, dataset_number=None): """Returns charge_capacity (in mAh/g), and voltage.""" # TODO: should return a DataFrame as default # but remark that we then have to update e.g. batch_helpers.py dataset_number = self._validate_dataset_number(dataset_number) if dataset_number is None: self._report_empty_dataset() return cc, v = self._get_cap(cycle, dataset_number, "charge") return cc, v
Returns charge_capacity (in mAh/g), and voltage.
entailment
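A sketch of the typical use of the two capacity getters above for plotting one cycle; matplotlib and the `cell` instance are assumptions, not part of the source:

import matplotlib.pyplot as plt

cc, cv = cell.get_ccap(cycle=1)   # charge capacity and voltage
dc, dv = cell.get_dcap(cycle=1)   # discharge capacity and voltage
plt.plot(cc, cv, label="charge")
plt.plot(dc, dv, label="discharge")
plt.xlabel("capacity (mAh/g)")
plt.ylabel("voltage (V)")
plt.legend()
plt.show()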
def get_cap(self, cycle=None, dataset_number=None,
            method="back-and-forth",
            shift=0.0,
            categorical_column=False,
            label_cycle_number=False,
            split=False,
            interpolated=False,
            dx=0.1,
            number_of_points=None,
            dynamic=False,
            ):
    """Gets the capacity for the run.

    For cycle=None: iterates through all the cycles.

    Args:
        cycle (int): cycle number.
        method (string): how the curves are given
            "back-and-forth" - standard back and forth; discharge
                (or charge) reversed from where charge (or discharge)
                ends.
            "forth" - discharge (or charge) continues along x-axis.
            "forth-and-forth" - discharge (or charge) also starts at 0
                (or shift if not shift=0.0)
        shift: start-value for charge (or discharge) (typically used when
            plotting shifted-capacity).
        categorical_column: add a categorical column showing if it is
            charge or discharge.
        dataset_number (int): test number (default first)
            (usually not used).
        label_cycle_number (bool): add column for cycle number
            (tidy format).
        split (bool): return a list of c and v instead of the default
            that is to return them combined in a DataFrame. This is only
            possible for some specific combinations of options (neither
            categorical_column=True nor label_cycle_number=True is
            allowed).
        interpolated (bool): set to True if you would like to get
            interpolated data (typically if you want to save disk space
            or memory). Defaults to False.
        dx (float): the step used when interpolating.
        number_of_points (int): number of points to use (over-rides dx)
            for interpolation (i.e. the length of the interpolated data).
        dynamic: for dynamic retrieving data from cellpy-file.
            [NOT IMPLEMENTED YET]

    Returns:
        pandas.DataFrame ((cycle) voltage, capacity, (direction (-1, 1)))
        unless split is explicitly set to True. Then it returns a tuple
        with capacity (mAh/g) and voltage.
    """
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return

    # if cycle is not given, then this function should
    # iterate through cycles
    if cycle is None:
        cycle = self.get_cycle_numbers()

    # collections.abc is needed for newer Python versions
    if not isinstance(cycle, collections.abc.Iterable):
        cycle = [cycle]

    if split and not (categorical_column or label_cycle_number):
        return_dataframe = False
    else:
        return_dataframe = True

    method = method.lower()
    if method not in ["back-and-forth", "forth", "forth-and-forth"]:
        warnings.warn(f"method '{method}' is not a valid option "
                      f"- setting to 'back-and-forth'")
        method = "back-and-forth"

    capacity = None
    voltage = None
    cycle_df = pd.DataFrame()

    initial = True
    for current_cycle in cycle:
        # self.logger.debug(f"processing cycle {current_cycle}")
        try:
            cc, cv = self.get_ccap(current_cycle, dataset_number)
            dc, dv = self.get_dcap(current_cycle, dataset_number)
        except NullData as e:
            self.logger.debug(e)
            self.logger.debug("breaking out of loop")
            break

        if cc.empty:
            self.logger.debug("get_ccap returns empty cc Series")

        if dc.empty:
            self.logger.debug("get_dcap returns empty dc Series")

        if initial:
            # self.logger.debug("(initial cycle)")
            prev_end = shift
            initial = False

        if self._cycle_mode == "anode":
            _first_step_c = dc
            _first_step_v = dv
            _last_step_c = cc
            _last_step_v = cv
        else:
            _first_step_c = cc
            _first_step_v = cv
            _last_step_c = dc
            _last_step_v = dv

        if method == "back-and-forth":
            _last = np.amax(_first_step_c)
            # should change amax to last point
            _first = None
            _new_first = None
            if _last_step_c is not None:
                _last_step_c = _last - _last_step_c + prev_end
            else:
                self.logger.debug("no last charge step found")
            if _first_step_c is not None:
                _first = _first_step_c.iat[0]
                _first_step_c += prev_end
                _new_first =
_first_step_c.iat[0] else: self.logger.debug("probably empty (_first_step_c is None)") # self.logger.debug(f"current shifts used: prev_end = {prev_end}") # self.logger.debug(f"shifting start from {_first} to " # f"{_new_first}") prev_end = np.amin(_last_step_c) # should change amin to last point elif method == "forth": _last = np.amax(_first_step_c) # should change amax to last point if _last_step_c is not None: _last_step_c += _last + prev_end else: self.logger.debug("no last charge step found") if _first_step_c is not None: _first_step_c += prev_end else: self.logger.debug("no first charge step found") prev_end = np.amax(_last_step_c) # should change amin to last point elif method == "forth-and-forth": if _last_step_c is not None: _last_step_c += shift else: self.logger.debug("no last charge step found") if _first_step_c is not None: _first_step_c += shift else: self.logger.debug("no first charge step found") if return_dataframe: try: _first_df = pd.DataFrame( { "voltage": _first_step_v.values, "capacity": _first_step_c.values } ) if interpolated: _first_df = _interpolate_df_col( _first_df, y="capacity", x="voltage", dx=dx, number_of_points=number_of_points, direction=-1 ) if categorical_column: _first_df["direction"] = -1 _last_df = pd.DataFrame( { "voltage": _last_step_v.values, "capacity": _last_step_c.values } ) if interpolated: _last_df = _interpolate_df_col( _last_df, y="capacity", x="voltage", dx=dx, number_of_points=number_of_points, direction=1 ) if categorical_column: _last_df["direction"] = 1 except AttributeError: self.logger.info(f"could not extract cycle {current_cycle}") else: c = pd.concat([_first_df, _last_df], axis=0) if label_cycle_number: c.insert(0, "cycle", current_cycle) # c["cycle"] = current_cycle # c = c[["cycle", "voltage", "capacity", "direction"]] if cycle_df.empty: cycle_df = c else: cycle_df = pd.concat([cycle_df, c], axis=0) else: logging.warning("returning non-dataframe") c = pd.concat([_first_step_c, _last_step_c], axis=0) v = pd.concat([_first_step_v, _last_step_v], axis=0) capacity = pd.concat([capacity, c], axis=0) voltage = pd.concat([voltage, v], axis=0) if return_dataframe: return cycle_df else: return capacity, voltage
Gets the capacity for the run. For cycle=None: iterates through all the cycles. Args: cycle (int): cycle number. method (string): how the curves are given "back-and-forth" - standard back and forth; discharge (or charge) reversed from where charge (or discharge) ends. "forth" - discharge (or charge) continues along x-axis. "forth-and-forth" - discharge (or charge) also starts at 0 (or shift if not shift=0.0) shift: start-value for charge (or discharge) (typically used when plotting shifted-capacity). categorical_column: add a categorical column showing if it is charge or discharge. dataset_number (int): test number (default first) (usually not used). label_cycle_number (bool): add column for cycle number (tidy format). split (bool): return a list of c and v instead of the default that is to return them combined in a DataFrame. This is only possible for some specific combinations of options (neither categorical_column=True nor label_cycle_number=True is allowed). interpolated (bool): set to True if you would like to get interpolated data (typically if you want to save disk space or memory). Defaults to False. dx (float): the step used when interpolating. number_of_points (int): number of points to use (over-rides dx) for interpolation (i.e. the length of the interpolated data). dynamic: for dynamic retrieving data from cellpy-file. [NOT IMPLEMENTED YET] Returns: pandas.DataFrame ((cycle) voltage, capacity, (direction (-1, 1))) unless split is explicitly set to True. Then it returns a tuple with capacity (mAh/g) and voltage.
entailment
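A hypothetical call showing the tidy-format options: one DataFrame with cycle labels, a charge/discharge direction column, and interpolated curves of fixed length:

df = cell.get_cap(label_cycle_number=True, categorical_column=True,
                  interpolated=True, number_of_points=200)
# df columns: cycle, voltage, capacity, direction (-1 or 1)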
def get_ocv(self, cycles=None, direction="up", remove_first=False,
            interpolated=False, dx=None, number_of_points=None):
    """get the open circuit voltage relaxation curves.

    Args:
        cycles (list of ints or None): the cycles to extract from
            (selects all if not given).
        direction ("up", "down", or "both"): extract only relaxations
            that are performed during discharge for "up" (because then
            the voltage relaxes upwards) etc.
        remove_first: remove the first relaxation curve (typically, the
            first curve is from the initial rest period between
            assembling the cell and the start of the actual
            testing/cycling)
        interpolated (bool): set to True if you want the data to be
            interpolated (e.g. for creating smaller files)
        dx (float): the step used when interpolating.
        number_of_points (int): number of points to use (over-rides dx)
            for interpolation (i.e. the length of the interpolated data).

    Returns:
        A pandas.DataFrame with cycle-number, step-number, step-time, and
        voltage columns.
    """
    if cycles is None:
        cycles = self.get_cycle_numbers()
    else:
        if not isinstance(cycles, (list, tuple)):
            cycles = [cycles, ]
        else:
            remove_first = False

    ocv_rlx_id = "ocvrlx"
    if direction == "up":
        ocv_rlx_id += "_up"
    elif direction == "down":
        ocv_rlx_id += "_down"

    step_table = self.dataset.step_table
    dfdata = self.dataset.dfdata

    ocv_steps = step_table.loc[
        step_table["cycle"].isin(cycles), :
    ]
    ocv_steps = ocv_steps.loc[
        ocv_steps.type.str.startswith(ocv_rlx_id), :
    ]

    if remove_first:
        ocv_steps = ocv_steps.iloc[1:, :]

    step_time_label = self.headers_normal.step_time_txt
    voltage_label = self.headers_normal.voltage_txt
    cycle_label = self.headers_normal.cycle_index_txt
    step_label = self.headers_normal.step_index_txt

    selected_df = dfdata.where(
        dfdata[cycle_label].isin(ocv_steps.cycle) &
        dfdata[step_label].isin(ocv_steps.step)
    ).dropna()

    selected_df = selected_df.loc[
        :, [cycle_label, step_label, step_time_label, voltage_label]
    ]

    if interpolated:
        if dx is None and number_of_points is None:
            dx = prms.Reader.time_interpolation_step
        new_dfs = list()
        groupby_list = [cycle_label, step_label]

        for name, group in selected_df.groupby(groupby_list):
            new_group = _interpolate_df_col(
                group, x=step_time_label, y=voltage_label, dx=dx,
                number_of_points=number_of_points,
            )
            for i, j in zip(groupby_list, name):
                new_group[i] = j
            new_dfs.append(new_group)

        selected_df = pd.concat(new_dfs)

    return selected_df
get the open circuit voltage relaxation curves. Args: cycles (list of ints or None): the cycles to extract from (selects all if not given). direction ("up", "down", or "both"): extract only relaxations that are performed during discharge for "up" (because then the voltage relaxes upwards) etc. remove_first: remove the first relaxation curve (typically, the first curve is from the initial rest period between assembling the cell and the start of the actual testing/cycling) interpolated (bool): set to True if you want the data to be interpolated (e.g. for creating smaller files) dx (float): the step used when interpolating. number_of_points (int): number of points to use (over-rides dx) for interpolation (i.e. the length of the interpolated data). Returns: A pandas.DataFrame with cycle-number, step-number, step-time, and voltage columns.
entailment
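Illustrative usage of the relaxation extractor above (the `cell` instance is assumed), dropping the initial rest period and down-sampling each curve:

ocv = cell.get_ocv(direction="up", remove_first=True,
                   interpolated=True, number_of_points=100)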
def get_ocv_old(self, cycle_number=None, ocv_type='ocv',
                dataset_number=None):
    """Find ocv data in DataSet (voltage vs time).

    Args:
        cycle_number (int): find for all cycles if None.
        ocv_type ("ocv", "ocvrlx_up", "ocvrlx_down"):
            ocv - get up and down (default)
            ocvrlx_up - get up
            ocvrlx_down - get down
        dataset_number (int): test number (default first)
            (usually not used).

    Returns:
        if cycle_number is not None
            ocv or [ocv_up, ocv_down]
            ocv (and ocv_up and ocv_down) are lists containing
            [time, voltage] (that are Series)
        if cycle_number is None
            [ocv1, ocv2, ..., ocvN, ...] N = cycle
            ocvN = pandas DataFrame containing the columns cycle index,
            step time, step index, data point, datetime, voltage
            (TODO: check if copy or reference of dfdata is returned)
    """
    # function for getting ocv curves
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    if ocv_type in ['ocvrlx_up', 'ocvrlx_down']:
        ocv = self._get_ocv(dataset_number=dataset_number,
                            ocv_type=ocv_type,
                            select_last=True,
                            select_columns=True,
                            cycle_number=cycle_number,
                            )
        return ocv
    else:
        ocv_up = self._get_ocv(dataset_number=dataset_number,
                               ocv_type='ocvrlx_up',
                               select_last=True,
                               select_columns=True,
                               cycle_number=cycle_number,
                               )
        ocv_down = self._get_ocv(dataset_number=dataset_number,
                                 ocv_type='ocvrlx_down',
                                 select_last=True,
                                 select_columns=True,
                                 cycle_number=cycle_number,
                                 )
        return ocv_up, ocv_down
Find ocv data in DataSet (voltage vs time). Args: cycle_number (int): find for all cycles if None. ocv_type ("ocv", "ocvrlx_up", "ocvrlx_down"): ocv - get up and down (default) ocvrlx_up - get up ocvrlx_down - get down dataset_number (int): test number (default first) (usually not used). Returns: if cycle_number is not None ocv or [ocv_up, ocv_down] ocv (and ocv_up and ocv_down) are lists containing [time, voltage] (that are Series) if cycle_number is None [ocv1, ocv2, ..., ocvN, ...] N = cycle ocvN = pandas DataFrame containing the columns cycle index, step time, step index, data point, datetime, voltage (TODO: check if copy or reference of dfdata is returned)
entailment
def get_number_of_cycles(self, dataset_number=None, steptable=None): """Get the number of cycles in the test.""" if steptable is None: dataset_number = self._validate_dataset_number(dataset_number) if dataset_number is None: self._report_empty_dataset() return d = self.datasets[dataset_number].dfdata no_cycles = np.amax(d[self.headers_normal.cycle_index_txt]) else: no_cycles = np.amax(steptable[self.headers_step_table.cycle]) return no_cycles
Get the number of cycles in the test.
entailment
def get_cycle_numbers(self, dataset_number=None, steptable=None): """Get a list containing all the cycle numbers in the test.""" if steptable is None: dataset_number = self._validate_dataset_number(dataset_number) if dataset_number is None: self._report_empty_dataset() return d = self.datasets[dataset_number].dfdata cycles = np.unique(d[self.headers_normal.cycle_index_txt]) else: cycles = np.unique(steptable[self.headers_step_table.cycle]) return cycles
Get a list containing all the cycle numbers in the test.
entailment
def get_converter_to_specific(self, dataset=None, mass=None,
                              to_unit=None, from_unit=None):
    """get the conversion values

    Args:
        dataset: DataSet object
        mass: mass of electrode (for example active material in mg)
        to_unit: (float) unit of output, f.ex. if unit of charge
            is mAh and unit of mass is g, then to_unit for charge/mass
            will be 0.001 / 1.0 = 0.001
        from_unit: (float) unit of input, f.ex. if unit of charge
            is mAh and unit of mass is g, then from_unit for charge/mass
            will be 1.0 / 0.001 = 1000.0

    Returns:
        multiplier (float) from_unit / to_unit / mass
    """
    if not dataset:
        dataset_number = self._validate_dataset_number(None)
        if dataset_number is None:
            self._report_empty_dataset()
            return
        dataset = self.datasets[dataset_number]

    if not mass:
        mass = dataset.mass

    if not to_unit:
        to_unit_cap = self.cellpy_units["charge"]
        to_unit_mass = self.cellpy_units["specific"]
        to_unit = to_unit_cap / to_unit_mass

    if not from_unit:
        from_unit_cap = self.raw_units["charge"]
        from_unit_mass = self.raw_units["mass"]
        from_unit = from_unit_cap / from_unit_mass

    return from_unit / to_unit / mass
get the conversion values Args: dataset: DataSet object mass: mass of electrode (for example active material in mg) to_unit: (float) unit of output, f.ex. if unit of charge is mAh and unit of mass is g, then to_unit for charge/mass will be 0.001 / 1.0 = 0.001 from_unit: (float) unit of input, f.ex. if unit of charge is mAh and unit of mass is g, then from_unit for charge/mass will be 1.0 / 0.001 = 1000.0 Returns: multiplier (float) from_unit / to_unit / mass
entailment
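A sketch of how the returned multiplier would be applied; the numbers and the variable raw_charge are illustrative (the actual unit values come from the raw_units and cellpy_units settings):

# with from_unit = 1000.0, to_unit = 1.0 and mass = 0.5, the
# multiplier would be 1000.0 / 1.0 / 0.5 = 2000.0
f = cell.get_converter_to_specific()
# raw_charge is a hypothetical pandas.Series of raw charge values
specific_charge = raw_charge * f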
def set_mass(self, masses, dataset_number=None, validated=None): """Sets the mass (masses) for the test (datasets). """ self._set_run_attribute("mass", masses, dataset_number=dataset_number, validated=validated)
Sets the mass (masses) for the test (datasets).
entailment
def set_tot_mass(self, masses, dataset_number=None, validated=None):
    """Sets the total mass (tot_mass) for the test (datasets).
    """
    self._set_run_attribute("tot_mass", masses,
                            dataset_number=dataset_number,
                            validated=validated)
Sets the total mass (tot_mass) for the test (datasets).
entailment
def set_nom_cap(self, nom_caps, dataset_number=None, validated=None):
    """Sets the nominal capacity (nom_cap) for the test (datasets).
    """
    self._set_run_attribute("nom_cap", nom_caps,
                            dataset_number=dataset_number,
                            validated=validated)
Sets the nominal capacity (nom_cap) for the test (datasets).
entailment
def set_col_first(df, col_names):
    """set selected columns first in a pandas.DataFrame.

    This function sets cols with names given in col_names (a list) first in
    the DataFrame. The last col in col_name will come first (processed last)
    """
    column_headings = df.columns.tolist()
    try:
        for col_name in col_names:
            column_headings.remove(col_name)
            column_headings.insert(0, col_name)
    finally:
        df = df.reindex(columns=column_headings)
    return df
set selected columns first in a pandas.DataFrame. This function sets cols with names given in col_names (a list) first in the DataFrame. The last col in col_name will come first (processed last)
entailment
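A self-contained example of the reordering; note that the last name in col_names ends up first:

import pandas as pd

df = pd.DataFrame({"a": [1], "b": [2], "c": [3]})
df = set_col_first(df, ["c", "b"])
print(df.columns.tolist())   # ['b', 'c', 'a']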
def get_summary(self, dataset_number=None, use_dfsummary_made=False):
    """Retrieve summary returned as a pandas DataFrame."""
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return None

    test = self.get_dataset(dataset_number)

    # This is a bit convoluted; in the old days, we used an attribute
    # called dfsummary_made that was set to True when the summary was
    # made successfully. It is most likely not used anymore and will
    # probably be deleted.
    if use_dfsummary_made:
        dfsummary_made = test.dfsummary_made
    else:
        dfsummary_made = True

    if not dfsummary_made:
        warnings.warn("Summary is not made yet")
        return None
    else:
        self.logger.info("returning datasets[test_no].dfsummary")
        return test.dfsummary
Retrieve summary returned as a pandas DataFrame.
entailment
def make_summary(self, find_ocv=False, find_ir=False, find_end_voltage=False, use_cellpy_stat_file=None, all_tests=True, dataset_number=0, ensure_step_table=True, convert_date=False): """Convenience function that makes a summary of the cycling data.""" # first - check if we need some "instrument-specific" prms if self.tester == "arbin": convert_date = True if ensure_step_table is None: ensure_step_table = self.ensure_step_table # Cycle_Index Test_Time(s) Test_Time(h) Date_Time Current(A) # Current(mA) Voltage(V) Charge_Capacity(Ah) Discharge_Capacity(Ah) # Charge_Energy(Wh) Discharge_Energy(Wh) Internal_Resistance(Ohm) # AC_Impedance(Ohm) ACI_Phase_Angle(Deg) Charge_Time(s) # DisCharge_Time(s) Vmax_On_Cycle(V) Coulombic_Efficiency if use_cellpy_stat_file is None: use_cellpy_stat_file = prms.Reader.use_cellpy_stat_file self.logger.debug("using use_cellpy_stat_file from prms") self.logger.debug(f"use_cellpy_stat_file: {use_cellpy_stat_file}") if all_tests is True: for j in range(len(self.datasets)): txt = "creating summary for file " test = self.datasets[j] if not self._is_not_empty_dataset(test): self.logger.info("empty test %i" % j) return if isinstance(test.loaded_from, (list, tuple)): for f in test.loaded_from: txt += f txt += "\n" else: txt += str(test.loaded_from) if not test.mass_given: txt += " mass for test %i is not given" % j txt += " setting it to %f mg" % test.mass self.logger.debug(txt) self._make_summary(j, find_ocv=find_ocv, find_ir=find_ir, find_end_voltage=find_end_voltage, use_cellpy_stat_file=use_cellpy_stat_file, ensure_step_table=ensure_step_table, convert_date=convert_date, ) else: self.logger.debug("creating summary for only one test") dataset_number = self._validate_dataset_number(dataset_number) if dataset_number is None: self._report_empty_dataset() return self._make_summary(dataset_number, find_ocv=find_ocv, find_ir=find_ir, find_end_voltage=find_end_voltage, use_cellpy_stat_file=use_cellpy_stat_file, ensure_step_table=ensure_step_table, convert_date=convert_date, ) return self
Convenience function that makes a summary of the cycling data.
entailment
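Hypothetical end-to-end use of the convenience function above together with get_summary(); the `cell` instance is assumed:

cell.make_summary(find_ir=True, find_end_voltage=True)
summary = cell.get_summary()
print(summary.head())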
def single_html(epub_file_path, html_out=sys.stdout, mathjax_version=None, numchapters=None, includes=None): """Generate complete book HTML.""" epub = cnxepub.EPUB.from_file(epub_file_path) if len(epub) != 1: raise Exception('Expecting an epub with one book') package = epub[0] binder = cnxepub.adapt_package(package) partcount.update({}.fromkeys(parts, 0)) partcount['book'] += 1 html = cnxepub.SingleHTMLFormatter(binder, includes=includes) # Truncate binder to the first N chapters where N = numchapters. logger.debug('Full binder: {}'.format(cnxepub.model_to_tree(binder))) if numchapters is not None: apply_numchapters(html.get_node_type, binder, numchapters) logger.debug('Truncated Binder: {}'.format( cnxepub.model_to_tree(binder))) # Add mathjax to the page. if mathjax_version: etree.SubElement( html.head, 'script', src=MATHJAX_URL.format(mathjax_version=mathjax_version)) print(str(html), file=html_out) if hasattr(html_out, 'name'): # html_out is a file, close after writing html_out.close()
Generate complete book HTML.
entailment
def _pack_prms(): """if you introduce new 'save-able' parameter dictionaries, then you have to include them here""" config_dict = { "Paths": prms.Paths.to_dict(), "FileNames": prms.FileNames.to_dict(), "Db": prms.Db.to_dict(), "DbCols": prms.DbCols.to_dict(), "DataSet": prms.DataSet.to_dict(), "Reader": prms.Reader.to_dict(), "Instruments": prms.Instruments.to_dict(), # "excel_db_cols": prms.excel_db_cols.to_dict(), # "excel_db_filename_cols": prms.excel_db_filename_cols.to_dict(), "Batch": prms.Batch.to_dict(), } return config_dict
if you introduce new 'save-able' parameter dictionaries, then you have to include them here
entailment
def _read_prm_file(prm_filename):
    """read the prm file"""
    logger.debug("Reading config-file: %s" % prm_filename)
    try:
        with open(prm_filename, "r") as config_file:
            # an explicit (safe) loader is required by newer PyYAML versions
            prm_dict = yaml.load(config_file, Loader=yaml.SafeLoader)
    except yaml.YAMLError:
        raise ConfigFileNotRead
    else:
        _update_prms(prm_dict)
read the prm file
entailment
def _get_prm_file(file_name=None, search_order=None):
    """returns name of the prm file"""
    if file_name is not None:
        if os.path.isfile(file_name):
            return file_name
        else:
            logger.info("Could not find the prm-file")

    default_name = prms._prm_default_name
    prm_globtxt = prms._prm_globtxt
    script_dir = os.path.abspath(os.path.dirname(__file__))

    search_path = dict()
    search_path["curdir"] = os.path.abspath(os.path.dirname(sys.argv[0]))
    search_path["filedir"] = script_dir
    search_path["userdir"] = os.path.expanduser("~")

    if search_order is None:
        search_order = ["userdir", ]  # ["curdir", "filedir", "userdir"]

    # The default name for the prm file is at the moment in the script-dir,
    # while default searching is in the userdir (yes, I know):
    prm_default = os.path.join(script_dir, default_name)

    # -searching-----------------------
    search_dict = OrderedDict()

    for key in search_order:
        search_dict[key] = [None, None]
        prm_directory = search_path[key]
        default_file = os.path.join(prm_directory, default_name)

        if os.path.isfile(default_file):
            # noinspection PyTypeChecker
            search_dict[key][0] = default_file

        prm_globtxt_full = os.path.join(prm_directory, prm_globtxt)

        user_files = glob.glob(prm_globtxt_full)

        for f in user_files:
            if os.path.basename(f) != os.path.basename(default_file):
                search_dict[key][1] = f
                break

    # -selecting----------------------
    prm_file = None
    for key, file_list in search_dict.items():
        if file_list[-1]:
            prm_file = file_list[-1]
            break
        else:
            if not prm_file:
                prm_file = file_list[0]

    if prm_file:
        prm_filename = prm_file
    else:
        prm_filename = prm_default

    return prm_filename
returns name of the prm file
entailment
def info(): """this function will show only the 'box'-type attributes and their content in the cellpy.prms module""" print("convenience function for listing prms") print(type(prms)) print(prms.__name__) print(f"prm file: {_get_prm_file()}") for key in prms.__dict__: if isinstance(prms.__dict__[key], box.Box): print() print(80 * "=") print(f"prms.{key}:") print(80 * "-") for subkey in prms.__dict__[key]: print( f"prms.{key}.{subkey} = ", f"{prms.__dict__[key][subkey]}" ) print(80 * "=")
this function will show only the 'box'-type attributes and their content in the cellpy.prms module
entailment
def _replace_tex_math(node, mml_url, mc_client=None, retry=0): """call mml-api service to replace TeX math in body of node with mathml""" math = node.attrib['data-math'] or node.text if math is None: return None eq = {} if mc_client: math_key = hashlib.md5(math.encode('utf-8')).hexdigest() eq = json.loads(mc_client.get(math_key) or '{}') if not eq: res = requests.post(mml_url, {'math': math.encode('utf-8'), 'mathType': 'TeX', 'mml': 'true'}) if res: # Non-error response from requests eq = res.json() if mc_client: mc_client.set(math_key, res.text) if 'components' in eq and len(eq['components']) > 0: for component in eq['components']: if component['format'] == 'mml': mml = etree.fromstring(component['source']) if node.tag.endswith('span'): mml.set('display', 'inline') elif node.tag.endswith('div'): mml.set('display', 'block') mml.tail = node.tail return mml else: logger.warning('Retrying math TeX conversion: ' '{}'.format(json.dumps(eq, indent=4))) retry += 1 if retry < 2: return _replace_tex_math(node, mml_url, mc_client, retry) return None
call mml-api service to replace TeX math in body of node with mathml
entailment
def exercise_callback_factory(match, url_template, mc_client=None, token=None, mml_url=None): """Create a callback function to replace an exercise by fetching from a server.""" def _replace_exercises(elem): item_code = elem.get('href')[len(match):] url = url_template.format(itemCode=item_code) exercise = {} if mc_client: mc_key = item_code + (token or '') exercise = json.loads(mc_client.get(mc_key) or '{}') if not exercise: if token: headers = {'Authorization': 'Bearer {}'.format(token)} res = requests.get(url, headers=headers) else: res = requests.get(url) if res: # grab the json exercise, run it through Jinja2 template, # replace element w/ it exercise = res.json() if mc_client: mc_client.set(mc_key, res.text) if exercise['total_count'] == 0: logger.warning('MISSING EXERCISE: {}'.format(url)) XHTML = '{{{}}}'.format(HTML_DOCUMENT_NAMESPACES['xhtml']) missing = etree.Element(XHTML + 'div', {'class': 'missing-exercise'}, nsmap=HTML_DOCUMENT_NAMESPACES) missing.text = 'MISSING EXERCISE: tag:{}'.format(item_code) nodes = [missing] else: html = EXERCISE_TEMPLATE.render(data=exercise) try: nodes = etree.fromstring('<div>{}</div>'.format(html)) except etree.XMLSyntaxError: # Probably HTML nodes = etree.HTML(html)[0] # body node if mml_url: for node in nodes.xpath('//*[@data-math]'): mathml = _replace_tex_math(node, mml_url, mc_client) if mathml is not None: mparent = node.getparent() mparent.replace(node, mathml) else: mathtext = node.get('data-math') or node.text or '' logger.warning('BAD TEX CONVERSION: "%s" URL: %s' % (mathtext.encode('utf-8'), url)) parent = elem.getparent() if etree.QName(parent.tag).localname == 'p': elem = parent parent = elem.getparent() parent.remove(elem) # Special case - assumes single wrapper elem for child in nodes: parent.append(child) xpath = '//xhtml:a[contains(@href, "{}")]'.format(match) return (xpath, _replace_exercises)
Create a callback function to replace an exercise by fetching from a server.
entailment
def html_listify(tree, root_xl_element, extensions, list_type='ol'):
    """Convert a node tree into an xhtml nested list-of-lists.

    This will create 'li' elements under the root_xl_element, with
    additional sublists of the type passed as list_type. The contents of
    each li depend on the extensions dictionary: the keys of this
    dictionary are the ids of tree elements that are represented by files
    in the epub, with the associated filename extension as the value.
    Those nodes will be rendered as links to the reassembled filename:
    i.e. id='abc-2345-54e4' {'abc-2345-54e4': '.xhtml'} ->
    abc-2345-54e4.xhtml
    Other nodes will render as spans.
    If the node has id or short id values, the associated li will be
    populated with cnx-archive-uri and cnx-archive-shortid attributes,
    respectively"""
    for node in tree:
        li_elm = etree.SubElement(root_xl_element, 'li')
        if node['id'] not in extensions:  # no extension, no associated file
            span_elm = lxml.html.fragment_fromstring(
                node['title'], create_parent='span')
            li_elm.append(span_elm)
        else:
            a_elm = lxml.html.fragment_fromstring(
                node['title'], create_parent='a')
            a_elm.set('href', ''.join([node['id'],
                                       extensions[node['id']]]))
            li_elm.append(a_elm)
        if node['id'] is not None and node['id'] != 'subcol':
            li_elm.set('cnx-archive-uri', node['id'])
        if node['shortId'] is not None:
            li_elm.set('cnx-archive-shortid', node['shortId'])

        if 'contents' in node:
            elm = etree.SubElement(li_elm, list_type)
            html_listify(node['contents'], elm, extensions)
Convert a node tree into an xhtml nested list-of-lists. This will create 'li' elements under the root_xl_element, with additional sublists of the type passed as list_type. The contents of each li depend on the extensions dictionary: the keys of this dictionary are the ids of tree elements that are represented by files in the epub, with the associated filename extension as the value. Those nodes will be rendered as links to the reassembled filename: i.e. id='abc-2345-54e4' {'abc-2345-54e4': '.xhtml'} -> abc-2345-54e4.xhtml Other nodes will render as spans. If the node has id or short id values, the associated li will be populated with cnx-archive-uri and cnx-archive-shortid attributes, respectively
entailment
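A minimal sketch of feeding html_listify a one-node tree; all values are illustrative, and the extension value includes the leading dot, matching the plain string concatenation in the code:

from lxml import etree

tree = [{"id": "abc-2345-54e4", "shortId": None, "title": "Intro"}]
root = etree.Element("ol")
html_listify(tree, root, extensions={"abc-2345-54e4": ".xhtml"})
print(etree.tostring(root).decode())
# renders an <ol> with one <li> linking to abc-2345-54e4.xhtml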
def _generate_ids(self, document, content): """Generate unique ids for html elements in page content so that it's possible to link to them. """ existing_ids = content.xpath('//*/@id') elements = [ 'p', 'dl', 'dt', 'dd', 'table', 'div', 'section', 'figure', 'blockquote', 'q', 'code', 'pre', 'object', 'img', 'audio', 'video', ] elements_xpath = '|'.join(['.//{}|.//xhtml:{}'.format(elem, elem) for elem in elements]) data_types = [ 'equation', 'list', 'exercise', 'rule', 'example', 'note', 'footnote-number', 'footnote-ref', 'problem', 'solution', 'media', 'proof', 'statement', 'commentary' ] data_types_xpath = '|'.join(['.//*[@data-type="{}"]'.format(data_type) for data_type in data_types]) xpath = '|'.join([elements_xpath, data_types_xpath]) mapping = {} # old id -> new id for node in content.xpath(xpath, namespaces=HTML_DOCUMENT_NAMESPACES): old_id = node.attrib.get('id') document_id = document.id.replace('_', '') if old_id: new_id = 'auto_{}_{}'.format(document_id, old_id) else: random_number = random.randint(0, 100000) new_id = 'auto_{}_{}'.format(document_id, random_number) while new_id in existing_ids: random_number = random.randint(0, 100000) new_id = 'auto_{}_{}'.format(document_id, random_number) node.attrib['id'] = new_id if old_id: mapping[old_id] = new_id existing_ids.append(new_id) for a in content.xpath('//a[@href]|//xhtml:a[@href]', namespaces=HTML_DOCUMENT_NAMESPACES): href = a.attrib['href'] if href.startswith('#') and href[1:] in mapping: a.attrib['href'] = '#{}'.format(mapping[href[1:]])
Generate unique ids for html elements in page content so that it's possible to link to them.
entailment
def get_node_type(self, node, parent=None):
    """If node is a document, the type is page.
    If node is a binder with no parent, the type is book.
    If node is a translucent binder, the type is either chapter
    (contains only pages) or unit (contains at least one translucent
    binder).
    """
    if isinstance(node, CompositeDocument):
        return 'composite-page'
    elif isinstance(node, (Document, DocumentPointer)):
        return 'page'
    elif isinstance(node, Binder) and parent is None:
        return 'book'
    for child in node:
        if isinstance(child, TranslucentBinder):
            return 'unit'
    return 'chapter'
If node is a document, the type is page. If node is a binder with no parent, the type is book. If node is a translucent binder, the type is either chapter (contains only pages) or unit (contains at least one translucent binder).
entailment
def pack_epub(directory, file): """Pack the given ``directory`` into an epub (i.e. zip) archive given as ``file``, which can be a file-path or file-like object. """ with zipfile.ZipFile(file, 'w', zipfile.ZIP_DEFLATED) as zippy: base_path = os.path.abspath(directory) for root, dirs, filenames in os.walk(directory): # Strip the absolute path archive_path = os.path.relpath(root, base_path) for filename in filenames: filepath = os.path.join(root, filename) archival_filepath = os.path.join(archive_path, filename) zippy.write(filepath, archival_filepath)
Pack the given ``directory`` into an epub (i.e. zip) archive given as ``file``, which can be a file-path or file-like object.
entailment
def unpack_epub(file, directory): """Unpack the given ``file`` (a file-path or file-like object) to the given ``directory``. """ if zipfile.is_zipfile(file): # Extract the epub to the current working directory. with zipfile.ZipFile(file, 'r') as zf: zf.extractall(path=directory)
Unpack the given ``file`` (a file-path or file-like object) to the given ``directory``.
entailment
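The two helpers above are inverses of each other; an illustrative round trip (the paths are made up):

pack_epub("my_book_dir", "book.epub")     # directory -> zip archive
unpack_epub("book.epub", "unpacked_dir")  # zip archive -> directory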
def from_file(cls, file):
    """Create the object from a *file* or *file-like object*.

    The file can point to an ``.epub`` file or a directory (the contents
    of which reflect the internal structure of an ``.epub`` archive).
    If given a non-archive file, this structure will be used when
    reading in and parsing the epub. If an archive file is given,
    it will be extracted to a temporary directory on the filesystem.
    """
    root = None
    if zipfile.is_zipfile(file):
        unpack_dir = tempfile.mkdtemp('-epub')
        # Extract the epub to the temporary directory.
        with zipfile.ZipFile(file, 'r') as zf:
            zf.extractall(path=unpack_dir)
        root = unpack_dir
    elif os.path.isdir(file):
        root = file
    else:
        raise TypeError("Can't decipher what should be done "
                        "with the given file.")

    # NOTE We ignore the mimetype file, as it's not extremely important
    #      to anything done here.

    # Build a blank epub object then parse the packages.
    container_xml_filepath = os.path.join(root,
                                          EPUB_CONTAINER_XML_RELATIVE_PATH)
    container_xml = etree.parse(container_xml_filepath)

    packages = []
    for pkg_filepath in container_xml.xpath(
            '//ns:rootfile/@full-path',
            namespaces=EPUB_CONTAINER_XML_NAMESPACES):
        filepath = os.path.join(root, pkg_filepath)
        packages.append(Package.from_file(filepath))
    return cls(packages=packages, root=root)
Create the object from a *file* or *file-like object*. The file can point to an ``.epub`` file or a directory (the contents of which reflect the internal structure of an ``.epub`` archive). If given a non-archive file, this structure will be used when reading in and parsing the epub. If an archive file is given, it will be extracted to a temporary directory on the filesystem.
entailment
def to_file(epub, file): """Export to ``file``, which is a *file* or *file-like object*.""" directory = tempfile.mkdtemp('-epub') # Write out the contents to the filesystem. package_filenames = [] for package in epub: opf_filepath = Package.to_file(package, directory) opf_filename = os.path.basename(opf_filepath) package_filenames.append(opf_filename) # Create the container.xml container_xml_filepath = os.path.join(directory, EPUB_CONTAINER_XML_RELATIVE_PATH) template = jinja2.Template(CONTAINER_XML_TEMPLATE, trim_blocks=True, lstrip_blocks=True) os.makedirs(os.path.dirname(container_xml_filepath)) # FIXME PY3 with open(container_xml_filepath, 'w') as fb: xml = template.render(package_filenames=package_filenames) fb.write(xml) # Write the mimetype file. with open(os.path.join(directory, 'mimetype'), 'w') as fb: fb.write("application/epub+zip") # Pack everything up pack_epub(directory, file=file)
Export to ``file``, which is a *file* or *file-like object*.
entailment
def from_file(cls, file): """Create the object from a *file* or *file-like object*.""" opf_xml = etree.parse(file) # Check if ``file`` is file-like. if hasattr(file, 'read'): name = os.path.basename(file.name) root = os.path.abspath(os.path.dirname(file.name)) else: # ...a filepath name = os.path.basename(file) root = os.path.abspath(os.path.dirname(file)) parser = OPFParser(opf_xml) # Roll through the item entries manifest = opf_xml.xpath('/opf:package/opf:manifest/opf:item', namespaces=EPUB_OPF_NAMESPACES) pkg_items = [] for item in manifest: absolute_filepath = os.path.join(root, item.get('href')) properties = item.get('properties', '').split() is_navigation = 'nav' in properties media_type = item.get('media-type') pkg_items.append(Item.from_file(absolute_filepath, media_type=media_type, is_navigation=is_navigation, properties=properties)) # Ignore spine ordering, because it is not important # for our use cases. return cls(name, pkg_items, parser.metadata)
Create the object from a *file* or *file-like object*.
entailment
def to_file(package, directory): """Write the package to the given ``directory``. Returns the OPF filename. """ opf_filepath = os.path.join(directory, package.name) # Create the directory structure for name in ('contents', 'resources',): path = os.path.join(directory, name) if not os.path.exists(path): os.mkdir(path) # Write the items to the filesystem locations = {} # Used when rendering for item in package: if item.media_type == 'application/xhtml+xml': base = os.path.join(directory, 'contents') else: base = os.path.join(directory, 'resources') filename = item.name filepath = os.path.join(base, filename) locations[item] = os.path.relpath(filepath, directory) with open(filepath, 'wb') as item_file: item_file.write(item.data.read()) # Write the OPF template = jinja2.Template(OPF_TEMPLATE, trim_blocks=True, lstrip_blocks=True) with open(opf_filepath, 'wb') as opf_file: opf = template.render(package=package, locations=locations) if not isinstance(opf, bytes): opf = opf.encode('utf-8') opf_file.write(opf) return opf_filepath
Write the package to the given ``directory``. Returns the OPF filename.
entailment
def from_file(self, file_name=None): """Loads a DataFrame with all the needed info about the experiment""" file_name = self._check_file_name(file_name) with open(file_name, 'r') as infile: top_level_dict = json.load(infile) pages_dict = top_level_dict['info_df'] pages = pd.DataFrame(pages_dict) self.pages = pages self.file_name = file_name self._prm_packer(top_level_dict['metadata']) self.generate_folder_names() self.paginate()
Loads a DataFrame with all the needed info about the experiment
entailment
def to_file(self, file_name=None): """Saves a DataFrame with all the needed info about the experiment""" file_name = self._check_file_name(file_name) pages = self.pages top_level_dict = { 'info_df': pages, 'metadata': self._prm_packer() } jason_string = json.dumps( top_level_dict, default=lambda info_df: json.loads( info_df.to_json() ) ) self.paginate() with open(file_name, 'w') as outfile: outfile.write(jason_string) self.file_name = file_name logging.info("Saved file to {}".format(file_name))
Saves a DataFrame with all the needed info about the experiment
entailment
def generate_folder_names(self): """Set appropriate folder names.""" self.project_dir = os.path.join(prms.Paths.outdatadir, self.project) self.batch_dir = os.path.join(self.project_dir, self.name) self.raw_dir = os.path.join(self.batch_dir, "raw_data")
Set appropriate folder names.
entailment
def paginate(self):
    """Make folders where we would like to put results etc."""
    project_dir = self.project_dir
    raw_dir = self.raw_dir
    batch_dir = self.batch_dir

    if project_dir is None:
        raise UnderDefined("no project directory defined")
    if raw_dir is None:
        raise UnderDefined("no raw directory defined")
    if batch_dir is None:
        raise UnderDefined("no batch directory defined")

    # create the folders
    if not os.path.isdir(project_dir):
        os.mkdir(project_dir)
        logging.info(f"created folder {project_dir}")
    if not os.path.isdir(batch_dir):
        os.mkdir(batch_dir)
        logging.info(f"created folder {batch_dir}")
    if not os.path.isdir(raw_dir):
        os.mkdir(raw_dir)
        logging.info(f"created folder {raw_dir}")

    return project_dir, batch_dir, raw_dir
Make folders where we would like to put results etc.
entailment
def generate_file_name(self):
    """generate a suitable file name for the experiment"""
    if not self.project:
        raise UnderDefined("project name not given")

    out_data_dir = prms.Paths.outdatadir
    project_dir = os.path.join(out_data_dir, self.project)
    file_name = "cellpy_batch_%s.json" % self.name
    self.file_name = os.path.join(project_dir, file_name)
generate a suitable file name for the experiment
entailment
def info(self):
    """Delivers some info to you about the class."""
    print("Sorry, but I don't have much to share.")
    print("This is me:")
    print(self)
    print("And these are the experiments assigned to me:")
    print(self.experiments)
Delivers some info to you about the class.
entailment
def assign(self, experiment):
    """Assign an experiment."""
    self.experiments.append(experiment)
    self.farms.append(empty_farm)
Assign an experiment.
entailment
def model_to_tree(model, title=None, lucent_id=TRANSLUCENT_BINDER_ID):
    """Given a model, build the tree::

        {'id': <id>|'subcol', 'title': <title>, 'contents': [<tree>, ...]}

    """
    id = model.ident_hash
    if id is None and isinstance(model, TranslucentBinder):
        id = lucent_id
    md = model.metadata
    shortid = md.get('shortId', md.get('cnx-archive-shortid'))
    title = title is not None and title or md.get('title')
    tree = {'id': id, 'title': title, 'shortId': shortid}
    if hasattr(model, '__iter__'):
        contents = tree['contents'] = []
        for node in model:
            item = model_to_tree(node, model.get_title_for_node(node),
                                 lucent_id=lucent_id)
            contents.append(item)
    return tree
Given a model, build the tree:: {'id': <id>|'subcol', 'title': <title>, 'contents': [<tree>, ...]}
entailment
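A hedged sketch of the tree shape the function above produces, assuming cnx-epub style ``Binder``/``Document`` models; the constructor signatures shown are illustrative, not confirmed by the source:

import io

# Illustrative only: a one-page binder and its rendered tree.
binder = Binder('book@1', metadata={'title': 'Book'})
binder.append(Document('page@1', io.BytesIO(b'<html/>'),
                       metadata={'title': 'Page'}))
tree = model_to_tree(binder)
# Expected shape, given the ids and titles set above:
# {'id': 'book@1', 'title': 'Book', 'shortId': None,
#  'contents': [{'id': 'page@1', 'title': 'Page', 'shortId': None}]}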
def flatten_tree_to_ident_hashes(item_or_tree,
                                 lucent_id=TRANSLUCENT_BINDER_ID):
    """Flatten a tree to id and version values (ident_hash)."""
    if 'contents' in item_or_tree:
        tree = item_or_tree
        if tree['id'] != lucent_id:
            yield tree['id']
        for i in tree['contents']:
            # yield from flatten_tree_to_ident_hashes(i, lucent_id)
            for x in flatten_tree_to_ident_hashes(i, lucent_id):
                yield x
    else:
        item = item_or_tree
        yield item['id']
Flatten a tree to id and version values (ident_hash).
entailment
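Because the generator above walks plain dicts, it can be exercised without any model classes. This sketch assumes ``TRANSLUCENT_BINDER_ID`` is the string 'subcol', as hinted by the tree layout in ``model_to_tree``:

tree = {'id': 'col11406@1.7', 'contents': [
    {'id': 'm42119@1.3'},
    {'id': 'subcol', 'contents': [{'id': 'm42092@1.4'}]},
]}
print(list(flatten_tree_to_ident_hashes(tree)))
# -> ['col11406@1.7', 'm42119@1.3', 'm42092@1.4']
# The translucent ('subcol') id is skipped; its children are kept.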
def flatten_model(model):
    """Flatten a model to a list of models.

    This is used to flatten a ``Binder``'ish model down to a list
    of contained models.

    """
    yield model
    if isinstance(model, (TranslucentBinder, Binder,)):
        for m in model:
            # yield from flatten_model(m)
            for x in flatten_model(m):
                yield x
Flatten a model to a list of models. This is used to flatten a ``Binder``'ish model down to a list of contained models.
entailment
def flatten_to_documents(model, include_pointers=False):
    """Flatten the model to a list of documents (aka ``Document`` objects).

    This is to flatten a ``Binder``'ish model down to a list of documents.
    If ``include_pointers`` has been set to ``True``, ``DocumentPointers``
    will also be included in the results.

    """
    types = [Document]
    if include_pointers:
        types.append(DocumentPointer)
    types = tuple(types)

    def _filter(m):
        return isinstance(m, types)

    return flatten_to(model, _filter)
Flatten the model to a list of documents (aka ``Document`` objects). This is to flatten a ``Binder``'ish model down to a list of documents. If ``include_pointers`` has been set to ``True``, ``DocumentPointers`` will also be included in the results.
entailment
def _discover_uri_type(uri):
    """Given a ``uri``, determine if it is internal or external."""
    parsed_uri = urlparse(uri)
    if not parsed_uri.netloc:
        if parsed_uri.scheme == 'data':
            type_ = INLINE_REFERENCE_TYPE
        else:
            type_ = INTERNAL_REFERENCE_TYPE
    else:
        type_ = EXTERNAL_REFERENCE_TYPE
    return type_
Given a ``uri``, determine if it is internal or external.
entailment
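A quick check of the classification rules above, runnable given the three reference-type constants defined alongside the function:

for uri in ('data:image/png;base64,AAAA',
            '../resources/cover.png',
            'https://example.org/page'):
    print(uri, '->', _discover_uri_type(uri))
# data: URIs have no netloc but a 'data' scheme -> inline
# bare/relative paths have neither              -> internal
# full URLs carry a netloc                      -> external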
def _parse_references(xml):
    """Parse the references to ``Reference`` instances."""
    references = []
    ref_finder = HTMLReferenceFinder(xml)
    for elm, uri_attr in ref_finder:
        type_ = _discover_uri_type(elm.get(uri_attr))
        references.append(Reference(elm, type_, uri_attr))
    return references
Parse the references to ``Reference`` instances.
entailment
def _set_uri_from_bound_model(self):
    """Using the bound model, set the uri."""
    value = self._uri_template.format(self._bound_model.id)
    self.elm.set(self._uri_attr, value)
Using the bound model, set the uri.
entailment
def bind(self, model, template="{}"):
    """Bind the ``model`` to the reference.

    This uses the model's ``id`` attribute and the given ``template``
    to dynamically produce a uri when accessed.

    """
    self._bound_model = model
    self._uri_template = template
    self._set_uri_from_bound_model()
Bind the ``model`` to the reference. This uses the model's ``id`` attribute and the given ``template`` to dynamically produce a uri when accessed.
entailment
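A sketch of rebinding in practice. Both ``ref`` and ``doc`` are hypothetical here (a ``Reference`` wrapping e.g. an ``<img src=...>`` element, and a model whose ``id`` is 'm42119'); only ``bind`` itself is from the source:

# After binding, the element's uri attribute tracks the model's id
# through the template.
ref.bind(doc, template='/resources/{}')
# ref.elm.get(ref._uri_attr) is now '/resources/m42119'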
def index_bounds(x):
    """returns tuple with first and last item"""
    if isinstance(x, (pd.DataFrame, pd.Series)):
        return x.iloc[0], x.iloc[-1]
    else:
        return x[0], x[-1]
returns tuple with first and last item
entailment
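The helper works on plain sequences and pandas objects alike; a runnable check:

import pandas as pd

print(index_bounds([10, 20, 30]))             # -> (10, 30)
print(index_bounds(pd.Series([10, 20, 30])))  # -> (10, 30)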
def dqdv_cycle(cycle, splitter=True, **kwargs):
    """Convenience function for creating dq-dv data from a given
    capacity and voltage cycle.

    Args:
        cycle (pandas.DataFrame): the cycle data ('voltage', 'capacity',
            'direction' (1 or -1)).
        splitter (bool): insert a np.NaN row between charge and discharge.

    Returns:
        Tuple of numpy arrays (voltage, incremental_capacity).

    Example:
        >>> cycle_df = my_data.get_cap(
        >>> ...   1,
        >>> ...   categorical_column=True,
        >>> ...   method = "forth-and-forth"
        >>> ... )
        >>> voltage, incremental = ica.dqdv_cycle(cycle_df)

    """
    c_first = cycle.loc[cycle["direction"] == -1]
    c_last = cycle.loc[cycle["direction"] == 1]

    converter = Converter(**kwargs)
    converter.set_data(c_first["capacity"], c_first["voltage"])
    converter.inspect_data()
    converter.pre_process_data()
    converter.increment_data()
    converter.post_process_data()
    voltage_first = converter.voltage_processed
    incremental_capacity_first = converter.incremental_capacity
    if splitter:
        voltage_first = np.append(voltage_first, np.NaN)
        incremental_capacity_first = np.append(
            incremental_capacity_first, np.NaN)

    converter = Converter(**kwargs)
    converter.set_data(c_last["capacity"], c_last["voltage"])
    converter.inspect_data()
    converter.pre_process_data()
    converter.increment_data()
    converter.post_process_data()
    voltage_last = converter.voltage_processed[::-1]
    incremental_capacity_last = converter.incremental_capacity[::-1]

    voltage = np.concatenate((voltage_first, voltage_last))
    incremental_capacity = np.concatenate(
        (incremental_capacity_first, incremental_capacity_last))

    return voltage, incremental_capacity
Convenience function for creating dq-dv data from a given capacity and voltage cycle. Args: cycle (pandas.DataFrame): the cycle data ('voltage', 'capacity', 'direction' (1 or -1)). splitter (bool): insert a np.NaN row between charge and discharge. Returns: Tuple of numpy arrays (voltage, incremental_capacity). Example: >>> cycle_df = my_data.get_cap( >>> ... 1, >>> ... categorical_column=True, >>> ... method = "forth-and-forth" >>> ... ) >>> voltage, incremental = ica.dqdv_cycle(cycle_df)
entailment
def dqdv_cycles(cycles, **kwargs):
    """Convenience function for creating dq-dv data from given capacity
    and voltage cycles.

    Returns a tidy DataFrame with 'cycle', 'voltage' and 'dq' columns.

    Args:
        cycles (pandas.DataFrame): the cycle data ('cycle', 'voltage',
            'capacity', 'direction' (1 or -1)).

    Returns:
        pandas.DataFrame with columns 'cycle', 'voltage', 'dq'.

    Example:
        >>> cycles_df = my_data.get_cap(
        >>> ...   categorical_column=True,
        >>> ...   method = "forth-and-forth",
        >>> ...   label_cycle_number=True,
        >>> ... )
        >>> ica_df = ica.dqdv_cycles(cycles_df)

    """
    # TODO: should add option for normalising based on first cycle capacity
    # this is e.g. done by first finding the first cycle capacity (nom_cap)
    # (or use nominal capacity given as input) and then propagating this to
    # Converter using the key-word arguments
    # normalize=True, normalization_factor=1.0, normalization_roof=nom_cap

    ica_dfs = list()
    cycle_group = cycles.groupby("cycle")
    for cycle_number, cycle in cycle_group:
        v, dq = dqdv_cycle(cycle, splitter=True, **kwargs)
        _ica_df = pd.DataFrame(
            {
                "voltage": v,
                "dq": dq,
            }
        )
        _ica_df["cycle"] = cycle_number
        _ica_df = _ica_df[['cycle', 'voltage', 'dq']]
        ica_dfs.append(_ica_df)

    ica_df = pd.concat(ica_dfs)
    return ica_df
Convenience function for creating dq-dv data from given capacity and voltage cycles. Returns a tidy DataFrame with 'cycle', 'voltage' and 'dq' columns. Args: cycles (pandas.DataFrame): the cycle data ('cycle', 'voltage', 'capacity', 'direction' (1 or -1)). Returns: pandas.DataFrame with columns 'cycle', 'voltage', 'dq'. Example: >>> cycles_df = my_data.get_cap( >>> ... categorical_column=True, >>> ... method = "forth-and-forth", >>> ... label_cycle_number=True, >>> ... ) >>> ica_df = ica.dqdv_cycles(cycles_df)
entailment
def dqdv(voltage, capacity,
         voltage_resolution=None, capacity_resolution=None,
         voltage_fwhm=0.01,
         pre_smoothing=True, diff_smoothing=False, post_smoothing=True,
         post_normalization=True,
         interpolation_method=None,
         gaussian_order=None, gaussian_mode=None,
         gaussian_cval=None, gaussian_truncate=None,
         points_pr_split=None,
         savgol_filter_window_divisor_default=None,
         savgol_filter_window_order=None,
         max_points=None,
         **kwargs):
    """Convenience function for creating dq-dv data from given capacity
    and voltage data.

    Args:
        voltage: nd.array or pd.Series
        capacity: nd.array or pd.Series
        voltage_resolution: used for interpolating voltage data (e.g. 0.005)
        capacity_resolution: used for interpolating capacity data
        voltage_fwhm: used for setting the post-processing gaussian sigma
        pre_smoothing: set to True for pre-smoothing (window)
        diff_smoothing: set to True for smoothing during differentiation
            (window)
        post_smoothing: set to True for post-smoothing (gaussian)
        post_normalization: set to True for normalising to capacity
        interpolation_method: scipy interpolation method
        gaussian_order: int
        gaussian_mode: mode
        gaussian_cval:
        gaussian_truncate:
        points_pr_split: only used when investigating data using splits
        savgol_filter_window_divisor_default: used for window smoothing
        savgol_filter_window_order: used for window smoothing
        max_points: restricting to max points in vector (capacity-selected)

    Returns:
        voltage, dqdv

    Notes:
        PEC data (Helge)
            pre_smoothing = False
            diff_smoothing = False
            post_smoothing = False
            voltage_resolution = 0.005
        PEC data (Preben)
            ...
        Arbin data (IFE)
            ...

    """
    converter = Converter(**kwargs)
    logging.debug("dqdv - starting")
    logging.debug("dqdv - created Converter obj")
    converter.pre_smoothing = pre_smoothing
    converter.post_smoothing = post_smoothing
    converter.smoothing = diff_smoothing
    converter.normalize = post_normalization
    converter.voltage_fwhm = voltage_fwhm

    logging.debug(f"converter.pre_smoothing: {converter.pre_smoothing}")
    logging.debug(f"converter.post_smoothing: {converter.post_smoothing}")
    logging.debug(f"converter.smoothing: {converter.smoothing}")
    logging.debug(f"converter.normalise: {converter.normalize}")
    logging.debug(f"converter.voltage_fwhm: {converter.voltage_fwhm}")

    if voltage_resolution is not None:
        converter.voltage_resolution = voltage_resolution
    if capacity_resolution is not None:
        converter.capacity_resolution = capacity_resolution
    if savgol_filter_window_divisor_default is not None:
        converter.savgol_filter_window_divisor_default = \
            savgol_filter_window_divisor_default
        logging.debug(f"converter.savgol_filter_window_divisor_default: "
                      f"{converter.savgol_filter_window_divisor_default}")
    if savgol_filter_window_order is not None:
        converter.savgol_filter_window_order = savgol_filter_window_order
        logging.debug(f"converter.savgol_filter_window_order: "
                      f"{converter.savgol_filter_window_order}")
    if gaussian_mode is not None:
        converter.gaussian_mode = gaussian_mode
    if gaussian_order is not None:
        converter.gaussian_order = gaussian_order
    if gaussian_truncate is not None:
        converter.gaussian_truncate = gaussian_truncate
    if gaussian_cval is not None:
        converter.gaussian_cval = gaussian_cval
    if interpolation_method is not None:
        converter.interpolation_method = interpolation_method
    if points_pr_split is not None:
        converter.points_pr_split = points_pr_split
    if max_points is not None:
        converter.max_points = max_points

    converter.set_data(capacity, voltage)
    converter.inspect_data()
    converter.pre_process_data()
    converter.increment_data()
    converter.post_process_data()
    return converter.voltage_processed, converter.incremental_capacity
Convenience function for creating dq-dv data from given capacity and voltage data. Args: voltage: nd.array or pd.Series capacity: nd.array or pd.Series voltage_resolution: used for interpolating voltage data (e.g. 0.005) capacity_resolution: used for interpolating capacity data voltage_fwhm: used for setting the post-processing gaussian sigma pre_smoothing: set to True for pre-smoothing (window) diff_smoothing: set to True for smoothing during differentiation (window) post_smoothing: set to True for post-smoothing (gaussian) post_normalization: set to True for normalising to capacity interpolation_method: scipy interpolation method gaussian_order: int gaussian_mode: mode gaussian_cval: gaussian_truncate: points_pr_split: only used when investigating data using splits savgol_filter_window_divisor_default: used for window smoothing savgol_filter_window_order: used for window smoothing max_points: restricting to max points in vector (capacity-selected) Returns: voltage, dqdv Notes: PEC data (Helge) pre_smoothing = False diff_smoothing = False post_smoothing = False voltage_resolution = 0.005 PEC data (Preben) ... Arbin data (IFE) ...
entailment
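A hedged calling sketch with synthetic data. The numerical output depends on cellpy's ``Converter`` defaults, so only the calling convention is shown here:

import numpy as np

# Synthetic half-cycle: monotonically increasing capacity with a
# smoothly decaying voltage curve (illustrative values only).
capacity = np.linspace(0.0, 350.0, 400)
voltage = 0.05 + 0.9 * np.exp(-capacity / 120.0)
v, dq = dqdv(voltage, capacity, voltage_fwhm=0.01)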
def _dqdv_combined_frame(cell, **kwargs):
    """Returns full cycle dqdv data for all cycles as one pd.DataFrame.

    Args:
        cell: CellpyData-object

    Returns:
        pandas.DataFrame with the following columns:
        cycle: cycle number
        voltage: voltage
        dq: the incremental capacity

    """
    cycles = cell.get_cap(
        method="forth-and-forth",
        categorical_column=True,
        label_cycle_number=True,
    )

    ica_df = dqdv_cycles(cycles, **kwargs)
    assert isinstance(ica_df, pd.DataFrame)
    return ica_df
Returns full cycle dqdv data for all cycles as one pd.DataFrame. Args: cell: CellpyData-object Returns: pandas.DataFrame with the following columns: cycle: cycle number voltage: voltage dq: the incremental capacity
entailment
def dqdv_frames(cell, split=False, **kwargs):
    """Returns dqdv data as pandas.DataFrame(s) for all cycles.

    Args:
        cell (CellpyData-object).
        split (bool): return one frame for charge and one for discharge
            if True (defaults to False).

    Returns:
        pandas.DataFrame(s) with the following columns:
        cycle: cycle number (if split is set to True).
        voltage: voltage
        dq: the incremental capacity

    Example:
        >>> from cellpy.utils import ica
        >>> charge_df, dcharge_df = ica.dqdv_frames(my_cell, split=True)
        >>> charge_df.plot(x=("voltage", "v"))

    """
    # TODO: should add option for normalising based on first cycle capacity
    # this is e.g. done by first finding the first cycle capacity (nom_cap)
    # (or use nominal capacity given as input) and then propagating this to
    # Converter using the key-word arguments
    # normalize=True, normalization_factor=1.0, normalization_roof=nom_cap

    if split:
        return _dqdv_split_frames(cell, tidy=True, **kwargs)
    else:
        return _dqdv_combined_frame(cell, **kwargs)
Returns dqdv data as pandas.DataFrame(s) for all cycles. Args: cell (CellpyData-object). split (bool): return one frame for charge and one for discharge if True (defaults to False). Returns: pandas.DataFrame(s) with the following columns: cycle: cycle number (if split is set to True). voltage: voltage dq: the incremental capacity Example: >>> from cellpy.utils import ica >>> charge_df, dcharge_df = ica.dqdv_frames(my_cell, split=True) >>> charge_df.plot(x=("voltage", "v"))
entailment
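A short usage note for the frame-level helper, assuming ``my_cell`` is a loaded CellpyData object:

ica_df = dqdv_frames(my_cell)          # tidy frame: cycle, voltage, dq
first = ica_df.loc[ica_df.cycle == 1]  # pick out the first cycle
first.plot(x='voltage', y='dq')        # quick matplotlib plot via pandas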