sentence1 (string, lengths 52 to 3.87M) | sentence2 (string, lengths 1 to 47.2k) | label (string, 1 class) |
|---|---|---|
def _dqdv_split_frames(cell, tidy=False, **kwargs):
"""Returns dqdv data as pandas.DataFrames for all cycles.
Args:
cell (CellpyData-object).
tidy (bool): return in wide format if False (default),
long (tidy) format if True.
Returns:
(charge_ica_frame, discharge_ica_frame) where the frames are
pandas.DataFrames where the first column is voltage ('v') and
the following columns are the incremental capacity for each
cycle (multi-indexed, where cycle number is on the top level).
Example:
>>> from cellpy.utils import ica
>>> charge_ica_df, dcharge_ica_df = ica.ica_frames(my_cell)
>>> charge_ica_df.plot(x=("voltage", "v"))
"""
charge_dfs, cycles, minimum_v, maximum_v = _collect_capacity_curves(
cell,
direction="charge"
)
# charge_df = pd.concat(
# charge_dfs, axis=1, keys=[k.name for k in charge_dfs])
ica_charge_dfs = _make_ica_charge_curves(
charge_dfs, cycles, minimum_v, maximum_v,
**kwargs,
)
ica_charge_df = pd.concat(
ica_charge_dfs,
axis=1,
keys=[k.name for k in ica_charge_dfs]
)
dcharge_dfs, cycles, minimum_v, maximum_v = _collect_capacity_curves(
cell,
direction="discharge"
)
ica_dcharge_dfs = _make_ica_charge_curves(
dcharge_dfs, cycles, minimum_v, maximum_v,
**kwargs,
)
ica_discharge_df = pd.concat(
ica_dcharge_dfs,
axis=1,
keys=[k.name for k in ica_dcharge_dfs]
)
ica_charge_df.columns.names = ["cycle", "value"]
ica_discharge_df.columns.names = ["cycle", "value"]
if tidy:
ica_charge_df = ica_charge_df.melt(
"voltage",
var_name="cycle",
value_name="dq",
col_level=0
)
ica_discharge_df = ica_discharge_df.melt(
"voltage",
var_name="cycle",
value_name="dq",
col_level=0
)
return ica_charge_df, ica_discharge_df | Returns dqdv data as pandas.DataFrames for all cycles.
Args:
cell (CellpyData-object).
tidy (bool): return in wide format if False (default),
long (tidy) format if True.
Returns:
(charge_ica_frame, discharge_ica_frame) where the frames are
pandas.DataFrames where the first column is voltage ('v') and
the following columns are the incremental capacity for each
cycle (multi-indexed, where cycle number is on the top level).
Example:
>>> from cellpy.utils import ica
>>> charge_ica_df, dcharge_ica_df = ica.ica_frames(my_cell)
>>> charge_ica_df.plot(x=("voltage", "v")) | entailment |
def set_data(self, capacity, voltage=None,
capacity_label="q", voltage_label="v"
):
"""Set the data"""
logging.debug("setting data (capacity and voltage)")
if isinstance(capacity, pd.DataFrame):
logging.debug("recieved a pandas.DataFrame")
self.capacity = capacity[capacity_label]
self.voltage = capacity[voltage_label]
else:
assert len(capacity) == len(voltage)
self.capacity = capacity
self.voltage = voltage | Set the data | entailment |
def inspect_data(self, capacity=None, voltage=None,
err_est=False, diff_est=False):
"""check and inspect the data"""
logging.debug("inspecting the data")
if capacity is None:
capacity = self.capacity
if voltage is None:
voltage = self.voltage
if capacity is None or voltage is None:
raise NullData
self.len_capacity = len(capacity)
self.len_voltage = len(voltage)
if self.len_capacity <= 1:
raise NullData
if self.len_voltage <= 1:
raise NullData
self.min_capacity, self.max_capacity = value_bounds(capacity)
self.start_capacity, self.end_capacity = index_bounds(capacity)
self.number_of_points = len(capacity)
if diff_est:
d_capacity = np.diff(capacity)
d_voltage = np.diff(voltage)
self.d_capacity_mean = np.mean(d_capacity)
self.d_voltage_mean = np.mean(d_voltage)
if err_est:
splits = int(self.number_of_points / self.points_pr_split)
rest = self.number_of_points % self.points_pr_split
if splits < self.minimum_splits:
txt = "no point in splitting, too little data"
logging.debug(txt)
self.errors.append("splitting: to few points")
else:
if rest > 0:
_cap = capacity[:-rest]
_vol = voltage[:-rest]
else:
_cap = capacity
_vol = voltage
c_pieces = np.split(_cap, splits)
v_pieces = np.split(_vol, splits)
# c_middle = int(np.amax(c_pieces) / 2)
std_err = []
c_pieces_avg = []
for c, v in zip(c_pieces, v_pieces):
_slope, _intercept, _r_value, _p_value, _std_err = stats.linregress(c, v)
std_err.append(_std_err)
c_pieces_avg.append(np.mean(c))
self.std_err_median = np.median(std_err)
self.std_err_mean = np.mean(std_err)
if not self.start_capacity == self.min_capacity:
self.errors.append("capacity: start<>min")
if not self.end_capacity == self.max_capacity:
self.errors.append("capacity: end<>max")
if self.normalizing_factor is None:
self.normalizing_factor = self.end_capacity
if self.normalizing_roof is not None:
self.normalizing_factor = self.normalizing_factor * \
self.end_capacity / self.normalizing_roof | check and inspect the data | entailment |
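The error-estimate branch above splits the capacity/voltage arrays into equal chunks and uses the standard error of a linear fit per chunk as a noise measure. A minimal, self-contained sketch of that idea follows; the data and the chunk count are made up (chosen so the arrays divide evenly):

```python
import numpy as np
from scipy import stats

# made-up capacity/voltage data with a little noise
rng = np.random.default_rng(0)
capacity = np.linspace(0.0, 1.0, 100)
voltage = 3.0 + 0.2 * capacity + 0.005 * rng.standard_normal(100)

splits = 5  # np.split requires len(capacity) % splits == 0
c_pieces = np.split(capacity, splits)
v_pieces = np.split(voltage, splits)

# standard error of the fitted slope in each piece as a local noise estimate
std_err = [stats.linregress(c, v).stderr for c, v in zip(c_pieces, v_pieces)]
print(np.median(std_err), np.mean(std_err))
```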
def pre_process_data(self):
"""perform some pre-processing of the data (i.e. interpolation)"""
logging.debug("pre-processing the data")
capacity = self.capacity
voltage = self.voltage
# performing an interpolation in v(q) space
logging.debug(" - interpolating voltage(capacity)")
c1, c2 = index_bounds(capacity)
if self.max_points is not None:
len_capacity = min(self.max_points, self.len_capacity)
elif self.capacity_resolution is not None:
len_capacity = int(round(abs(c2 - c1) / self.capacity_resolution))
else:
len_capacity = self.len_capacity
f = interp1d(capacity, voltage, kind=self.interpolation_method)
self.capacity_preprocessed = np.linspace(c1, c2, len_capacity)
self.voltage_preprocessed = f(self.capacity_preprocessed)
if self.pre_smoothing:
logging.debug(" - pre-smoothing (savgol filter window)")
savgol_filter_window_divisor = np.amin(
(self.savgol_filter_window_divisor_default, len_capacity / 5)
)
savgol_filter_window_length = int(
len_capacity / savgol_filter_window_divisor
)
if savgol_filter_window_length % 2 == 0:
savgol_filter_window_length -= 1
savgol_filter_window_length = np.amax(
[3, savgol_filter_window_length]
)
self.voltage_preprocessed = savgol_filter(
self.voltage_preprocessed,
savgol_filter_window_length,
self.savgol_filter_window_order
) | perform some pre-processing of the data (i.e. interpolation) | entailment |
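As a rough illustration of the pre-processing step above (interpolate voltage as a function of capacity onto a regular grid, then smooth with a Savitzky-Golay filter), here is a standalone sketch with made-up data. It also shows why the code decrements even window lengths: savgol_filter requires an odd window larger than the polynomial order.

```python
import numpy as np
from scipy.interpolate import interp1d
from scipy.signal import savgol_filter

# made-up, monotonically increasing capacity with a noisy voltage response
capacity = np.linspace(0.0, 1.0, 80)
voltage = 3.0 + 0.5 * np.sqrt(capacity) + 0.01 * np.random.randn(80)

# interpolate v(q) onto a regular capacity grid
f = interp1d(capacity, voltage, kind="linear")
capacity_grid = np.linspace(capacity[0], capacity[-1], 200)
voltage_grid = f(capacity_grid)

# window must be odd and larger than the polynomial order
window_length = 200 // 10
if window_length % 2 == 0:
    window_length -= 1
window_length = max(3, window_length)
voltage_smoothed = savgol_filter(voltage_grid, window_length, 2)
```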
def increment_data(self):
"""perform the dq-dv transform"""
# NOTE TO ASBJOERN: Probably insert method for "binning" instead of
# differentiating here
# (use self.increment_method as the variable for selecting method for)
logging.debug("incrementing data")
# ---- shifting to y-x ----------------------------------------
v1, v2 = value_bounds(self.voltage_preprocessed)
if self.voltage_resolution is not None:
len_voltage = int(round(abs(v2 - v1) / self.voltage_resolution))
else:
len_voltage = len(self.voltage_preprocessed)
# ---- interpolating ------------------------------------------
logging.debug(" - interpolating capacity(voltage)")
f = interp1d(
self.voltage_preprocessed,
self.capacity_preprocessed,
kind=self.interpolation_method
)
self.voltage_inverted = np.linspace(v1, v2, len_voltage)
self.voltage_inverted_step = (v2 - v1) / (len_voltage - 1)
self.capacity_inverted = f(self.voltage_inverted)
if self.smoothing:
logging.debug(" - smoothing (savgol filter window)")
savgol_filter_window_divisor = np.amin(
(self.savgol_filter_window_divisor_default, len_voltage / 5)
)
savgol_filter_window_length = int(
len(self.voltage_inverted) / savgol_filter_window_divisor
)
if savgol_filter_window_length % 2 == 0:
savgol_filter_window_length -= 1
self.capacity_inverted = savgol_filter(
self.capacity_inverted,
np.amax([3, savgol_filter_window_length]),
self.savgol_filter_window_order
)
# --- diff --------------------
if self.increment_method == "diff":
logging.debug(" - diff using DIFF")
self.incremental_capacity = np.ediff1d(self.capacity_inverted) / self.voltage_inverted_step
self._incremental_capacity = self.incremental_capacity
# --- need to adjust voltage ---
self._voltage_processed = self.voltage_inverted[1:]
self.voltage_processed = self.voltage_inverted[1:] - 0.5 * self.voltage_inverted_step # centering
elif self.increment_method == "hist":
logging.debug(" - diff using HIST")
# TODO: Asbjoern, maybe you can put your method here?
raise NotImplementedError | perform the dq-dv transform | entailment |
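The "diff" increment method boils down to forward differences of capacity on a uniform voltage grid, divided by the voltage step, with the voltage axis shifted by half a step to centre each point. A small sketch of just that arithmetic (placeholder curve, not cellpy data):

```python
import numpy as np

# placeholder capacity(voltage) curve on a uniform voltage grid
voltage = np.linspace(3.0, 4.2, 121)
step = voltage[1] - voltage[0]
capacity = np.tanh((voltage - 3.6) * 5.0)

dqdv = np.ediff1d(capacity) / step            # forward difference / dV
voltage_centered = voltage[1:] - 0.5 * step   # centre between neighbouring samples

assert len(dqdv) == len(voltage_centered) == len(voltage) - 1
```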
def post_process_data(self, voltage=None, incremental_capacity=None,
voltage_step=None):
"""perform post-processing (smoothing, normalisation, interpolation) of
the data"""
logging.debug("post-processing data")
if voltage is None:
voltage = self.voltage_processed
incremental_capacity = self.incremental_capacity
voltage_step = self.voltage_inverted_step
if self.post_smoothing:
logging.debug(" - post smoothing (gaussian)")
logging.debug(f" * using voltage fwhm: {self.voltage_fwhm}")
points_fwhm = int(self.voltage_fwhm / voltage_step)
sigma = np.amax([1, points_fwhm / 2])
self.incremental_capacity = gaussian_filter1d(
incremental_capacity, sigma=sigma, order=self.gaussian_order,
mode=self.gaussian_mode,
cval=self.gaussian_cval,
truncate=self.gaussian_truncate
)
if self.normalize:
logging.debug(" - normalizing")
area = simps(incremental_capacity, voltage)
self.incremental_capacity = incremental_capacity * self.normalizing_factor / abs(area)
fixed_range = False
if isinstance(self.fixed_voltage_range, np.ndarray):
fixed_range = True
else:
if self.fixed_voltage_range:
fixed_range = True
if fixed_range:
logging.debug(" - using fixed voltage range (interpolating)")
v1, v2, number_of_points = self.fixed_voltage_range
v = np.linspace(v1, v2, number_of_points)
f = interp1d(x=self.voltage_processed, y=self.incremental_capacity,
kind=self.interpolation_method, bounds_error=False,
fill_value=np.NaN)
self.incremental_capacity = f(v)
self.voltage_processed = v | perform post-processing (smoothing, normalisation, interpolation) of
the data | entailment |
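For the post-processing step, here is a hedged sketch of Gaussian smoothing with a sigma derived from a voltage FWHM (using the same coarse points_fwhm / 2 rule as above) and area normalisation via Simpson's rule. The curve and the 0.01 V FWHM are made up.

```python
import numpy as np
from scipy.ndimage import gaussian_filter1d
try:
    from scipy.integrate import simpson as simps  # newer SciPy
except ImportError:
    from scipy.integrate import simps             # older SciPy

voltage = np.linspace(3.0, 4.2, 400)
dqdv = np.exp(-((voltage - 3.6) / 0.05) ** 2)     # placeholder dQ/dV peak

voltage_step = voltage[1] - voltage[0]
points_fwhm = int(0.01 / voltage_step)            # FWHM of 0.01 V expressed in points
sigma = max(1, points_fwhm / 2)                   # coarse FWHM -> sigma rule
smoothed = gaussian_filter1d(dqdv, sigma=sigma)

area = simps(smoothed, x=voltage)                 # normalise to unit area
normalized = smoothed / abs(area)
```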
def easybake(ruleset, in_html, out_html):
"""This adheres to the same interface as
``cnxeasybake.scripts.main.easyback``.
``ruleset`` is a string containing the ruleset CSS
while ``in_html`` and ``out_html`` are file-like objects,
with respective read and write ability.
"""
html = etree.parse(in_html)
oven = Oven(ruleset)
oven.bake(html)
out_html.write(etree.tostring(html)) | This adheres to the same interface as
``cnxeasybake.scripts.main.easyback``.
``ruleset`` is a string containing the ruleset CSS
while ``in_html`` and ``out_html`` are file-like objects,
with respective read and write ability. | entailment |
def reconstitute(html):
"""Given a file-like object as ``html``, reconstruct it into models."""
try:
htree = etree.parse(html)
except etree.XMLSyntaxError:
html.seek(0)
htree = etree.HTML(html.read())
xhtml = etree.tostring(htree, encoding='utf-8')
return adapt_single_html(xhtml) | Given a file-like object as ``html``, reconstruct it into models. | entailment |
def collate(binder, ruleset=None, includes=None):
"""Given a ``Binder`` as ``binder``, collate the content into a new set
of models.
Returns the collated binder.
"""
html_formatter = SingleHTMLFormatter(binder, includes)
raw_html = io.BytesIO(bytes(html_formatter))
collated_html = io.BytesIO()
if ruleset is None:
# No ruleset found, so no cooking necessary.
return binder
easybake(ruleset, raw_html, collated_html)
collated_html.seek(0)
collated_binder = reconstitute(collated_html)
return collated_binder | Given a ``Binder`` as ``binder``, collate the content into a new set
of models.
Returns the collated binder. | entailment |
def adapt_package(package):
"""Adapts ``.epub.Package`` to a ``BinderItem`` and cascades
the adaptation downward to ``DocumentItem``
and ``ResourceItem``.
The results of this process provide the same interface as
``.models.Binder``, ``.models.Document`` and ``.models.Resource``.
"""
navigation_item = package.navigation
html = etree.parse(navigation_item.data)
tree = parse_navigation_html_to_tree(html, navigation_item.name)
return _node_to_model(tree, package) | Adapts ``.epub.Package`` to a ``BinderItem`` and cascades
the adaptation downward to ``DocumentItem``
and ``ResourceItem``.
The results of this process provide the same interface as
``.models.Binder``, ``.models.Document`` and ``.models.Resource``. | entailment |
def adapt_item(item, package, filename=None):
"""Adapts ``.epub.Item`` to a ``DocumentItem``.
"""
if item.media_type == 'application/xhtml+xml':
try:
html = etree.parse(item.data)
except Exception as exc:
logger.error("failed parsing {}".format(item.name))
raise
metadata = DocumentPointerMetadataParser(
html, raise_value_error=False)()
item.data.seek(0)
if metadata.get('is_document_pointer'):
model = DocumentPointerItem(item, package)
else:
model = DocumentItem(item, package)
else:
model = Resource(item.name, item.data, item.media_type,
filename or item.name)
return model | Adapts ``.epub.Item`` to a ``DocumentItem``. | entailment |
def make_epub(binders, file):
"""Creates an EPUB file from a binder(s)."""
if not isinstance(binders, (list, set, tuple,)):
binders = [binders]
epub = EPUB([_make_package(binder) for binder in binders])
epub.to_file(epub, file) | Creates an EPUB file from a binder(s). | entailment |
def make_publication_epub(binders, publisher, publication_message, file):
"""Creates an epub file from a binder(s). Also requires
publication information, meant to be used in a EPUB publication
request.
"""
if not isinstance(binders, (list, set, tuple,)):
binders = [binders]
packages = []
for binder in binders:
metadata = binder.metadata
binder.metadata = deepcopy(metadata)
binder.metadata.update({'publisher': publisher,
'publication_message': publication_message})
packages.append(_make_package(binder))
binder.metadata = metadata
epub = EPUB(packages)
epub.to_file(epub, file) | Creates an epub file from a binder(s). Also requires
publication information, meant to be used in a EPUB publication
request. | entailment |
def _make_package(binder):
"""Makes an ``.epub.Package`` from a Binder'ish instance."""
package_id = binder.id
if package_id is None:
package_id = hash(binder)
package_name = "{}.opf".format(package_id)
extensions = get_model_extensions(binder)
template_env = jinja2.Environment(trim_blocks=True, lstrip_blocks=True)
# Build the package item list.
items = []
# Build the binder as an item, specifically a navigation item.
navigation_document = bytes(HTMLFormatter(binder, extensions))
navigation_document_name = "{}{}".format(
package_id,
mimetypes.guess_extension('application/xhtml+xml', strict=False))
item = Item(str(navigation_document_name),
io.BytesIO(navigation_document),
'application/xhtml+xml',
is_navigation=True, properties=['nav'])
items.append(item)
resources = {}
# Roll through the model list again, making each one an item.
for model in flatten_model(binder):
for resource in getattr(model, 'resources', []):
resources[resource.id] = resource
with resource.open() as data:
item = Item(resource.id, data, resource.media_type)
items.append(item)
if isinstance(model, (Binder, TranslucentBinder,)):
continue
if isinstance(model, DocumentPointer):
content = bytes(HTMLFormatter(model))
item = Item(''.join([model.ident_hash, extensions[model.id]]),
io.BytesIO(content),
model.media_type)
items.append(item)
continue
for reference in model.references:
if reference.remote_type == INLINE_REFERENCE_TYPE:
# has side effects - converts ref type to INTERNAL w/
# appropriate uri, so need to replicate resource treatment from
# above
resource = _make_resource_from_inline(reference)
model.resources.append(resource)
resources[resource.id] = resource
with resource.open() as data:
item = Item(resource.id, data, resource.media_type)
items.append(item)
reference.bind(resource, '../resources/{}')
elif reference.remote_type == INTERNAL_REFERENCE_TYPE:
filename = os.path.basename(reference.uri)
resource = resources.get(filename)
if resource:
reference.bind(resource, '../resources/{}')
complete_content = bytes(HTMLFormatter(model))
item = Item(''.join([model.ident_hash, extensions[model.id]]),
io.BytesIO(complete_content),
model.media_type)
items.append(item)
# Build the package.
package = Package(package_name, items, binder.metadata)
return package | Makes an ``.epub.Package`` from a Binder'ish instance. | entailment |
def _make_resource_from_inline(reference):
"""Makes an ``models.Resource`` from a ``models.Reference``
of type INLINE. That is, a data: uri"""
uri = DataURI(reference.uri)
data = io.BytesIO(uri.data)
mimetype = uri.mimetype
res = Resource('dummy', data, mimetype)
res.id = res.filename
return res | Makes an ``models.Resource`` from a ``models.Reference``
of type INLINE. That is, a data: uri | entailment |
def _make_item(model):
"""Makes an ``.epub.Item`` from
a ``.models.Document`` or ``.models.Resource``
"""
item = Item(model.id, model.content, model.media_type)
return item | Makes an ``.epub.Item`` from
a ``.models.Document`` or ``.models.Resource`` | entailment |
def _node_to_model(tree_or_item, package, parent=None,
lucent_id=TRANSLUCENT_BINDER_ID):
"""Given a tree, parse to a set of models"""
if 'contents' in tree_or_item:
# It is a binder.
tree = tree_or_item
# Grab the package metadata, so we have required license info
metadata = package.metadata.copy()
if tree['id'] == lucent_id:
metadata['title'] = tree['title']
binder = TranslucentBinder(metadata=metadata)
else:
try:
package_item = package.grab_by_name(tree['id'])
binder = BinderItem(package_item, package)
except KeyError: # Translucent w/ id
metadata.update({
'title': tree['title'],
'cnx-archive-uri': tree['id'],
'cnx-archive-shortid': tree['shortId']})
binder = Binder(tree['id'], metadata=metadata)
for item in tree['contents']:
node = _node_to_model(item, package, parent=binder,
lucent_id=lucent_id)
if node.metadata['title'] != item['title']:
binder.set_title_for_node(node, item['title'])
result = binder
else:
# It is a document.
item = tree_or_item
package_item = package.grab_by_name(item['id'])
result = adapt_item(package_item, package)
if parent is not None:
parent.append(result)
return result | Given a tree, parse to a set of models | entailment |
def adapt_single_html(html):
"""Adapts a single html document generated by
``.formatters.SingleHTMLFormatter`` to a ``models.Binder``
"""
html_root = etree.fromstring(html)
metadata = parse_metadata(html_root.xpath('//*[@data-type="metadata"]')[0])
id_ = metadata['cnx-archive-uri'] or 'book'
binder = Binder(id_, metadata=metadata)
nav_tree = parse_navigation_html_to_tree(html_root, id_)
body = html_root.xpath('//xhtml:body', namespaces=HTML_DOCUMENT_NAMESPACES)
_adapt_single_html_tree(binder, body[0], nav_tree, top_metadata=metadata)
return binder | Adapts a single html document generated by
``.formatters.SingleHTMLFormatter`` to a ``models.Binder`` | entailment |
def select_ocv_points(cellpydata, cycles=None, selection_method="martin",
number_of_points=5,
interval=10,
relative_voltage=False,
report_times=False,
direction=None):
"""Select points from the ocvrlx steps.
Args:
cellpydata: CellpyData-object
cycles: list of cycle numbers to process (optional)
selection_method: criteria for selecting points
martin: select first and last, and then last/2, last/2/2 etc.
until you have reached the wanted number of points.
fixed_time: select first, and then
number_of_points: number of points you want.
interval: interval between each point (in use only for methods
where interval makes sense). If it is a list, then
number_of_points will be calculated as len(interval) + 1 (and
override the set number_of_points).
relative_voltage: set to True if you would like the voltage to be
relative to the voltage before starting the ocv rlx step.
Defaults to False. Note that the initial rlx step (when
the cell has just been put on the tester) does not have any
prior voltage. The relative voltage will then be versus the
first measurement point.
report_times: also report the ocv rlx total time if True (defaults
to False)
direction ("up", "down" or "both"): select "up" if you would like
to process only the ocv rlx steps where the voltage is relaxing
upwards and vice versa. Defaults to "both".
Returns:
pandas.DataFrame
"""
if cycles is None:
cycles = cellpydata.get_cycle_numbers()
else:
if not isinstance(cycles, (list, tuple)):
cycles = [cycles, ]
if direction is None:
direction = "both"
if not isinstance(interval, (list, tuple)):
interval = [float(interval) for _ in range(number_of_points-1)]
ocv_rlx_id = "ocvrlx"
step_table = cellpydata.dataset.step_table
dfdata = cellpydata.dataset.dfdata
ocv_steps = step_table.loc[
step_table["cycle"].isin(cycles), :
]
ocv_steps = ocv_steps.loc[
ocv_steps.type.str.startswith(ocv_rlx_id), :
]
if selection_method in ["fixed_times", "fixed_points", "selected_times"]:
number_of_points = len(interval) + 1
headers2 = []
for j in range(number_of_points):
n = str(j).zfill(2)
headers2.append(f"point_{n}")
# doing an iteration (thought I didn't have to, but...) (fix later)
results_list = list()
iter_range = number_of_points - 1
if selection_method == "martin":
iter_range -= 1
for index, row in ocv_steps.iterrows():
# voltage
first, last, delta = (
row['voltage_first'],
row['voltage_last'],
row['voltage_delta']
)
voltage_reference = 0.0
if relative_voltage:
if index > 0:
reference_row = step_table.iloc[index-1, :]
voltage_reference = reference_row['voltage_last']
else:
voltage_reference = first
logging.warning("STEP 0: Using first point as ref voltage")
# time
start, end, duration = (
row['step_time_first'],
row['step_time_last'],
row['step_time_delta']
)
cycle, step = (row['cycle'], row['step'])
info = row['type']
v_df = dfdata.loc[
(dfdata["Cycle_Index"] == cycle) &
(dfdata["Step_Index"] == step), ["Step_Time", "Voltage"]
]
poi = []
_end = end
_start = start
if report_times:
t = str(datetime.timedelta(seconds=round(end-start, 0)))
print(f"Cycle {cycle}:", end=" ")
print(f"dt = {t}, dv = {first-last:6.3f}")
for i, j in enumerate(range(max(1, iter_range))):
if selection_method == "martin":
logging.debug("using the 'martin'-method")
_end = _end / 2.0
poi.append(_end)
elif selection_method == "fixed_times":
logging.debug(f"using fixed times with interval {interval[i]}")
_start = _start + interval[i]
logging.debug(f"time: {_start}")
poi.append(_start)
else:
# more methods to come?
logging.info("this method is not implemented")
return None
if selection_method == "martin":
poi.reverse()
df_poi = pd.DataFrame({"Step_Time": poi})
df_poi["Voltage"] = np.nan
v_df = pd.concat([v_df, df_poi], ignore_index=True)
v_df = v_df.sort_values("Step_Time").reset_index(drop=True)
v_df["new"] = v_df["Voltage"].interpolate()
voi = []
for p in poi:
_v = v_df.loc[v_df["Step_Time"].isin([p]), "new"].values
_v = _v - voltage_reference
voi.append(_v[0])
poi.insert(0, start)
voi.insert(0, first - voltage_reference)
if selection_method == "martin":
poi.append(end)
voi.append(last - voltage_reference)
d1 = {"cycle": cycle}
d2 = {h: [v] for h, v in zip(headers2, voi)}
d = {**d1, **d2}
result = pd.DataFrame(d)
result["step"] = step
result["type"] = info
results_list.append(result)
final = pd.concat(results_list)
if direction == "down":
final = final.loc[final["type"] == "ocvrlx_down", :]
elif direction == "up":
final = final.loc[final["type"] == "ocvrlx_up", :]
final = final.reset_index(drop=True)
return final | Select points from the ocvrlx steps.
Args:
cellpydata: CellpyData-object
cycles: list of cycle numbers to process (optional)
selection_method: criteria for selecting points
martin: select first and last, and then last/2, last/2/2 etc.
until you have reached the wanted number of points.
fixed_time: select first, and then
number_of_points: number of points you want.
interval: interval between each point (in use only for methods
where interval makes sense). If it is a list, then
number_of_points will be calculated as len(interval) + 1 (and
override the set number_of_points).
relative_voltage: set to True if you would like the voltage to be
relative to the voltage before starting the ocv rlx step.
Defaults to False. Note that the initial rlx step (when
the cell has just been put on the tester) does not have any
prior voltage. The relative voltage will then be versus the
first measurement point.
report_times: also report the ocv rlx total time if True (defaults
to False)
direction ("up", "down" or "both"): select "up" if you would like
to process only the ocv rlx steps where the voltage is relaxing
upwards and vice versa. Defaults to "both".
Returns:
pandas.DataFrame | entailment |
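The "martin" selection scheme above keeps halving the total step time to pick interior points, then adds the first and last points. Stripped of the dataframe handling, the point-in-time selection looks roughly like this (made-up 3600 s relaxation, five points):

```python
end = 3600.0             # made-up total relaxation time in seconds
number_of_points = 5

poi = []
t = end
for _ in range(number_of_points - 2):
    t = t / 2.0          # halve the remaining time window each iteration
    poi.append(t)
poi.reverse()

poi = [0.0] + poi + [end]
print(poi)               # [0.0, 450.0, 900.0, 1800.0, 3600.0]
```

The voltages at those times are then read off an interpolated voltage versus step-time curve, as in the dataframe handling above.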
def get_best_fit_parameters_grouped(self):
"""Returns a dictionary of the best fit."""
result_dict = dict()
result_dict['ocv'] = [parameters['ocv'] for parameters in
self.best_fit_parameters]
for i in range(self.circuits):
result_dict['t' + str(i)] = [parameters['t' + str(i)] for parameters
in self.best_fit_parameters]
result_dict['w' + str(i)] = [parameters['w' + str(i)] for parameters
in self.best_fit_parameters]
return result_dict | Returns a dictionary of the best fit. | entailment |
def get_best_fit_parameters_translated_grouped(self):
"""Returns the parameters as a dictionary of the 'real units' for the best fit."""
result_dict = dict()
result_dict['ocv'] = [parameters['ocv'] for parameters in
self.best_fit_parameters_translated]
result_dict['ir'] = [parameters['ir'] for parameters in
self.best_fit_parameters_translated]
for i in range(self.circuits):
result_dict['r' + str(i)] = [parameters['r' + str(i)] for parameters
in self.best_fit_parameters_translated]
result_dict['c' + str(i)] = [parameters['c' + str(i)] for parameters
in self.best_fit_parameters_translated]
return result_dict | Returns the parameters as a dictionary of the 'real units' for the best fit. | entailment |
def plot_summary(self, cycles=None):
"""Convenience function for plotting the summary of the fit"""
if cycles is None:
cycles = [0]
fig1 = plt.figure()
ax1 = fig1.add_subplot(221)
ax1.set_title('Fit')
ax2 = fig1.add_subplot(222)
ax2.set_title('OCV')
ax3 = fig1.add_subplot(223)
ax3.set_title('Tau')
ax3.set_yscale("log")
ax4 = fig1.add_subplot(224)
ax4.set_title('Voltage Impact')
plot_data = self.get_best_fit_data()
for cycle in cycles:
ax1.plot(plot_data[cycle][0], plot_data[cycle][1])
ax1.plot(plot_data[cycle][0], plot_data[cycle][2])
plot_data = self.get_best_fit_parameters_grouped()
for i in range(self.circuits):
ax3.plot(self.get_fit_cycles(), plot_data['t' + str(i)])
ax4.plot(self.get_fit_cycles(), plot_data['w' + str(i)])
ax2.plot(self.get_fit_cycles(), plot_data['ocv']) | Convenience function for plotting the summary of the fit | entailment |
def plot_summary_translated(self):
"""Convenience function for plotting the summary of the
fit (translated)"""
fig2 = plt.figure()
ax1 = fig2.add_subplot(221)
ax1.set_title('OCV (V)')
ax2 = fig2.add_subplot(222)
ax2.set_title('IR (Ohm)')
ax3 = fig2.add_subplot(223)
ax3.set_title('Resistances (Ohm)')
ax4 = fig2.add_subplot(224)
ax4.set_title('Capacitances (F)')
ax4.set_yscale("log")
plot_data = self.get_best_fit_parameters_translated_grouped()
print(plot_data['ocv'])
print(plot_data['ir'])
print(plot_data['r0'])
ax1.plot(self.get_fit_cycles(), plot_data['ocv'])
ax2.plot(self.get_fit_cycles(), plot_data['ir'])
for i in range(self.circuits):
ax3.plot(self.get_fit_cycles(), plot_data['r' + str(i)])
ax4.plot(self.get_fit_cycles(), plot_data['c' + str(i)])
plt.show() | Convenience function for plotting the summary of the
fit (translated) | entailment |
def set_cellpydata(self, cellpydata, cycle):
"""Performing fit of the OCV steps in the cycles set by set_cycles()
from the data set by set_data()
r is found by calculating v0 / i_start --> err(r)= err(v0) + err(i_start).
c is found from using tau / r --> err(c) = err(r) + err(tau)
Args:
cellpydata (CellpyData): data object from cellreader
cycle (int): cycle number to get from CellpyData object
Returns:
None
"""
self.data = cellpydata
self.step_table = self.data.dataset # hope it works...
time_voltage = self.data.get_ocv(direction='up',
cycles=cycle)
time = time_voltage.Step_Time
voltage = time_voltage.Voltage
self.time = np.array(time)
self.voltage = np.array(voltage) | Performing fit of the OCV steps in the cycles set by set_cycles()
from the data set by set_data()
r is found by calculating v0 / i_start --> err(r)= err(v0) + err(i_start).
c is found from using tau / r --> err(c) = err(r) + err(tau)
Args:
cellpydata (CellpyData): data object from cellreader
cycle (int): cycle number to get from CellpyData object
Returns:
None | entailment |
def run_fit(self):
"""Performing fit of the OCV steps in the cycles set by set_cycles()
from the data set by set_data()
r is found by calculating v0 / i_start --> err(r)= err(v0) + err(i_start).
c is found from using tau / r --> err(c) = err(r) + err(tau)
Returns:
None: Resulting best fit parameters are stored in self.result for the given cycles
"""
# Check if data is set
if len(self.time) == 0:
self.result = []
return
try:
self.fit_model()
except ValueError as e:
print(e)
except AttributeError as e:
print(e) | Performing fit of the OCV steps in the cycles set by set_cycles()
from the data set by set_data()
r is found by calculating v0 / i_start --> err(r)= err(v0) + err(i_start).
c is found from using tau / r --> err(c) = err(r) + err(tau)
Returns:
None: Resulting best fit parameters are stored in self.result for the given cycles | entailment |
def parse_navigation_html_to_tree(html, id):
"""Parse the given ``html`` (an etree object) to a tree.
The ``id`` is required in order to assign the top-level tree id value.
"""
def xpath(x):
return html.xpath(x, namespaces=HTML_DOCUMENT_NAMESPACES)
try:
value = xpath('//*[@data-type="binding"]/@data-value')[0]
is_translucent = value == 'translucent'
except IndexError:
is_translucent = False
if is_translucent:
id = TRANSLUCENT_BINDER_ID
tree = {'id': id,
'title': xpath('//*[@data-type="document-title"]/text()')[0],
'contents': [x for x in _nav_to_tree(xpath('//xhtml:nav')[0])]
}
return tree | Parse the given ``html`` (an etree object) to a tree.
The ``id`` is required in order to assign the top-level tree id value. | entailment |
def _nav_to_tree(root):
"""Given an etree containing a navigation document structure
rooted from the 'nav' element, parse to a tree:
{'id': <id>|'subcol', 'title': <title>, 'contents': [<tree>, ...]}
"""
def expath(e, x):
return e.xpath(x, namespaces=HTML_DOCUMENT_NAMESPACES)
for li in expath(root, 'xhtml:ol/xhtml:li'):
is_subtree = bool([e for e in li.getchildren()
if e.tag[e.tag.find('}')+1:] == 'ol'])
if is_subtree:
# It's a sub-tree and have a 'span' and 'ol'.
itemid = li.get('cnx-archive-uri', 'subcol')
shortid = li.get('cnx-archive-shortid')
yield {'id': itemid,
# Title is wrapped in a span, div or some other element...
'title': squash_xml_to_text(expath(li, '*')[0],
remove_namespaces=True),
'shortId': shortid,
'contents': [x for x in _nav_to_tree(li)],
}
else:
# It's a node and should only have an li.
a = li.xpath('xhtml:a', namespaces=HTML_DOCUMENT_NAMESPACES)[0]
yield {'id': a.get('href'),
'shortid': li.get('cnx-archive-shortid'),
'title': squash_xml_to_text(a, remove_namespaces=True)} | Given an etree containing a navigation document structure
rooted from the 'nav' element, parse to a tree:
{'id': <id>|'subcol', 'title': <title>, 'contents': [<tree>, ...]} | entailment |
def parse_resources(html):
"""Return a list of resource names found in the html metadata section."""
xpath = '//*[@data-type="resources"]//xhtml:li/xhtml:a'
for resource in html.xpath(xpath, namespaces=HTML_DOCUMENT_NAMESPACES):
yield {
'id': resource.get('href'),
'filename': resource.text.strip(),
} | Return a list of resource names found in the html metadata section. | entailment |
def on_connect(self, client, userdata, flags, rc):
'''
Callback for when the client receives a ``CONNACK`` response from the
broker.
Parameters
----------
client : paho.mqtt.client.Client
The client instance for this callback.
userdata : object
The private user data as set in :class:`paho.mqtt.client.Client`
constructor or :func:`paho.mqtt.client.Client.userdata_set`.
flags : dict
Response flags sent by the broker.
The flag ``flags['session present']`` is useful for clients that
are using clean session set to 0 only.
If a client with clean session=0, that reconnects to a broker that
it has previously connected to, this flag indicates whether the
broker still has the session information for the client.
If 1, the session still exists.
rc : int
The connection result.
The value of rc indicates success or not:
- 0: Connection successful
- 1: Connection refused - incorrect protocol version
- 2: Connection refused - invalid client identifier
- 3: Connection refused - server unavailable
- 4: Connection refused - bad username or password
- 5: Connection refused - not authorised
- 6-255: Currently unused.
Notes
-----
Subscriptions should be defined in this method to ensure subscriptions
will be renewed upon reconnecting after a loss of connection.
'''
super(SerialDeviceManager, self).on_connect(client, userdata, flags, rc)
if rc == 0:
self.mqtt_client.subscribe('serial_device/+/connect')
self.mqtt_client.subscribe('serial_device/+/send')
self.mqtt_client.subscribe('serial_device/+/close')
self.mqtt_client.subscribe('serial_device/refresh_comports')
self.refresh_comports() | Callback for when the client receives a ``CONNACK`` response from the
broker.
Parameters
----------
client : paho.mqtt.client.Client
The client instance for this callback.
userdata : object
The private user data as set in :class:`paho.mqtt.client.Client`
constructor or :func:`paho.mqtt.client.Client.userdata_set`.
flags : dict
Response flags sent by the broker.
The flag ``flags['session present']`` is useful for clients that
are using clean session set to 0 only.
If a client with clean session=0, that reconnects to a broker that
it has previously connected to, this flag indicates whether the
broker still has the session information for the client.
If 1, the session still exists.
rc : int
The connection result.
The value of rc indicates success or not:
- 0: Connection successful
- 1: Connection refused - incorrect protocol version
- 2: Connection refused - invalid client identifier
- 3: Connection refused - server unavailable
- 4: Connection refused - bad username or password
- 5: Connection refused - not authorised
- 6-255: Currently unused.
Notes
-----
Subscriptions should be defined in this method to ensure subscriptions
will be renewed upon reconnecting after a loss of connection. | entailment |
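As context for the note about renewing subscriptions, here is a minimal standalone sketch assuming the paho-mqtt 1.x callback API (the same four-argument `on_connect` signature used above); the broker address and topics are placeholders.

```python
import paho.mqtt.client as mqtt

def on_connect(client, userdata, flags, rc):
    if rc == 0:
        # (Re)subscribing here means the topics are restored automatically
        # whenever the client reconnects after a dropped connection.
        client.subscribe('serial_device/+/send')
        client.subscribe('serial_device/refresh_comports')

client = mqtt.Client()                   # paho-mqtt 1.x style client
client.on_connect = on_connect
# client.connect('localhost', 1883)      # hypothetical broker address
# client.loop_forever()
```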
def on_message(self, client, userdata, msg):
'''
Callback for when a ``PUBLISH`` message is received from the broker.
'''
if msg.topic == 'serial_device/refresh_comports':
self.refresh_comports()
return
match = CRE_MANAGER.match(msg.topic)
if match is None:
logger.debug('Topic NOT matched: `%s`', msg.topic)
else:
logger.debug('Topic matched: `%s`', msg.topic)
# Message topic matches command. Handle request.
command = match.group('command')
port = match.group('port')
# serial_device/<port>/send # Bytes to send
if command == 'send':
self._serial_send(port, msg.payload)
elif command == 'connect':
# serial_device/<port>/connect # Request connection
try:
request = json.loads(msg.payload)
except ValueError as exception:
logger.error('Error decoding "%s (%s)" request: %s',
command, port, exception)
return
self._serial_connect(port, request)
elif command == 'close':
self._serial_close(port) | Callback for when a ``PUBLISH`` message is received from the broker. | entailment |
def _publish_status(self, port):
'''
Publish status for specified port.
Parameters
----------
port : str
Device name/port.
'''
if port not in self.open_devices:
status = {}
else:
device = self.open_devices[port].serial
properties = ('port', 'baudrate', 'bytesize', 'parity', 'stopbits',
'timeout', 'xonxoff', 'rtscts', 'dsrdtr')
status = {k: getattr(device, k) for k in properties}
status_json = json.dumps(status)
self.mqtt_client.publish(topic='serial_device/%s/status' % port,
payload=status_json, retain=True) | Publish status for specified port.
Parameters
----------
port : str
Device name/port. | entailment |
def _serial_close(self, port):
'''
Handle close request.
Parameters
----------
port : str
Device name/port.
'''
if port in self.open_devices:
try:
self.open_devices[port].close()
except Exception as exception:
logger.error('Error closing device `%s`: %s', port, exception)
return
else:
logger.debug('Device not connected to `%s`', port)
self._publish_status(port)
return | Handle close request.
Parameters
----------
port : str
Device name/port. | entailment |
def _serial_connect(self, port, request):
'''
Handle connection request.
Parameters
----------
port : str
Device name/port.
request : dict
'''
# baudrate : int
# Baud rate such as 9600 or 115200 etc.
# bytesize : str, optional
# Number of data bits.
#
# Possible values: ``'FIVEBITS'``, ``'SIXBITS'``, ``'SEVENBITS'``,
# ``'EIGHTBITS'``.
#
# Default: ``'EIGHTBITS'``
# parity : str, optional
# Enable parity checking.
#
# Possible values: ``'PARITY_NONE'``, ``'PARITY_EVEN'``, ``'PARITY_ODD'``,
# ``'PARITY_MARK'``, ``'PARITY_SPACE'``.
#
# Default: ``'PARITY_NONE'``
# stopbits : str, optional
# Number of stop bits.
#
# Possible values: STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO
# xonxoff : bool, optional
# Enable software flow control.
#
# Default: ``False``
# rtscts : bool, optional
# Enable hardware (RTS/CTS) flow control.
#
# Default: ``False``
# dsrdtr : bool, optional
# Enable hardware (DSR/DTR) flow control.
#
# Default: ``False``
command = 'connect'
if port in self.open_devices:
logger.debug('Already connected to: `%s`', port)
self._publish_status(port)
return
# TODO Write JSON schema definition for valid connect request.
if 'baudrate' not in request:
logger.error('Invalid `%s` request: `baudrate` must be '
'specified.', command)
return
if 'bytesize' in request:
try:
bytesize = getattr(serial, request['bytesize'])
if not bytesize in serial.Serial.BYTESIZES:
logger.error('`%s` request: `bytesize` `%s` not '
'available on current platform.', command,
request['bytesize'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `bytesize`, `%s`', command,
request['bytesize'])
return
else:
bytesize = serial.EIGHTBITS
if 'parity' in request:
try:
parity = getattr(serial, request['parity'])
if not parity in serial.Serial.PARITIES:
logger.error('`%s` request: `parity` `%s` not available '
'on current platform.', command,
request['parity'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `parity`, `%s`', command,
request['parity'])
return
else:
parity = serial.PARITY_NONE
if 'stopbits' in request:
try:
stopbits = getattr(serial, request['stopbits'])
if not stopbits in serial.Serial.STOPBITS:
logger.error('`%s` request: `stopbits` `%s` not '
'available on current platform.', command,
request['stopbits'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `stopbits`, `%s`', command,
request['stopbits'])
return
else:
stopbits = serial.STOPBITS_ONE
try:
baudrate = int(request['baudrate'])
xonxoff = bool(request.get('xonxoff'))
rtscts = bool(request.get('rtscts'))
dsrdtr = bool(request.get('dsrdtr'))
except TypeError as exception:
logger.error('`%s` request: %s', command, exception)
return
try:
device = serial.serial_for_url(port, baudrate=baudrate,
bytesize=bytesize, parity=parity,
stopbits=stopbits, xonxoff=xonxoff,
rtscts=rtscts, dsrdtr=dsrdtr)
parent = self
class PassThroughProtocol(serial.threaded.Protocol):
PORT = port
def connection_made(self, transport):
"""Called when reader thread is started"""
parent.open_devices[port] = transport
parent._publish_status(self.PORT)
def data_received(self, data):
"""Called with snippets received from the serial port"""
parent.mqtt_client.publish(topic='serial_device/%s/received'
% self.PORT, payload=data)
def connection_lost(self, exception):
"""\
Called when the serial port is closed or the reader loop terminated
otherwise.
"""
if isinstance(exception, Exception):
logger.error('Connection to port `%s` lost: %s',
self.PORT, exception)
del parent.open_devices[self.PORT]
parent._publish_status(self.PORT)
reader_thread = serial.threaded.ReaderThread(device,
PassThroughProtocol)
reader_thread.start()
reader_thread.connect()
except Exception as exception:
logger.error('`%s` request: %s', command, exception)
return | Handle connection request.
Parameters
----------
port : str
Device name/port.
request : dict | entailment |
def _serial_send(self, port, payload):
'''
Send data to connected device.
Parameters
----------
port : str
Device name/port.
payload : bytes
Payload to send to device.
'''
if port not in self.open_devices:
# Not connected to device.
logger.error('Error sending data: `%s` not connected', port)
self._publish_status(port)
else:
try:
device = self.open_devices[port]
device.write(payload)
logger.debug('Sent data to `%s`', port)
except Exception as exception:
logger.error('Error sending data to `%s`: %s', port, exception) | Send data to connected device.
Parameters
----------
port : str
Device name/port.
payload : bytes
Payload to send to device. | entailment |
def print_datetime_object(dt):
"""prints a date-object"""
print(dt)
print('ctime :', dt.ctime())
print('tuple :', dt.timetuple())
print('ordinal:', dt.toordinal())
print('Year :', dt.year)
print('Mon :', dt.month)
print('Day :', dt.day) | prints a date-object | entailment |
def get_raw_limits(self):
"""Include the settings for how to decide what kind of step you are examining here.
The raw limits are 'epsilons' used to check if the current and/or voltage is stable (for example
for galvanostatic steps, one would expect that the current is stable (constant) and non-zero).
It is expected that different instruments (with different resolution etc.) have different
'epsilons'.
Returns: the raw limits (dict)
"""
warnings.warn("raw limits have not been subject for testing yet")
raw_limits = dict()
raw_limits["current_hard"] = 0.1 # There is a bug in PEC
raw_limits["current_soft"] = 1.0
raw_limits["stable_current_hard"] = 2.0
raw_limits["stable_current_soft"] = 4.0
raw_limits["stable_voltage_hard"] = 2.0
raw_limits["stable_voltage_soft"] = 4.0
raw_limits["stable_charge_hard"] = 2.0
raw_limits["stable_charge_soft"] = 5.0
raw_limits["ir_change"] = 0.00001
return raw_limits | Include the settings for how to decide what kind of step you are examining here.
The raw limits are 'epsilons' used to check if the current and/or voltage is stable (for example
for galvanostatic steps, one would expect that the current is stable (constant) and non-zero).
It is expected that different instruments (with different resolution etc.) have different
'epsilons'.
Returns: the raw limits (dict) | entailment |
def check64bit(current_system="python"):
"""checks if you are on a 64 bit platform"""
if current_system == "python":
return sys.maxsize > 2147483647
elif current_system == "os":
import platform
pm = platform.machine()
if pm != ".." and pm.endswith('64'): # recent Python (not Iron)
return True
else:
if 'PROCESSOR_ARCHITEW6432' in os.environ:
return True # 32 bit program running on 64 bit Windows
try:
# 64 bit Windows 64 bit program
return os.environ['PROCESSOR_ARCHITECTURE'].endswith('64')
except KeyError:
pass # not Windows
try:
# this often works in Linux
return '64' in platform.architecture()[0]
except Exception:
# is an older version of Python, assume also an older os
# (best we can guess)
return False | checks if you are on a 64 bit platform | entailment |
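A compressed sketch of the two checks above, using only the standard library; note that the string returned by platform.machine() varies across systems, which is why the original falls back to environment variables on Windows.

```python
import sys
import platform

# is the Python interpreter itself 64-bit?
python_is_64bit = sys.maxsize > 2147483647

# is the operating system / machine 64-bit? (naming varies, e.g. 'x86_64', 'AMD64')
os_is_64bit = platform.machine().endswith('64')

print(python_is_64bit, os_is_64bit)
```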
def humanize_bytes(b, precision=1):
"""Return a humanized string representation of a number of b.
Assumes `from __future__ import division`.
>>> humanize_bytes(1)
'1 byte'
>>> humanize_bytes(1024)
'1.0 kB'
>>> humanize_bytes(1024*123)
'123.0 kB'
>>> humanize_bytes(1024*12342)
'12.1 MB'
>>> humanize_bytes(1024*12342,2)
'12.05 MB'
>>> humanize_bytes(1024*1234,2)
'1.21 MB'
>>> humanize_bytes(1024*1234*1111,2)
'1.31 GB'
>>> humanize_bytes(1024*1234*1111,1)
'1.3 GB'
"""
# abbrevs = (
# (1 << 50L, 'PB'),
# (1 << 40L, 'TB'),
# (1 << 30L, 'GB'),
# (1 << 20L, 'MB'),
# (1 << 10L, 'kB'),
# (1, 'b')
# )
abbrevs = (
(1 << 50, 'PB'),
(1 << 40, 'TB'),
(1 << 30, 'GB'),
(1 << 20, 'MB'),
(1 << 10, 'kB'),
(1, 'b')
)
if b == 1:
return '1 byte'
for factor, suffix in abbrevs:
if b >= factor:
break
# return '%.*f %s' % (precision, old_div(b, factor), suffix)
return '%.*f %s' % (precision, b / factor, suffix) | Return a humanized string representation of a number of bytes, b.
Assumes `from __future__ import division`.
>>> humanize_bytes(1)
'1 byte'
>>> humanize_bytes(1024)
'1.0 kB'
>>> humanize_bytes(1024*123)
'123.0 kB'
>>> humanize_bytes(1024*12342)
'12.1 MB'
>>> humanize_bytes(1024*12342,2)
'12.05 MB'
>>> humanize_bytes(1024*1234,2)
'1.21 MB'
>>> humanize_bytes(1024*1234*1111,2)
'1.31 GB'
>>> humanize_bytes(1024*1234*1111,1)
'1.3 GB' | entailment |
def xldate_as_datetime(xldate, datemode=0, option="to_datetime"):
"""Converts a xls date stamp to a more sensible format.
Args:
xldate (str): date stamp in Excel format.
datemode (int): 0 for 1900-based, 1 for 1904-based.
option (str): option in ("to_datetime", "to_float", "to_string"),
return value
Returns:
datetime (datetime object, float, or string).
"""
# This does not work for numpy-arrays
if option == "to_float":
d = (xldate - 25589) * 86400.0
else:
try:
d = datetime.datetime(1899, 12, 30) + \
datetime.timedelta(days=xldate + 1462 * datemode)
# date_format = "%Y-%m-%d %H:%M:%S:%f" # with microseconds,
# excel cannot cope with this!
if option == "to_string":
date_format = "%Y-%m-%d %H:%M:%S" # without microseconds
d = d.strftime(date_format)
except TypeError:
logging.info(f'The date is not of correct type [{xldate}]')
d = xldate
return d | Converts a xls date stamp to a more sensible format.
Args:
xldate (str): date stamp in Excel format.
datemode (int): 0 for 1900-based, 1 for 1904-based.
option (str): option in ("to_datetime", "to_float", "to_string"),
return value
Returns:
datetime (datetime object, float, or string). | entailment |
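The 1900-date-system conversion above hinges on the fact that Excel serial dates count days from 1899-12-30. A tiny standalone check with a made-up serial number:

```python
import datetime

excel_serial = 43831.5   # made-up value; 2020-01-01 12:00 in the 1900 date system
dt = datetime.datetime(1899, 12, 30) + datetime.timedelta(days=excel_serial)
print(dt)                # 2020-01-01 12:00:00
```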
def populate(self, filename):
"""Finds the file-stats and populates the class with stat values.
Args:
filename (str): name of the file.
"""
if os.path.isfile(filename):
fid_st = os.stat(filename)
self.name = os.path.abspath(filename)
self.full_name = filename
self.size = fid_st.st_size
self.last_modified = fid_st.st_mtime
self.last_accessed = fid_st.st_atime
self.last_info_changed = fid_st.st_ctime
self.location = os.path.dirname(filename) | Finds the file-stats and populates the class with stat values.
Args:
filename (str): name of the file. | entailment |
def get_raw(self):
"""Get a list with information about the file.
The returned list contains name, size, last_modified and location.
"""
return [self.name, self.size, self.last_modified, self.location] | Get a list with information about the file.
The returned list contains name, size, last_modified and location. | entailment |
def dfsummary_made(self):
"""check if the summary table exists"""
try:
empty = self.dfsummary.empty
except AttributeError:
empty = True
return not empty | check if the summary table exists | entailment |
def step_table_made(self):
"""check if the step table exists"""
try:
empty = self.step_table.empty
except AttributeError:
empty = True
return not empty | check if the step table exists | entailment |
def _open_sheet(self, dtypes_dict=None):
"""Opens sheets and returns it"""
table_name = self.db_sheet_table
header_row = self.db_header_row
nrows = self.nrows
if dtypes_dict is None:
dtypes_dict = self.dtypes_dict
rows_to_skip = self.skiprows
logging.debug(f"Trying to open the file {self.db_file}")
logging.debug(f"Number of rows: {nrows}")
logging.debug(f"Skipping the following rows: {rows_to_skip}")
logging.debug(f"Declaring the following dtyps: {dtypes_dict}")
work_book = pd.ExcelFile(self.db_file)
try:
sheet = work_book.parse(
table_name, header=header_row, skiprows=rows_to_skip,
dtype=dtypes_dict, nrows=nrows,
)
except ValueError as e:
logging.debug("Could not parse all the columns (ValueError) "
"using given dtypes. Trying without dtypes.")
logging.debug(str(e))
sheet = work_book.parse(
table_name, header=header_row, skiprows=rows_to_skip,
nrows=nrows,
)
return sheet | Opens sheets and returns it | entailment |
def _validate(self):
"""Checks that the db-file is ok
Returns:
True if OK, False if not.
"""
probably_good_to_go = True
sheet = self.table
identity = self.db_sheet_cols.id
# check if you have unique srnos
id_col = sheet.loc[:, identity]
if any(id_col.duplicated()):
warnings.warn(
"your database is corrupt: duplicates"
" encountered in the srno-column")
logger.debug("srno duplicates:\n" + str(
id_col.duplicated()))
probably_good_to_go = False
return probably_good_to_go | Checks that the db-file is ok
Returns:
True if OK, False if not. | entailment |
def select_serial_number_row(self, serial_number):
"""Select row for identification number serial_number
Args:
serial_number: serial number
Returns:
pandas.DataFrame
"""
sheet = self.table
col = self.db_sheet_cols.id
rows = sheet.loc[:, col] == serial_number
return sheet.loc[rows, :] | Select row for identification number serial_number
Args:
serial_number: serial number
Returns:
pandas.DataFrame | entailment |
def select_all(self, serial_numbers):
"""Select rows for identification for a list of serial_number.
Args:
serial_numbers: list (or ndarray) of serial numbers
Returns:
pandas.DataFrame
"""
sheet = self.table
col = self.db_sheet_cols.id
rows = sheet.loc[:, col].isin(serial_numbers)
return sheet.loc[rows, :] | Select rows for identification for a list of serial_number.
Args:
serial_numbers: list (or ndarray) of serial numbers
Returns:
pandas.DataFrame | entailment |
def print_serial_number_info(self, serial_number, print_to_screen=True):
"""Print information about the run.
Args:
serial_number: serial number.
print_to_screen: runs the print statement if True,
returns txt if not.
Returns:
txt if print_to_screen is False, else None.
"""
r = self.select_serial_number_row(serial_number)
if r.empty:
warnings.warn("missing serial number")
return
txt1 = 80 * "="
txt1 += "\n"
txt1 += f" serial number {serial_number}\n"
txt1 = 80 * "-"
txt1 += "\n"
txt2 = ""
for label, value in zip(r.columns, r.values[0]):
if label in self.headers:
txt1 += f"{label}: \t {value}\n"
else:
txt2 += f"({label}: \t {value})\n"
if print_to_screen:
print(txt1)
print(80 * "-")
print(txt2)
print(80 * "=")
return
else:
return txt1 | Print information about the run.
Args:
serial_number: serial number.
print_to_screen: runs the print statement if True,
returns txt if not.
Returns:
txt if print_to_screen is False, else None. | entailment |
def filter_by_slurry(self, slurry, appender="_"):
"""Filters sheet/table by slurry name.
Input is slurry name or list of slurry names, for example 'es030' or
["es012","es033","es031"].
Args:
slurry (str or list of strings): slurry names.
appender (chr): char that surrounds slurry names.
Returns:
List of serial_number (ints).
"""
sheet = self.table
identity = self.db_sheet_cols.id
exists = self.db_sheet_cols.exists
cellname = self.db_sheet_cols.cell_name
search_string = ""
if not isinstance(slurry, (list, tuple)):
slurry = [slurry, ]
first = True
for slur in slurry:
s_s = appender + slur + appender
if first:
search_string = s_s
first = False
else:
search_string += "|"
search_string += s_s
criterion = sheet.loc[:, cellname].str.contains(
search_string
)
exists = sheet.loc[:, exists] > 0
sheet = sheet[criterion & exists]
return sheet.loc[:, identity].values.astype(int) | Filters sheet/table by slurry name.
Input is slurry name or list of slurry names, for example 'es030' or
["es012","es033","es031"].
Args:
slurry (str or list of strings): slurry names.
appender (chr): char that surrounds slurry names.
Returns:
List of serial_number (ints). | entailment |
def filter_by_col(self, column_names):
"""filters sheet/table by columns (input is column header)
The routine returns the serial numbers with values>1 in the selected
columns.
Args:
column_names (list): the column headers.
Returns:
pandas.DataFrame
"""
if not isinstance(column_names, (list, tuple)):
column_names = [column_names, ]
sheet = self.table
identity = self.db_sheet_cols.id
exists = self.db_sheet_cols.exists
criterion = True
for column_name in column_names:
_criterion = sheet.loc[:, column_name] > 0
_exists = sheet.loc[:, exists] > 0
criterion = criterion & _criterion & _exists
return sheet.loc[criterion, identity].values.astype(int) | filters sheet/table by columns (input is column header)
The routine returns the serial numbers with values>1 in the selected
columns.
Args:
column_names (list): the column headers.
Returns:
pandas.DataFrame | entailment |
def filter_by_col_value(self, column_name,
min_val=None, max_val=None):
"""filters sheet/table by column.
The routine returns the serial-numbers with min_val <= values >= max_val
in the selected column.
Args:
column_name (str): column name.
min_val (int): minimum value of serial number.
max_val (int): maximum value of serial number.
Returns:
pandas.DataFrame
"""
sheet = self.table
identity = self.db_sheet_cols.id
exists_col_number = self.db_sheet_cols.exists
exists = sheet.loc[:, exists_col_number] > 0
if min_val is not None and max_val is not None:
criterion1 = sheet.loc[:, column_name] >= min_val
criterion2 = sheet.loc[:, column_name] <= max_val
sheet = sheet[criterion1 & criterion2 & exists]
elif min_val is not None or max_val is not None:
if min_val is not None:
criterion = sheet.loc[:, column_name] >= min_val
if max_val is not None:
criterion = sheet.loc[:, column_name] <= max_val
# noinspection PyUnboundLocalVariable
sheet = sheet[criterion & exists]
else:
sheet = sheet[exists]
return sheet.loc[:, identity].values.astype(int) | filters sheet/table by column.
The routine returns the serial-numbers with min_val <= values >= max_val
in the selected column.
Args:
column_name (str): column name.
min_val (int): minimum value of serial number.
max_val (int): maximum value of serial number.
Returns:
pandas.DataFrame | entailment |
def select_batch(self, batch, batch_col_name=None):
"""selects the rows in column batch_col_number
(default: DbSheetCols.batch)"""
if not batch_col_name:
batch_col_name = self.db_sheet_cols.batch
logger.debug("selecting batch - %s" % batch)
sheet = self.table
identity = self.db_sheet_cols.id
exists_col_number = self.db_sheet_cols.exists
criterion = sheet.loc[:, batch_col_name] == batch
exists = sheet.loc[:, exists_col_number] > 0
# This will crash if the col is not of dtype number
sheet = sheet[criterion & exists]
return sheet.loc[:, identity].values.astype(int) | selects the rows in column batch_col_number
(default: DbSheetCols.batch) | entailment |
def main(argv=None):
"""Parse passed in cooked single HTML."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('collated_html', type=argparse.FileType('r'),
help='Path to the collated html'
' file (use - for stdin)')
parser.add_argument('-d', '--dump-tree', action='store_true',
help='Print out parsed model tree.')
parser.add_argument('-o', '--output', type=argparse.FileType('w+'),
help='Write out epub of parsed tree.')
parser.add_argument('-i', '--input', type=argparse.FileType('r'),
help='Read and copy resources/ for output epub.')
args = parser.parse_args(argv)
if args.input and args.output == sys.stdout:
raise ValueError('Cannot output to stdout if reading resources')
from cnxepub.collation import reconstitute
binder = reconstitute(args.collated_html)
if args.dump_tree:
print(pformat(cnxepub.model_to_tree(binder)),
file=sys.stdout)
if args.output:
cnxepub.adapters.make_epub(binder, args.output)
if args.input:
args.output.seek(0)
zout = ZipFile(args.output, 'a', ZIP_DEFLATED)
zin = ZipFile(args.input, 'r')
for res in zin.namelist():
if res.startswith('resources'):
zres = zin.open(res)
zi = zin.getinfo(res)
zout.writestr(zi, zres.read(), ZIP_DEFLATED)
zout.close()
# TODO Check for documents that have no identifier.
# These should likely be composite-documents
# or the metadata got wiped out.
# docs = [x for x in cnxepub.flatten_to(binder, only_documents_filter)
# if x.ident_hash is None]
return 0 | Parse passed in cooked single HTML. | entailment |
def success(channel, image, hex_str):
"""
Creates an embed UI containing a hex color message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
image (str): The url of the image to add
hex_str (str): The hex value
Returns:
ui (ui_embed.UI): The embed UI object that was created
"""
hex_number = int(hex_str, 16)
# Create embed UI object
gui = ui_embed.UI(
channel,
"",
"#{}".format(hex_str),
modulename=modulename,
colour=hex_number,
thumbnail=image,
)
return gui | Creates an embed UI containing a hex color message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
image (str): The url of the image to add
hex_str (str): The hex value
Returns:
ui (ui_embed.UI): The embed UI object that was created | entailment |
def add_md(text, s, level=0):
"""Adds text to the readme at the given level"""
if level > 0:
if text != "":
text += "\n"
text += "#" * level
text += " "
text += s + "\n"
if level > 0:
text += "\n"
return text | Adds text to the readme at the given level | entailment |
def add_ul(text, ul):
"""Adds an unordered list to the readme"""
text += "\n"
for li in ul:
text += "- " + li + "\n"
text += "\n"
return text | Adds an unordered list to the readme | entailment |
def make_editions_dict(editions):
"""Take a reporter editions dict and flatten it, returning a dict for
use in the DictWriter.
"""
d = {}
nums = ['1', '2', '3', '4', '5', '6']
num_counter = 0
for k, date_dict in editions.items():
d['edition%s' % nums[num_counter]] = k
if date_dict['start'] is not None:
d['start_e%s' % nums[num_counter]] = date_dict['start'].isoformat()
if date_dict['end'] is not None:
d['end_e%s' % nums[num_counter]] = date_dict['end'].isoformat()
num_counter += 1
return d | Take a reporter editions dict and flatten it, returning a dict for
use in the DictWriter. | entailment |
async def on_message(message):
"""The on_message event handler for this module
Args:
message (discord.Message): Input message
"""
# Simplify message info
server = message.server
author = message.author
channel = message.channel
content = message.content
data = datatools.get_data()
if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
return
# Only reply to server messages and don't reply to myself
if server is not None and author != channel.server.me:
# Commands section
prefix = data["discord"]["servers"][server.id]["prefix"]
if content.startswith(prefix):
# Parse message
package = content.split(" ")
command = package[0][len(prefix):]
args = package[1:]
alias_steam = ["steam", "pc"]
alias_ps = ["ps", "psn", "playstation", "ps4", "playstation 4"]
alias_xbox = ["xbox", "xb", "xb1", "xbone", "xbox one", "xbox one"]
platform = "steam"
if len(args) > 0:
player_name = args[0]
else:
return
if len(args) > 1:
platform = ' '.join(args[1:]).lower()
if platform in alias_steam:
platform = "steam"
elif platform in alias_ps:
platform = "ps"
elif platform in alias_xbox:
platform = "xbox"
# Commands
if command == 'rlstats':
await client.send_typing(channel)
# Get Rocket League stats from stats API
success, rldata = api_rocketleaguestats.check_rank(player_name, platform)
# Create embed UI
if success:
embed = ui_embed.success(channel, rldata[0], rldata[1], rldata[2], rldata[3])
else:
embed = ui_embed.fail_api(channel)
await embed.send() | The on_message event handler for this module
Args:
message (discord.Message): Input message | entailment |
def update_keys(self):
"""Updates the Google API key with the text value"""
from ...main import add_api_key
add_api_key("google_api_key", self.google_api_key.get())
add_api_key("soundcloud_client_id", self.soundcloud_client_id.get()) | Updates the Google API key with the text value | entailment |
def modify_module(channel, module_name, module_state):
"""
Creates an embed UI containing the module modified message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
module_name (str): The name of the module that was updated
module_state (bool): The current state of the module
Returns:
embed: The created embed
"""
# Create embed UI object
gui = ui_embed.UI(
channel,
"{} updated".format(module_name),
"{} is now {}".format(module_name, "activated" if module_state else "deactivated"),
modulename=modulename
)
return gui | Creates an embed UI containing the module modified message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
module_name (str): The name of the module that was updated
module_state (bool): The current state of the module
Returns:
embed: The created embed | entailment |
def modify_prefix(channel, new_prefix):
"""
Creates an embed UI containing the prefix modified message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
new_prefix (str): The value of the new prefix
Returns:
embed: The created embed
"""
# Create embed UI object
gui = ui_embed.UI(
channel,
"Prefix updated",
"Modis prefix is now `{}`".format(new_prefix),
modulename=modulename
)
return gui | Creates an embed UI containing the prefix modified message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
new_prefix (str): The value of the new prefix
Returns:
embed: The created embed | entailment |
def user_warning(channel, user, warnings, max_warnings):
"""
Creates an embed UI containing a user warning message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
user (discord.User): The user to warn
warnings (str): The warnings for the user
max_warnings (str): The maximum warnings for the user
Returns:
ui (ui_embed.UI): The embed UI object
"""
username = user.name
if isinstance(user, discord.Member):
if user.nick is not None:
username = user.nick
warning_count_text = "warnings" if warnings != 1 else "warning"
warning_text = "{} {}".format(warnings, warning_count_text)
result_text = "at {} you will be banned".format(max_warnings)
if warnings >= max_warnings:
result_text = "you are being banned because you have more than the maximum warnings"
# Create embed UI object
gui = ui_embed.UI(
channel,
"Warning {}".format(username),
"You now have {} {}, {}".format(warning_text, username, result_text),
modulename=modulename
)
return gui | Creates an embed UI containing a user warning message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
user (discord.User): The user to warn
warnings (str): The warnings for the user
max_warnings (str): The maximum warnings for the user
Returns:
ui (ui_embed.UI): The embed UI object | entailment |
def user_ban(channel, user):
"""
Creates an embed UI containing a user warning message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
user (discord.User): The user to ban
Returns:
ui (ui_embed.UI): The embed UI object
"""
username = user.name
if isinstance(user, discord.Member):
if user.nick is not None:
username = user.nick
# Create embed UI object
gui = ui_embed.UI(
channel,
"Banned {}".format(username),
"{} has been banned from this server".format(username),
modulename=modulename
)
return gui | Creates an embed UI containing a user warning message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
user (discord.User): The user to ban
Returns:
ui (ui_embed.UI): The embed UI object | entailment |
def warning_max_changed(channel, max_warnings):
"""
Creates an embed UI containing an error message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
max_warnings (int): The new maximum warnings
Returns:
ui (ui_embed.UI): The embed UI object
"""
# Create embed UI object
gui = ui_embed.UI(
channel,
"Maximum Warnings Changed",
"Users must now have {} warnings to be banned "
"(this won't ban existing users with warnings)".format(max_warnings),
modulename=modulename
)
return gui | Creates an embed UI containing an error message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
max_warnings (int): The new maximum warnings
Returns:
ui (ui_embed.UI): The embed UI object | entailment |
def error(channel, title, description):
"""
Creates an embed UI containing an error message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
title (str): The title of the embed
description (str): The description for the error
Returns:
ui (ui_embed.UI): The embed UI object
"""
# Create embed UI object
gui = ui_embed.UI(
channel,
title,
description,
modulename=modulename
)
return gui | Creates an embed UI containing an error message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
title (str): The title of the embed
description (str): The description for the error
Returns:
ui (ui_embed.UI): The embed UI object | entailment |
async def update_server_data(server):
"""
Updates the server info for the given server
Args:
server: The Discord server to update info for
"""
data = datatools.get_data()
# Add the server to server data if it doesn't yet exist
send_welcome_message = False
if server.id not in data["discord"]["servers"]:
logger.debug("Adding new server to serverdata")
data["discord"]["servers"][server.id] = {"prefix": "!"}
if "mute_intro" not in data or not data["mute_intro"]:
send_welcome_message = True
# Make sure all modules are in the server
_dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
_dir_modules = "{}/../".format(_dir)
for module_name in os.listdir(_dir_modules):
if module_name.startswith("_") or module_name.startswith("!"):
continue
if not os.path.isfile("{}/{}/_data.py".format(_dir_modules, module_name)):
logger.warning("No _data.py file found for module {}".format(module_name))
continue
try:
import_name = ".discord_modis.modules.{}.{}".format(module_name, "_data")
_data = importlib.import_module(import_name, "modis")
if _data.modulename not in data["discord"]["servers"][server.id]:
data["discord"]["servers"][server.id][_data.modulename] = _data.sd_structure
datatools.write_data(data)
except Exception as e:
logger.error("Could not initialise module {}".format(module_name))
logger.exception(e)
datatools.write_data(data)
# Send a welcome message now
if send_welcome_message:
default_channel = server.default_channel
if not default_channel:
for channel in server.channels:
if channel.name == "general":
default_channel = channel
break
if not default_channel:
for channel in server.channels:
if "general" in channel.name:
default_channel = channel
break
if not default_channel:
for channel in server.channels:
if channel.type == discord.ChannelType.text:
default_channel = channel
break
# Display a welcome message
if default_channel:
hello_message = "Hello! I'm Modis.\n\n" + \
"The prefix is currently `!`, and can be changed at any time using `!prefix`\n\n" + \
"You can use `!help` to get help commands for all modules, " + \
"or {} me to get the server prefix and help commands.".format(server.me.mention)
await client.send_message(default_channel, hello_message) | Updates the server info for the given server
Args:
server: The Discord server to update info for | entailment |
def remove_server_data(server_id):
"""
Remove a server from the server data
Args:
server_id (int): The server to remove from the server data
"""
logger.debug("Removing server from serverdata")
# Remove the server from data
data = datatools.get_data()
if server_id in data["discord"]["servers"]:
data["discord"]["servers"].pop(server_id)
datatools.write_data(data) | Remove a server from the server data
Args:
server_id (int): The server to remove from the server data | entailment |
def check_all_servers():
"""Checks all servers, removing any that Modis isn't part of any more"""
data = datatools.get_data()
for server_id in data["discord"]["servers"]:
is_in_client = False
for client_server in client.servers:
if server_id == client_server.id:
is_in_client = True
break
if not is_in_client:
remove_server_data(server_id) | Checks all servers, removing any that Modis isn't part of any more | entailment |
def clear_modules(self):
"""Clears all modules from the list"""
for child in self.module_selection.winfo_children():
child.destroy()
self.clear_ui()
tk.Label(self.module_ui, text="Start Modis and select a module").grid(
column=0, row=0, padx=0, pady=0, sticky="W E N S")
if self.current_button is not None:
self.current_button.config(bg="white")
self.module_buttons = {}
self.current_button = None | Clears all modules from the list | entailment |
def add_module(self, module_name, module_ui):
"""
Adds a module to the list
Args:
module_name (str): The name of the module
module_ui: The function to call to create the module's UI
"""
m_button = tk.Label(self.module_selection, text=module_name, bg="white", anchor="w")
m_button.grid(column=0, row=len(self.module_selection.winfo_children()), padx=0, pady=0, sticky="W E N S")
self.module_buttons[module_name] = m_button
m_button.bind("<Button-1>", lambda e: self.module_selected(module_name, module_ui)) | Adds a module to the list
Args:
module_name (str): The name of the module
module_ui: The function to call to create the module's UI | entailment |
def module_selected(self, module_name, module_ui):
"""
Called when a module is selected
Args:
module_name (str): The name of the module
module_ui: The function to call to create the module's UI
"""
if self.current_button == self.module_buttons[module_name]:
return
self.module_buttons[module_name].config(bg="#cacaca")
if self.current_button is not None:
self.current_button.config(bg="white")
self.current_button = self.module_buttons[module_name]
self.clear_ui()
try:
# Create the UI
module_ui_frame = ModuleUIBaseFrame(self.module_ui, module_name, module_ui)
module_ui_frame.grid(column=0, row=0, sticky="W E N S")
except Exception as e:
logger.error("Could not load UI for {}".format(module_name))
logger.exception(e)
# Create an error UI
tk.Label(self.module_ui, text="Could not load UI for {}".format(module_name)).grid(
column=0, row=0, padx=0, pady=0, sticky="W E N S") | Called when a module is selected
Args:
module_name (str): The name of the module
module_ui: The function to call to create the module's UI | entailment |
def toggle(self, discord_token, discord_client_id):
"""Toggles Modis on or off"""
if self.state == 'off':
self.start(discord_token, discord_client_id)
elif self.state == 'on':
self.stop() | Toggles Modis on or off | entailment |
def start(self, discord_token, discord_client_id):
"""Start Modis and log it into Discord."""
self.button_toggle_text.set("Stop Modis")
self.state = "on"
self.status_bar.set_status(1)
logger.info("----------------STARTING DISCORD MODIS----------------")
# Clear the module list
self.module_frame.clear_modules()
# Start Modis
from modis.discord_modis import main
logger.debug("Creating event loop")
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self.discord_thread = threading.Thread(
target=main.start,
args=[discord_token, discord_client_id, loop, self.on_ready])
logger.debug("Starting event loop")
self.discord_thread.start()
# Find module UIs
database_dir = "{}/modules".format(
os.path.dirname(os.path.realpath(__file__)))
for module_name in os.listdir(database_dir):
module_dir = "{}/{}".format(database_dir, module_name)
# Iterate through files in module
if os.path.isdir(module_dir) and not module_name.startswith("_"):
# Add all defined event handlers in module files
module_event_handlers = os.listdir(module_dir)
if "_ui.py" in module_event_handlers:
import_name = ".discord_modis.modules.{}.{}".format(
module_name, "_ui")
logger.debug(
"Found module UI file {}".format(import_name[23:]))
self.module_frame.add_module(module_name, importlib.import_module(import_name, "modis"))
else:
self.module_frame.add_module(module_name, None) | Start Modis and log it into Discord. | entailment |
def stop(self):
"""Stop Modis and log it out of Discord."""
self.button_toggle_text.set("Start Modis")
self.state = "off"
logger.info("Stopping Discord Modis")
from ._client import client
asyncio.run_coroutine_threadsafe(client.logout(), client.loop)
self.status_bar.set_status(0) | Stop Modis and log it out of Discord. | entailment |
def key_changed(self):
"""Checks if the key name and value fields have been set, and updates the add key button"""
if self.key_name.get() and self.key_val.get():
self.button_key_add.state(["!disabled"])
else:
self.button_key_add.state(["disabled"]) | Checks if the key name and value fields have been set, and updates the add key button | entailment |
def key_add(self):
"""Adds the current API key to the bot's data"""
from .main import add_api_key
add_api_key(self.key_name.get(), self.key_val.get())
# Clear the text fields
self.key_name.set("")
self.key_val.set("") | Adds the current API key to the bot's data | entailment |
def set_status(self, status):
"""
Updates the status text
Args:
status (int): The offline/starting/online status of Modis
0: offline, 1: starting, 2: online
"""
text = ""
colour = "#FFFFFF"
if status == 0:
text = "OFFLINE"
colour = "#EF9A9A"
elif status == 1:
text = "STARTING"
colour = "#FFE082"
elif status == 2:
text = "ONLINE"
colour = "#A5D6A7"
self.status.set(text)
self.statusbar.config(background=colour) | Updates the status text
Args:
status (int): The offline/starting/online status of Modis
0: offline, 1: starting, 2: online | entailment |
def get_help_data(filepath):
"""
Get the json data from a help file
Args:
filepath (str): The file path for the help file
Returns:
data: The json data from a help file
"""
try:
with open(filepath, 'r') as file:
return _json.load(file, object_pairs_hook=OrderedDict)
except Exception as e:
logger.error("Could not load file {}".format(filepath))
logger.exception(e)
return {} | Get the json data from a help file
Args:
filepath (str): The file path for the help file
Returns:
data: The json data from a help file | entailment |
def get_help_datapacks(filepath, prefix="!"):
"""
Load help text from a file and give it as datapacks
Args:
filepath (str): The file to load help text from
prefix (str): The prefix to use for commands
Returns:
datapacks (list): The datapacks from the file
"""
help_contents = get_help_data(filepath)
datapacks = []
# Add the content
for d in help_contents:
heading = d
content = ""
if "commands" in d.lower():
for c in help_contents[d]:
if "name" not in c:
continue
content += "- `"
command = prefix + c["name"]
content += "{}".format(command)
if "params" in c:
for param in c["params"]:
content += " [{}]".format(param)
content += "`: "
if "description" in c:
content += c["description"]
content += "\n"
else:
content += help_contents[d]
datapacks.append((heading, content, False))
return datapacks | Load help text from a file and give it as datapacks
Args:
filepath (str): The file to load help text from
prefix (str): The prefix to use for commands
Returns:
datapacks (list): The datapacks from the file | entailment |
def add_help_text(parent, filepath, prefix="!"):
"""
Load help text from a file and adds it to the parent
Args:
parent: A tk or ttk object
filepath (str): The file to load help text from
prefix (str): The prefix to use for commands
"""
import tkinter as tk
import tkinter.ttk as ttk
help_contents = get_help_data(filepath)
text = tk.Text(parent, wrap='word', font=("Helvetica", 10))
text.grid(row=0, column=0, sticky="W E N S")
text.tag_config("heading", font=("Helvetica", 14))
text.tag_config("command", font=("Courier", 10))
text.tag_config("param", font=("Courier", 10))
text.tag_config("description")
# Vertical Scrollbar
scrollbar = ttk.Scrollbar(parent, orient="vertical", command=text.yview)
scrollbar.grid(column=1, row=0, sticky="N S")
text['yscrollcommand'] = scrollbar.set
# Add the content
for d in help_contents:
text.insert('end', d, "heading")
text.insert('end', '\n')
if "commands" in d.lower():
for c in help_contents[d]:
if "name" not in c:
continue
command = prefix + c["name"]
text.insert('end', command, ("command", "description"))
if "params" in c:
for param in c["params"]:
text.insert('end', " [{}]".format(param), ("param", "description"))
text.insert('end', ": ")
if "description" in c:
text.insert('end', c["description"], "description")
text.insert('end', '\n')
text.insert('end', '\n')
else:
text.insert('end', help_contents[d], "description")
text.insert('end', '\n\n')
text.config(state=tk.DISABLED) | Load help text from a file and adds it to the parent
Args:
parent: A tk or ttk object
filepath (str): The file to load help text from
prefix (str): The prefix to use for commands | entailment |
async def on_reaction_add(reaction, user):
"""The on_message event handler for this module
Args:
reaction (discord.Reaction): Input reaction
user (discord.User): The user that added the reaction
"""
# Simplify reaction info
server = reaction.message.server
emoji = reaction.emoji
data = datatools.get_data()
if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
return
# Commands section
if user != reaction.message.channel.server.me:
if server.id not in _data.cache or _data.cache[server.id].state == 'destroyed':
return
try:
valid_reaction = reaction.message.id == _data.cache[server.id].embed.sent_embed.id
except AttributeError:
pass
else:
if valid_reaction:
# Remove reaction
try:
await client.remove_reaction(reaction.message, emoji, user)
except discord.errors.NotFound:
pass
except discord.errors.Forbidden:
pass
# Commands
if emoji == "⏯":
await _data.cache[server.id].toggle()
if emoji == "⏹":
await _data.cache[server.id].stop()
if emoji == "⏭":
await _data.cache[server.id].skip("1")
if emoji == "⏮":
await _data.cache[server.id].rewind("1")
if emoji == "🔀":
await _data.cache[server.id].shuffle()
if emoji == "🔉":
await _data.cache[server.id].setvolume('-')
if emoji == "🔊":
await _data.cache[server.id].setvolume('+') | The on_message event handler for this module
Args:
reaction (discord.Reaction): Input reaction
user (discord.User): The user that added the reaction | entailment |
def console(discord_token, discord_client_id):
"""
Start Modis in console format.
Args:
discord_token (str): The bot token for your Discord application
discord_client_id: The bot's client ID
"""
state, response = datatools.get_compare_version()
logger.info("Starting Modis in console")
logger.info(response)
import threading
import asyncio
logger.debug("Loading packages")
from modis.discord_modis import main as discord_modis_console
from modis.reddit_modis import main as reddit_modis_console
from modis.facebook_modis import main as facebook_modis_console
# Create threads
logger.debug("Initiating threads")
loop = asyncio.get_event_loop()
discord_thread = threading.Thread(
target=discord_modis_console.start,
args=[discord_token, discord_client_id, loop])
reddit_thread = threading.Thread(
target=reddit_modis_console.start, args=[])
facebook_thread = threading.Thread(
target=facebook_modis_console.start, args=[])
# Run threads
logger.debug("Starting threads")
discord_thread.start()
reddit_thread.start()
facebook_thread.start()
logger.debug("Root startup completed") | Start Modis in console format.
Args:
discord_token (str): The bot token for your Discord application
discord_client_id: The bot's client ID | entailment |
def gui(discord_token, discord_client_id):
"""
Start Modis in gui format.
Args:
discord_token (str): The bot token for your Discord application
discord_client_id: The bot's client ID
"""
logger.info("Starting Modis in GUI")
import tkinter as tk
logger.debug("Loading packages")
from modis.discord_modis import gui as discord_modis_gui
from modis.reddit_modis import gui as reddit_modis_gui
from modis.facebook_modis import gui as facebook_modis_gui
logger.debug("Initialising window")
# Setup the root window
root = tk.Tk()
root.minsize(width=800, height=400)
root.geometry("800x600")
root.title("Modis Control Panel")
# Icon
root.iconbitmap(r"{}/assets/modis.ico".format(file_dir))
# Setup the notebook
"""notebook = ttk.Notebook(root)
notebook.grid(column=0, row=0, padx=0, pady=0, sticky="W E N S")
# Configure stretch ratios
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
notebook.columnconfigure(0, weight=1)
notebook.rowconfigure(0, weight=1)
# Add tabs
logger.debug("Adding packages to window")
notebook.add(
discord_modis_gui.Frame(notebook, discord_token, discord_client_id),
text="Discord")
notebook.add(reddit_modis_gui.Frame(notebook), text="Reddit")
notebook.add(facebook_modis_gui.Frame(notebook), text="Facebook")"""
discord = discord_modis_gui.Frame(root, discord_token, discord_client_id)
discord.grid(column=0, row=0, padx=0, pady=0, sticky="W E N S")
# Configure stretch ratios
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
discord.columnconfigure(0, weight=1)
discord.rowconfigure(0, weight=1)
logger.debug("GUI initialised")
# Run the window UI
root.mainloop() | Start Modis in gui format.
Args:
discord_token (str): The bot token for your Discord application
discord_client_id: The bot's client ID | entailment |
def write_data(data):
"""
Write the data to the data.json file
Args:
data (dict): The updated data dictionary for Modis
"""
sorted_dict = sort_recursive(data)
with open(_datafile, 'w') as file:
_json.dump(sorted_dict, file, indent=2) | Write the data to the data.json file
Args:
data (dict): The updated data dictionary for Modis | entailment |
def sort_recursive(data):
"""
Recursively sorts all elements in a dictionary
Args:
data (dict): The dictionary to sort
Returns:
sorted_dict (OrderedDict): The sorted data dict
"""
newdict = {}
for i in data.items():
if type(i[1]) is dict:
newdict[i[0]] = sort_recursive(i[1])
else:
newdict[i[0]] = i[1]
return OrderedDict(sorted(newdict.items(), key=lambda item: (compare_type(type(item[1])), item[0]))) | Recursively sorts all elements in a dictionary
Args:
data (dict): The dictionary to sort
Returns:
sorted_dict (OrderedDict): The sorted data dict | entailment |
def get_compare_version():
"""
Get the version comparison info.
Returns: (tuple)
state (int): -1 for lower version, 0 for same version, 1 for higher version than latest.
response (str): The response string.
"""
state, latest_version = compare_latest_version()
if state < 0:
return -1, "A new version of Modis is available (v{})".format(latest_version)
elif state == 0:
return 0, "You are running the latest version of Modis (v{})".format(version)
else:
return 1, "You are running a preview version of Modis (v{} pre-release)".format(version) | Get the version comparison info.
Returns: (tuple)
state (int): -1 for lower version, 0 for same version, 1 for higher version than latest.
response (str): The response string. | entailment |
def success(channel, stats, name, platform, dp):
"""Creates an embed UI containing the Rocket League stats
Args:
channel (discord.Channel): The Discord channel to bind the embed to
stats (tuple): Tuples of (field, value, percentile)
name (str): The name of the player
platform (str): The platform to search on, can be 'steam', 'ps', or 'xbox'
dp (str): URL to the player's dp
Returns:
(discord.Embed): The created embed
"""
# Create datapacks
datapacks = [("Platform", platform, False)]
for stat in stats:
# Add stats
if stat[0] in ("Duel 1v1", "Doubles 2v2", "Solo Standard 3v3", "Standard 3v3"):
stat_name = "__" + stat[0] + "__"
stat_value = "**" + stat[1] + "**"
else:
stat_name = stat[0]
stat_value = stat[1]
# Add percentile if it exists
if stat[2]:
stat_value += " *(Top " + stat[2] + "%)*"
datapacks.append((stat_name, stat_value, True))
# Create embed UI object
gui = ui_embed.UI(
channel,
"Rocket League Stats: {}".format(name),
"*Stats obtained from [Rocket League Tracker Network](https://rocketleague.tracker.network/)*",
modulename=modulename,
colour=0x0088FF,
thumbnail=dp,
datapacks=datapacks
)
return gui | Creates an embed UI containing the Rocket League stats
Args:
channel (discord.Channel): The Discord channel to bind the embed to
stats (tuple): Tuples of (field, value, percentile)
name (str): The name of the player
platform (str): The platform to search on, can be 'steam', 'ps', or 'xbox'
dp (str): URL to the player's dp
Returns:
(discord.Embed): The created embed | entailment |
def fail_steamid(channel):
"""Creates an embed UI for invalid SteamIDs
Args:
channel (discord.Channel): The Discord channel to bind the embed to
Returns:
ui (ui_embed.UI): The embed UI object
"""
gui = ui_embed.UI(
channel,
"That SteamID doesn't exist.",
"You can get your SteamID by going to your profile page and looking at the url, "
"or you can set a custom ID by going to edit profile on your profile page.",
modulename=modulename,
colour=0x0088FF
)
return gui | Creates an embed UI for invalid SteamIDs
Args:
channel (discord.Channel): The Discord channel to bind the embed to
Returns:
ui (ui_embed.UI): The embed UI object | entailment |
def fail_api(channel):
"""Creates an embed UI for when the API call didn't work
Args:
channel (discord.Channel): The Discord channel to bind the embed to
Returns:
ui (ui_embed.UI): The embed UI object
"""
gui = ui_embed.UI(
channel,
"Couldn't get stats off RLTrackerNetwork.",
"Maybe the API changed, please tell Infraxion.",
modulename=modulename,
colour=0x0088FF
)
return gui | Creates an embed UI for when the API call didn't work
Args:
channel (discord.Channel): The Discord channel to bind the embed to
Returns:
ui (ui_embed.UI): The embed UI object | entailment |
async def on_message(message):
"""The on_message event handler for this module
Args:
message (discord.Message): Input message
"""
# Simplify message info
server = message.server
author = message.author
channel = message.channel
content = message.content
data = datatools.get_data()
if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
return
# Only reply to server messages and don't reply to myself
if server is not None and author != channel.server.me:
# Commands section
prefix = data["discord"]["servers"][server.id]["prefix"]
if content.startswith(prefix):
# Parse message
package = content.split(" ")
command = package[0][len(prefix):]
args = package[1:]
arg = ' '.join(args)
# Commands
if command == 'hex':
await client.send_typing(channel)
# Parse message
hex_strs = api_hexconvert.convert_hex_value(arg)
# Create embed UI
if len(hex_strs) > 0:
for hex_str in hex_strs:
image_url = convert_hex_to_url(hex_str)
embed = ui_embed.success(channel, image_url, hex_str)
await embed.send()
else:
embed = ui_embed.fail_api(channel)
await embed.send()
else:
# Parse message
hex_strs = api_hexconvert.convert_hex_value(content)
# Create embed UI
if len(hex_strs) > 0:
for hex_str in hex_strs:
await client.send_typing(channel)
image_url = convert_hex_to_url(hex_str)
embed = ui_embed.success(channel, image_url, hex_str)
await embed.send() | The on_message event handler for this module
Args:
message (discord.Message): Input message | entailment |
async def on_message(message):
"""The on_message event handler for this module
Args:
message (discord.Message): Input message
"""
# Simplify message info
server = message.server
author = message.author
channel = message.channel
content = message.content
data = datatools.get_data()
if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
return
# Only reply to server messages and don't reply to myself
if server is not None and author != channel.server.me:
# Commands section
prefix = data["discord"]["servers"][server.id]["prefix"]
if content.startswith(prefix):
# Parse message
package = content.split(" ")
command = package[0][len(prefix):]
# Commands
if command == 'gamedeals':
await client.send_typing(channel)
# Get posts from Reddit API
posts = api_reddit.get_top10()
if posts:
for post in posts:
# Create embed UI
embed = ui_embed.success(channel, post)
await embed.send()
else:
embed = ui_embed.no_results(channel)
await embed.send() | The on_message event handler for this module
Args:
message (discord.Message): Input message | entailment |
def success(channel, post):
"""Creates an embed UI containing the Reddit posts
Args:
channel (discord.Channel): The Discord channel to bind the embed to
post (tuple): Tuples of (field, value, percentile)
Returns:
"""
# Create datapacks
datapacks = [("Game", post[0], True), ("Upvotes", post[2], True)]
# Create embed UI object
gui = ui_embed.UI(
channel,
"Link",
post[1],
modulename=modulename,
colour=0xFF8800,
thumbnail=post[1],
datapacks=datapacks
)
return gui | Creates an embed UI containing the Reddit posts
Args:
channel (discord.Channel): The Discord channel to bind the embed to
post (tuple): Tuples of (field, value, percentile)
Returns: | entailment |
def no_results(channel):
"""Creates an embed UI for when there were no results
Args:
channel (discord.Channel): The Discord channel to bind the embed to
Returns:
ui (ui_embed.UI): The embed UI object
"""
gui = ui_embed.UI(
channel,
"No results",
":c",
modulename=modulename,
colour=0xFF8800
)
return gui | Creates an embed UI for when there were no results
Args:
channel (discord.Channel): The Discord channel to bind the embed to
Returns:
ui (ui_embed.UI): The embed UI object | entailment |
def make_timebar(progress=0, duration=0):
"""
Makes a new time bar string
Args:
progress: How far through the current song we are (in seconds)
duration: The duration of the current song (in seconds)
Returns:
timebar (str): The time bar string
"""
duration_string = api_music.duration_to_string(duration)
if duration <= 0:
return "---"
time_counts = int(round((progress / duration) * TIMEBAR_LENGTH))
if time_counts > TIMEBAR_LENGTH:
time_counts = TIMEBAR_LENGTH
if duration > 0:
bar = "│" + (TIMEBAR_PCHAR * time_counts) + (TIMEBAR_ECHAR * (TIMEBAR_LENGTH - time_counts)) + "│"
time_bar = "{} {}".format(bar, duration_string)
else:
time_bar = duration_string
return time_bar | Makes a new time bar string
Args:
progress: How far through the current song we are (in seconds)
duration: The duration of the current song (in seconds)
Returns:
timebar (str): The time bar string | entailment |
async def on_message(message):
"""The on_message event handler for this module
Args:
message (discord.Message): Input message
"""
# Simplify message info
server = message.server
author = message.author
channel = message.channel
content = message.content
data = datatools.get_data()
if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
return
# Only reply to server messages and don't reply to myself
if server is not None and author != channel.server.me:
# Only reply to mentions
if channel.server.me in message.mentions:
logger.info("Bot was mentioned, summoning Mitsuku")
await client.send_typing(channel)
# Get new botcust2 from Mitsuku if it does not exist for channel in serverdata
if channel.id not in data["discord"]["servers"][server.id][_data.modulename]["channels"]:
new_serverdata = data
new_serverdata["discord"]["servers"][server.id][_data.modulename]["channels"][channel.id] = \
api_mitsuku.get_botcust2()
datatools.write_data(new_serverdata)
# Get botcust2 from serverdata
botcust2 = data["discord"]["servers"][server.id][_data.modulename]["channels"][channel.id]
# Remove mention from message content so Mitsuku doesn't see it
content = content.replace("<@{}>".format(str(channel.server.me.id)), ' ')
content = content.replace("<@!{}>".format(str(channel.server.me.id)), ' ')
# Send Mitsuku's reply
if botcust2:
response = api_mitsuku.query(botcust2, content)
if response:
await client.send_message(channel, response)
else:
await client.send_message(channel, "```Couldn't get readable response from Mitsuku.```")
else:
await client.send_message(channel, "```Couldn't initialise with Mitsuku.```") | The on_message event handler for this module
Args:
message (discord.Message): Input message | entailment |
def build(self):
"""Builds Discord embed GUI
Returns:
discord.Embed: Built GUI
"""
if self.colour:
embed = discord.Embed(
title=self.title,
type='rich',
description=self.description,
colour=self.colour)
else:
embed = discord.Embed(
title=self.title,
type='rich',
description=self.description)
if self.thumbnail:
embed.set_thumbnail(url=self.thumbnail)
if self.image:
embed.set_image(url=self.image)
embed.set_author(
name="Modis",
url="https://musicbyango.com/modis/",
icon_url="http://musicbyango.com/modis/dp/modis64t.png")
for pack in self.datapacks:
embed.add_field(
name=pack[0],
value=pack[1],
inline=pack[2]
)
return embed | Builds Discord embed GUI
Returns:
discord.Embed: Built GUI | entailment |
async def send(self):
"""Send new GUI"""
await client.send_typing(self.channel)
self.sent_embed = await client.send_message(self.channel, embed=self.built_embed) | Send new GUI | entailment |
def update_data(self, index, data):
"""Updates a particular datapack's data
Args:
index (int): The index of the datapack
data (str): The new value to set for this datapack
"""
datapack = self.built_embed.to_dict()["fields"][index]
self.built_embed.set_field_at(index, name=datapack["name"], value=data, inline=datapack["inline"]) | Updates a particular datapack's data
Args:
index (int): The index of the datapack
data (str): The new value to set for this datapack | entailment |
def suck_out_variations_only(reporters):
"""Builds a dictionary of variations to canonical reporters.
The dictionary takes the form of:
{
"A. 2d": ["A.2d"],
...
"P.R.": ["Pen. & W.", "P.R.R.", "P."],
}
In other words, it's a dictionary that maps each variation to a list of
reporters that it could be possibly referring to.
"""
variations_out = {}
for reporter_key, data_list in reporters.items():
# For each reporter key...
for data in data_list:
# For each book it maps to...
for variation_key, variation_value in data["variations"].items():
try:
variations_list = variations_out[variation_key]
if variation_value not in variations_list:
variations_list.append(variation_value)
except KeyError:
# The item wasn't there; add it.
variations_out[variation_key] = [variation_value]
return variations_out | Builds a dictionary of variations to canonical reporters.
The dictionary takes the form of:
{
"A. 2d": ["A.2d"],
...
"P.R.": ["Pen. & W.", "P.R.R.", "P."],
}
In other words, it's a dictionary that maps each variation to a list of
reporters that it could be possibly referring to. | entailment |
def suck_out_editions(reporters):
"""Builds a dictionary mapping edition keys to their root name.
The dictionary takes the form of:
{
"A.": "A.",
"A.2d": "A.",
"A.3d": "A.",
"A.D.": "A.D.",
...
}
In other words, this lets you go from an edition match to its parent key.
"""
editions_out = {}
for reporter_key, data_list in reporters.items():
# For each reporter key...
for data in data_list:
# For each book it maps to...
for edition_key, edition_value in data["editions"].items():
try:
editions_out[edition_key]
except KeyError:
# The item wasn't there; add it.
editions_out[edition_key] = reporter_key
return editions_out | Builds a dictionary mapping edition keys to their root name.
The dictionary takes the form of:
{
"A.": "A.",
"A.2d": "A.",
"A.3d": "A.",
"A.D.": "A.D.",
...
}
In other words, this lets you go from an edition match to its parent key. | entailment |