signature
stringlengths
8
3.44k
body
stringlengths
0
1.41M
docstring
stringlengths
1
122k
id
stringlengths
5
17
def plot_feedback_params(hw_major_version, max_resistor_readings,<EOL>feedback_params, axis=None):
R1 = <NUM_LIT><EOL>if axis is None:<EOL><INDENT>fig = plt.figure()<EOL>axis = fig.add_subplot(<NUM_LIT>)<EOL><DEDENT>markers = MarkerStyle.filled_markers<EOL>def plot_resistor_params(args):<EOL><INDENT>resistor_index, x = args<EOL>try:<EOL><INDENT>color = next(axis._get_lines.color_cycle)<EOL><DEDENT>except: <EOL><INDENT>color = axis._get_lines.prop_cycler.next()['<STR_LIT>']<EOL><DEDENT>F = feedback_params.loc[resistor_index]<EOL>values = np.empty_like(x['<STR_LIT>'])<EOL>values[:] = compute_from_transfer_function(hw_major_version, '<STR_LIT>',<EOL>V1=<NUM_LIT:1.>, R1=R1,<EOL>R2=F['<STR_LIT>'],<EOL>C2=F['<STR_LIT>'],<EOL>f=x['<STR_LIT>'])<EOL>axis.loglog(x['<STR_LIT>'], values, color=color, linestyle='<STR_LIT>',<EOL>label='<STR_LIT>' % resistor_index)<EOL>values[:] = compute_from_transfer_function(hw_major_version, '<STR_LIT>',<EOL>V1=<NUM_LIT:1.>, R1=R1,<EOL>R2=F['<STR_LIT>'],<EOL>C2=F['<STR_LIT>'],<EOL>f=x['<STR_LIT>'])<EOL>axis.loglog(x['<STR_LIT>'], values, color=color, linestyle='<STR_LIT:->',<EOL>alpha=<NUM_LIT>, label='<STR_LIT>' % resistor_index)<EOL>attenuation = x['<STR_LIT>'] / x['<STR_LIT>']<EOL>axis.plot(x['<STR_LIT>'], attenuation, color='<STR_LIT:none>',<EOL>marker=markers[resistor_index % len(markers)],<EOL>label='<STR_LIT>' % resistor_index,<EOL>linestyle='<STR_LIT:none>', markeredgecolor=color, markeredgewidth=<NUM_LIT:2>,<EOL>markersize=<NUM_LIT:8>)<EOL>return <NUM_LIT:0><EOL><DEDENT>list(map(plot_resistor_params, max_resistor_readings.groupby('<STR_LIT>')))<EOL>legend = axis.legend(ncol=<NUM_LIT:3>)<EOL>legend.draw_frame(False)<EOL>axis.set_xlabel('<STR_LIT>')<EOL>axis.set_ylabel(r'<STR_LIT>'<EOL>r'<STR_LIT>', fontsize=<NUM_LIT>)<EOL>
Plot the effective attenuation _(i.e., gain less than 1)_ of the control board measurements of high-voltage AC input according to: - AC signal frequency. - feedback resistor used _(varies based on amplitude of AC signal)_. Each high-voltage feedback resistor (unintentionally) forms a low-pass filter, resulting in attenuation of the voltage measured on the control board. The plot generated by this function plots each of the following trends for each feedback resistor: - Oscilloscope measurements. - Previous model of attenuation. - Newly fitted model of attenuation, based on oscilloscope readings.
f7275:m4
def update_control_board_calibration(control_board, fitted_params):
<EOL>control_board.a0_series_resistance = fitted_params['<STR_LIT>'].values<EOL>control_board.a0_series_capacitance = fitted_params['<STR_LIT>'].values<EOL>
Update the control board with the specified fitted parameters.
f7275:m5
def savgol_filter(x, window_length, polyorder, deriv=<NUM_LIT:0>, delta=<NUM_LIT:1.0>, axis=-<NUM_LIT:1>, mode='<STR_LIT>', cval=<NUM_LIT:0.0>):
<EOL>x = np.ma.masked_invalid(pd.Series(x).interpolate())<EOL>try:<EOL><INDENT>ind = np.isfinite(x).nonzero()[<NUM_LIT:0>][<NUM_LIT:0>]<EOL>x[ind:] = signal.savgol_filter(x[ind:], window_length, polyorder, deriv,<EOL>delta, axis, mode, cval)<EOL><DEDENT>except IndexError:<EOL><INDENT>pass<EOL><DEDENT>return np.ma.masked_invalid(x)<EOL>
Wrapper for the scipy.signal.savgol_filter function that handles Nan values. See: https://github.com/wheeler-microfluidics/dmf-control-board-firmware/issues/3 Returns ------- y : ndarray, same shape as `x` The filtered data.
f7276:m0
def serial_ports():
vid_pids = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>']<EOL>return sd.comports(vid_pid=vid_pids, include_all=True)<EOL>
Returns ------- pandas.DataFrame Table of serial ports that match the USB vendor ID and product IDs for Arduino Mega2560 (from `official Arduino Windows driver`_). .. official Arduino Windows driver: https://github.com/arduino/Arduino/blob/27d1b8d9a190469e185af7484b52cc5884e7d731/build/windows/dist/drivers/arduino.inf#L95-L98
f7276:m1
def feedback_results_to_measurements_frame(feedback_result):
index = pd.Index(feedback_result.time * <NUM_LIT>, name='<STR_LIT>')<EOL>df_feedback = pd.DataFrame(np.column_stack([feedback_result.V_fb,<EOL>feedback_result.V_hv,<EOL>feedback_result.fb_resistor,<EOL>feedback_result.hv_resistor]),<EOL>columns=['<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>'],<EOL>index=index)<EOL>df_feedback.insert(<NUM_LIT:0>, '<STR_LIT>', feedback_result.frequency)<EOL>return df_feedback<EOL>
Extract measured data from `FeedbackResults` instance into `pandas.DataFrame`.
f7276:m2
def feedback_results_to_impedance_frame(feedback_result):
index = pd.Index(feedback_result.time * <NUM_LIT>, name='<STR_LIT>')<EOL>df_feedback = pd.DataFrame(np.column_stack([feedback_result.V_actuation()<EOL>.filled(np.NaN),<EOL>feedback_result.capacitance()<EOL>.filled(np.NaN),<EOL>feedback_result.Z_device()<EOL>.filled(np.NaN)]),<EOL>columns=['<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>'],<EOL>index=index)<EOL>df_feedback.insert(<NUM_LIT:0>, '<STR_LIT>', feedback_result.frequency)<EOL>df_feedback.insert(<NUM_LIT:1>, '<STR_LIT>', feedback_result.voltage)<EOL>return df_feedback<EOL>
Extract computed impedance data from `FeedbackResults` instance into `pandas.DataFrame`.
f7276:m3
def get_sketch_directory():
return os.path.join(package_path(), '<STR_LIT:src>', '<STR_LIT>')<EOL>
Return directory containing the `dmf_control_board` Arduino sketch.
f7276:m5
def get_includes():
return [get_sketch_directory()]<EOL>
Return directories containing the `dmf_control_board` Arduino header files. Modules that need to compile against `dmf_control_boad` should use this function to locate the appropriate include directories. Notes ===== For example: import dmf_control_board ... print ' '.join(['-I%s' % i for i in dmf_control_board.get_includes()]) ...
f7276:m6
def get_firmwares():
return OrderedDict([(board_dir.name, [f.abspath() for f in<EOL>board_dir.walkfiles('<STR_LIT>')])<EOL>for board_dir in<EOL>package_path().joinpath('<STR_LIT>').dirs()])<EOL>
Return `dmf_control_board` compiled Arduino hex file paths. This function may be used to locate firmware binaries that are available for flashing to [Arduino Mega2560][1] boards. [1]: http://arduino.cc/en/Main/arduinoBoardMega2560
f7276:m7
def get_sources():
return get_sketch_directory().files('<STR_LIT>')<EOL>
Return `dmf_control_board` Arduino source file paths. Modules that need to compile against `dmf_control_board` should use this function to locate the appropriate source files to compile. Notes ===== For example: import dmf_control_board ... print ' '.join(dmf_control_board.get_sources()) ...
f7276:m8
def safe_getattr(obj, attr, except_types):
try:<EOL><INDENT>return getattr(obj, attr, None)<EOL><DEDENT>except except_types:<EOL><INDENT>return None<EOL><DEDENT>
Execute `getattr` to retrieve the specified attribute from the provided object, returning a default value of `None` in the case where the attribute does not exist. In the case where an exception occurs during the `getattr` call, if the exception type is in `except_types`, ignore the exception and return `None`.
f7276:m9
@decorator.decorator<EOL>def safe_series_resistor_index_read(f, self, channel, resistor_index=None):
if resistor_index is not None:<EOL><INDENT>original_resistor_index = self.series_resistor_index(channel)<EOL>if resistor_index != original_resistor_index:<EOL><INDENT>self.set_series_resistor_index(channel, resistor_index)<EOL><DEDENT><DEDENT>value = f(self, channel)<EOL>if (resistor_index is not None and<EOL>resistor_index != original_resistor_index):<EOL><INDENT>self.set_series_resistor_index(channel, original_resistor_index)<EOL><DEDENT>return value<EOL>
This decorator checks the resistor-index from the current context _(i.e., the result of `self.series_resistor_index`)_. If the resistor-index specified by the `resistor_index` keyword argument is different than the current context value, the series-resistor-index is temporarily set to the value of `resistor_index` to execute the wrapped function before restoring back to the original value.
f7276:m10
@decorator.decorator<EOL>def safe_series_resistor_index_write(f, self, channel, value,<EOL>resistor_index=None):
if resistor_index is not None:<EOL><INDENT>original_resistor_index = self.series_resistor_index(channel)<EOL>if resistor_index != original_resistor_index:<EOL><INDENT>self.set_series_resistor_index(channel, resistor_index)<EOL><DEDENT><DEDENT>value = f(self, channel, value)<EOL>if (resistor_index is not None and<EOL>resistor_index != original_resistor_index):<EOL><INDENT>self.set_series_resistor_index(channel, original_resistor_index)<EOL><DEDENT>return value<EOL>
This decorator checks the resistor-index from the current context _(i.e., the result of `self.series_resistor_index`)_. If the resistor-index specified by the `resistor_index` keyword argument is different than the current context value, the series-resistor-index is temporarily set to the value of `resistor_index` to execute the wrapped function before restoring back to the original value.
f7276:m11
@decorator.decorator<EOL>def remote_command(function, self, *args, **kwargs):
try:<EOL><INDENT>return function(self, *args, **kwargs)<EOL><DEDENT>except RuntimeError as exception:<EOL><INDENT>error_message = str(exception)<EOL>match = CRE_REMOTE_ERROR.match(error_message)<EOL>if match:<EOL><INDENT>command_code = int(match.group('<STR_LIT>'))<EOL>return_code = int(match.group('<STR_LIT>'))<EOL>raise FirmwareError(command_code, return_code)<EOL><DEDENT>match = CRE_REMOTE_COMMAND_ERROR.match(error_message)<EOL>if match:<EOL><INDENT>command_code = int(match.group('<STR_LIT>'))<EOL>command_name = NAMES_BY_COMMAND_CODE[command_code]<EOL>raise RuntimeError(CRE_REMOTE_COMMAND_ERROR.sub(command_name,<EOL>error_message))<EOL><DEDENT>raise<EOL><DEDENT>
Catch `RuntimeError` exceptions raised by remote control board firmware commands and re-raise as more specific `FirmwareError` exception type, which includes command code and return code.
f7276:m12
def _upgrade(self):
logging.debug('<STR_LIT>')<EOL>if hasattr(self, '<STR_LIT:version>'):<EOL><INDENT>version = Version.fromstring(self.version)<EOL><DEDENT>else:<EOL><INDENT>version = Version(<NUM_LIT:0>)<EOL><DEDENT>logging.debug('<STR_LIT>' %<EOL>(str(version), self.class_version))<EOL>if version > Version.fromstring(self.class_version):<EOL><INDENT>logging.debug('<STR_LIT>')<EOL>raise FutureVersionError(Version.fromstring(self.class_version),<EOL>version)<EOL><DEDENT>elif version < Version.fromstring(self.class_version):<EOL><INDENT>if version < Version(<NUM_LIT:0>, <NUM_LIT:1>):<EOL><INDENT>self.calibration = FeedbackCalibration()<EOL><DEDENT>if version < Version(<NUM_LIT:0>, <NUM_LIT:2>):<EOL><INDENT>self.version = str(Version(<NUM_LIT:0>, <NUM_LIT:2>))<EOL>self.fb_resistor[self.V_fb > <NUM_LIT:5>] = -<NUM_LIT:1><EOL>self.hv_resistor[self.V_hv > <NUM_LIT:5>] = -<NUM_LIT:1><EOL><DEDENT>if version < Version(<NUM_LIT:0>, <NUM_LIT:3>):<EOL><INDENT>self.attempt = <NUM_LIT:0><EOL><DEDENT>if version < Version(<NUM_LIT:0>, <NUM_LIT:4>):<EOL><INDENT>del self.sampling_time_ms<EOL>del self.delay_between_samples_ms<EOL>self.voltage = self.options.voltage<EOL>del self.options<EOL>del self.attempt<EOL><DEDENT>if version < Version(<NUM_LIT:0>, <NUM_LIT:5>):<EOL><INDENT>self.area = <NUM_LIT:0><EOL>self.version = str(Version(<NUM_LIT:0>, <NUM_LIT:5>))<EOL><DEDENT>if version < Version(<NUM_LIT:0>, <NUM_LIT:6>):<EOL><INDENT>self.amplifier_gain = None<EOL>self.vgnd_hv = None<EOL>self.vgnd_fb = None<EOL>self.version = str(Version(<NUM_LIT:0>, <NUM_LIT:6>))<EOL>logging.info('<STR_LIT>' %<EOL>self.version)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>
Upgrade the serialized object if necessary. Raises: FutureVersionError: file was written by a future version of the software.
f7276:c3:m2
def V_total(self):
ind = mlab.find(self.hv_resistor >= <NUM_LIT:0>)<EOL>V1 = np.empty(self.hv_resistor.shape)<EOL>V1.fill(np.nan)<EOL>V1[ind] = compute_from_transfer_function(self.calibration.hw_version<EOL>.major, '<STR_LIT>',<EOL>V2=self.V_hv[ind], R1=<NUM_LIT>,<EOL>R2=self.calibration.R_hv<EOL>[self.hv_resistor[ind]],<EOL>C2=self.calibration.C_hv<EOL>[self.hv_resistor[ind]],<EOL>f=self.frequency)<EOL>V1 = np.ma.masked_invalid(pd.Series(V1, pd.to_datetime(self.time, unit='<STR_LIT:s>')<EOL>).interpolate(method='<STR_LIT:time>').values)<EOL>V1.fill_value = np.nan<EOL>V1.data[V1.mask] = V1.fill_value<EOL>return V1<EOL>
Compute the input voltage (i.e., ``V1``) based on the measured high-voltage feedback values for ``V2``, using the high-voltage transfer function. See also -------- :meth:`V_actuation` for diagram with ``V1`` and ``V2`` labelled.
f7276:c3:m5
def V_actuation(self):
if self.calibration.hw_version.major == <NUM_LIT:1>:<EOL><INDENT>return self.V_total() - np.array(self.V_fb)<EOL><DEDENT>else:<EOL><INDENT>return self.V_total()<EOL><DEDENT>
Return the voltage drop across the device (i.e., the ``Z1`` load) for each feedback measurement. Consider the feedback circuit diagrams below for the feedback measurement circuits of the two the control board hardware versions. .. code-block:: none # Hardware V1 # # Hardware V2 # V_1 @ frequency V_1 @ frequency ┬ ┯ ┯ │ ┌─┴─┐ ┌─┴─┐ ┌───┐ V_actuation │ │Z_1│ │Z_1│ ┌─┤Z_2├─┐ │ └─┬─┘ └─┬─┘ │ └───┘ │ ┴ ├───O V_2 │ │ │\ ├───O V_2 ┌─┴─┐ └────┴──│-\__│ │Z_2│ ┌──│+/ └─┬─┘ │ │/ ═╧═ │ ¯ ═╧═ ¯ Note that in the case of **hardware version 1**, the input voltage ``V1`` is divided across ``Z1`` *and* the feedback measurement load ``Z2``. Therefore, the effective *actuation* voltage across the DMF device is less than ``V1``. Specifically, the effective *actuation* voltage is ``V1 - V2``. In **hardware version 2**, since the positive terminal of the op-amp is attached to *(virtual)* ground, the negative op-amp terminal is also at ground potential. It follows that the actuation voltage is equal to ``V1`` on **hardware version 2**.
f7276:c3:m6
def Z_device(self, filter_order=None, window_size=None, tol=<NUM_LIT>):
ind = mlab.find(self.fb_resistor >= <NUM_LIT:0>)<EOL>Z1 = np.empty(self.fb_resistor.shape)<EOL>Z1.fill(np.nan)<EOL>Z1 = np.ma.masked_invalid(Z1)<EOL>R2 = self.calibration.R_fb[self.fb_resistor[ind]]<EOL>C2 = self.calibration.C_fb[self.fb_resistor[ind]]<EOL>Z1[ind] = compute_from_transfer_function(self.calibration.hw_version<EOL>.major, '<STR_LIT>',<EOL>V1=self.V_total()[ind],<EOL>V2=self.V_fb[ind], R2=R2,<EOL>C2=C2, f=self.frequency)<EOL>Z1 = np.ma.masked_invalid(pd.Series(Z1, pd.to_datetime(self.time, unit='<STR_LIT:s>')<EOL>).interpolate(method='<STR_LIT:time>').values)<EOL>Z1.fill_value = np.nan<EOL>Z1.data[Z1.mask] = Z1.fill_value<EOL>if filter_order and window_size is None:<EOL><INDENT>window_size = self._get_window_size(tol)<EOL><DEDENT>if (filter_order is None or window_size is None or window_size < filter_order + <NUM_LIT:2>):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>if window_size and window_size < len(Z1) / <NUM_LIT:2>:<EOL><INDENT>with warnings.catch_warnings():<EOL><INDENT>warnings.simplefilter("<STR_LIT:ignore>")<EOL>Z1 = savgol_filter(Z1, window_size, filter_order)<EOL><DEDENT><DEDENT>else: <EOL><INDENT>result = self.mean_velocity(tol=tol)<EOL>if result['<STR_LIT>'] andresult['<STR_LIT>'] > <NUM_LIT:0.1> * self.time[-<NUM_LIT:1>] and result['<STR_LIT:p>'][<NUM_LIT:0>] > <NUM_LIT:0>:<EOL><INDENT>if self.calibration._c_drop:<EOL><INDENT>c_drop = self.calibration.c_drop(self.frequency)<EOL><DEDENT>else:<EOL><INDENT>c_drop = self.capacitance()[-<NUM_LIT:1>] / self.area<EOL><DEDENT>if self.calibration._c_filler:<EOL><INDENT>c_filler = self.calibration.c_filler(self.frequency)<EOL><DEDENT>else:<EOL><INDENT>c_filler = <NUM_LIT:0><EOL><DEDENT>x = result['<STR_LIT:p>'][<NUM_LIT:0>]*self.time + result['<STR_LIT:p>'][<NUM_LIT:1>]<EOL>C = self.area * (x * (c_drop - c_filler) /np.sqrt(self.area) + c_filler)<EOL>Z1 = <NUM_LIT:1.0> / (<NUM_LIT> * math.pi * self.frequency * C)<EOL>Z1[mlab.find(self.time==result['<STR_LIT>'])[<NUM_LIT:0>]+<NUM_LIT:1>:] 
=Z1[mlab.find(self.time==result['<STR_LIT>'])[<NUM_LIT:0>]]<EOL><DEDENT>else:<EOL><INDENT>Z1 = np.mean(Z1)*np.ones(Z1.shape)<EOL><DEDENT><DEDENT><DEDENT>return Z1<EOL>
Compute the impedance *(including resistive and capacitive load)* of the DMF device *(i.e., dielectric and droplet)*. See :func:`calibrate.compute_from_transfer_function` for details.
f7276:c3:m7
def force(self, Ly=None):
if self.calibration._c_drop:<EOL><INDENT>c_drop = self.calibration.c_drop(self.frequency)<EOL><DEDENT>else:<EOL><INDENT>c_drop = self.capacitance()[-<NUM_LIT:1>] / self.area<EOL><DEDENT>if self.calibration._c_filler:<EOL><INDENT>c_filler = self.calibration.c_filler(self.frequency)<EOL><DEDENT>else:<EOL><INDENT>c_filler = <NUM_LIT:0><EOL><DEDENT>if Ly is None:<EOL><INDENT>Ly = np.sqrt(self.area)<EOL><DEDENT>return <NUM_LIT> * Ly * <NUM_LIT:0.5> * (c_drop - c_filler) * self.V_actuation()**<NUM_LIT:2><EOL>
Estimate the applied force (in Newtons) on a drop according to the electromechanical model [1]. Ly is the length of the actuated electrode along the y-axis (perpendicular to the direction of motion) in milimeters. By default, use the square root of the actuated electrode area, i.e., Ly=Lx=sqrt(Area) To get the force normalized by electrode width (i.e., in units of N/mm), set Ly=1.0. 1. Chatterjee et al., "Electromechanical model for actuating liquids in a two-plate droplet microfluidic device," Lab on a Chip, no. 9 (2009): 1219-1229.
f7276:c3:m8
def capacitance(self, filter_order=None, window_size=None, tol=<NUM_LIT>):
C = np.ma.masked_invalid(<NUM_LIT:1.0> / (<NUM_LIT> * math.pi * self.frequency *<EOL>self.Z_device(filter_order=filter_order,<EOL>window_size=window_size, tol=tol)))<EOL>C.fill_value = np.nan<EOL>C.data[C.mask] = C.fill_value<EOL>return C<EOL>
Compute the capacitance of the DMF device _(i.e., dielectric and droplet)_ based on the computed impedance value. Note: this assumes impedance is purely capacitive load. TODO: Is this assumption ok?
f7276:c3:m10
def x_position(self, filter_order=None, window_size=None, tol=<NUM_LIT>,<EOL>Lx=None):
if self.calibration._c_drop:<EOL><INDENT>c_drop = self.calibration.c_drop(self.frequency)<EOL><DEDENT>else:<EOL><INDENT>c_drop = self.capacitance()[-<NUM_LIT:1>] / self.area<EOL><DEDENT>if self.calibration._c_filler:<EOL><INDENT>c_filler = self.calibration.c_filler(self.frequency)<EOL><DEDENT>else:<EOL><INDENT>c_filler = <NUM_LIT:0><EOL><DEDENT>if Lx is None:<EOL><INDENT>Lx = np.sqrt(self.area)<EOL><DEDENT>return (self.capacitance(filter_order=filter_order,<EOL>window_size=window_size, tol=tol) / self.area- c_filler) / (c_drop - c_filler) * Lx<EOL>
Calculate $x$-position according to: __ | C | ╲╱ a ⋅ | - - c_f | | a | x = ────────────── c_d - c_f where: - $C$ is the measured capacitance. - $c_f$ is the capacitance of the filler medium per unit area _(e.g., air)_. - $c_d$ is the capacitance of an electrode completely covered in liquid per unit area. - $a$ is the area of the actuated electrode(s). Note that this equation for $x$ assumes a single drop moving across an electrode with a length along the x-axis of Lx. If no value is provided for Lx, the electrode is assumed to be square, i.e., Lx=Ly=sqrt(area)
f7276:c3:m11
def mean_velocity(self, tol=<NUM_LIT>, Lx=None):
dx = None<EOL>dt = None<EOL>p = None<EOL>ind = None<EOL>t_end = None<EOL>if self.area == <NUM_LIT:0>:<EOL><INDENT>return dict(dx=dx, dt=dt, p=p, ind=ind, t_end=t_end)<EOL><DEDENT>x = self.x_position(Lx=Lx)<EOL>ind_start = mlab.find(x.mask==False)[<NUM_LIT:0>]<EOL>ind_last = mlab.find(x.mask==False)[-<NUM_LIT:1>]<EOL>if x[ind_start] > (<NUM_LIT:1> - tol) * x[ind_last] or x[ind_last] < <NUM_LIT:0>:<EOL><INDENT>ind_stop = ind_last<EOL><DEDENT>else: <EOL><INDENT>ind_stop = mlab.find(x > (<NUM_LIT:1> - tol) * x[ind_last])[<NUM_LIT:0>]<EOL><DEDENT>ind = [ind_start, ind_stop]<EOL>if len(ind) >=<NUM_LIT:2>:<EOL><INDENT>dx = np.diff(x[ind])[<NUM_LIT:0>]<EOL>dt = np.diff(self.time[ind])[<NUM_LIT:0>] <EOL>with warnings.catch_warnings():<EOL><INDENT>warnings.simplefilter("<STR_LIT:ignore>")<EOL>p = np.polyfit(self.time[ind[<NUM_LIT:0>]:ind[<NUM_LIT:1>]], x[ind[<NUM_LIT:0>]:ind[<NUM_LIT:1>]], <NUM_LIT:1>)<EOL><DEDENT>ind_stop = mlab.find(self.time >(x[ind_last] - p[<NUM_LIT:1>]) / p[<NUM_LIT:0>])<EOL>if len(ind_stop):<EOL><INDENT>t_end = self.time[ind_stop[<NUM_LIT:0>]]<EOL><DEDENT>else:<EOL><INDENT>t_end = self.time[-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>return dict(dx=dx, dt=dt, p=p, ind=ind, t_end=t_end)<EOL>
Calculate the mean velocity for a step (mm/ms which is equivalent to m/s). Fit a line to the capacitance data and get the slope.
f7276:c3:m12
def to_frame(self, filter_order=<NUM_LIT:3>):
window_size = self._get_window_size()<EOL>L = np.sqrt(self.area)<EOL>velocity_results = self.mean_velocity(Lx=L)<EOL>mean_velocity = None<EOL>peak_velocity = None<EOL>dx = <NUM_LIT:0><EOL>dt = <NUM_LIT:0><EOL>dxdt = np.zeros(len(self.time))<EOL>dxdt_filtered = np.zeros(len(self.time))<EOL>if filter_order and window_size and window_size < filter_order + <NUM_LIT:2>:<EOL><INDENT>filter_order = None<EOL><DEDENT>if velocity_results and velocity_results['<STR_LIT>']:<EOL><INDENT>mean_velocity = velocity_results['<STR_LIT:p>'][<NUM_LIT:0>] * <NUM_LIT><EOL>dx = velocity_results['<STR_LIT>']<EOL>dt = velocity_results['<STR_LIT>'] * <NUM_LIT> <EOL>t, dxdt = self.dxdt(Lx=L)<EOL>dxdt = np.interp(self.time,<EOL>t, dxdt) * <NUM_LIT> <EOL>dxdt = np.ma.masked_invalid(dxdt)<EOL>t, dxdt_filtered = self.dxdt(filter_order=filter_order, Lx=L)<EOL>dxdt_filtered = np.interp(self.time,<EOL>t, dxdt_filtered) * <NUM_LIT> <EOL>dxdt_filtered = np.ma.masked_invalid(dxdt_filtered)<EOL>peak_velocity = np.max(dxdt_filtered)<EOL><DEDENT>index = pd.Index(self.time * <NUM_LIT>, name='<STR_LIT>')<EOL>df = pd.DataFrame({'<STR_LIT>': self.voltage, <EOL>'<STR_LIT>': self.V_actuation(), <EOL>'<STR_LIT>': self.force(Ly=<NUM_LIT:1.0>) * <NUM_LIT>, <EOL>'<STR_LIT>': self.Z_device(filter_order=filter_order), <EOL>'<STR_LIT>': self.capacitance(filter_order=filter_order), <EOL>'<STR_LIT>': self.x_position(filter_order=filter_order), <EOL>'<STR_LIT>': dxdt_filtered, <EOL>'<STR_LIT>': self.Z_device(), <EOL>'<STR_LIT>': self.capacitance(), <EOL>'<STR_LIT>': self.x_position(), <EOL>'<STR_LIT>': dxdt, <EOL>}, index=index)<EOL>df['<STR_LIT>'] = self.frequency<EOL>df['<STR_LIT>'] = self.area <EOL>df['<STR_LIT>'] = dx <EOL>df['<STR_LIT>'] = dt <EOL>df['<STR_LIT>'] = mean_velocity <EOL>df['<STR_LIT>'] = peak_velocity <EOL>df['<STR_LIT>'] = window_size<EOL>df['<STR_LIT>'] = filter_order<EOL>return df[['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', 
'<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>']]<EOL>
Convert data to a `pandas.DataFrame`. Parameters ---------- filter_order : int Filter order to use when filtering Z_device, capacitance, x_position, and dxdt. Data is filtered using a Savitzky-Golay filter with a window size that is adjusted based on the mean velocity of the drop (see _get_window_size). Returns ------- pandas.DataFrame This DataFrame is indexed by a utc_timestamp and contains the following columns: frequency: actuation frequency (Hz) target_voltage: target voltage (V) voltage: measured voltage (V) force: actuation force (uN/mm) area: actuated area (mm^2) Z_device_filtered: filtered device impedance for actuated area (Ohms) capacitance_filtered: filtered device capacitance for actuated area (F) x_position_filtered: filtered x-position of the drop (mm) dxdt_filtered: filtered instantaneous velocity of the drop (mm/s) Z_device: device impedance for actuated area (Ohms) capacitance: device capacitance for actuated area (F) x_position: x-position of the drop (mm) dxdt: instantaneous velocity of the drop (mm/s) dx: difference in the drop's x-position over the course of the step (mm) dt: time the drop is considered to have been "moving" (s) mean_velocity: mean drop velocity (mm/s) peak_velocity: peak drop velocity calculated from filtered instantaneous velocity (mm/s) window_size: windows size used for Savitzky-Golay filter (# bins) filter_order: order used for Savitzky-Golay filter (integer)
f7276:c3:m15
def _upgrade(self):
logging.debug("<STR_LIT>")<EOL>version = Version.fromstring(self.version)<EOL>logging.debug('<STR_LIT>',<EOL>str(version), self.class_version)<EOL>if version > Version.fromstring(self.class_version):<EOL><INDENT>logging.debug('<STR_LIT>')<EOL>raise FutureVersionError(Version.fromstring(self.class_version),<EOL>version)<EOL><DEDENT>elif version < Version.fromstring(self.class_version):<EOL><INDENT>if version < Version(<NUM_LIT:0>, <NUM_LIT:1>):<EOL><INDENT>self.time = [None]*len(self.data)<EOL>self.version = str(Version(<NUM_LIT:0>, <NUM_LIT:1>))<EOL><DEDENT><DEDENT>
Upgrade the serialized object if necessary. Raises: FutureVersionError: file was written by a future version of the software.
f7276:c4:m2
def C_drop(self, frequency):
logger.warning('<STR_LIT>')<EOL>return self.c_drop(frequency)<EOL>
This function has been depreciated. It has been replaced by the lowercase version c_drop, which signifies that the capacitance is normalized per unit area (i.e., units are F/mm^2).
f7276:c5:m1
def C_filler(self, frequency):
logger.warning('<STR_LIT>')<EOL>return self.c_filler(frequency)<EOL>
This function has been depreciated. It has been replaced by the lowercase version c_filler, which signifies that the capacitance is normalized per unit area (i.e., units are F/mm^2).
f7276:c5:m2
def c_drop(self, frequency):
try:<EOL><INDENT>return np.interp(frequency,<EOL>self._c_drop['<STR_LIT>'],<EOL>self._c_drop['<STR_LIT>']<EOL>)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>return self._c_drop<EOL>
Capacitance of an electrode covered in liquid, normalized per unit area (i.e., units are F/mm^2).
f7276:c5:m3
def c_filler(self, frequency):
try:<EOL><INDENT>return np.interp(frequency,<EOL>self._c_filler['<STR_LIT>'],<EOL>self._c_filler['<STR_LIT>']<EOL>)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>return self._c_filler<EOL>
Capacitance of an electrode covered in filler media (e.g., air or oil), normalized per unit area (i.e., units are F/mm^2).
f7276:c5:m4
def __getstate__(self):
out = copy.deepcopy(self.__dict__)<EOL>for k, v in list(out.items()):<EOL><INDENT>if isinstance(v, np.ndarray):<EOL><INDENT>out[k] = v.tolist()<EOL><DEDENT><DEDENT>return out<EOL>
Convert numpy arrays to lists for serialization
f7276:c5:m5
def __setstate__(self, state):
<EOL>for k in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>if k in state:<EOL><INDENT>state['<STR_LIT:_>' + k.lower()] = state[k]<EOL>del state[k]<EOL><DEDENT><DEDENT>for k in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>if k in state:<EOL><INDENT>state[k.lower()] = state[k]<EOL>del state[k]<EOL><DEDENT><DEDENT>self.__dict__ = state<EOL>for k, v in list(self.__dict__.items()):<EOL><INDENT>if k == '<STR_LIT>' or k == '<STR_LIT>' or k == '<STR_LIT>' or k == '<STR_LIT>':<EOL><INDENT>self.__dict__[k] = np.array(v)<EOL><DEDENT><DEDENT>if '<STR_LIT:version>' not in self.__dict__:<EOL><INDENT>self.version = str(Version(<NUM_LIT:0>, <NUM_LIT:0>))<EOL><DEDENT>self._upgrade()<EOL>
Convert lists to numpy arrays after loading serialized object
f7276:c5:m6
def _upgrade(self):
logging.debug("<STR_LIT>")<EOL>version = Version.fromstring(self.version)<EOL>logging.debug('<STR_LIT>',<EOL>str(version), self.class_version)<EOL>if version > Version.fromstring(self.class_version):<EOL><INDENT>logging.debug('<STR_LIT>')<EOL>raise FutureVersionError(Version.fromstring(self.class_version),<EOL>version)<EOL><DEDENT>elif version < Version.fromstring(self.class_version):<EOL><INDENT>if version < Version(<NUM_LIT:0>, <NUM_LIT:1>):<EOL><INDENT>self._c_filler = None<EOL>self._c_drop = None<EOL>self.version = str(Version(<NUM_LIT:0>, <NUM_LIT:1>))<EOL><DEDENT>if version < Version(<NUM_LIT:0>, <NUM_LIT:2>):<EOL><INDENT>self.hw_version = Version(<NUM_LIT:1>)<EOL>self.version = str(Version(<NUM_LIT:0>, <NUM_LIT:2>))<EOL>logging.info('<STR_LIT>',<EOL>self.version)<EOL><DEDENT>if version < Version(<NUM_LIT:0>, <NUM_LIT:2>):<EOL><INDENT>self.hw_version = Version(<NUM_LIT:1>)<EOL>self.version = str(Version(<NUM_LIT:0>, <NUM_LIT:2>))<EOL>logging.info('<STR_LIT>',<EOL>self.version)<EOL><DEDENT>if version < Version(<NUM_LIT:0>, <NUM_LIT:3>):<EOL><INDENT>self.version = str(Version(<NUM_LIT:0>, <NUM_LIT:3>))<EOL>logging.info('<STR_LIT>',<EOL>self.version)<EOL><DEDENT><DEDENT>
Upgrade the serialized object if necessary. Raises: FutureVersionError: file was written by a future version of the software.
f7276:c5:m7
def force_to_voltage(self, force, frequency):
c_drop = self.calibration.c_drop(frequency)<EOL>if self.calibration._c_filler:<EOL><INDENT>c_filler = self.calibration.c_filler(frequency)<EOL><DEDENT>else:<EOL><INDENT>c_filler = <NUM_LIT:0><EOL><DEDENT>return np.sqrt(force * <NUM_LIT>/ (<NUM_LIT:0.5> * (c_drop - c_filler)))<EOL>
Convert a force in uN/mm to voltage. Parameters ---------- force : float Force in **uN/mm**. frequency : float Actuation frequency. Returns ------- float Actuation voltage to apply :data:`force` at an actuation frequency of :data:`frequency`.
f7276:c6:m1
@safe_series_resistor_index_read<EOL><INDENT>def series_capacitance(self, channel, resistor_index=None):<DEDENT>
if resistor_index is None:<EOL><INDENT>resistor_index = self.series_resistor_index(channel)<EOL><DEDENT>value = self._series_capacitance(channel)<EOL>try:<EOL><INDENT>if channel == <NUM_LIT:0>:<EOL><INDENT>self.calibration.C_hv[resistor_index] = value<EOL><DEDENT>else:<EOL><INDENT>self.calibration.C_fb[resistor_index] = value<EOL><DEDENT><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>return value<EOL>
Parameters ---------- channel : int Analog channel index. resistor_index : int, optional Series resistor channel index. If :data:`resistor_index` is not specified, the resistor-index from the current context _(i.e., the result of :attr:`series_resistor_index`)_ is used. Otherwise, the series-resistor is temporarily set to the value of :data:`resistor_index` to read the capacitance before restoring back to the original value. See definition of :meth:`safe_series_resistor_index_read` decorator. Returns ------- float Return the current series capacitance value for the specified channel.
f7276:c6:m2
@safe_series_resistor_index_read<EOL><INDENT>def series_resistance(self, channel, resistor_index=None):<DEDENT>
if resistor_index is None:<EOL><INDENT>resistor_index = self.series_resistor_index(channel)<EOL><DEDENT>value = self._series_resistance(channel)<EOL>try:<EOL><INDENT>if channel == <NUM_LIT:0>:<EOL><INDENT>self.calibration.R_hv[resistor_index] = value<EOL><DEDENT>else:<EOL><INDENT>self.calibration.R_fb[resistor_index] = value<EOL><DEDENT><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>return value<EOL>
Parameters ---------- channel : int Analog channel index. resistor_index : int, optional Series resistor channel index. If :data:`resistor_index` is not specified, the resistor-index from the current context _(i.e., the result of :attr:`series_resistor_index`)_ is used. Otherwise, the series-resistor is temporarily set to the value of :data:`resistor_index` to set the capacitance before restoring back to the original value. See definition of :meth:`safe_series_resistor_index_read` decorator. Returns ------- float Return the current series resistance value for the specified channel.
f7276:c6:m3
@safe_series_resistor_index_write<EOL><INDENT>def set_series_capacitance(self, channel, value, resistor_index=None):<DEDENT>
if resistor_index is None:<EOL><INDENT>resistor_index = self.series_resistor_index(channel)<EOL><DEDENT>try:<EOL><INDENT>if channel == <NUM_LIT:0>:<EOL><INDENT>self.calibration.C_hv[resistor_index] = value<EOL><DEDENT>else:<EOL><INDENT>self.calibration.C_fb[resistor_index] = value<EOL><DEDENT><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>return self._set_series_capacitance(channel, value)<EOL>
Set the current series capacitance value for the specified channel. Parameters ---------- channel : int Analog channel index. value : float Series capacitance value. resistor_index : int, optional Series resistor channel index. If :data:`resistor_index` is not specified, the resistor-index from the current context _(i.e., the result of :attr:`series_resistor_index`)_ is used. Otherwise, the series-resistor is temporarily set to the value of :data:`resistor_index` to set the capacitance before restoring back to the original value. Returns ------- int Return code from embedded call.
f7276:c6:m4
@safe_series_resistor_index_write<EOL><INDENT>def set_series_resistance(self, channel, value, resistor_index=None):<DEDENT>
if resistor_index is None:<EOL><INDENT>resistor_index = self.series_resistor_index(channel)<EOL><DEDENT>try:<EOL><INDENT>if channel == <NUM_LIT:0>:<EOL><INDENT>self.calibration.R_hv[resistor_index] = value<EOL><DEDENT>else:<EOL><INDENT>self.calibration.R_fb[resistor_index] = value<EOL><DEDENT><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>return self._set_series_resistance(channel, value)<EOL>
Set the current series resistance value for the specified channel. Parameters ---------- channel : int Analog channel index. value : float Series resistance value. resistor_index : int, optional Series resistor channel index. If :data:`resistor_index` is not specified, the resistor-index from the current context _(i.e., the result of :attr:`series_resistor_index`)_ is used. Otherwise, the series-resistor is temporarily set to the value of :data:`resistor_index` to set the resistance before restoring back to the original value. See definition of :meth:`safe_series_resistor_index_read` decorator. Returns ------- int Return code from embedded call.
f7276:c6:m5
@remote_command<EOL><INDENT>def connect(self, port=None, baud_rate=<NUM_LIT>):<DEDENT>
if isinstance(port, (str,)):<EOL><INDENT>ports = [port]<EOL><DEDENT>else:<EOL><INDENT>ports = port<EOL><DEDENT>if not ports:<EOL><INDENT>ports = serial_ports().index.tolist()<EOL>if not ports:<EOL><INDENT>raise IOError("<STR_LIT>")<EOL><DEDENT><DEDENT>for comport_i in ports:<EOL><INDENT>if self.connected():<EOL><INDENT>self.disconnect()<EOL>self.port = None<EOL>self._i2c_devices = {}<EOL><DEDENT>try:<EOL><INDENT>logger.debug('<STR_LIT>', comport_i)<EOL>Base.connect(self, str(comport_i), baud_rate)<EOL>self.port = comport_i<EOL>break<EOL><DEDENT>except BadVGND as exception:<EOL><INDENT>logger.warning(exception)<EOL>break<EOL><DEDENT>except RuntimeError as exception:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise RuntimeError('<STR_LIT>'<EOL>'<STR_LIT>' % ports)<EOL><DEDENT>name = self.name()<EOL>version = self.hardware_version()<EOL>firmware = self.software_version()<EOL>serial_number_string = "<STR_LIT>"<EOL>try:<EOL><INDENT>serial_number_string = "<STR_LIT>" % self.serial_number<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>logger.info("<STR_LIT>" %<EOL>(name, version, firmware, serial_number_string))<EOL>logger.info("<STR_LIT>"<EOL>"<STR_LIT>")<EOL>self._read_calibration_data()<EOL>try:<EOL><INDENT>self.__aref__ = self._aref()<EOL>logger.info("<STR_LIT>" % self.__aref__)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>expected = <NUM_LIT:2> ** <NUM_LIT:10>/<NUM_LIT:2><EOL>v = {}<EOL>channels = [<NUM_LIT:0>, <NUM_LIT:1>]<EOL>damaged = []<EOL>for channel in channels:<EOL><INDENT>try:<EOL><INDENT>v[channel] = np.mean(self.analog_reads(channel, <NUM_LIT:10>))<EOL>logger.info("<STR_LIT>", channel,<EOL>self.__aref__ * v[channel] / (<NUM_LIT:2> ** <NUM_LIT:10>), <NUM_LIT> *<EOL>v[channel] / (<NUM_LIT:2> ** <NUM_LIT:10>))<EOL>if np.abs(v[channel] - expected) / expected > <NUM_LIT>:<EOL><INDENT>damaged.append(channel)<EOL><DEDENT><DEDENT>except:<EOL><INDENT>break<EOL><DEDENT><DEDENT>self._i2c_scan()<EOL>if damaged:<EOL><INDENT>if len(damaged) == 
<NUM_LIT:1>:<EOL><INDENT>msg = "<STR_LIT>" % damaged[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>msg = "<STR_LIT>" % damaged<EOL><DEDENT>raise BadVGND(msg + "<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>return self.RETURN_OK<EOL>
Parameters ---------- port : str or list-like, optional Port (or list of ports) to try to connect to as a DMF Control Board. baud_rate : int, optional Returns ------- str Port DMF control board was connected on. Raises ------ RuntimeError If connection could not be established. IOError If no ports were specified and Arduino Mega2560 not found on any port.
f7276:c6:m7
def persistent_write(self, address, byte, refresh_config=False):
self._persistent_write(address, byte)<EOL>if refresh_config:<EOL><INDENT>self.load_config(False)<EOL><DEDENT>
Write a single byte to an address in persistent memory. Parameters ---------- address : int Address in persistent memory (e.g., EEPROM). byte : int Value to write to address. refresh_config : bool, optional If ``True``, :meth:`load_config()` is called afterward to refresh the configuration settings.
f7276:c6:m10
def persistent_read_multibyte(self, address, count=None, dtype=np.uint8):
nbytes = np.dtype(dtype).itemsize<EOL>if count is not None:<EOL><INDENT>nbytes *= count<EOL><DEDENT>data_bytes = np.array([self.persistent_read(address + i)<EOL>for i in range(nbytes)], dtype=np.uint8)<EOL>result = data_bytes.view(dtype)<EOL>if count is None:<EOL><INDENT>return result[<NUM_LIT:0>]<EOL><DEDENT>return result<EOL>
Read a chunk of data from persistent memory. Parameters ---------- address : int Address in persistent memory (e.g., EEPROM). count : int, optional Number of values to read. If not set, read a single value of the specified :data:`dtype`. dtype : numpy.dtype, optional The type of the value(s) to read. Returns ------- dtype or numpy.array(dtype=dtype) If :data:`count` is ``None``, return single value. Otherwise, return array of values.
f7276:c6:m11
def persistent_write_multibyte(self, address, data, refresh_config=False):
for i, byte in enumerate(data.view(np.uint8)):<EOL><INDENT>self.persistent_write(address + i, int(byte))<EOL><DEDENT>if refresh_config:<EOL><INDENT>self.load_config(False)<EOL><DEDENT>
Write multiple bytes to an address in persistent memory. Parameters ---------- address : int Address in persistent memory (e.g., EEPROM). data : numpy.array Data to write. refresh_config : bool, optional If ``True``, :meth:`load_config()` is called afterward to refresh the configuration settings.
f7276:c6:m12
@remote_command<EOL><INDENT>def measure_impedance(self, sampling_window_ms, n_sampling_windows,<EOL>delay_between_windows_ms, interleave_samples, rms,<EOL>state):<DEDENT>
state_ = uint8_tVector()<EOL>for i in range(<NUM_LIT:0>, len(state)):<EOL><INDENT>state_.append(int(state[i]))<EOL><DEDENT>buffer = np.array(Base.measure_impedance(self,<EOL>sampling_window_ms,<EOL>n_sampling_windows,<EOL>delay_between_windows_ms,<EOL>interleave_samples,<EOL>rms,<EOL>state_))<EOL>return self.measure_impedance_buffer_to_feedback_result(buffer)<EOL>
Measure voltage across load of each of the following control board feedback circuits: - Reference _(i.e., attenuated high-voltage amplifier output)_. - Load _(i.e., voltage across DMF device)_. The measured voltage _(i.e., ``V2``)_ can be used to compute the impedance of the measured load, the input voltage _(i.e., ``V1``)_, etc. Parameters ---------- sampling_window_ms : float Length of sampling window (in milliseconds) for each RMS/peak-to-peak voltage measurement. n_sampling_windows : int Number of RMS/peak-to-peak voltage measurements to take. delay_between_windows_ms : float Delay (in milliseconds) between RMS/peak-to-peak voltage measurements. interleave_samples : bool If ``True``, interleave RMS/peak-to-peak measurements for analog channels. For example, ``[<i_0>, <j_0>, <i_1>, <j_1>, ..., <i_n>, <j_n>]`` where ``i`` and ``j`` correspond to two different analog channels. If ``False``, all measurements for each analog channel are taken together. For example, ``[<i_0>, ..., <i_n>, <j_0>, ..., <j_n>]`` where ``i`` and ``j`` correspond to two different analog channels. rms : bool If ``True``, a RMS voltage measurement is collected for each sampling window. Otherwise, peak-to-peak measurements are collected. state : list State of device channels. Length should be equal to the number of device channels. Returns ------- :class:`FeedbackResults`
f7276:c6:m44
@remote_command<EOL><INDENT>def sweep_channels(self,<EOL>sampling_window_ms,<EOL>n_sampling_windows_per_channel,<EOL>delay_between_windows_ms,<EOL>interleave_samples,<EOL>rms,<EOL>channel_mask):<DEDENT>
channel_cumsum = np.cumsum(channel_mask)<EOL>n_channels_in_mask = channel_cumsum[-<NUM_LIT:1>]<EOL>max_channels_per_call = (self.MAX_PAYLOAD_LENGTH - <NUM_LIT:4>*<NUM_LIT:4>) /(<NUM_LIT:3>*<NUM_LIT:2>) / n_sampling_windows_per_channel<EOL>self._channel_mask_cache = np.array(channel_mask)<EOL>buffer = np.zeros(<NUM_LIT:4>)<EOL>for i in range(int(math.ceil(n_channels_in_mask / max_channels_per_call))):<EOL><INDENT>ind = np.logical_and(channel_cumsum >= i * max_channels_per_call,<EOL>channel_cumsum < (i + <NUM_LIT:1>) * max_channels_per_call)<EOL>channel_mask_ = np.zeros(len(self._channel_mask_cache), dtype=int)<EOL>channel_mask_[ind] = self._channel_mask_cache[ind]<EOL>channel_mask_uint8 = uint8_tVector()<EOL>channel_mask_uint8.extend(channel_mask_)<EOL>buffer = buffer[:-<NUM_LIT:4>]<EOL>buffer = np.concatenate((buffer, np.array(Base.sweep_channels(self,<EOL>sampling_window_ms,<EOL>n_sampling_windows_per_channel,<EOL>delay_between_windows_ms,<EOL>interleave_samples,<EOL>rms,<EOL>channel_mask_uint8))))<EOL><DEDENT>return self.sweep_channels_buffer_to_feedback_result(buffer)<EOL>
Measure voltage across load of each of the following control board feedback circuits: - Reference _(i.e., attenuated high-voltage amplifier output)_. - Load _(i.e., voltage across DMF device)_. For each channel in the channel mask. The measured voltage _(i.e., ``V2``)_ can be used to compute the impedance of the measured load, the input voltage _(i.e., ``V1``)_, etc. Parameters ---------- sampling_window_ms : float Length of sampling window (in milliseconds) for each RMS/peak-to-peak voltage measurement. n_sampling_windows_per_channel : int Number of RMS/peak-to-peak voltage measurements to take. delay_between_windows_ms : float Delay (in milliseconds) between RMS/peak-to-peak voltage measurements. interleave_samples : bool If ``True``, interleave RMS/peak-to-peak measurements for analog channels. For example, ``[<i_0>, <j_0>, <i_1>, <j_1>, ..., <i_n>, <j_n>]`` where ``i`` and ``j`` correspond to two different analog channels. If ``False``, all measurements for each analog channel are taken together. For example, ``[<i_0>, ..., <i_n>, <j_0>, ..., <j_n>]`` where ``i`` and ``j`` correspond to two different analog channels. rms : bool If ``True``, a RMS voltage measurement is collected for each sampling window. Otherwise, peak-to-peak measurements are collected. channel_mask : array-like State of device channels. Length should be equal to the number of device channels. Returns ------- pandas.DataFrame Table containing one actuation RMS measurement and one device load impedance measurement per row and the columns ``frequency``, ``voltage``, ``channel_i``, ``V_actuation``, ``capacitance``, and ``impedance``. Rows are indexed by time since first measurement in frame.
f7276:c6:m45
@remote_command<EOL><INDENT>def sweep_channels_slow(self, sampling_window_ms, n_sampling_windows,<EOL>delay_between_windows_ms, interleave_samples,<EOL>use_rms, channel_mask):<DEDENT>
channel_count = len(channel_mask)<EOL>scan_count = sum(channel_mask)<EOL>frames = []<EOL>print('<STR_LIT>')<EOL>scan_count_i = <NUM_LIT:0><EOL>for channel_i, state_i in enumerate(channel_mask):<EOL><INDENT>if state_i:<EOL><INDENT>scan_count_i += <NUM_LIT:1><EOL>print('<STR_LIT>'.format(channel_i,<EOL>scan_count_i,<EOL>scan_count), end='<STR_LIT:U+0020>')<EOL>channel_states_i = [<NUM_LIT:0>] * channel_count<EOL>channel_states_i[channel_i] = <NUM_LIT:1><EOL>start_time_i = datetime.utcnow()<EOL>feedback_results_i =self.measure_impedance(sampling_window_ms,<EOL>n_sampling_windows,<EOL>delay_between_windows_ms,<EOL>interleave_samples, use_rms,<EOL>channel_states_i)<EOL>df_result_i =feedback_results_to_impedance_frame(feedback_results_i)<EOL>df_result_i.insert(<NUM_LIT:2>, '<STR_LIT>', channel_i)<EOL>df_result_i.insert(<NUM_LIT:0>, '<STR_LIT>', start_time_i)<EOL>frames.append(df_result_i)<EOL><DEDENT><DEDENT>print('<STR_LIT>')<EOL>if not frames:<EOL><INDENT>df_result = pd.DataFrame(None, columns=['<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>df_result = pd.concat(frames)<EOL><DEDENT>return df_result<EOL>
Measure voltage across load of each of the following control board feedback circuits: - Reference _(i.e., attenuated high-voltage amplifier output)_. - Load _(i.e., voltage across DMF device)_. For each channel in the channel mask. The measured voltage _(i.e., ``V2``)_ can be used to compute the impedance of the measured load, the input voltage _(i.e., ``V1``)_, etc. **N.B.,** Use one firmware call per channel, as opposed to scanning all channels with a single firmware call as in :meth:`sweep_channels` method. Returns ------- pandas.DataFrame Table containing one actuation RMS measurement and one device load impedance measurement per row and the columns ``frequency``, ``voltage``, ``channel_i``, ``V_actuation``, ``capacitance``, and ``impedance``. Rows are indexed by time since first measurement in frame.
f7276:c6:m46
@remote_command<EOL><INDENT>def i2c_scan(self):<DEDENT>
return np.array(Base.i2c_scan(self))<EOL>
Returns ------- numpy.array Array of addresses of I2C devices responding to I2C scan.
f7276:c6:m47
@remote_command<EOL><INDENT>def i2c_write(self, address, data):<DEDENT>
data_ = uint8_tVector()<EOL>for i in range(<NUM_LIT:0>, len(data)):<EOL><INDENT>data_.append(int(data[i]))<EOL><DEDENT>Base.i2c_write(self, address, data_)<EOL>
Parameters ---------- address : int Address of I2C device. data : array-like Array of bytes to send to device.
f7276:c6:m48
def read_all_series_channel_values(self, f, channel):
values = []<EOL>channel_max_param_count = [<NUM_LIT:3>, <NUM_LIT:5>]<EOL>for i in range(channel_max_param_count[channel]):<EOL><INDENT>try:<EOL><INDENT>values.append(f(channel, i))<EOL><DEDENT>except RuntimeError:<EOL><INDENT>break<EOL><DEDENT><DEDENT>return values<EOL>
Return all values for the specified channel of the type corresponding to the function `f`, where `f` is either `self.series_resistance` or `self.series_capacitance`.
f7276:c6:m76
def write_all_series_channel_values(self, read_f, write_f, channel,<EOL>values):
<EOL>values = copy.deepcopy(values)<EOL>original_values = self.read_all_series_channel_values(read_f, channel)<EOL>assert(len(values) == len(original_values))<EOL>for i in range(len(original_values)):<EOL><INDENT>if values[i] != original_values[i]:<EOL><INDENT>write_f(channel, values[i], i)<EOL><DEDENT><DEDENT>
Return all values for the specified channel of the type corresponding to the function `f`, where `f` is either `self.series_resistance` or `self.series_capacitance`.
f7276:c6:m77
def plot_capacitance_summary(data, fig=None, color_map=mcm.Reds_r,<EOL>vmax=<NUM_LIT>, <EOL>reduce_func='<STR_LIT>'):
<EOL>channel_groups = data['<STR_LIT>'].groupby('<STR_LIT>')<EOL>channel_capacitance = getattr(channel_groups['<STR_LIT>'], reduce_func)()<EOL>vmax = max(<NUM_LIT> * (channel_capacitance.median() + channel_capacitance.min()),<EOL>vmax)<EOL>grid = GridSpec(<NUM_LIT:2>, <NUM_LIT:8>)<EOL>if fig is None:<EOL><INDENT>fig = plt.figure(figsize=(<NUM_LIT>, <NUM_LIT:10>))<EOL><DEDENT>axes = [fig.add_subplot(grid[:, :<NUM_LIT:3>]),<EOL>fig.add_subplot(grid[<NUM_LIT:0>, <NUM_LIT:3>:]),<EOL>fig.add_subplot(grid[<NUM_LIT:1>, <NUM_LIT:3>:])]<EOL>def label_electrodes(axis, df_shapes, channels_by_electrode):<EOL><INDENT>df_shape_min = df_shapes.groupby('<STR_LIT:id>')[['<STR_LIT:x>', '<STR_LIT:y>']].min() * <NUM_LIT><EOL>df_shape_max = df_shapes.groupby('<STR_LIT:id>')[['<STR_LIT:x>', '<STR_LIT:y>']].max() * <NUM_LIT><EOL>df_shape_centers = <NUM_LIT> * (df_shape_max + df_shape_min)<EOL>df_shape_centers.y = df_shapes.y.max() * <NUM_LIT> - df_shape_centers.y<EOL>light_color = '<STR_LIT>'<EOL>dark_color = '<STR_LIT>'<EOL>values = channel_capacitance<EOL>norm = Normalize(min(values), vmax, clip=True)<EOL>colors = color_map(norm(values.values).filled())<EOL>lightness = pd.Series(colors[:, :<NUM_LIT:3>].mean(axis=<NUM_LIT:1>), index=values.index)<EOL>for electrode_i, (x_i, y_i) in df_shape_centers.iterrows():<EOL><INDENT>channel_i = channels_by_electrode.ix[electrode_i]<EOL>axis.text(x_i, y_i, channel_i, horizontalalignment='<STR_LIT>',<EOL>verticalalignment='<STR_LIT>',<EOL>color=dark_color if channel_i in lightness.index and<EOL>lightness.ix[channel_i] > <NUM_LIT:0.5> else light_color)<EOL><DEDENT><DEDENT>plot_electrode_capacitance(data['<STR_LIT>'],<EOL>channel_capacitance,<EOL>data['<STR_LIT>'],<EOL>axis=axes[<NUM_LIT:0>], vmax=vmax)<EOL>label_electrodes(axes[<NUM_LIT:0>], data['<STR_LIT>'],<EOL>data['<STR_LIT>'])<EOL>plot_color_map_bars(channel_capacitance, color_map=color_map, axis=axes[<NUM_LIT:1>],<EOL>vmax=vmax)<EOL>channel_capacitance_ordered = 
channel_capacitance.sort_values()<EOL>plot_color_map_bars(channel_capacitance_ordered, color_map=color_map,<EOL>axis=axes[<NUM_LIT:2>], vmax=vmax)<EOL>def label_bars(axis, values, fontsize=<NUM_LIT:8>, **kwargs):<EOL><INDENT>trans_offset = offset_copy(axis.transData, fig=axis.get_figure(),<EOL>y=<NUM_LIT>)<EOL>for i, value_i in zip(axis.get_xticks(), values):<EOL><INDENT>axis.text(i, value_i, F_formatter(value_i),<EOL>horizontalalignment='<STR_LIT>', verticalalignment='<STR_LIT>',<EOL>rotation=<NUM_LIT>, fontsize=fontsize, transform=trans_offset)<EOL><DEDENT><DEDENT>label_bars(axes[<NUM_LIT:1>], channel_capacitance)<EOL>label_bars(axes[<NUM_LIT:2>], channel_capacitance_ordered)<EOL>for ax in axes:<EOL><INDENT>ax.yaxis.set_major_formatter(F_formatter)<EOL><DEDENT>fig.tight_layout()<EOL>return axes<EOL>
| ---------- | ------------------------- | | | Capacitance of | | Device | channels (index order) | | drawing | ------------------------- | | | Capacitance of | | | channels (C order) | | ---------- | ------------------------- |
f7277:m2
def plot_channel_sweep(proxy, start_channel):
test_loads = TEST_LOADS.copy()<EOL>test_loads.index += start_channel<EOL>results = sweep_channels(proxy, test_loads)<EOL>normalized_measurements = (results['<STR_LIT>']<EOL>/ results['<STR_LIT>'])<EOL>fig, axis = plt.subplots(figsize=(<NUM_LIT:10>, <NUM_LIT:8>))<EOL>axis.bar(normalized_measurements.index - <NUM_LIT>, normalized_measurements,<EOL>width=<NUM_LIT>, edgecolor='<STR_LIT:none>', facecolor='<STR_LIT>')<EOL>axis.set_xlim(left=test_loads.index.min() - <NUM_LIT:0.5>,<EOL>right=test_loads.index.max() + <NUM_LIT:0.5>)<EOL>axis.set_xlabel('<STR_LIT>')<EOL>axis.set_ylabel(r'<STR_LIT>',<EOL>fontsize=<NUM_LIT>)<EOL>return results<EOL>
Parameters ---------- proxy : DMFControlBoard start_channel : int Channel number from which to start a channel sweep (should be a multiple of 40, e.g., 0, 40, 80). Returns ------- pandas.DataFrame See description of return of :func:`sweep_channels`.
f7281:m1
def parse_args(args=None):
from argparse import ArgumentParser<EOL>if args is None:<EOL><INDENT>args = sys.argv<EOL><DEDENT>parser = ArgumentParser(description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', help='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>args = parser.parse_args()<EOL>return args<EOL>
Parse command-line arguments and return the parsed arguments namespace.
f7282:m1
def makename(package, module):
<EOL>if package:<EOL><INDENT>name = package<EOL>if module:<EOL><INDENT>name += '<STR_LIT:.>' + module<EOL><DEDENT><DEDENT>else:<EOL><INDENT>name = module<EOL><DEDENT>return name<EOL>
Join package and module with a dot.
f7283:m0
def write_file(name, text, opts):
if opts.dryrun:<EOL><INDENT>return<EOL><DEDENT>fname = os.path.join(opts.destdir, "<STR_LIT>" % (name, opts.suffix))<EOL>if not opts.force and os.path.isfile(fname):<EOL><INDENT>print('<STR_LIT>' % fname)<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>' % fname)<EOL>f = open(fname, '<STR_LIT:w>')<EOL>f.write(text)<EOL>f.close()<EOL><DEDENT>
Write the output file for module/package <name>.
f7283:m1
def format_heading(level, text):
underlining = ['<STR_LIT:=>', '<STR_LIT:->', '<STR_LIT>', ][level-<NUM_LIT:1>] * len(text)<EOL>return '<STR_LIT>' % (text, underlining)<EOL>
Create a heading of <level> [1, 2 or 3 supported].
f7283:m2
def format_directive(module, package=None):
directive = '<STR_LIT>' % makename(package, module)<EOL>for option in OPTIONS:<EOL><INDENT>directive += '<STR_LIT>' % option<EOL><DEDENT>return directive<EOL>
Create the automodule directive and add the options.
f7283:m3
def create_module_file(package, module, opts):
text = format_heading(<NUM_LIT:1>, '<STR_LIT>' % module)<EOL>text += format_heading(<NUM_LIT:2>, '<STR_LIT>' % module)<EOL>text += format_directive(module, package)<EOL>write_file(makename(package, module), text, opts)<EOL>
Build the text of the file and write the file.
f7283:m4
def create_package_file(root, master_package, subroot, py_files, opts, subs):
package = os.path.split(root)[-<NUM_LIT:1>]<EOL>text = format_heading(<NUM_LIT:1>, '<STR_LIT>' % package)<EOL>for py_file in py_files:<EOL><INDENT>if shall_skip(os.path.join(root, py_file)):<EOL><INDENT>continue<EOL><DEDENT>is_package = py_file == INIT<EOL>py_file = os.path.splitext(py_file)[<NUM_LIT:0>]<EOL>py_path = makename(subroot, py_file)<EOL>if is_package:<EOL><INDENT>heading = '<STR_LIT>' % package<EOL><DEDENT>else:<EOL><INDENT>heading = '<STR_LIT>' % py_file<EOL><DEDENT>text += format_heading(<NUM_LIT:2>, heading)<EOL>text += format_directive(is_package and subroot or py_path, master_package)<EOL>text += '<STR_LIT:\n>'<EOL><DEDENT>subs = [sub for sub in subs if os.path.isfile(os.path.join(root, sub, INIT))]<EOL>if subs:<EOL><INDENT>text += format_heading(<NUM_LIT:2>, '<STR_LIT>')<EOL>text += '<STR_LIT>'<EOL>for sub in subs:<EOL><INDENT>text += '<STR_LIT>' % (makename(master_package, subroot), sub)<EOL><DEDENT>text += '<STR_LIT:\n>'<EOL><DEDENT>write_file(makename(master_package, subroot), text, opts)<EOL>
Build the text of the file and write the file.
f7283:m5
def create_modules_toc_file(master_package, modules, opts, name='<STR_LIT>'):
text = format_heading(<NUM_LIT:1>, '<STR_LIT>' % opts.header)<EOL>text += '<STR_LIT>'<EOL>text += '<STR_LIT>' % opts.maxdepth<EOL>modules.sort()<EOL>prev_module = '<STR_LIT>'<EOL>for module in modules:<EOL><INDENT>if module.startswith(prev_module + '<STR_LIT:.>'):<EOL><INDENT>continue<EOL><DEDENT>prev_module = module<EOL>text += '<STR_LIT>' % module<EOL><DEDENT>write_file(name, text, opts)<EOL>
Create the module's index.
f7283:m6
def shall_skip(module):
<EOL>return os.path.getsize(module) < <NUM_LIT:3><EOL>
Check if we want to skip this module.
f7283:m7
def recurse_tree(path, excludes, opts):
<EOL>path = os.path.abspath(path)<EOL>if INIT in os.listdir(path):<EOL><INDENT>package_name = path.split(os.path.sep)[-<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>package_name = None<EOL><DEDENT>toc = []<EOL>tree = os.walk(path, False)<EOL>for root, subs, files in tree:<EOL><INDENT>py_files = sorted([f for f in files if os.path.splitext(f)[<NUM_LIT:1>] == '<STR_LIT>'])<EOL>if INIT in py_files:<EOL><INDENT>py_files.remove(INIT)<EOL>py_files.insert(<NUM_LIT:0>, INIT)<EOL><DEDENT>subs = sorted([sub for sub in subs if sub[<NUM_LIT:0>] not in ['<STR_LIT:.>', '<STR_LIT:_>']])<EOL>if "<STR_LIT>" in root or "<STR_LIT>" in rootor not py_filesor is_excluded(root, excludes):<EOL><INDENT>continue<EOL><DEDENT>if INIT in py_files:<EOL><INDENT>if (<EOL>subs<EOL>or<EOL>len(py_files) > <NUM_LIT:1><EOL>or<EOL>not shall_skip(os.path.join(root, INIT))<EOL>):<EOL><INDENT>subroot = root[len(path):].lstrip(os.path.sep).replace(os.path.sep, '<STR_LIT:.>')<EOL>create_package_file(root, package_name, subroot, py_files, opts, subs)<EOL>toc.append(makename(package_name, subroot))<EOL><DEDENT><DEDENT>elif root == path:<EOL><INDENT>for py_file in py_files:<EOL><INDENT>if not shall_skip(os.path.join(path, py_file)):<EOL><INDENT>module = os.path.splitext(py_file)[<NUM_LIT:0>]<EOL>create_module_file(package_name, module, opts)<EOL>toc.append(makename(package_name, module))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if not opts.notoc:<EOL><INDENT>create_modules_toc_file(package_name, toc, opts)<EOL><DEDENT>
Look for every file in the directory tree and create the corresponding ReST files.
f7283:m8
def normalize_excludes(rootpath, excludes):
sep = os.path.sep<EOL>f_excludes = []<EOL>for exclude in excludes:<EOL><INDENT>if not os.path.isabs(exclude) and not exclude.startswith(rootpath):<EOL><INDENT>exclude = os.path.join(rootpath, exclude)<EOL><DEDENT>if not exclude.endswith(sep):<EOL><INDENT>exclude += sep<EOL><DEDENT>f_excludes.append(exclude)<EOL><DEDENT>return f_excludes<EOL>
Normalize the excluded directory list: * must be either an absolute path or start with rootpath, * otherwise it is joined with rootpath * with trailing slash
f7283:m9
def is_excluded(root, excludes):
sep = os.path.sep<EOL>if not root.endswith(sep):<EOL><INDENT>root += sep<EOL><DEDENT>for exclude in excludes:<EOL><INDENT>if root.startswith(exclude):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>
Check if the directory is in the exclude list. Note: by having trailing slashes, we avoid common prefix issues, like e.g. an exclude "foo" also accidentally excluding "foobar".
f7283:m10
def main():
parser = optparse.OptionParser(usage="""<STR_LIT>""")<EOL>parser.add_option("<STR_LIT>", "<STR_LIT>", action="<STR_LIT:store>", dest="<STR_LIT>", help="<STR_LIT>", default="<STR_LIT>")<EOL>parser.add_option("<STR_LIT>", "<STR_LIT>", action="<STR_LIT:store>", dest="<STR_LIT>", help="<STR_LIT>", default="<STR_LIT>")<EOL>parser.add_option("<STR_LIT>", "<STR_LIT>", action="<STR_LIT:store>", dest="<STR_LIT>", help="<STR_LIT>", default="<STR_LIT>")<EOL>parser.add_option("<STR_LIT>", "<STR_LIT>", action="<STR_LIT:store>", dest="<STR_LIT>", help="<STR_LIT>", type="<STR_LIT:int>", default=<NUM_LIT:4>)<EOL>parser.add_option("<STR_LIT>", "<STR_LIT>", action="<STR_LIT:store_true>", dest="<STR_LIT>", help="<STR_LIT>")<EOL>parser.add_option("<STR_LIT>", "<STR_LIT>", action="<STR_LIT:store_true>", dest="<STR_LIT>", help="<STR_LIT>")<EOL>parser.add_option("<STR_LIT>", "<STR_LIT>", action="<STR_LIT:store_true>", dest="<STR_LIT>", help="<STR_LIT>")<EOL>(opts, args) = parser.parse_args()<EOL>if not args:<EOL><INDENT>parser.error("<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>rootpath, excludes = args[<NUM_LIT:0>], args[<NUM_LIT:1>:]<EOL>if os.path.isdir(rootpath):<EOL><INDENT>if opts.destdir and os.path.isdir(opts.destdir):<EOL><INDENT>excludes = normalize_excludes(rootpath, excludes)<EOL>recurse_tree(rootpath, excludes, opts)<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>' % opts.destdir)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>print('<STR_LIT>' % rootpath)<EOL><DEDENT><DEDENT>
Parse and check the command line arguments.
f7283:m11
@register.filter<EOL>def get_page_count(pdf_file_path):
pdf = PdfFileReader(file(pdf_file_path, "<STR_LIT:rb>"))<EOL>return pdf.getNumPages()<EOL>
Returns the number of pages of a given pdf file. Usage:: {% load cmsplugin_pdf_tags %} Pages: {{ pdf_plugin.file.path|get_page_count }}
f7294:m0
def save(self, *args, **kwargs):
<EOL>img = Image(filename=self.file.path + '<STR_LIT>')<EOL>filename = os.path.basename(self.file.path).split('<STR_LIT:.>')[:-<NUM_LIT:1>]<EOL>if type(filename) == list:<EOL><INDENT>filename = '<STR_LIT>'.join(filename)<EOL><DEDENT>image_dir = os.path.join(<EOL>django_settings.MEDIA_ROOT, UPLOAD_TO_DIR)<EOL>if not os.path.exists(image_dir):<EOL><INDENT>os.makedirs(image_dir)<EOL><DEDENT>image_path = os.path.join(<EOL>image_dir, '<STR_LIT>'.format(filename))<EOL>tmp_image_path = os.path.join(<EOL>image_dir, '<STR_LIT>'.format(filename))<EOL>try:<EOL><INDENT>os.remove(image_path)<EOL><DEDENT>except OSError:<EOL><INDENT>pass<EOL><DEDENT>old_files = glob.glob('<STR_LIT>'.format(image_path))<EOL>for old_file in old_files:<EOL><INDENT>try:<EOL><INDENT>os.remove(old_file)<EOL><DEDENT>except OSError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>img.save(filename=tmp_image_path)<EOL>with open(tmp_image_path, '<STR_LIT:r>') as f:<EOL><INDENT>self.image.save('<STR_LIT>'.format(filename), File(f), save=False)<EOL><DEDENT>super(PDFPluginModel, self).save(*args, **kwargs)<EOL>try:<EOL><INDENT>os.remove(tmp_image_path)<EOL><DEDENT>except OSError:<EOL><INDENT>pass<EOL><DEDENT>
Customized to generate an image from the pdf file.
f7300:c0:m1
def read(*paths):
basedir = os.path.dirname(__file__)<EOL>fullpath = os.path.join(basedir, *paths)<EOL>contents = io.open(fullpath, encoding='<STR_LIT:utf-8>').read().strip()<EOL>return contents<EOL>
Read a text file.
f7346:m0
def create_cells(headers, schema_fields, values=None, row_number=None):
fillvalue = '<STR_LIT>'<EOL>is_header_row = (values is None)<EOL>cells = []<EOL>iterator = zip_longest(headers, schema_fields, values or [], fillvalue=fillvalue)<EOL>for column_number, (header, field, value) in enumerate(iterator, start=<NUM_LIT:1>):<EOL><INDENT>if header == fillvalue:<EOL><INDENT>header = None<EOL><DEDENT>elif is_header_row:<EOL><INDENT>value = header<EOL><DEDENT>if field == fillvalue:<EOL><INDENT>field = None<EOL><DEDENT>if value == fillvalue:<EOL><INDENT>value = None<EOL><DEDENT>elif value is None:<EOL><INDENT>value = '<STR_LIT>'<EOL><DEDENT>cell = create_cell(header, value, field, column_number, row_number)<EOL>cells.append(cell)<EOL><DEDENT>return cells<EOL>
Create list of cells from headers, fields and values. Args: headers (List[str]): The headers values. schema_fields (List[tableschema.field.Field]): The tableschema fields. values (List[Any], optional): The cells values. If not specified, the created cells will have the same values as their corresponding headers. This is useful for specifying headers cells. If the list has any `None` values, as is the case on empty cells, the resulting Cell will have an empty string value. If the `values` list has a different length than the `headers`, the resulting Cell will have value `None`. row_number (int, optional): The row number. Returns: List[dict]: List of cells.
f7353:m0
@click.group(cls=DefaultGroup, default='<STR_LIT>', default_if_no_args=True)<EOL>@click.version_option(goodtables.__version__, message='<STR_LIT>')<EOL>def cli():
pass<EOL>
Tabular files validator. There are two categories of validation checks available: * Structural checks: ensure there are no empty rows, no blank headers, etc. * Content checks: ensure the values have the correct types (e.g. string), their format is valid (e.g. e-mail), and they respect some constraint (e.g. age is greater than 18). \b Full documentation at: <https://github.com/frictionlessdata/goodtables-py/>
f7354:m0
@cli.command()<EOL>@click.argument('<STR_LIT>', type=click.Path(), nargs=-<NUM_LIT:1>, required=True)<EOL>@click.option(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>type=click.File('<STR_LIT:w>'),<EOL>default='<STR_LIT:->',<EOL>help='<STR_LIT>'<EOL>)<EOL>def init(paths, output, **kwargs):
dp = goodtables.init_datapackage(paths)<EOL>click.secho(<EOL>json_module.dumps(dp.descriptor, indent=<NUM_LIT:4>),<EOL>file=output<EOL>)<EOL>exit(dp.valid)<EOL>
Init data package from list of files. It will also infer tabular data's schemas from their contents.
f7354:m2
def validate(source, **options):
source, options, inspector_settings = _parse_arguments(source, **options)<EOL>inspector = Inspector(**inspector_settings)<EOL>report = inspector.inspect(source, **options)<EOL>return report<EOL>
Validates a source file and returns a report. Args: source (Union[str, Dict, List[Dict], IO]): The source to be validated. It can be a local file path, URL, dict, list of dicts, or a file-like object. If it's a list of dicts and the `preset` is "nested", each of the dict key's will be used as if it was passed as a keyword argument to this method. The file can be a CSV, XLS, JSON, and any other format supported by `tabulator`_. Keyword Args: checks (List[str]): List of checks names to be enabled. They can be individual check names (e.g. `blank-headers`), or check types (e.g. `structure`). skip_checks (List[str]): List of checks names to be skipped. They can be individual check names (e.g. `blank-headers`), or check types (e.g. `structure`). infer_schema (bool): Infer schema if one wasn't passed as an argument. infer_fields (bool): Infer schema for columns not present in the received schema. order_fields (bool): Order source columns based on schema fields order. This is useful when you don't want to validate that the data columns' order is the same as the schema's. error_limit (int): Stop validation if the number of errors per table exceeds this value. table_limit (int): Maximum number of tables to validate. row_limit (int): Maximum number of rows to validate. preset (str): Dataset type could be `table` (default), `datapackage`, `nested` or custom. Usually, the preset can be inferred from the source, so you don't need to define it. Any (Any): Any additional arguments not defined here will be passed on, depending on the chosen `preset`. If the `preset` is `table`, the extra arguments will be passed on to `tabulator`_, if it is `datapackage`, they will be passed on to the `datapackage`_ constructor. # Table preset schema (Union[str, Dict, IO]): The Table Schema for the source. headers (Union[int, List[str]): Either the row number that contains the headers, or a list with them. If the row number is given, ????? scheme (str): The scheme used to access the source (e.g. 
`file`, `http`). This is usually inferred correctly from the source. See the `tabulator`_ documentation for the list of supported schemes. format (str): Format of the source data (`csv`, `datapackage`, ...). This is usually inferred correctly from the source. See the the `tabulator`_ documentation for the list of supported formats. encoding (str): Encoding of the source. skip_rows (Union[int, List[Union[int, str]]]): Row numbers or a string. Rows beginning with the string will be ignored (e.g. '#', '//'). Raises: GoodtablesException: Raised on any non-tabular error. Returns: dict: The validation report. .. _tabulator: https://github.com/frictionlessdata/tabulator-py .. _tabulator_schemes: https://github.com/frictionlessdata/tabulator-py .. _tabulator: https://github.com/frictionlessdata/datapackage-py
f7379:m0
def init_datapackage(resource_paths):
dp = datapackage.Package({<EOL>'<STR_LIT:name>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>})<EOL>for path in resource_paths:<EOL><INDENT>dp.infer(path)<EOL><DEDENT>return dp<EOL>
Create tabular data package with resources. It will also infer the tabular resources' schemas. Args: resource_paths (List[str]): Paths to the data package resources. Returns: datapackage.Package: The data package.
f7379:m1
def preset(name):
def decorator(func):<EOL><INDENT>registry.register_preset(func, name)<EOL>return func<EOL><DEDENT>return decorator<EOL>
https://github.com/frictionlessdata/goodtables-py#custom-presets
f7380:m0
def check(name, type=None, context=None, position=None):
def decorator(func):<EOL><INDENT>registry.register_check(func, name, type, context, position)<EOL>return func<EOL><DEDENT>return decorator<EOL>
https://github.com/frictionlessdata/goodtables-py#custom-checks
f7380:m1
def _clean_empty(d):
if not isinstance(d, (dict, list)):<EOL><INDENT>return d<EOL><DEDENT>if isinstance(d, list):<EOL><INDENT>return [v for v in (_clean_empty(v) for v in d) if v is not None]<EOL><DEDENT>return {<EOL>k: v for k, v in<EOL>((k, _clean_empty(v)) for k, v in d.items())<EOL>if v is not None<EOL>}<EOL>
Remove None values from a dict.
f7382:m3
def __init__(self,<EOL>checks=['<STR_LIT>', '<STR_LIT>'],<EOL>skip_checks=[],<EOL>infer_schema=False,<EOL>infer_fields=False,<EOL>order_fields=False,<EOL>error_limit=config.DEFAULT_ERROR_LIMIT,<EOL>table_limit=config.DEFAULT_TABLE_LIMIT,<EOL>row_limit=config.DEFAULT_ROW_LIMIT):
<EOL>self.__checks = checks<EOL>self.__skip_checks = skip_checks<EOL>self.__infer_schema = infer_schema<EOL>self.__infer_fields = infer_fields<EOL>self.__order_fields = order_fields<EOL>parse_limit = lambda num: float('<STR_LIT>') if (num < <NUM_LIT:0>) else num <EOL>self.__error_limit = parse_limit(error_limit)<EOL>self.__table_limit = parse_limit(table_limit)<EOL>self.__row_limit = parse_limit(row_limit)<EOL>
https://github.com/frictionlessdata/goodtables-py#inspector
f7382:c0:m0
def inspect(self, source, preset=None, **options):
<EOL>start = datetime.datetime.now()<EOL>preset = self.__get_source_preset(source, preset)<EOL>if preset == '<STR_LIT>':<EOL><INDENT>options['<STR_LIT>'] = self.__presets<EOL>for s in source:<EOL><INDENT>if s.get('<STR_LIT>') is None:<EOL><INDENT>s['<STR_LIT>'] = self.__get_source_preset(s['<STR_LIT:source>'])<EOL><DEDENT><DEDENT><DEDENT>preset_func = self.__get_preset(preset)['<STR_LIT>']<EOL>warnings, tables = preset_func(source, **options)<EOL>if len(tables) > self.__table_limit:<EOL><INDENT>warnings.append(<EOL>'<STR_LIT>' %<EOL>(self.__table_limit))<EOL>tables = tables[:self.__table_limit]<EOL><DEDENT>table_reports = []<EOL>if tables:<EOL><INDENT>tasks = []<EOL>pool = ThreadPool(processes=len(tables))<EOL>try:<EOL><INDENT>for table in tables:<EOL><INDENT>tasks.append(pool.apply_async(self.__inspect_table, (table,)))<EOL><DEDENT>for task in tasks:<EOL><INDENT>table_warnings, table_report = task.get()<EOL>warnings.extend(table_warnings)<EOL>table_reports.append(table_report)<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>pool.terminate()<EOL><DEDENT><DEDENT>stop = datetime.datetime.now()<EOL>report = {<EOL>'<STR_LIT:time>': round((stop - start).total_seconds(), <NUM_LIT:3>),<EOL>'<STR_LIT>': all(item['<STR_LIT>'] for item in table_reports),<EOL>'<STR_LIT>': sum(len(item['<STR_LIT>']) for item in table_reports),<EOL>'<STR_LIT>': len(tables),<EOL>'<STR_LIT>': table_reports,<EOL>'<STR_LIT>': warnings,<EOL>'<STR_LIT>': preset,<EOL>}<EOL>return report<EOL>
https://github.com/frictionlessdata/goodtables-py#inspector
f7382:c0:m1
def _masquerade(origin: str, orig: ServiceDefn, new: ServiceDefn, **map: str) -> str:
origin: ParseResult = urlparse(origin)<EOL>prev_maps = {}<EOL>if origin.query:<EOL><INDENT>prev_maps = {k: v for k, v in parse_qsl(origin.query)}<EOL><DEDENT>r_args = {}<EOL>for new_k, orig_k in map.items():<EOL><INDENT>assert new_k in new.rpcs, [new_k, new.rpcs]<EOL>assert orig_k in orig.rpcs, [orig_k, orig.rpcs]<EOL>new_v = new.rpcs[new_k]<EOL>orig_v = orig.rpcs[orig_k]<EOL>if orig_k in prev_maps:<EOL><INDENT>orig_k = prev_maps[orig_k]<EOL><DEDENT>assert new_v.res == orig_v.res, [new_v.res, orig_v.res]<EOL>assert new_v.req == orig_v.req, [new_v.req, orig_v.req]<EOL>r_args[new_k] = orig_k<EOL><DEDENT>return urlunparse(origin._replace(query=urlencode(r_args)))<EOL>
build an origin URL such that the orig has all of the mappings to new defined by map
f7388:m9
def masquerade(origin: str, orig: Type[TA], new: Type[TB], **map: str) -> str:
return _masquerade(origin, cache_get(orig), cache_get(new), **map)<EOL>
Make ``orig`` appear as new
f7388:m10
def popen(fn, *args, **kwargs) -> subprocess.Popen:
args = popen_encode(fn, *args, **kwargs)<EOL>logging.getLogger(__name__).debug('<STR_LIT>', args)<EOL>p = subprocess.Popen(args)<EOL>return p<EOL>
Please ensure you're not killing the process before it had started properly :param fn: :param args: :param kwargs: :return:
f7389:m5
@rpc(exc=True)<EOL><INDENT>def ep(self, exc: Exception) -> bool:<DEDENT>
if not isinstance(exc, ConnectionAbortedError):<EOL><INDENT>return False<EOL><DEDENT>if len(exc.args) != <NUM_LIT:2>:<EOL><INDENT>return False<EOL><DEDENT>origin, reason = exc.args<EOL>logging.getLogger(__name__).warning('<STR_LIT>')<EOL>return True<EOL>
Return False if the exception had not been handled gracefully
f7405:c1:m0
def _insert_ordered(insert_to, insert_val, insert_item, ord_col, cmp_fn=lambda x: x):
idx = ord_col.index(cmp_fn(insert_item))<EOL>return insert_to[:idx] + [insert_val] + insert_to[idx:]<EOL>
:param insert_to: ``len(insert_to) = len(ord_col) - 1`` :param insert_val: value to insert into ``insert_to`` :param insert_item: value inserted into ``ord_col`` :param ord_col: collection of ``insert_item`` ordered by cmp_fn :param cmp_fn: :return:
f7408:m2
def trc(postfix: Optional[str] = None, *, depth=<NUM_LIT:1>) -> logging.Logger:
x = inspect.stack()[depth]<EOL>code = x[<NUM_LIT:0>].f_code<EOL>func = [obj for obj in gc.get_referrers(code) if inspect.isfunction(obj)][<NUM_LIT:0>]<EOL>mod = inspect.getmodule(x.frame)<EOL>parts = (mod.__name__, func.__qualname__)<EOL>if postfix:<EOL><INDENT>parts += (postfix,)<EOL><DEDENT>logger_name = '<STR_LIT:.>'.join(parts)<EOL>return logging.getLogger(logger_name)<EOL>
Automatically generate a logger from the calling function :param postfix: append another logger name on top this :param depth: depth of the call stack at which to capture the caller name :return: instance of a logger with a correct path to a current caller
f7409:m0
def _start_services(self, console_env):
self._ad.load_snippet(name='<STR_LIT>', package=self._package)<EOL>console_env['<STR_LIT>'] = self._ad.snippet<EOL>console_env['<STR_LIT:s>'] = self._ad.snippet<EOL>
Overrides superclass.
f7451:c0:m1
def _start_services(self, console_env):
self._ad.services.register('<STR_LIT>', sl4a_service.Sl4aService)<EOL>console_env['<STR_LIT:s>'] = self._ad.services.sl4a<EOL>console_env['<STR_LIT>'] = self._ad.sl4a<EOL>console_env['<STR_LIT>'] = self._ad.ed<EOL>
Overrides superclass.
f7452:c0:m0
def get_print_function_name():
if sys.version_info >= (<NUM_LIT:3>, <NUM_LIT:0>):<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>
Gets the name of the print function for mocking. Returns: A str representing the print function to mock.
f7458:m0
def __getattr__(self, name):
def adb_call(*args):<EOL><INDENT>arg_str = '<STR_LIT:U+0020>'.join(str(elem) for elem in args)<EOL>return arg_str<EOL><DEDENT>return adb_call<EOL>
All calls to the none-existent functions in adb proxy would simply return the adb command string.
f7458:c0:m3
def __getattr__(self, name):
def adb_call(*args):<EOL><INDENT>arg_str = '<STR_LIT:U+0020>'.join(str(elem) for elem in args)<EOL>return arg_str<EOL><DEDENT>return adb_call<EOL>
All calls to the none-existent functions in adb proxy would simply return the adb command string.
f7461:c0:m3
def tearDown(self):
shutil.rmtree(self.tmp_dir)<EOL>
Removes the temp dir.
f7464:c0:m1
def tearDown(self):
shutil.rmtree(self.tmp_dir)<EOL>
Removes the temp dir.
f7470:c0:m1
def setup_mock_socket_file(self, mock_create_connection, resp=MOCK_RESP):
fake_file = self.MockSocketFile(resp)<EOL>fake_conn = mock.MagicMock()<EOL>fake_conn.makefile.return_value = fake_file<EOL>mock_create_connection.return_value = fake_conn<EOL>return fake_file<EOL>
Sets up a fake socket file from the mock connection. Args: mock_create_connection: The mock method for creating a method. resp: (str) response to give. MOCK_RESP by default. Returns: The mock file that will be injected into the code.
f7480:c0:m0
def get_mock_ads(num):
ads = []<EOL>for i in range(num):<EOL><INDENT>ad = mock.MagicMock(name="<STR_LIT>", serial=str(i), h_port=None)<EOL>ad.skip_logcat = False<EOL>ads.append(ad)<EOL><DEDENT>return ads<EOL>
Generates a list of mock AndroidDevice objects. The serial number of each device will be integer 0 through num - 1. Args: num: An integer that is the number of mock AndroidDevice objects to create.
f7482:m0
def __getattr__(self, name):
def adb_call(*args, **kwargs):<EOL><INDENT>arg_str = '<STR_LIT:U+0020>'.join(str(elem) for elem in args)<EOL>return arg_str<EOL><DEDENT>return adb_call<EOL>
All calls to the none-existent functions in adb proxy would simply return the adb command string.
f7482:c1:m4
def __init__(self, configs):
self.tests = []<EOL>self._class_name = self.__class__.__name__<EOL>if configs.test_class_name_suffix and self.TAG is None:<EOL><INDENT>self.TAG = '<STR_LIT>' % (self._class_name,<EOL>configs.test_class_name_suffix)<EOL><DEDENT>elif self.TAG is None:<EOL><INDENT>self.TAG = self._class_name<EOL><DEDENT>self.log_path = configs.log_path<EOL>self.test_bed_name = configs.test_bed_name<EOL>self.user_params = configs.user_params<EOL>self.results = records.TestResult()<EOL>self.summary_writer = configs.summary_writer<EOL>self.current_test_name = None<EOL>self._generated_test_table = collections.OrderedDict()<EOL>self._controller_manager = controller_manager.ControllerManager(<EOL>class_name=self.TAG, controller_configs=configs.controller_configs)<EOL>self.controller_configs = self._controller_manager.controller_configs<EOL>
Constructor of BaseTestClass. The constructor takes a config_parser.TestRunConfig object and which has all the information needed to execute this test class, like log_path and controller configurations. For details, see the definition of class config_parser.TestRunConfig. Args: configs: A config_parser.TestRunConfig object.
f7490:c1:m0
def unpack_userparams(self,<EOL>req_param_names=None,<EOL>opt_param_names=None,<EOL>**kwargs):
req_param_names = req_param_names or []<EOL>opt_param_names = opt_param_names or []<EOL>for k, v in kwargs.items():<EOL><INDENT>if k in self.user_params:<EOL><INDENT>v = self.user_params[k]<EOL><DEDENT>setattr(self, k, v)<EOL><DEDENT>for name in req_param_names:<EOL><INDENT>if hasattr(self, name):<EOL><INDENT>continue<EOL><DEDENT>if name not in self.user_params:<EOL><INDENT>raise Error('<STR_LIT>'<EOL>'<STR_LIT>' % name)<EOL><DEDENT>setattr(self, name, self.user_params[name])<EOL><DEDENT>for name in opt_param_names:<EOL><INDENT>if hasattr(self, name):<EOL><INDENT>continue<EOL><DEDENT>if name in self.user_params:<EOL><INDENT>setattr(self, name, self.user_params[name])<EOL><DEDENT>else:<EOL><INDENT>logging.warning('<STR_LIT>'<EOL>'<STR_LIT>', name)<EOL><DEDENT><DEDENT>
An optional function that unpacks user defined parameters into individual variables. After unpacking, the params can be directly accessed with self.xxx. If a required param is not provided, an exception is raised. If an optional param is not provided, a warning line will be logged. To provide a param, add it in the config file or pass it in as a kwarg. If a param appears in both the config file and kwarg, the value in the config file is used. User params from the config file can also be directly accessed in self.user_params. Args: req_param_names: A list of names of the required user params. opt_param_names: A list of names of the optional user params. **kwargs: Arguments that provide default values. e.g. unpack_userparams(required_list, opt_list, arg_a='hello') self.arg_a will be 'hello' unless it is specified again in required_list or opt_list. Raises: Error: A required user params is not provided.
f7490:c1:m3
def register_controller(self, module, required=True, min_number=<NUM_LIT:1>):
return self._controller_manager.register_controller(<EOL>module, required, min_number)<EOL>
Loads a controller module and returns its loaded devices. A Mobly controller module is a Python lib that can be used to control a device, service, or equipment. To be Mobly compatible, a controller module needs to have the following members: .. code-block:: python def create(configs): [Required] Creates controller objects from configurations. Args: configs: A list of serialized data like string/dict. Each element of the list is a configuration for a controller object. Returns: A list of objects. def destroy(objects): [Required] Destroys controller objects created by the create function. Each controller object shall be properly cleaned up and all the resources held should be released, e.g. memory allocation, sockets, file handlers etc. Args: A list of controller objects created by the create function. def get_info(objects): [Optional] Gets info from the controller objects used in a test run. The info will be included in test_summary.yaml under the key 'ControllerInfo'. Such information could include unique ID, version, or anything that could be useful for describing the test bed and debugging. Args: objects: A list of controller objects created by the create function. Returns: A list of json serializable objects: each represents the info of a controller object. The order of the info object should follow that of the input objects. Registering a controller module declares a test class's dependency the controller. If the module config exists and the module matches the controller interface, controller objects will be instantiated with corresponding configs. The module should be imported first. Args: module: A module that follows the controller module interface. required: A bool. If True, failing to register the specified controller module raises exceptions. If False, the objects failed to instantiate will be skipped. min_number: An integer that is the minimum number of controller objects to be created. 
Default is one, since you should not register a controller module without expecting at least one object. Returns: A list of controller objects instantiated from controller_module, or None if no config existed for this controller and it was not a required controller. Raises: ControllerError: * The controller module has already been registered. * The actual number of objects instantiated is less than the * `min_number`. * `required` is True and no corresponding config can be found. * Any other error occurred in the registration process.
f7490:c1:m4
def _setup_class(self):
<EOL>class_record = records.TestResultRecord(STAGE_NAME_SETUP_CLASS,<EOL>self.TAG)<EOL>class_record.test_begin()<EOL>self.current_test_info = runtime_test_info.RuntimeTestInfo(<EOL>STAGE_NAME_SETUP_CLASS, self.log_path, class_record)<EOL>expects.recorder.reset_internal_states(class_record)<EOL>try:<EOL><INDENT>with self._log_test_stage(STAGE_NAME_SETUP_CLASS):<EOL><INDENT>self.setup_class()<EOL><DEDENT><DEDENT>except signals.TestAbortSignal:<EOL><INDENT>raise<EOL><DEDENT>except Exception as e:<EOL><INDENT>logging.exception('<STR_LIT>', self.TAG)<EOL>class_record.test_error(e)<EOL>self.results.add_class_error(class_record)<EOL>self._exec_procedure_func(self._on_fail, class_record)<EOL>class_record.update_record()<EOL>self.summary_writer.dump(class_record.to_dict(),<EOL>records.TestSummaryEntryType.RECORD)<EOL>self._skip_remaining_tests(e)<EOL>return self.results<EOL><DEDENT>if expects.recorder.has_error:<EOL><INDENT>self._exec_procedure_func(self._on_fail, class_record)<EOL>class_record.test_error()<EOL>class_record.update_record()<EOL>self.summary_writer.dump(class_record.to_dict(),<EOL>records.TestSummaryEntryType.RECORD)<EOL>self.results.add_class_error(class_record)<EOL>self._skip_remaining_tests(<EOL>class_record.termination_signal.exception)<EOL>return self.results<EOL><DEDENT>
Proxy function to guarantee the base implementation of setup_class is called. Returns: If `self.results` is returned instead of None, this means something has gone wrong, and the rest of the test class should not execute.
f7490:c1:m8
def setup_class(self):
Setup function that will be called before executing any test in the class. To signal setup failure, use asserts or raise your own exception. Errors raised from `setup_class` will trigger `on_fail`. Implementation is optional.
f7490:c1:m9
def _teardown_class(self):
stage_name = STAGE_NAME_TEARDOWN_CLASS<EOL>record = records.TestResultRecord(stage_name, self.TAG)<EOL>record.test_begin()<EOL>self.current_test_info = runtime_test_info.RuntimeTestInfo(<EOL>stage_name, self.log_path, record)<EOL>expects.recorder.reset_internal_states(record)<EOL>try:<EOL><INDENT>with self._log_test_stage(stage_name):<EOL><INDENT>self.teardown_class()<EOL><DEDENT><DEDENT>except signals.TestAbortAll as e:<EOL><INDENT>setattr(e, '<STR_LIT>', self.results)<EOL>raise<EOL><DEDENT>except Exception as e:<EOL><INDENT>logging.exception('<STR_LIT>', stage_name)<EOL>record.test_error(e)<EOL>record.update_record()<EOL>self.results.add_class_error(record)<EOL>self.summary_writer.dump(record.to_dict(),<EOL>records.TestSummaryEntryType.RECORD)<EOL><DEDENT>else:<EOL><INDENT>if expects.recorder.has_error:<EOL><INDENT>record.update_record()<EOL>self.results.add_class_error(record)<EOL>self.summary_writer.dump(record.to_dict(),<EOL>records.TestSummaryEntryType.RECORD)<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>self._clean_up()<EOL><DEDENT>
Proxy function to guarantee the base implementation of teardown_class is called.
f7490:c1:m10
def teardown_class(self):
Teardown function that will be called after all the selected tests in the test class have been executed. Errors raised from `teardown_class` do not trigger `on_fail`. Implementation is optional.
f7490:c1:m11
def _on_fail(self, record):
self.on_fail(record)<EOL>
Proxy function to guarantee the base implementation of on_fail is called. Args: record: records.TestResultRecord, a copy of the test record for this test, containing all information of the test execution including exception objects.
f7490:c1:m17
def on_fail(self, record):
A function that is executed upon a test failure. User implementation is optional. Args: record: records.TestResultRecord, a copy of the test record for this test, containing all information of the test execution including exception objects.
f7490:c1:m18