code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
data = clips.data.DataObject(self._env) if lib.EnvGetDefglobalValue( self._env, self.name.encode(), data.byref) != 1: raise CLIPSError(self._env) return data.value
def value(self)
Global value.
14.066885
11.374062
1.236751
return ffi.string( lib.EnvGetDefglobalName(self._env, self._glb)).decode()
def name(self)
Global name.
30.238077
19.306301
1.566228
modname = ffi.string(lib.EnvDefglobalModule(self._env, self._glb)) defmodule = lib.EnvFindDefmodule(self._env, modname) return Module(self._env, defmodule)
def module(self)
The module in which the Global is defined. Python equivalent of the CLIPS defglobal-module command.
10.907097
6.834846
1.595807
lib.EnvSetDefglobalWatch(self._env, int(flag), self._glb)
def watch(self, flag)
Whether or not the Global is being watched.
49.071358
31.816757
1.542312
if lib.EnvUndefglobal(self._env, self._glb) != 1: raise CLIPSError(self._env) self._env = None
def undefine(self)
Undefine the Global. Python equivalent of the CLIPS undefglobal command. The object becomes unusable after this method has been called.
21.682037
13.683454
1.584544
return ffi.string( lib.EnvGetDefmoduleName(self._env, self._mdl)).decode()
def name(self)
Global name.
39.48716
28.780842
1.371995
deffunction = lib.EnvGetNextDeffunction(self._env, ffi.NULL) while deffunction != ffi.NULL: yield Function(self._env, deffunction) deffunction = lib.EnvGetNextDeffunction(self._env, deffunction)
def functions(self)
Iterates over the defined Globals.
4.217135
3.936982
1.071159
deffunction = lib.EnvFindDeffunction(self._env, name.encode()) if deffunction == ffi.NULL: raise LookupError("Function '%s' not found" % name) return Function(self._env, deffunction)
def find_function(self, name)
Find the Function by its name.
4.190552
3.711089
1.129197
defgeneric = lib.EnvGetNextDefgeneric(self._env, ffi.NULL) while defgeneric != ffi.NULL: yield Generic(self._env, defgeneric) defgeneric = lib.EnvGetNextDefgeneric(self._env, defgeneric)
def generics(self)
Iterates over the defined Generics.
6.169922
4.801214
1.285076
defgeneric = lib.EnvFindDefgeneric(self._env, name.encode()) if defgeneric == ffi.NULL: raise LookupError("Generic '%s' not found" % name) return Generic(self._env, defgeneric)
def find_generic(self, name)
Find the Generic by its name.
5.31523
4.607732
1.153546
return ffi.string( lib.EnvGetDeffunctionName(self._env, self._fnc)).decode()
def name(self)
Function name.
22.547785
14.719519
1.531829
modname = ffi.string(lib.EnvDeffunctionModule(self._env, self._fnc)) defmodule = lib.EnvFindDefmodule(self._env, modname) return Module(self._env, defmodule)
def module(self)
The module in which the Function is defined. Python equivalent of the CLIPS deffunction-module command.
9.268104
6.372092
1.454484
lib.EnvSetDeffunctionWatch(self._env, int(flag), self._fnc)
def watch(self, flag)
Whether or not the Function is being watched.
34.484779
22.083975
1.56153
if lib.EnvUndeffunction(self._env, self._fnc) != 1: raise CLIPSError(self._env) self._env = None
def undefine(self)
Undefine the Function. Python equivalent of the CLIPS undeffunction command. The object becomes unusable after this method has been called.
15.429393
10.232152
1.507932
if lib.EnvUndefgeneric(self._env, self._gnc) != 1: raise CLIPSError(self._env) self._env = None
def undefine(self)
Undefine the Generic. Python equivalent of the CLIPS undefgeneric command. The object becomes unusable after this method has been called.
22.145975
14.774216
1.498961
if lib.EnvUndefmethod(self._env, self._gnc, self._idx) != 1: raise CLIPSError(self._env) self._env = None
def undefine(self)
Undefine the Method. Python equivalent of the CLIPS undefmethod command. The object becomes unusable after this method has been called.
20.066502
14.082079
1.424967
capped_series = np.minimum( series, series.mean() + multiplier * series.std()) count = pd.value_counts(series != capped_series) count = count[True] if True in count else 0 perc = self._percent(count / self.length) return count, perc
def _get_deviation_of_mean(self, series, multiplier=3)
Returns count of values deviating of the mean, i.e. larger than `multiplier` * `std`. :type series: :param multiplier: :return:
5.499213
5.572425
0.986862
capped_series = np.minimum( series, series.median() + multiplier * series.mad()) count = pd.value_counts(series != capped_series) count = count[True] if True in count else 0 perc = self._percent(count / self.length) return count, perc
def _get_median_absolute_deviation(self, series, multiplier=3)
Returns count of values larger than `multiplier` * `mad` :type series: :param multiplier: :return (array):
5.802502
6.023079
0.963378
columns_excluded = pd.Index([]) columns_included = df.columns if usage == self.INCLUDE: try: columns_included = columns_included.intersection(pd.Index(columns)) except TypeError: pass elif usage == self.EXCLUDE: try: columns_excluded = columns_excluded.union(pd.Index(columns)) except TypeError: pass columns_included = columns_included.difference(columns_excluded) return columns_included.intersection(df.columns)
def get_columns(self, df, usage, columns=None)
Returns a `data_frame.columns`. :param df: dataframe to select columns from :param usage: should be a value from [ALL, INCLUDE, EXCLUDE]. this value only makes sense if attr `columns` is also set. otherwise, should be used with default value ALL. :param columns: * if `usage` is all, this value is not used. * if `usage` is INCLUDE, the `df` is restricted to the intersection between `columns` and the `df.columns` * if usage is EXCLUDE, returns the `df.columns` excluding these `columns` :return: `data_frame` columns, excluding `target_column` and `id_column` if given. `data_frame` columns, including/excluding the `columns` depending on `usage`.
2.260909
2.272285
0.994994
# Plot fluorescence of beads populations plt.plot(fl_rfi, fl_mef, 'o', label='Beads', color=standard_curve_colors[0]) # Generate points in x axis to plot beads model and standard curve. if xlim is None: xlim = plt.xlim() if xscale=='linear': xdata = np.linspace(xlim[0], xlim[1], 200) elif xscale=='log': xdata = np.logspace(np.log10(xlim[0]), np.log10(xlim[1]), 200) # Plot beads model and standard curve plt.plot(xdata, beads_model(xdata), label='Beads model', color=standard_curve_colors[1]) plt.plot(xdata, std_crv(xdata), label='Standard curve', color=standard_curve_colors[2]) plt.xscale(xscale) plt.yscale(yscale) plt.xlim(xlim) plt.ylim(ylim) plt.grid(True) plt.legend(loc = 'best')
def plot_standard_curve(fl_rfi, fl_mef, beads_model, std_crv, xscale='linear', yscale='linear', xlim=None, ylim=(1.,1e8))
Plot a standard curve with fluorescence of calibration beads. Parameters ---------- fl_rfi : array_like Fluorescence of the calibration beads' subpopulations, in RFI units. fl_mef : array_like Fluorescence of the calibration beads' subpopulations, in MEF units. beads_model : function Fluorescence model of the calibration beads. std_crv : function The standard curve, mapping relative fluorescence (RFI) units to MEF units. Other Parameters ---------------- xscale : str, optional Scale of the x axis, either ``linear`` or ``log``. yscale : str, optional Scale of the y axis, either ``linear`` or ``log``. xlim : tuple, optional Limits for the x axis. ylim : tuple, optional Limits for the y axis.
1.946517
1.974378
0.985889
# Catch sheetname as list or None if sheetname is None or \ (hasattr(sheetname, '__iter__') \ and not isinstance(sheetname, six.string_types)): raise TypeError("sheetname should specify a single sheet") # Load excel table using pandas # Parameter specifying sheet name is slightly different depending on pandas' # version. if packaging.version.parse(pd.__version__) \ < packaging.version.parse('0.21'): table = pd.read_excel(filename, sheetname=sheetname, index_col=index_col) else: table = pd.read_excel(filename, sheet_name=sheetname, index_col=index_col) # Eliminate rows whose index are null if index_col is not None: table = table[pd.notnull(table.index)] # Check for duplicated rows if table.index.has_duplicates: raise ValueError("sheet {} on file {} contains duplicated values " "for column {}".format(sheetname, filename, index_col)) return table
def read_table(filename, sheetname, index_col=None)
Return the contents of an Excel table as a pandas DataFrame. Parameters ---------- filename : str Name of the Excel file to read. sheetname : str or int Name or index of the sheet inside the Excel file to read. index_col : str, optional Column name or index to be used as row labels of the DataFrame. If None, default index will be used. Returns ------- table : DataFrame A DataFrame containing the data in the specified Excel table. If `index_col` is not None, rows in which their `index_col` field is empty will not be present in `table`. Raises ------ ValueError If `index_col` is specified and two rows contain the same `index_col` field.
3.490259
3.659248
0.953819
# Modify default header format # Pandas' default header format is bold text with thin borders. Here we # use bold text only, without borders. # The header style structure is in pd.core.format in pandas<=0.18.0, # pd.formats.format in 0.18.1<=pandas<0.20, and pd.io.formats.excel in # pandas>=0.20. # Also, wrap in a try-except block in case style structure is not found. format_module_found = False try: # Get format module if packaging.version.parse(pd.__version__) \ <= packaging.version.parse('0.18'): format_module = pd.core.format elif packaging.version.parse(pd.__version__) \ < packaging.version.parse('0.20'): format_module = pd.formats.format else: import pandas.io.formats.excel as format_module # Save previous style, replace, and indicate that previous style should # be restored at the end old_header_style = format_module.header_style format_module.header_style = {"font": {"bold": True}} format_module_found = True except AttributeError as e: pass # Generate output writer object writer = pd.ExcelWriter(filename, engine='xlsxwriter') # Write tables for sheet_name, df in table_list: # Convert index names to regular columns df = df.reset_index() # Write to an Excel sheet df.to_excel(writer, sheet_name=sheet_name, index=False) # Set column width if column_width is None: for i, (col_name, column) in enumerate(six.iteritems(df)): # Get the maximum number of characters in a column max_chars_col = column.astype(str).str.len().max() max_chars_col = max(len(col_name), max_chars_col) # Write width writer.sheets[sheet_name].set_column( i, i, width=1.*max_chars_col) else: writer.sheets[sheet_name].set_column( 0, len(df.columns) - 1, width=column_width) # Write excel file writer.save() # Restore previous header format if format_module_found: format_module.header_style = old_header_style
def write_workbook(filename, table_list, column_width=None)
Write an Excel workbook from a list of tables. Parameters ---------- filename : str Name of the Excel file to write. table_list : list of ``(str, DataFrame)`` tuples Tables to be saved as individual sheets in the Excel table. Each tuple contains two values: the name of the sheet to be saved as a string, and the contents of the table as a DataFrame. column_width: int, optional The column width to use when saving the spreadsheet. If None, calculate width automatically from the maximum number of characters in each column.
3.243129
3.231119
1.003717
# Extract channels that require stats histograms headers = list(samples_table.columns) hist_headers = [h for h in headers if re_units.match(h)] hist_channels = [re_units.match(h).group(1) for h in hist_headers] # The number of columns in the DataFrame has to be set to the maximum # number of bins of any of the histograms about to be generated. # The following iterates through these histograms and finds the # largest. n_columns = 0 for sample_id, sample in zip(samples_table.index, samples): if isinstance(sample, ExcelUIException): continue for header, channel in zip(hist_headers, hist_channels): if pd.notnull(samples_table[header][sample_id]): if n_columns < sample.resolution(channel): n_columns = sample.resolution(channel) # Saturate at max_bins if n_columns > max_bins: n_columns = max_bins # Declare multi-indexed DataFrame index = pd.MultiIndex.from_arrays([[],[],[]], names = ['Sample ID', 'Channel', '']) columns = ['Bin {}'.format(i + 1) for i in range(n_columns)] hist_table = pd.DataFrame([], index=index, columns=columns) # Generate histograms for sample_id, sample in zip(samples_table.index, samples): if isinstance(sample, ExcelUIException): continue for header, channel in zip(hist_headers, hist_channels): if pd.notnull(samples_table[header][sample_id]): # Get units in which bins are being reported unit = samples_table[header][sample_id] # Decide which scale to use # Channel units result in linear scale. Otherwise, use logicle. if unit == 'Channel': scale = 'linear' else: scale = 'logicle' # Define number of bins nbins = min(sample.resolution(channel), max_bins) # Calculate bin edges and centers # We generate twice the necessary number of bins. We then take # every other value as the proper bin edges, and the remaining # values as the bin centers. bins_extended = sample.hist_bins(channel, 2*nbins, scale) bin_edges = bins_extended[::2] bin_centers = bins_extended[1::2] # Store bin centers hist_table.loc[(sample_id, channel, 'Bin Centers ({})'.format(unit)), columns[0:len(bin_centers)]] = bin_centers # Calculate and store histogram counts hist, __ = np.histogram(sample[:,channel], bins=bin_edges) hist_table.loc[(sample_id, channel, 'Counts'), columns[0:len(bin_centers)]] = hist return hist_table
def generate_histograms_table(samples_table, samples, max_bins=1024)
Generate a table of histograms as a DataFrame. Parameters ---------- samples_table : DataFrame Table specifying samples to analyze. For more information about the fields required in this table, please consult the module's documentation. samples : list FCSData objects from which to calculate histograms. ``samples[i]`` should correspond to ``samples_table.iloc[i]`` max_bins : int, optional Maximum number of bins to use. Returns ------- hist_table : DataFrame A multi-indexed DataFrame. Rows cotain the histogram bins and counts for every sample and channel specified in samples_table. `hist_table` is indexed by the sample's ID, the channel name, and whether the row corresponds to bins or counts.
3.408316
3.285875
1.037263
# Make keyword and value arrays keywords = [] values = [] # FlowCal version keywords.append('FlowCal version') values.append(FlowCal.__version__) # Analysis date and time keywords.append('Date of analysis') values.append(time.strftime("%Y/%m/%d")) keywords.append('Time of analysis') values.append(time.strftime("%I:%M:%S%p")) # Add additional keyword:value pairs for k, v in six.iteritems(extra_info): keywords.append(k) values.append(v) # Make table as data frame about_table = pd.DataFrame(values, index=keywords) # Set column names about_table.columns = ['Value'] about_table.index.name = 'Keyword' return about_table
def generate_about_table(extra_info={})
Make a table with information about FlowCal and the current analysis. Parameters ---------- extra_info : dict, optional Additional keyword:value pairs to include in the table. Returns ------- about_table : DataFrame Table with information about FlowCal and the current analysis, as keyword:value pairs. The following keywords are included: FlowCal version, and date and time of analysis. Keywords and values from `extra_info` are also included.
2.954134
2.357602
1.253025
# The following line is used to Tk's main window is not shown Tk().withdraw() # OSX ONLY: Call bash script to prevent file select window from sticking # after use. if platform.system() == 'Darwin': subprocess.call("defaults write org.python.python " + "ApplePersistenceIgnoreState YES", shell=True) filename = askopenfilename(filetypes=filetypes) subprocess.call("defaults write org.python.python " + "ApplePersistenceIgnoreState NO", shell=True) else: filename = askopenfilename(filetypes=filetypes) return filename
def show_open_file_dialog(filetypes)
Show an open file dialog and return the path of the file selected. Parameters ---------- filetypes : list of tuples Types of file to show on the dialog. Each tuple on the list must have two elements associated with a filetype: the first element is a description, and the second is the associated extension. Returns ------- filename : str The path of the filename selected, or an empty string if no file was chosen.
6.736431
7.148593
0.942344
# Get arguments from ``sys.argv`` if necessary. # ``sys.argv`` has the name of the script as its first element. We remove # this element because it will break ``parser.parse_args()`` later. In fact, # ``parser.parse_args()``, if provided with no arguments, will also use # ``sys.argv`` after removing the first element. if args is None: args = sys.argv[1:] import argparse # Read command line arguments parser = argparse.ArgumentParser( description="process flow cytometry files with FlowCal's Excel UI.") parser.add_argument( "-i", "--inputpath", type=str, nargs='?', help="input Excel file name. If not specified, show open file window") parser.add_argument( "-o", "--outputpath", type=str, nargs='?', help="output Excel file name. If not specified, use [INPUTPATH]_output") parser.add_argument( "-v", "--verbose", action="store_true", help="print information about individual processing steps") parser.add_argument( "-p", "--plot", action="store_true", help="generate and save density plots/histograms of beads and samples") parser.add_argument( "-H", "--histogram-sheet", action="store_true", help="generate sheet in output Excel file specifying histogram bins") args = parser.parse_args(args=args) # Run Excel UI run(input_path=args.inputpath, output_path=args.outputpath, verbose=args.verbose, plot=args.plot, hist_sheet=args.histogram_sheet)
def run_command_line(args=None)
Entry point for the FlowCal and flowcal console scripts. Parameters ---------- args: list of strings, optional Command line arguments. If None or not specified, get arguments from ``sys.argv``. See Also ---------- FlowCal.excel_ui.run() http://amir.rachum.com/blog/2017/07/28/python-entry-points/
3.69515
3.355903
1.10109
fields = [ 'version', 'text_begin', 'text_end', 'data_begin', 'data_end', 'analysis_begin', 'analysis_end'] FCSHeader = collections.namedtuple('FCSHeader', fields) field_values = [] buf.seek(begin) field_values.append(buf.read(10).decode(encoding).rstrip()) # version field_values.append(int(buf.read(8))) # text_begin field_values.append(int(buf.read(8))) # text_end field_values.append(int(buf.read(8))) # data_begin field_values.append(int(buf.read(8))) # data_end fv = buf.read(8).decode(encoding) # analysis_begin field_values.append(0 if fv == ' '*8 else int(fv)) fv = buf.read(8).decode(encoding) # analysis_end field_values.append(0 if fv == ' '*8 else int(fv)) header = FCSHeader._make(field_values) return header
def read_fcs_header_segment(buf, begin=0)
Read HEADER segment of FCS file. Parameters ---------- buf : file-like object Buffer containing data to interpret as HEADER segment. begin : int Offset (in bytes) to first byte of HEADER segment in `buf`. Returns ------- header : namedtuple Version information and byte offset values of other FCS segments (see FCS standards for more information) in the following order: - version : str - text_begin : int - text_end : int - data_begin : int - data_end : int - analysis_begin : int - analysis_end : int Notes ----- Blank ANALYSIS segment offsets are converted to zeros. OTHER segment offsets are ignored (see [1]_, [2]_, and [3]_). References ---------- .. [1] P.N. Dean, C.B. Bagwell, T. Lindmo, R.F. Murphy, G.C. Salzman, "Data file standard for flow cytometry. Data File Standards Committee of the Society for Analytical Cytology," Cytometry vol 11, pp 323-332, 1990, PMID 2340769. .. [2] L.C. Seamer, C.B. Bagwell, L. Barden, D. Redelman, G.C. Salzman, J.C. Wood, R.F. Murphy, "Proposed new data file standard for flow cytometry, version FCS 3.0," Cytometry vol 28, pp 118-122, 1997, PMID 9181300. .. [3] J. Spidlen, et al, "Data File Standard for Flow Cytometry, version FCS 3.1," Cytometry A vol 77A, pp 97-100, 2009, PMID 19937951.
2.091989
1.973573
1.060001
# Get time channels indices time_channel_idx = [idx for idx, channel in enumerate(self.channels) if channel.lower() == 'time'] if len(time_channel_idx) > 1: raise KeyError("more than one time channel in data") # Check if the time channel is available elif len(time_channel_idx) == 1: # Use the event list time_channel = self.channels[time_channel_idx[0]] return (self[-1, time_channel] - self[0, time_channel]) \ * self.time_step elif (self._acquisition_start_time is not None and self._acquisition_end_time is not None): # Use start_time and end_time: dt = (self._acquisition_end_time - self._acquisition_start_time) return dt.total_seconds() else: return None
def acquisition_time(self)
Acquisition time, in seconds. The acquisition time is calculated using the 'time' channel by default (channel name is case independent). If the 'time' channel is not available, the acquisition_start_time and acquisition_end_time, extracted from the $BTIM and $ETIM keyword parameters will be used. If these are not found, None will be returned.
3.115105
2.911875
1.069793
# Check default if channels is None: channels = self._channels # Get numerical indices of channels channels = self._name_to_index(channels) # Get detector type of the specified channels if hasattr(channels, '__iter__') \ and not isinstance(channels, six.string_types): return [self._amplification_type[ch] for ch in channels] else: return self._amplification_type[channels]
def amplification_type(self, channels=None)
Get the amplification type used for the specified channel(s). Each channel uses one of two amplification types: linear or logarithmic. This function returns, for each channel, a tuple of two numbers, in which the first number indicates the number of decades covered by the logarithmic amplifier, and the second indicates the linear value corresponding to the channel value zero. If the first value is zero, the amplifier used is linear The amplification type for channel "n" is extracted from the required $PnE parameter. Parameters ---------- channels : int, str, list of int, list of str Channel(s) for which to get the amplification type. If None, return a list with the amplification type of all channels, in the order of ``FCSData.channels``. Return ------ tuple, or list of tuples The amplification type of the specified channel(s). This is reported as a tuple, in which the first element indicates how many decades the logarithmic amplifier covers, and the second indicates the linear value that corresponds to a channel value of zero. If the first element is zero, the amplification type is linear.
3.695888
4.207898
0.878322
# Check default if channels is None: channels = self._channels # Get numerical indices of channels channels = self._name_to_index(channels) # Get detector type of the specified channels if hasattr(channels, '__iter__') \ and not isinstance(channels, six.string_types): return [self._detector_voltage[ch] for ch in channels] else: return self._detector_voltage[channels]
def detector_voltage(self, channels=None)
Get the detector voltage used for the specified channel(s). The detector voltage for channel "n" is extracted from the $PnV parameter, if available. Parameters ---------- channels : int, str, list of int, list of str Channel(s) for which to get the detector voltage. If None, return a list with the detector voltage of all channels, in the order of ``FCSData.channels``. Return ------ float or list of float The detector voltage of the specified channel(s). If no information about the detector voltage is found for a channel, return None.
3.834873
4.304756
0.890846
# Check default if channels is None: channels = self._channels # Get numerical indices of channels channels = self._name_to_index(channels) # Get detector type of the specified channels if hasattr(channels, '__iter__') \ and not isinstance(channels, six.string_types): return [self._amplifier_gain[ch] for ch in channels] else: return self._amplifier_gain[channels]
def amplifier_gain(self, channels=None)
Get the amplifier gain used for the specified channel(s). The amplifier gain for channel "n" is extracted from the $PnG parameter, if available. Parameters ---------- channels : int, str, list of int, list of str Channel(s) for which to get the amplifier gain. If None, return a list with the amplifier gain of all channels, in the order of ``FCSData.channels``. Return ------ float or list of float The amplifier gain of the specified channel(s). If no information about the amplifier gain is found for a channel, return None.
3.87161
4.28276
0.903999
# Check default if channels is None: channels = self._channels # Get numerical indices of channels channels = self._name_to_index(channels) # Get the range of the specified channels if hasattr(channels, '__iter__') \ and not isinstance(channels, six.string_types): return [self._range[ch] for ch in channels] else: return self._range[channels]
def range(self, channels=None)
Get the range of the specified channel(s). The range is a two-element list specifying the smallest and largest values that an event in a channel should have. Note that with floating point data, some events could have values outside the range in either direction due to instrument compensation. The range should be transformed along with the data when passed through a transformation function. The range of channel "n" is extracted from the $PnR parameter as ``[0, $PnR - 1]``. Parameters ---------- channels : int, str, list of int, list of str Channel(s) for which to get the range. If None, return a list with the range of all channels, in the order of ``FCSData.channels``. Return ------ array or list of arrays The range of the specified channel(s).
3.596635
3.671987
0.979479
# Check default if channels is None: channels = self._channels # Get numerical indices of channels channels = self._name_to_index(channels) # Get resolution of the specified channels if hasattr(channels, '__iter__') \ and not isinstance(channels, six.string_types): return [self._resolution[ch] for ch in channels] else: return self._resolution[channels]
def resolution(self, channels=None)
Get the resolution of the specified channel(s). The resolution specifies the number of different values that the events can take. The resolution is directly obtained from the $PnR parameter. Parameters ---------- channels : int, str, list of int, list of str Channel(s) for which to get the resolution. If None, return a list with the resolution of all channels, in the order of ``FCSData.channels``. Return ------ int or list of ints Resolution of the specified channel(s).
3.631719
3.754949
0.967182
# If input is None, return None if time_str is None: return None time_l = time_str.split(':') if len(time_l) == 3: # Either 'hh:mm:ss' or 'hh:mm:ss.cc' if '.' in time_l[2]: # 'hh:mm:ss.cc' format time_str = time_str.replace('.', ':') else: # 'hh:mm:ss' format time_str = time_str + ':0' # Attempt to parse string, return None if not possible try: t = datetime.datetime.strptime(time_str, '%H:%M:%S:%f').time() except: t = None elif len(time_l) == 4: # 'hh:mm:ss:tt' format time_l[3] = '{:06d}'.format(int(float(time_l[3])*1e6/60)) time_str = ':'.join(time_l) # Attempt to parse string, return None if not possible try: t = datetime.datetime.strptime(time_str, '%H:%M:%S:%f').time() except: t = None else: # Unknown format t = None return t
def _parse_time_string(time_str)
Get a datetime.time object from a string time representation. The start and end of acquisition are stored in the optional keyword parameters $BTIM and $ETIM. The following formats are used according to the FCS standard: - FCS 2.0: 'hh:mm:ss' - FCS 3.0: 'hh:mm:ss[:tt]', where 'tt' is optional, and represents fractional seconds in 1/60ths. - FCS 3.1: 'hh:mm:ss[.cc]', where 'cc' is optional, and represents fractional seconds in 1/100ths. This function attempts to transform these formats to 'hh:mm:ss:ffffff', where 'ffffff' is in microseconds, and then parse it using the datetime module. Parameters: ----------- time_str : str, or None String representation of time, or None. Returns: -------- t : datetime.time, or None Time parsed from `time_str`. If parsing was not possible, return None. If `time_str` is None, return None
2.078799
1.907951
1.089545
# If input is None, return None if date_str is None: return None # Standard format for FCS2.0 try: return datetime.datetime.strptime(date_str, '%d-%b-%y') except ValueError: pass # Standard format for FCS3.0 try: return datetime.datetime.strptime(date_str, '%d-%b-%Y') except ValueError: pass # Nonstandard format 1 try: return datetime.datetime.strptime(date_str, '%y-%b-%d') except ValueError: pass # Nonstandard format 2 try: return datetime.datetime.strptime(date_str, '%Y-%b-%d') except ValueError: pass # If none of these formats work, return None return None
def _parse_date_string(date_str)
Get a datetime.date object from a string date representation. The FCS standard includes an optional keyword parameter $DATE in which the acquistion date is stored. In FCS 2.0, the date is saved as 'dd-mmm-yy', whereas in FCS 3.0 and 3.1 the date is saved as 'dd-mmm-yyyy'. This function attempts to parse these formats, along with a couple of nonstandard ones, using the datetime module. Parameters: ----------- date_str : str, or None String representation of date, or None. Returns: -------- t : datetime.datetime, or None Date parsed from `date_str`. If parsing was not possible, return None. If `date_str` is None, return None
2.18511
1.87462
1.165629
# Check if list, then run recursively if hasattr(channels, '__iter__') \ and not isinstance(channels, six.string_types): return [self._name_to_index(ch) for ch in channels] if isinstance(channels, six.string_types): # channels is a string containing a channel name if channels in self.channels: return self.channels.index(channels) else: raise ValueError("{} is not a valid channel name." .format(channels)) if isinstance(channels, int): if (channels < len(self.channels) and channels >= -len(self.channels)): return channels else: raise ValueError("index out of range") else: raise TypeError("input argument should be an integer, string or " "list of integers or strings")
def _name_to_index(self, channels)
Return the channel indices for the specified channel names. Integers contained in `channel` are returned unmodified, if they are within the range of ``self.channels``. Parameters ---------- channels : int or str or list of int or list of str Name(s) of the channel(s) of interest. Returns ------- int or list of int Numerical index(ces) of the specified channels.
2.628853
2.726131
0.964316
with open(file_path, 'r') as f: file_contents = f.read() version_match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]", file_contents, re.M) if version_match: return version_match.group(1) else: raise RuntimeError("unable to find version string")
def find_version(file_path)
Scrape version information from specified file path.
1.65553
1.606786
1.030336
if num_start < 0: num_start = 0 if num_end < 0: num_end = 0 if data.shape[0] < (num_start + num_end): raise ValueError('Number of events to discard greater than total' + ' number of events.') mask = np.ones(shape=data.shape[0],dtype=bool) mask[:num_start] = False if num_end > 0: # catch the edge case where `num_end=0` causes mask[-num_end:] to mask # off all events mask[-num_end:] = False gated_data = data[mask] if full_output: StartEndGateOutput = collections.namedtuple( 'StartEndGateOutput', ['gated_data', 'mask']) return StartEndGateOutput(gated_data=gated_data, mask=mask) else: return gated_data
def start_end(data, num_start=250, num_end=100, full_output=False)
Gate out first and last events. Parameters ---------- data : FCSData or numpy array NxD flow cytometry data where N is the number of events and D is the number of parameters (aka channels). num_start, num_end : int, optional Number of events to gate out from beginning and end of `data`. Ignored if less than 0. full_output : bool, optional Flag specifying to return additional outputs. If true, the outputs are given as a namedtuple. Returns ------- gated_data : FCSData or numpy array Gated flow cytometry data of the same format as `data`. mask : numpy array of bool, only if ``full_output==True`` Boolean gate mask used to gate data such that ``gated_data = data[mask]``. Raises ------ ValueError If the number of events to discard is greater than the total number of events in `data`.
2.930763
2.402163
1.220052
# Extract channels in which to gate if channels is None: data_ch = data else: data_ch = data[:,channels] if data_ch.ndim == 1: data_ch = data_ch.reshape((-1,1)) # Default values for high and low if high is None: if hasattr(data_ch, 'range'): high = [np.Inf if di is None else di[1] for di in data_ch.range()] high = np.array(high) else: high = np.Inf if low is None: if hasattr(data_ch, 'range'): low = [-np.Inf if di is None else di[0] for di in data_ch.range()] low = np.array(low) else: low = -np.Inf # Gate mask = np.all((data_ch < high) & (data_ch > low), axis = 1) gated_data = data[mask] if full_output: HighLowGateOutput = collections.namedtuple( 'HighLowGateOutput', ['gated_data', 'mask']) return HighLowGateOutput(gated_data=gated_data, mask=mask) else: return gated_data
def high_low(data, channels=None, high=None, low=None, full_output=False)
Gate out high and low values across all specified channels. Gate out events in `data` with values in the specified channels which are larger than or equal to `high` or less than or equal to `low`. Parameters ---------- data : FCSData or numpy array NxD flow cytometry data where N is the number of events and D is the number of parameters (aka channels). channels : int, str, list of int, list of str, optional Channels on which to perform gating. If None, use all channels. high, low : int, float, optional High and low threshold values. If None, `high` and `low` will be taken from ``data.range`` if available, otherwise ``np.inf`` and ``-np.inf`` will be used. full_output : bool, optional Flag specifying to return additional outputs. If true, the outputs are given as a namedtuple. Returns ------- gated_data : FCSData or numpy array Gated flow cytometry data of the same format as `data`. mask : numpy array of bool, only if ``full_output==True`` Boolean gate mask used to gate data such that ``gated_data = data[mask]``.
2.168142
2.003597
1.082124
# Extract channels in which to gate if len(channels) != 2: raise ValueError('2 channels should be specified.') data_ch = data[:,channels].view(np.ndarray) # Log if necessary if log: data_ch = np.log10(data_ch) # Center center = np.array(center) data_centered = data_ch - center # Rotate R = np.array([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]]) data_rotated = np.dot(data_centered, R.T) # Generate mask mask = ((data_rotated[:,0]/a)**2 + (data_rotated[:,1]/b)**2 <= 1) # Gate data_gated = data[mask] if full_output: # Calculate contour t = np.linspace(0,1,100)*2*np.pi ci = np.array([a*np.cos(t), b*np.sin(t)]).T ci = np.dot(ci, R) + center if log: ci = 10**ci cntr = [ci] # Build output namedtuple EllipseGateOutput = collections.namedtuple( 'EllipseGateOutput', ['gated_data', 'mask', 'contour']) return EllipseGateOutput( gated_data=data_gated, mask=mask, contour=cntr) else: return data_gated
def ellipse(data, channels, center, a, b, theta=0, log=False, full_output=False)
Gate that preserves events inside an ellipse-shaped region. Events are kept if they satisfy the following relationship:: (x/a)**2 + (y/b)**2 <= 1 where `x` and `y` are the coordinates of the event list, after substracting `center` and rotating by -`theta`. This is mathematically equivalent to maintaining the events inside an ellipse with major axis `a`, minor axis `b`, center at `center`, and tilted by `theta`. Parameters ---------- data : FCSData or numpy array NxD flow cytometry data where N is the number of events and D is the number of parameters (aka channels). channels : list of int, list of str Two channels on which to perform gating. center, a, b, theta (optional) : float Ellipse parameters. `a` is the major axis, `b` is the minor axis. log : bool, optional Flag specifying that log10 transformation should be applied to `data` before gating. full_output : bool, optional Flag specifying to return additional outputs. If true, the outputs are given as a namedtuple. Returns ------- gated_data : FCSData or numpy array Gated flow cytometry data of the same format as `data`. mask : numpy array of bool, only if ``full_output==True`` Boolean gate mask used to gate data such that ``gated_data = data[mask]``. contour : list of 2D numpy arrays, only if ``full_output==True`` List of 2D numpy array(s) of x-y coordinates tracing out the edge of the gated region. Raises ------ ValueError If more or less than 2 channels are specified.
2.714022
2.365336
1.147415
# Copy data array data_t = data.copy().astype(np.float64) # Default if channels is None: if def_channels is None: channels = range(data_t.shape[1]) else: channels = def_channels # Convert channels to iterable if not (hasattr(channels, '__iter__') \ and not isinstance(channels, six.string_types)): channels = [channels] # Apply transformation data_t[:,channels] = transform_fxn(data_t[:,channels]) # Apply transformation to ``data.range`` if hasattr(data_t, '_range'): for channel in channels: # Transform channel name to index if necessary channel_idx = data_t._name_to_index(channel) if data_t._range[channel_idx] is not None: data_t._range[channel_idx] = \ transform_fxn(data_t._range[channel_idx]) return data_t
def transform(data, channels, transform_fxn, def_channels = None)
Apply some transformation function to flow cytometry data. This function is a template transformation function, intended to be used by other specific transformation functions. It performs basic checks on `channels` and `data`. It then applies `transform_fxn` to the specified channels. Finally, it rescales ``data.range`` and if necessary. Parameters ---------- data : FCSData or numpy array NxD flow cytometry data where N is the number of events and D is the number of parameters (aka channels). channels : int, str, list of int, list of str, optional Channels on which to perform the transformation. If `channels` is None, use def_channels. transform_fxn : function Function that performs the actual transformation. def_channels : int, str, list of int, list of str, optional Default set of channels in which to perform the transformation. If `def_channels` is None, use all channels. Returns ------- data_t : FCSData or numpy array NxD transformed flow cytometry data.
2.686494
2.563261
1.048077
# Default sc_channels if sc_channels is None: if data.ndim == 1: sc_channels = range(data.shape[0]) else: sc_channels = range(data.shape[1]) # Check that sc_channels and sc_list have the same length if len(sc_channels) != len(sc_list): raise ValueError("sc_channels and sc_list should have the same length") # Convert sc_channels to indices if hasattr(data, '_name_to_index'): sc_channels = data._name_to_index(sc_channels) # Default channels if channels is None: channels = sc_channels # Convert channels to iterable if not (hasattr(channels, '__iter__') \ and not isinstance(channels, six.string_types)): channels = [channels] # Convert channels to index if hasattr(data, '_name_to_index'): channels_ind = data._name_to_index(channels) else: channels_ind = channels # Check if every channel is in sc_channels for chi, chs in zip(channels_ind, channels): if chi not in sc_channels: raise ValueError("no standard curve for channel {}".format(chs)) # Copy data array data_t = data.copy().astype(np.float64) # Iterate over channels for chi, sc in zip(sc_channels, sc_list): if chi not in channels_ind: continue # Apply transformation data_t[:,chi] = sc(data_t[:,chi]) # Apply transformation to range if hasattr(data_t, '_range') and data_t._range[chi] is not None: data_t._range[chi] = [sc(data_t._range[chi][0]), sc(data_t._range[chi][1])] return data_t
def to_mef(data, channels, sc_list, sc_channels = None)
Transform flow cytometry data using a standard curve function. This function accepts a list of standard curves (`sc_list`) and a list of channels to which those standard curves should be applied (`sc_channels`). `to_mef` automatically checks whether a standard curve is available for each channel specified in `channels`, and throws an error otherwise. This function is intended to be reduced to the following signature:: to_mef_reduced(data, channels) by using ``functools.partial`` once a list of standard curves and their respective channels is available. Parameters ---------- data : FCSData or numpy array NxD flow cytometry data where N is the number of events and D is the number of parameters (aka channels). channels : int, str, list of int, list of str Channels on which to perform the transformation. If `channels` is None, perform transformation in all channels specified on `sc_channels`. sc_list : list of functions Functions implementing the standard curves for each channel in `sc_channels`. sc_channels : list of int or list of str, optional List of channels corresponding to each function in `sc_list`. If None, use all channels in `data`. Returns ------- FCSData or numpy array NxD transformed flow cytometry data. Raises ------ ValueError If any channel specified in `channels` is not in `sc_channels`.
2.333042
2.210673
1.055354
# Mask out-of-range values if mask_out_of_range: x_masked = np.ma.masked_where((x < self._xmin) | (x > self._xmax), x) else: x_masked = x # Calculate s and return return np.interp(x_masked, self._x_range, self._s_range)
def transform_non_affine(self, x, mask_out_of_range=True)
Transform a Nx1 numpy array. Parameters ---------- x : array Data to be transformed. mask_out_of_range : bool, optional Whether to mask input values out of range. Return ------ array or masked array Transformed data.
2.926005
3.301725
0.886205
T = self._T M = self._M W = self._W p = self._p # Calculate x return T * 10**(-(M-W)) * (10**(s-W) - (p**2)*10**(-(s-W)/p) + p**2 - 1)
def transform_non_affine(self, s)
Apply transformation to a Nx1 numpy array. Parameters ---------- s : array Data to be transformed in display scale units. Return ------ array or masked array Transformed data, in data value units.
7.684227
8.072868
0.951858
if numticks is not None: self.numticks = numticks if subs is not None: self._subs = subs
def set_params(self, subs=None, numticks=None)
Set parameters within this locator. Parameters ---------- subs : array, optional Subtick values, as multiples of the main ticks. numticks : array, optional Number of ticks.
2.457584
3.786777
0.648991
b = self._transform.base if vmax < vmin: vmin, vmax = vmax, vmin if not matplotlib.ticker.is_decade(abs(vmin), b): if vmin < 0: vmin = -matplotlib.ticker.decade_up(-vmin, b) else: vmin = matplotlib.ticker.decade_down(vmin, b) if not matplotlib.ticker.is_decade(abs(vmax), b): if vmax < 0: vmax = -matplotlib.ticker.decade_down(-vmax, b) else: vmax = matplotlib.ticker.decade_up(vmax, b) if vmin == vmax: if vmin < 0: vmin = -matplotlib.ticker.decade_up(-vmin, b) vmax = -matplotlib.ticker.decade_down(-vmax, b) else: vmin = matplotlib.ticker.decade_down(vmin, b) vmax = matplotlib.ticker.decade_up(vmax, b) result = matplotlib.transforms.nonsingular(vmin, vmax) return result
def view_limits(self, vmin, vmax)
Try to choose the view limits intelligently.
1.884403
1.838111
1.025185
return _InterpolatedInverseTransform(transform=self._transform, smin=0, smax=self._transform._M)
def get_transform(self)
Get a new object to perform the scaling transformation.
21.495564
18.700768
1.149448
axis.set_major_locator(_LogicleLocator(self._transform)) axis.set_minor_locator(_LogicleLocator(self._transform, subs=np.arange(2.0, 10.))) axis.set_major_formatter(matplotlib.ticker.LogFormatterSciNotation( labelOnlyBase=True))
def set_default_locators_and_formatters(self, axis)
Set up the locators and formatters for the scale. Parameters ---------- axis: matplotlib.axis Axis for which to set locators and formatters.
3.505335
3.653873
0.959348
vmin_bound = self._transform.transform_non_affine(0) vmax_bound = self._transform.transform_non_affine(self._transform.M) vmin = max(vmin, vmin_bound) vmax = min(vmax, vmax_bound) return vmin, vmax
def limit_range_for_scale(self, vmin, vmax, minpos)
Return minimum and maximum bounds for the logicle axis. Parameters ---------- vmin : float Minimum data value. vmax : float Maximum data value. minpos : float Minimum positive value in the data. Ignored by this function. Return ------ float Minimum axis bound. float Maximum axis bound.
2.83125
3.375638
0.83873
# Slice data to take statistics from if channels is None: data_stats = data else: data_stats = data[:, channels] # Calculate and return statistic return np.mean(data_stats, axis=0)
def mean(data, channels=None)
Calculate the mean of the events in an FCSData object. Parameters ---------- data : FCSData or numpy array NxD flow cytometry data where N is the number of events and D is the number of parameters (aka channels). channels : int or str or list of int or list of str, optional Channels on which to calculate the statistic. If None, use all channels. Returns ------- float or numpy array The mean of the events in the specified channels of `data`.
4.489032
6.018084
0.745924
# Slice data to take statistics from if channels is None: data_stats = data else: data_stats = data[:, channels] # Calculate and return statistic return scipy.stats.gmean(data_stats, axis=0)
def gmean(data, channels=None)
Calculate the geometric mean of the events in an FCSData object. Parameters ---------- data : FCSData or numpy array NxD flow cytometry data where N is the number of events and D is the number of parameters (aka channels). channels : int or str or list of int or list of str, optional Channels on which to calculate the statistic. If None, use all channels. Returns ------- float or numpy array The geometric mean of the events in the specified channels of `data`.
4.570107
5.566637
0.820982
# Slice data to take statistics from if channels is None: data_stats = data else: data_stats = data[:, channels] # Calculate and return statistic return np.median(data_stats, axis=0)
def median(data, channels=None)
Calculate the median of the events in an FCSData object. Parameters ---------- data : FCSData or numpy array NxD flow cytometry data where N is the number of events and D is the number of parameters (aka channels). channels : int or str or list of int or list of str, optional Channels on which to calculate the statistic. If None, use all channels. Returns ------- float or numpy array The median of the events in the specified channels of `data`.
4.445992
6.019383
0.738613
# Slice data to take statistics from if channels is None: data_stats = data else: data_stats = data[:, channels] # Calculate and return statistic # scipy.stats.mode returns two outputs, the first of which is an array # containing the modal values. This array has the same number of # dimensions as the input, and with only one element in the first # dimension. We extract this fist element to make it match the other # functions in this module. return scipy.stats.mode(data_stats, axis=0)[0][0]
def mode(data, channels=None)
Calculate the mode of the events in an FCSData object. Parameters ---------- data : FCSData or numpy array NxD flow cytometry data where N is the number of events and D is the number of parameters (aka channels). channels : int or str or list of int or list of str, optional Channels on which to calculate the statistic. If None, use all channels. Returns ------- float or numpy array The mode of the events in the specified channels of `data`.
5.406348
6.00777
0.899893
# Slice data to take statistics from if channels is None: data_stats = data else: data_stats = data[:, channels] # Calculate and return statistic return np.std(data_stats, axis=0)
def std(data, channels=None)
Calculate the standard deviation of the events in an FCSData object. Parameters ---------- data : FCSData or numpy array NxD flow cytometry data where N is the number of events and D is the number of parameters (aka channels). channels : int or str or list of int or list of str, optional Channels on which to calculate the statistic. If None, use all channels. Returns ------- float or numpy array The standard deviation of the events in the specified channels of `data`.
4.539021
5.811913
0.780986
# Slice data to take statistics from if channels is None: data_stats = data else: data_stats = data[:, channels] # Calculate and return statistic return np.std(data_stats, axis=0) / np.mean(data_stats, axis=0)
def cv(data, channels=None)
Calculate the Coeff. of Variation of the events in an FCSData object. Parameters ---------- data : FCSData or numpy array NxD flow cytometry data where N is the number of events and D is the number of parameters (aka channels). channels : int or str or list of int or list of str, optional Channels on which to calculate the statistic. If None, use all channels. Returns ------- float or numpy array The Coefficient of Variation of the events in the specified channels of `data`. Notes ----- The Coefficient of Variation (CV) of a dataset is defined as the standard deviation divided by the mean of such dataset.
4.014305
4.259686
0.942394
# Slice data to take statistics from if channels is None: data_stats = data else: data_stats = data[:, channels] # Calculate and return statistic return np.exp(np.std(np.log(data_stats), axis=0))
def gstd(data, channels=None)
Calculate the geometric std. dev. of the events in an FCSData object. Parameters ---------- data : FCSData or numpy array NxD flow cytometry data where N is the number of events and D is the number of parameters (aka channels). channels : int or str or list of int or list of str, optional Channels on which to calculate the statistic. If None, use all channels. Returns ------- float or numpy array The geometric standard deviation of the events in the specified channels of `data`.
4.617376
5.418686
0.852121
# Slice data to take statistics from if channels is None: data_stats = data else: data_stats = data[:, channels] # Calculate and return statistic return np.sqrt(np.exp(np.std(np.log(data_stats), axis=0)**2) - 1)
def gcv(data, channels=None)
Calculate the geometric CV of the events in an FCSData object. Parameters ---------- data : FCSData or numpy array NxD flow cytometry data where N is the number of events and D is the number of parameters (aka channels). channels : int or str or list of int or list of str, optional Channels on which to calculate the statistic. If None, use all channels. Returns ------- float or numpy array The geometric coefficient of variation of the events in the specified channels of `data`.
4.659977
5.400321
0.862908
# Slice data to take statistics from if channels is None: data_stats = data else: data_stats = data[:, channels] # Calculate and return statistic q75, q25 = np.percentile(data_stats, [75 ,25], axis=0) return q75 - q25
def iqr(data, channels=None)
Calculate the Interquartile Range of the events in an FCSData object. Parameters ---------- data : FCSData or numpy array NxD flow cytometry data where N is the number of events and D is the number of parameters (aka channels). channels : int or str or list of int or list of str, optional Channels on which to calculate the statistic. If None, use all channels. Returns ------- float or numpy array The Interquartile Range of the events in the specified channels of `data`. Notes ----- The Interquartile Range (IQR) of a dataset is defined as the interval between the 25% and the 75% percentiles of such dataset.
3.480556
4.095978
0.84975
# Slice data to take statistics from if channels is None: data_stats = data else: data_stats = data[:, channels] # Calculate and return statistic q75, q25 = np.percentile(data_stats, [75 ,25], axis=0) return (q75 - q25)/np.median(data_stats, axis=0)
def rcv(data, channels=None)
Calculate the RCV of the events in an FCSData object. Parameters ---------- data : FCSData or numpy array NxD flow cytometry data where N is the number of events and D is the number of parameters (aka channels). channels : int or str or list of int or list of str, optional Channels on which to calculate the statistic. If None, use all channels. Returns ------- float or numpy array The Robust Coefficient of Variation of the events in the specified channels of `data`. Notes ----- The Robust Coefficient of Variation (RCV) of a dataset is defined as the Interquartile Range (IQR) divided by the median of such dataset.
3.850543
3.74756
1.02748
infCFe = cfe.getroot().find('./infCFe') cnpjcpf_consumidor = infCFe.findtext('dest/CNPJ') or \ infCFe.findtext('dest/CPF') or '' return '|'.join([ infCFe.attrib['Id'][3:], # remove prefixo "CFe" '{}{}'.format( infCFe.findtext('ide/dEmi'), infCFe.findtext('ide/hEmi')), infCFe.findtext('total/vCFe'), cnpjcpf_consumidor, infCFe.findtext('ide/assinaturaQRCODE'),])
def dados_qrcode(cfe)
Compila os dados que compõem o QRCode do CF-e-SAT, conforme a documentação técnica oficial **Guia para Geração do QRCode pelo Aplicativo Comercial**, a partir de uma instância de ``ElementTree`` que represente a árvore do XML do CF-e-SAT. :param cfe: Instância de :py:mod:`xml.etree.ElementTree.ElementTree`. :return: String contendo a massa de dados para ser usada ao gerar o QRCode. :rtype: str Por exemplo, para gerar a imagem do QRCode [#qrcode]_: .. sourcecode:: python import xml.etree.ElementTree as ET import qrcode with open('CFe_1.xml', 'r') as fp: tree = ET.parse(fp) imagem = qrcode.make(dados_qrcode(tree)) .. [#qrcode] https://pypi.python.org/pypi/qrcode
4.97704
4.915155
1.012591
assert 44 % num_partes == 0, 'O numero de partes nao produz um '\ 'resultado inteiro (partes por 44 digitos): '\ 'num_partes=%s' % num_partes salto = 44 // num_partes return [self._campos[n:(n + salto)] for n in range(0, 44, salto)]
def partes(self, num_partes=11)
Particiona a chave do CF-e-SAT em uma lista de *n* segmentos. :param int num_partes: O número de segmentos (partes) em que os digitos da chave do CF-e-SAT serão particionados. **Esse número deverá resultar em uma divisão inteira por 44 (o comprimento da chave)**. Se não for informado, assume ``11`` partes, comumente utilizado para apresentar a chave do CF-e-SAT no extrato. :return: Lista de strings contendo a chave do CF-e-SAT particionada. :rtype: list
5.704041
5.42108
1.052196
texto = '{:n}'.format(valor) if remover_zeros: dp = locale.localeconv().get('decimal_point') texto = texto.rstrip('0').rstrip(dp) if dp in texto else texto return texto
def texto_decimal(valor, remover_zeros=True)
Converte um valor :py:class:`decimal.Decimal` para texto, com a opção de remover os zeros à direita não significativos. A conversão para texto irá considerar o :py:module:`locale` para converter o texto pronto para apresentação. :param decimal.Decimal valor: Valor a converter para texto. :param bool remover_zeros: *Opcional* Indica se os zeros à direita não significativos devem ser removidos do texto, o que irá incluir o separador decimal se for o caso.
3.850396
5.412796
0.711351
pesos = '23456789' * ((len(base) // 8) + 1) acumulado = sum([int(a) * int(b) for a, b in zip(base[::-1], pesos)]) digito = 11 - (acumulado % 11) return 0 if digito >= 10 else digito
def modulo11(base)
Calcula o dígito verificador (DV) para o argumento usando "Módulo 11". :param str base: String contendo os dígitos sobre os quais o DV será calculado, assumindo que o DV não está incluído no argumento. :return: O dígito verificador calculado. :rtype: int
2.732668
2.951121
0.925976
atributos = valor.as_tuple() if not (minimo <= abs(atributos.exponent) <= maximo): raise ValueError('Numero de casas decimais fora dos limites esperados ' '(valor={!r}, minimo={!r}, maximo={!r}): {!r}'.format( valor, minimo, maximo, atributos))
def validar_casas_decimais(valor, minimo=1, maximo=2)
Valida o número de casas decimais. Se o número de casas decimais não estiver dentro do mínimo e máximo, será lançada uma exceção do tipo :py:exc:`ValueError`. :param valor: Um objeto :py:class:`~decimal.Decimal`. :param minimo: Valor inteiro maior ou igual a zero indicando o número mínimo de casas decimais. Se não informado, ``1`` é o mínimo. :param maximo: Valor inteiro maior ou igual a zero indicando o número máximo de casas decimais. Se não informado, ``2`` é o máximo. :raises ValueError: Se o valor possuir um número de casas decimais fora dos limites mínimo e máximo informados.
3.505343
3.579484
0.979287
if not sigla in [s for s, i, n, r in UNIDADES_FEDERACAO]: raise UnidadeFederativaError('Estado (sigla) UF "%s" ' 'inexistente' % sigla)
def uf(sigla)
Valida a sigla da Unidade Federativa. Se não for uma sigla de UF válida, será lançada a exceção :exc:`UnidadeFederativaError`.
7.462634
6.21608
1.200537
idx = [i for s, i, n, r in UNIDADES_FEDERACAO].index(codigo_ibge) return UNIDADES_FEDERACAO[idx][_UF_SIGLA]
def uf_pelo_codigo(codigo_ibge)
Retorna a UF para o código do IBGE informado.
7.234877
6.660296
1.08627
idx = [s for s, i, n, r in UNIDADES_FEDERACAO].index(sigla) return UNIDADES_FEDERACAO[idx][_UF_CODIGO_IBGE]
def codigo_ibge_uf(sigla)
Retorna o código do IBGE para a UF informada.
5.862553
5.594562
1.047902
_digitos = [int(c) for c in numero if c.isdigit()] if len(_digitos) != 14 or len(numero) != 14: raise NumeroCNPJError('Nao possui 14 digitos: {!r}'.format(numero)) if numero == numero[0] * 14: raise NumeroCNPJError('Todos os digitos iguais: {!r}'.format(numero)) multiplicadores = [6, 5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2] soma1 = sum([_digitos[i] * multiplicadores[i+1] for i in range(12)]) soma2 = sum([_digitos[i] * multiplicadores[i] for i in range(13)]) digito1 = 11 - (soma1 % 11) digito2 = 11 - (soma2 % 11) if digito1 >= 10: digito1 = 0 if digito2 >= 10: digito2 = 0 if _digitos[12] != digito1 or _digitos[13] != digito2: raise NumeroCNPJError('Digitos verificadores invalidos: {!r}'.format(numero))
def cnpj(numero)
Valida um número de CNPJ. O número deverá ser informado como uma string contendo 14 dígitos numéricos. Se o número informado for inválido será lançada a exceção :exc:`NumeroCNPJError`. Esta implementação da validação foi delicadamente copiada de `python-sped <http://git.io/vfuGW>`.
1.821371
1.761745
1.033845
try: cnpj(digitos(numero) if not estrito else numero) return True except NumeroCNPJError: pass return False
def is_cnpj(numero, estrito=False)
Uma versão conveniente para usar em testes condicionais. Apenas retorna verdadeiro ou falso, conforme o argumento é validado. :param bool estrito: Padrão ``False``, indica se apenas os dígitos do número deverão ser considerados. Se verdadeiro, potenciais caracteres que formam a máscara serão removidos antes da validação ser realizada.
5.114462
6.923688
0.73869
_num = digitos(numero) if is_cnpj(_num): return '{}.{}.{}/{}-{}'.format( _num[:2], _num[2:5], _num[5:8], _num[8:12], _num[12:]) return numero
def as_cnpj(numero)
Formata um número de CNPJ. Se o número não for um CNPJ válido apenas retorna o argumento sem qualquer modificação.
2.60433
2.494881
1.043869
_digitos = [int(c) for c in numero if c.isdigit()] if len(_digitos) != 11 or len(numero) != 11: raise NumeroCPFError('Nao possui 11 digitos: {!r}'.format(numero)) if numero == numero[0] * 11: raise NumeroCPFError('Todos os digitos iguais: {!r}'.format(numero)) multiplicadores = [11, 10, 9, 8, 7, 6, 5, 4, 3, 2] soma1 = sum([_digitos[i] * multiplicadores[i+1] for i in range(9)]) soma2 = sum([_digitos[i] * multiplicadores[i] for i in range(10)]) digito1 = 11 - (soma1 % 11) digito2 = 11 - (soma2 % 11) if digito1 >= 10: digito1 = 0 if digito2 >= 10: digito2 = 0 if _digitos[9] != digito1 or _digitos[10] != digito2: raise NumeroCPFError('Digitos verificadores invalidos: {!r}'.format(numero))
def cpf(numero)
Valida um número de CPF. O número deverá ser informado como uma string contendo 11 dígitos numéricos. Se o número informado for inválido será lançada a exceção :exc:`NumeroCPFError`. Esta implementação da validação foi delicadamente copiada de `python-sped <http://git.io/vfuGW>`.
1.874791
1.809049
1.036341
try: cpf(digitos(numero) if not estrito else numero) return True except NumeroCPFError: pass return False
def is_cpf(numero, estrito=False)
Uma versão conveniente para usar em testes condicionais. Apenas retorna verdadeiro ou falso, conforme o argumento é validado. :param bool estrito: Padrão ``False``, indica se apenas os dígitos do número deverão ser considerados. Se verdadeiro, potenciais caracteres que formam a máscara serão removidos antes da validação ser realizada.
5.710249
7.698184
0.741766
_num = digitos(numero) if is_cpf(_num): return '{}.{}.{}-{}'.format(_num[:3], _num[3:6], _num[6:9], _num[9:]) return numero
def as_cpf(numero)
Formata um número de CPF. Se o número não for um CPF válido apenas retorna o argumento sem qualquer modificação.
3.126936
2.855118
1.095204
try: cnpj(numero) except NumeroCNPJError: try: cpf(numero) except NumeroCPFError: raise NumeroCNPJCPFError('numero "%s" nao valida como ' 'CNPJ nem como CPF' % numero)
def cnpjcpf(numero)
Valida um número de CNPJ ou CPF. Veja :func:`cnpj` e/ou :func:`cpf`.
4.327929
4.109443
1.053167
_numero = digitos(numero) if not estrito else numero try: cnpj(_numero) return True except NumeroCNPJError: try: cpf(_numero) return True except NumeroCPFError: pass return False
def is_cnpjcpf(numero, estrito=False)
Uma versão conveniente para usar em testes condicionais. Apenas retorna verdadeiro ou falso, conforme o argumento é validado. :param bool estrito: Padrão ``False``, indica se apenas os dígitos do número deverão ser considerados. Se verdadeiro, potenciais caracteres que formam a máscara serão removidos antes da validação ser realizada.
3.675898
4.261965
0.862489
if is_cnpj(numero): return as_cnpj(numero) elif is_cpf(numero): return as_cpf(numero) return numero
def as_cnpjcpf(numero)
Formata um número de CNPJ ou CPF. Se o número não for um CNPJ ou CPF válidos apenas retorna o argumento sem qualquer modificação.
2.413321
2.508561
0.962034
_digitos = digitos(numero) if len(_digitos) != 8 or len(numero) != 8: raise NumeroCEPError('CEP "%s" nao possui 8 digitos' % numero) elif _digitos[0] * 8 == _digitos: raise NumeroCEPError('CEP "%s" considerado invalido' % numero)
def cep(numero)
Valida um número de CEP. O número deverá ser informado como uma string contendo 8 dígitos numéricos. Se o número informado for inválido será lançada a exceção :exc:`NumeroCEPError`. .. warning:: Qualquer string que contenha 8 dígitos será considerada como um CEP válido, desde que os dígitos não sejam todos iguais.
4.217037
3.522371
1.197215
try: cep(digitos(numero) if not estrito else numero) return True except NumeroCEPError: pass return False
def is_cep(numero, estrito=False)
Uma versão conveniente para usar em testes condicionais. Apenas retorna verdadeiro ou falso, conforme o argumento é validado. :param bool estrito: Padrão ``False``, indica se apenas os dígitos do número deverão ser considerados. Se verdadeiro, potenciais caracteres que formam a máscara serão removidos antes da validação ser realizada.
7.467427
8.843053
0.84444
_numero = digitos(numero) if is_cep(_numero): return '{}-{}'.format(_numero[:5], _numero[5:]) return numero
def as_cep(numero)
Formata um número de CEP. Se o argumento não for um CEP válido apenas retorna o argumento sem qualquer modificação.
5.160287
4.491198
1.148978
ct = message.get_content_type() cs = message.get_content_subtype() if charset is None: charset = get_charset_from_message_fragment(message) if not message.is_multipart(): # we're on a leaf converted = None disposition = message.get('Content-Disposition', 'inline') if disposition == 'inline' and ct in ('text/plain', 'text/markdown'): converted = convert_one(message, config, charset) if converted is not None: if wrap_alternative: new_tree = MIMEMultipart('alternative') _move_headers(message, new_tree) new_tree.attach(message) new_tree.attach(converted) return new_tree, True else: return converted, True return message, False else: if ct == 'multipart/signed': # if this is a multipart/signed message, then let's just # recurse into the non-signature part new_root = MIMEMultipart('alternative') if message.preamble: new_root.preamble = message.preamble _move_headers(message, new_root) converted = None for part in message.get_payload(): if part.get_content_type() != 'application/pgp-signature': converted, did_conversion = convert_tree(part, config, indent=indent + 1, wrap_alternative=False, charset=charset) if did_conversion: new_root.attach(converted) new_root.attach(message) return new_root, did_conversion else: did_conversion = False new_root = MIMEMultipart(cs, message.get_charset()) if message.preamble: new_root.preamble = message.preamble _move_headers(message, new_root) for part in message.get_payload(): part, did_this_conversion = convert_tree(part, config, indent=indent + 1, charset=charset) did_conversion |= did_this_conversion new_root.attach(part) return new_root, did_conversion
def convert_tree(message, config, indent=0, wrap_alternative=True, charset=None)
Recursively convert a potentially-multipart tree. Returns a tuple of (the converted tree, whether any markdown was found)
2.323485
2.300509
1.009988
if c.smtp_ssl: klass = smtplib.SMTP_SSL else: klass = smtplib.SMTP conn = klass(c.smtp_host, c.smtp_port, timeout=c.smtp_timeout) if not c.smtp_ssl: conn.ehlo() conn.starttls() conn.ehlo() if c.smtp_username: conn.login(c.smtp_username, c.smtp_password) return conn
def smtp_connection(c)
Create an SMTP connection from a Config object
1.776195
1.710109
1.038644
issuers = self._get_property('issuers') or [] result = { '_embedded': { 'issuers': issuers, }, 'count': len(issuers), } return List(result, Issuer)
def issuers(self)
Return the list of available issuers for this payment method.
4.684786
4.046549
1.157724
if not payment_id or not payment_id.startswith(self.RESOURCE_ID_PREFIX): raise IdentifierError( "Invalid payment ID: '{id}'. A payment ID should start with '{prefix}'.".format( id=payment_id, prefix=self.RESOURCE_ID_PREFIX) ) result = super(Payments, self).delete(payment_id, data) return self.get_resource_object(result)
def delete(self, payment_id, data=None)
Cancel payment and return the payment object. Deleting a payment causes the payment status to change to canceled. The updated payment object is returned.
3.256946
3.275284
0.994401
return self.client.customer_mandates.with_parent_id(self.customer_id).get(self.mandate_id)
def mandate(self)
Return the mandate for this payment.
7.029842
5.220212
1.346658
return self.client.customer_subscriptions.with_parent_id(self.customer_id).get(self.subscription_id)
def subscription(self)
Return the subscription for this payment.
6.102824
5.110013
1.194287
from ..resources.orders import Order url = self._get_link('order') if url: resp = self.client.orders.perform_api_call(self.client.orders.REST_READ, url) return Order(resp, self.client)
def order(self)
Return the order for this payment.
7.272339
6.348135
1.145587
try: version_file = open(os.path.join(ROOT_DIR, 'mollie', 'api', 'version.py'), encoding='utf=8') except TypeError: # support python 2 version_file = open(os.path.join(ROOT_DIR, 'mollie', 'api', 'version.py')) contents = version_file.read() match = re.search(r'VERSION = [\'"]([^\'"]+)', contents) if match: return match.group(1) else: raise RuntimeError("Can't determine package version")
def get_version()
Read the version from a file (mollie/api/version.py) in the repository. We can't import here since we might import from an installed version.
2.670403
2.283023
1.169678
if data is None: data = {'lines': []} refund = OrderRefunds(self.client).on(self).create(data, **params) return refund
def create_refund(self, data=None, **params)
Create a refund for the order. When no data arg is given, a refund for all order lines is assumed.
6.240181
5.285131
1.180705
from ..resources.order_lines import OrderLines if data is None: data = {'lines': []} canceled = OrderLines(self.client).on(self).delete(data) return canceled
def cancel_lines(self, data=None)
Cancel the lines given. When no lines are given, cancel all the lines. Canceling an order line causes the order line status to change to canceled. An empty dictionary will be returned.
8.113512
6.747589
1.202431
return OrderLines(self.client).on(self).update(resource_id, data)
def update_line(self, resource_id, data)
Update a line for an order.
15.431857
9.50104
1.624228
if data is None: data = {'lines': []} return Shipments(self.client).on(self).create(data)
def create_shipment(self, data=None)
Create a shipment for an order. When no data arg is given, a shipment for all order lines is assumed.
7.41896
5.553693
1.335861
return Shipments(self.client).on(self).get(resource_id)
def get_shipment(self, resource_id)
Retrieve a single shipment by a shipment's ID.
13.268768
11.831061
1.12152
return Shipments(self.client).on(self).update(resource_id, data)
def update_shipment(self, resource_id, data)
Update the tracking information of a shipment.
9.68917
10.136998
0.955822
return OrderPayments(self.client).on(self).create(data)
def create_payment(self, data)
Creates a new payment object for an order.
18.920118
11.648121
1.624306
path = self.get_resource_name() result = self.perform_api_call(self.REST_DELETE, path, data=data) return result
def delete(self, data, *args)
Custom handling for deleting orderlines. Orderlines are deleted by issuing a DELETE on the orders/*/lines endpoint, with the orderline IDs and quantities in the request body.
5.865684
6.730604
0.871494
path = self.get_resource_name() + '/' + str(resource_id) result = self.perform_api_call(self.REST_UPDATE, path, data=data) for line in result['lines']: if line['id'] == resource_id: return self.get_resource_object(line) raise DataConsistencyError('Line id {resource_id} not found in response.'.format(resource_id=resource_id))
def update(self, resource_id, data=None, **params)
Custom handling for updating orderlines. The API returns an Order object. Since we are sending the request through an orderline object, it makes more sense to convert the returned object to to the updated orderline object. If you wish to retrieve the order object, you can do so by using the order_id property of the orderline.
4.210009
4.035707
1.04319
url = self._get_link('next') resource = self.object_type.get_resource_class(self.client) resp = resource.perform_api_call(resource.REST_READ, url) return List(resp, self.object_type, self.client)
def get_next(self)
Return the next set of objects in a list
6.416053
6.308798
1.017001
if not subscription_id or not subscription_id.startswith(self.RESOURCE_ID_PREFIX): raise IdentifierError( "Invalid subscription ID: '{id}'. A subscription ID should start with '{prefix}'.".format( id=subscription_id, prefix=self.RESOURCE_ID_PREFIX) ) result = super(CustomerSubscriptions, self).delete(subscription_id, data) return self.get_resource_object(result)
def delete(self, subscription_id, data=None)
Cancel subscription and return the subscription object. Deleting a subscription causes the subscription status to changed to 'canceled'. The updated subscription object is returned.
3.325505
3.342787
0.99483
if not chargeback_id or not chargeback_id.startswith(self.RESOURCE_ID_PREFIX): raise IdentifierError( "Invalid chargeback ID: '{id}'. A chargeback ID should start with '{prefix}'.".format( id=chargeback_id, prefix=self.RESOURCE_ID_PREFIX) ) return super(Chargebacks, self).get(chargeback_id, **params)
def get(self, chargeback_id, **params)
Verify the chargeback ID and retrieve the chargeback from the API.
2.651923
2.68288
0.988461
if not params: return None parts = [] for param, value in sorted(params.items()): if not isinstance(value, dict): parts.append(urlencode({param: value})) else: # encode dictionary with square brackets for key, sub_value in sorted(value.items()): composed = '{param}[{key}]'.format(param=param, key=key) parts.append(urlencode({composed: sub_value})) if parts: return '&'.join(parts)
def generate_querystring(params)
Generate a querystring suitable for use in the v2 api. The Requests library doesn't know how to generate querystrings that encode dictionaries using square brackets: https://api.mollie.com/v2/methods?amount[value]=100.00&amount[currency]=USD Note: we use `sorted()` to work around a difference in iteration behaviour between Python 2 and 3. This makes the output predictable, and ordering of querystring parameters shouldn't matter.
2.723418
2.543288
1.070825