Dataset columns: sentence1 (string, lengths 52 to 3.87M), sentence2 (string, lengths 1 to 47.2k), label (string, 1 class: entailment).
def public(self): """True if the Slot is public.""" return bool(lib.EnvSlotPublicP(self._env, self._cls, self._name))
True if the Slot is public.
entailment
def initializable(self): """True if the Slot is initializable.""" return bool(lib.EnvSlotInitableP(self._env, self._cls, self._name))
True if the Slot is initializable.
entailment
def writable(self): """True if the Slot is writable.""" return bool(lib.EnvSlotWritableP(self._env, self._cls, self._name))
True if the Slot is writable.
entailment
def accessible(self): """True if the Slot is directly accessible.""" return bool(lib.EnvSlotDirectAccessP(self._env, self._cls, self._name))
True if the Slot is directly accessible.
entailment
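The four predicates above map one-to-one onto CLIPS's slot-publicp, slot-initablep, slot-writablep, and slot-direct-accessp queries. A minimal usage sketch, assuming a clipspy-style `Environment` with a `find_class` lookup, a `slots()` iterator, and a `name` accessor on Slot (none of which appear above); the accessors are written as methods here, though released builds may expose them as properties::

    import clips

    env = clips.Environment()
    env.build("(defclass Point (is-a USER)"
              " (slot x (create-accessor read-write))"
              " (slot y (create-accessor read-write)))")

    # find_class, slots() and name() are assumed helpers; the four
    # boolean predicates are the ones defined above.
    for slot in env.find_class('Point').slots():
        print(slot.name(), slot.public(), slot.initializable(),
              slot.writable(), slot.accessible())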
def types(self): """A tuple containing the value types for this Slot. The Python equivalent of the CLIPS slot-types function. """ data = clips.data.DataObject(self._env) lib.EnvSlotTypes(self._env, self._cls, self._name, data.byref) return tuple(data.value) if isinstance(data.value, list) else ()
A tuple containing the value types for this Slot. The Python equivalent of the CLIPS slot-types function.
entailment
def sources(self): """A tuple containing the names of the Class sources for this Slot. The Python equivalent of the CLIPS slot-sources function. """ data = clips.data.DataObject(self._env) lib.EnvSlotSources(self._env, self._cls, self._name, data.byref) return tuple(data.value) if isinstance(data.value, list) else ()
A tuple containing the names of the Class sources for this Slot. The Python equivalent of the CLIPS slot-sources function.
entailment
def range(self): """A tuple containing the numeric range for this Slot. The Python equivalent of the CLIPS slot-range function. """ data = clips.data.DataObject(self._env) lib.EnvSlotRange(self._env, self._cls, self._name, data.byref) return tuple(data.value) if isinstance(data.value, list) else ()
A tuple containing the numeric range for this Slot. The Python equivalent of the CLIPS slot-range function.
entailment
def facets(self): """A tuple containing the facets for this Slot. The Python equivalent of the CLIPS slot-facets function. """ data = clips.data.DataObject(self._env) lib.EnvSlotFacets(self._env, self._cls, self._name, data.byref) return tuple(data.value) if isinstance(data.value, list) else ()
A tuple containing the facets for this Slot. The Python equivalent of the CLIPS slot-facets function.
entailment
def cardinality(self): """A tuple containing the cardinality for this Slot. The Python equivalent of the CLIPS slot-cardinality function. """ data = clips.data.DataObject(self._env) lib.EnvSlotCardinality( self._env, self._cls, self._name, data.byref) return tuple(data.value) if isinstance(data.value, list) else ()
A tuple containing the cardinality for this Slot. The Python equivalent of the CLIPS slot-cardinality function.
entailment
def default_value(self): """The default value for this Slot. The Python equivalent of the CLIPS slot-default-value function. """ data = clips.data.DataObject(self._env) lib.EnvSlotDefaultValue( self._env, self._cls, self._name, data.byref) return data.value
The default value for this Slot. The Python equivalent of the CLIPS slot-default-value function.
entailment
def allowed_values(self): """A tuple containing the allowed values for this Slot. The Python equivalent of the CLIPS slot-allowed-values function. """ data = clips.data.DataObject(self._env) lib.EnvSlotAllowedValues( self._env, self._cls, self._name, data.byref) return tuple(data.value) if isinstance(data.value, list) else ()
A tuple containing the allowed values for this Slot. The Python equivalent of the CLIPS slot-allowed-values function.
entailment
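Continuing the sketch, the introspection accessors defined above expose a slot's constraint facets; each returns a tuple, except `default_value`, which returns the raw value. Assuming `slot` is a Slot obtained as in the previous example::

    print(slot.types())           # e.g. ('INTEGER', 'FLOAT')
    print(slot.sources())         # names of the defining classes
    print(slot.range())           # numeric range facet
    print(slot.cardinality())     # multifield cardinality facet
    print(slot.default_value())   # whatever the default facet yields
    print(slot.allowed_values())  # empty tuple when unrestricted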
def allowed_classes(self): """Iterate over the allowed classes for this slot. The Python equivalent of the CLIPS slot-allowed-classes function. """ data = clips.data.DataObject(self._env) lib.EnvSlotAllowedClasses( self._env, self._cls, self._name, data.byref) if isinstance(data.value, list): for klass in classes(self._env, data.value): yield klass
Iterate over the allowed classes for this slot. The Python equivalent of the CLIPS slot-allowed-classes function.
entailment
def name(self): """Instance name.""" return ffi.string(lib.EnvGetInstanceName(self._env, self._ist)).decode()
Instance name.
entailment
def instance_class(self): """Instance class.""" return Class(self._env, lib.EnvGetInstanceClass(self._env, self._ist))
Instance class.
entailment
def send(self, message, arguments=None): """Send a message to the Instance. Message arguments must be provided as a string. """ output = clips.data.DataObject(self._env) instance = clips.data.DataObject( self._env, dtype=CLIPSType.INSTANCE_ADDRESS) instance.value = self._ist args = arguments.encode() if arguments is not None else ffi.NULL lib.EnvSend( self._env, instance.byref, message.encode(), args, output.byref) return output.value
Send a message to the Instance. Message arguments must be provided as a string.
entailment
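A short usage sketch for `send`, reusing `env` from the earlier examples and assuming a `make_instance` helper (not shown above); per the docstring, message arguments travel as a single string::

    inst = env.make_instance('(p of Point (x 1) (y 2))')  # make_instance assumed
    print(inst.send('get-x'))    # message with no arguments
    inst.send('put-x', '3')      # arguments passed as one string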
def delete(self): """Delete the instance.""" if lib.EnvDeleteInstance(self._env, self._ist) != 1: raise CLIPSError(self._env)
Delete the instance.
entailment
def unmake(self): """This method is equivalent to delete except that it uses message-passing instead of directly deleting the instance. """ if lib.EnvUnmakeInstance(self._env, self._ist) != 1: raise CLIPSError(self._env)
This method is equivalent to delete except that it uses message-passing instead of directly deleting the instance.
entailment
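The practical difference between the two removal paths: `unmake` routes through message-passing, so any user-defined delete handlers run, while `delete` removes the instance directly. A hedged sketch::

    inst.unmake()    # fires delete message-handlers, then removes the instance
    # inst.delete()  # would bypass message-passing entirely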
def name(self): """MessageHandler name.""" return ffi.string(lib.EnvGetDefmessageHandlerName( self._env, self._cls, self._idx)).decode()
MessageHandler name.
entailment
def type(self): """MessageHandler type.""" return ffi.string(lib.EnvGetDefmessageHandlerType( self._env, self._cls, self._idx)).decode()
MessageHandler type.
entailment
def watch(self): """True if the MessageHandler is being watched.""" return bool(lib.EnvGetDefmessageHandlerWatch( self._env, self._cls, self._idx))
True if the MessageHandler is being watched.
entailment
def watch(self, flag): """True if the MessageHandler is being watched.""" lib.EnvSetDefmessageHandlerWatch( self._env, int(flag), self._cls, self._idx)
True if the MessageHandler is being watched.
entailment
def deletable(self): """True if the MessageHandler can be deleted.""" return bool(lib.EnvIsDefmessageHandlerDeletable( self._env, self._cls, self._idx))
True if the MessageHandler can be deleted.
entailment
def undefine(self): """Undefine the MessageHandler. Python equivalent of the CLIPS undefmessage-handler command. The object becomes unusable after this method has been called. """ if lib.EnvUndefmessageHandler(self._env, self._cls, self._idx) != 1: raise CLIPSError(self._env) self._env = None
Undefine the MessageHandler. Python equivalent of the CLIPS undefmessage-handler command. The object becomes unusable after this method has been called.
entailment
def load(self, path): """Load a set of constructs into the CLIPS database. Constructs can be in text or binary format. The Python equivalent of the CLIPS load command. """ try: self._load_binary(path) except CLIPSError: self._load_text(path)
Load a set of constructs into the CLIPS database. Constructs can be in text or binary format. The Python equivalent of the CLIPS load command.
entailment
def save(self, path, binary=False): """Save the set of constructs in the CLIPS database to the given path. If binary is True, the constructs will be saved in binary format. The Python equivalent of the CLIPS save command. """ if binary: ret = lib.EnvBsave(self._env, path.encode()) else: ret = lib.EnvSave(self._env, path.encode()) if ret == 0: raise CLIPSError(self._env)
Save the set of constructs in the CLIPS database to the given path. If binary is True, the constructs will be saved in binary format. The Python equivalent of the CLIPS save command.
entailment
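A round-trip sketch for the two persistence calls above; since `load` tries the binary reader first and falls back to text, both files below reload with the same call. File names are illustrative::

    env.save('constructs.clp')               # text format
    env.save('constructs.bin', binary=True)  # CLIPS bsave format
    env.load('constructs.bin')               # binary attempted first, then text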
def batch_star(self, path): """Evaluate the commands contained in the file at the given path. The Python equivalent of the CLIPS batch* command. """ if lib.EnvBatchStar(self._env, path.encode()) != 1: raise CLIPSError(self._env)
Evaluate the commands contained in the file at the given path. The Python equivalent of the CLIPS batch* command.
entailment
def build(self, construct): """Build a single construct in CLIPS. The Python equivalent of the CLIPS build command. """ if lib.EnvBuild(self._env, construct.encode()) != 1: raise CLIPSError(self._env)
Build a single construct in CLIPS. The Python equivalent of the CLIPS build command.
entailment
def eval(self, construct): """Evaluate an expression returning its value. The Python equivalent of the CLIPS eval command. """ data = clips.data.DataObject(self._env) if lib.EnvEval(self._env, construct.encode(), data.byref) != 1: raise CLIPSError(self._env) return data.value
Evaluate an expression returning its value. The Python equivalent of the CLIPS eval command.
entailment
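`build` and `eval` cover the two common entry points: adding a single construct and evaluating a single expression. A minimal example combining them::

    env.build('(deffunction square (?x) (* ?x ?x))')  # add one construct
    print(env.eval('(square 7)'))                     # -> 49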
def define_function(self, function, name=None): """Define the Python function within the CLIPS environment. If a name is given, it will be the function name within CLIPS. Otherwise, the name of the Python function will be used. The Python function will be accessible within CLIPS via its name as if it was defined via the `deffunction` construct. """ name = name if name is not None else function.__name__ ENVIRONMENT_DATA[self._env].user_functions[name] = function self.build(DEFFUNCTION.format(name))
Define the Python function within the CLIPS environment. If a name is given, it will be the function name within CLIPS. Otherwise, the name of the Python function will be used. The Python function will be accessible within CLIPS via its name as if it was defined via the `deffunction` construct.
entailment
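The example below exercises `define_function` end to end: the Python callable becomes visible inside CLIPS under its own name, exactly as the docstring describes::

    def double(value):
        return value * 2

    env.define_function(double)
    print(env.eval('(double 21)'))   # -> 42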
def activate(self): """Activate the Router.""" if lib.EnvActivateRouter(self._env, self._name.encode()) == 0: raise RuntimeError("Unable to activate router %s" % self._name)
Activate the Router.
entailment
def deactivate(self): """Deactivate the Router.""" if lib.EnvDeactivateRouter(self._env, self._name.encode()) == 0: raise RuntimeError("Unable to deactivate router %s" % self._name)
Deactivate the Router.
entailment
def delete(self): """Delete the Router.""" if lib.EnvDeleteRouter(self._env, self._name.encode()) == 0: raise RuntimeError("Unable to delete router %s" % self._name)
Delete the Router.
entailment
def add_to_environment(self, environment): """Add the router to the given environment.""" self._env = environment._env self._userdata = ffi.new_handle(self) ENVIRONMENT_DATA[self._env].routers[self.name] = self lib.EnvAddRouterWithContext( self._env, self._name.encode(), self._priority, lib.query_function, lib.print_function, lib.getc_function, lib.ungetc_function, lib.exit_function, self._userdata)
Add the router to the given environment.
entailment
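`add_to_environment` registers all five router callbacks with `self` as context, so a concrete router only needs to override the hooks it cares about. A sketch of a logging router, assuming a `Router` base class constructed with a name and a priority that dispatches to `query`/`print` methods (the exact base-class interface is not shown above)::

    import sys

    class StdoutRouter(Router):          # Router base class assumed
        def query(self, name):           # claim the standard output routes
            return name in ('wdisplay', 'stdout')

        def print(self, name, message):  # forward everything to Python stdout
            sys.stdout.write(message)

    router = StdoutRouter('py-stdout', 40)
    router.add_to_environment(env)
    router.activate()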
def value(self): """Return the DATA_OBJECT stored value.""" dtype = lib.get_data_type(self._data) dvalue = lib.get_data_value(self._data) if dvalue == ffi.NULL: return None return self.python_value(dtype, dvalue)
Return the DATA_OBJECT stored value.
entailment
def value(self, value): """Sets the DATA_OBJECT stored value.""" dtype = TYPES[type(value)] if self._type is None else self._type lib.set_data_type(self._data, dtype) lib.set_data_value(self._data, self.clips_value(value))
Sets the DATA_OBJECT stored value.
entailment
def python_value(self, dtype, dvalue): """Convert a CLIPS type into Python.""" try: return CONVERTERS[dtype](dvalue) except KeyError: if dtype == clips.common.CLIPSType.MULTIFIELD: return self.multifield_to_list() if dtype == clips.common.CLIPSType.FACT_ADDRESS: return clips.facts.new_fact(self._env, lib.to_pointer(dvalue)) if dtype == clips.common.CLIPSType.INSTANCE_ADDRESS: return clips.classes.Instance(self._env, lib.to_pointer(dvalue)) return None
Convert a CLIPS type into Python.
entailment
def clips_value(self, dvalue): """Convert a Python type into CLIPS.""" try: return VALUES[type(dvalue)](self._env, dvalue) except KeyError: if isinstance(dvalue, (list, tuple)): return self.list_to_multifield(dvalue) if isinstance(dvalue, (clips.facts.Fact)): return dvalue._fact if isinstance(dvalue, (clips.classes.Instance)): return dvalue._ist return ffi.NULL
Convert a Python type into CLIPS.
entailment
def agenda_changed(self): """True if any rule activation changes have occurred.""" value = bool(lib.EnvGetAgendaChanged(self._env)) lib.EnvSetAgendaChanged(self._env, int(False)) return value
True if any rule activation changes have occurred.
entailment
def rules(self): """Iterate over the defined Rules.""" rule = lib.EnvGetNextDefrule(self._env, ffi.NULL) while rule != ffi.NULL: yield Rule(self._env, rule) rule = lib.EnvGetNextDefrule(self._env, rule)
Iterate over the defined Rules.
entailment
def find_rule(self, rule): """Find a Rule by name.""" defrule = lib.EnvFindDefrule(self._env, rule.encode()) if defrule == ffi.NULL: raise LookupError("Rule '%s' not found" % rule) return Rule(self._env, defrule)
Find a Rule by name.
entailment
def reorder(self, module=None): """Reorder the Activations in the Agenda. If no Module is specified, the current one is used. To be called after changing the conflict resolution strategy. """ module = module._mdl if module is not None else ffi.NULL lib.EnvReorderAgenda(self._env, module)
Reorder the Activations in the Agenda. If no Module is specified, the current one is used. To be called after changing the conflict resolution strategy.
entailment
def refresh(self, module=None): """Recompute the salience values of the Activations on the Agenda and then reorder the agenda. The Python equivalent of the CLIPS refresh-agenda command. If no Module is specified, the current one is used. """ module = module._mdl if module is not None else ffi.NULL lib.EnvRefreshAgenda(self._env, module)
Recompute the salience values of the Activations on the Agenda and then reorder the agenda. The Python equivalent of the CLIPS refresh-agenda command. If no Module is specified, the current one is used.
entailment
def activations(self): """Iterate over the Activations in the Agenda.""" activation = lib.EnvGetNextActivation(self._env, ffi.NULL) while activation != ffi.NULL: yield Activation(self._env, activation) activation = lib.EnvGetNextActivation(self._env, activation)
Iterate over the Activations in the Agenda.
entailment
def clear(self): """Deletes all activations in the agenda.""" if lib.EnvDeleteActivation(self._env, ffi.NULL) != 1: raise CLIPSError(self._env)
Deletes all activations in the agenda.
entailment
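Putting the agenda accessors together: after asserting matching facts, pending activations can be inspected before any rule fires. A sketch assuming the wrappers above are reachable from the Environment and that it also exposes the usual CLIPS `run` command (not shown above)::

    env.build('(defrule greet (phase greeting) => (printout t "hi" crlf))')
    env.eval('(assert (phase greeting))')
    for activation in env.activations():
        print(activation.name())          # -> greet
    env.run()                             # run is assumed here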
def name(self): """Rule name.""" return ffi.string(lib.EnvGetDefruleName(self._env, self._rule)).decode()
Rule name.
entailment
def module(self): """The module in which the Rule is defined. Python equivalent of the CLIPS defrule-module command. """ modname = ffi.string(lib.EnvDefruleModule(self._env, self._rule)) defmodule = lib.EnvFindDefmodule(self._env, modname) return Module(self._env, defmodule)
The module in which the Rule is defined. Python equivalent of the CLIPS defrule-module command.
entailment
def watch_firings(self, flag): """Whether or not the Rule firings are being watched.""" lib.EnvSetDefruleWatchFirings(self._env, int(flag), self._rule)
Whether or not the Rule firings are being watched.
entailment
def watch_activations(self, flag): """Whether or not the Rule Activations are being watched.""" lib.EnvSetDefruleWatchActivations(self._env, int(flag), self._rule)
Whether or not the Rule Activations are being watched.
entailment
def matches(self, verbosity=Verbosity.TERSE): """Shows partial matches and activations. Returns a tuple containing the combined sum of the matches for each pattern, the combined sum of partial matches and the number of activations. The verbosity parameter controls how much to output: * Verbosity.VERBOSE: detailed matches are printed to stdout * Verbosity.SUCCINT: a brief description is printed to stdout * Verbosity.TERSE: (default) nothing is printed to stdout """ data = clips.data.DataObject(self._env) lib.EnvMatches(self._env, self._rule, verbosity, data.byref) return tuple(data.value)
Shows partial matches and activations. Returns a tuple containing the combined sum of the matches for each pattern, the combined sum of partial matches and the number of activations. The verbosity parameter controls how much to output: * Verbosity.VERBOSE: detailed matches are printed to stdout * Verbosity.SUCCINT: a brief description is printed to stdout * Verbosity.TERSE: (default) nothing is printed to stdout
entailment
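Per the docstring, the default `Verbosity.TERSE` prints nothing and only returns the three counters. A quick sketch, reusing the rule built in the agenda example above::

    rule = env.find_rule('greet')
    matches, partials, activations = rule.matches()   # TERSE: silent
    print(matches, partials, activations)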
def refresh(self): """Refresh the Rule. The Python equivalent of the CLIPS refresh command. """ if lib.EnvRefresh(self._env, self._rule) != 1: raise CLIPSError(self._env)
Refresh the Rule. The Python equivalent of the CLIPS refresh command.
entailment
def undefine(self): """Undefine the Rule. Python equivalent of the CLIPS undefrule command. The object becomes unusable after this method has been called. """ if lib.EnvUndefrule(self._env, self._rule) != 1: raise CLIPSError(self._env) self._env = None
Undefine the Rule. Python equivalent of the CLIPS undefrule command. The object becomes unusable after this method has been called.
entailment
def name(self): """Activation Rule name.""" return ffi.string( lib.EnvGetActivationName(self._env, self._act)).decode()
Activation Rule name.
entailment
def salience(self, salience): """Activation salience value.""" lib.EnvSetActivationSalience(self._env, self._act, salience)
Activation salience value.
entailment
def delete(self): """Remove the activation from the agenda.""" if lib.EnvDeleteActivation(self._env, self._act) != 1: raise CLIPSError(self._env) self._env = None
Remove the activation from the agenda.
entailment
def globals_changed(self): """True if any Global has changed.""" value = bool(lib.EnvGetGlobalsChanged(self._env)) lib.EnvSetGlobalsChanged(self._env, int(False)) return value
True if any Global has changed.
entailment
def globals(self): """Iterates over the defined Globals.""" defglobal = lib.EnvGetNextDefglobal(self._env, ffi.NULL) while defglobal != ffi.NULL: yield Global(self._env, defglobal) defglobal = lib.EnvGetNextDefglobal(self._env, defglobal)
Iterates over the defined Globals.
entailment
def find_global(self, name): """Find the Global by its name.""" defglobal = lib.EnvFindDefglobal(self._env, name.encode()) if defglobal == ffi.NULL: raise LookupError("Global '%s' not found" % name) return Global(self._env, defglobal)
Find the Global by its name.
entailment
def modules(self): """Iterates over the defined Modules.""" defmodule = lib.EnvGetNextDefmodule(self._env, ffi.NULL) while defmodule != ffi.NULL: yield Module(self._env, defmodule) defmodule = lib.EnvGetNextDefmodule(self._env, defmodule)
Iterates over the defined Modules.
entailment
def find_module(self, name): """Find the Module by its name.""" defmodule = lib.EnvFindDefmodule(self._env, name.encode()) if defmodule == ffi.NULL: raise LookupError("Module '%s' not found" % name) return Module(self._env, defmodule)
Find the Module by its name.
entailment
def value(self): """Global value.""" data = clips.data.DataObject(self._env) if lib.EnvGetDefglobalValue( self._env, self.name.encode(), data.byref) != 1: raise CLIPSError(self._env) return data.value
Global value.
entailment
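A small sketch tying `find_global` to the `value` accessor above; the lookup name is assumed to omit the `?*...*` decoration that the defglobal construct uses::

    env.build('(defglobal ?*greeting* = "hello")')
    glb = env.find_global('greeting')
    print(glb.value())   # -> 'hello'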
def name(self): """Global name.""" return ffi.string( lib.EnvGetDefglobalName(self._env, self._glb)).decode()
Global name.
entailment
def module(self): """The module in which the Global is defined. Python equivalent of the CLIPS defglobal-module command. """ modname = ffi.string(lib.EnvDefglobalModule(self._env, self._glb)) defmodule = lib.EnvFindDefmodule(self._env, modname) return Module(self._env, defmodule)
The module in which the Global is defined. Python equivalent of the CLIPS defglobal-module command.
entailment
def watch(self, flag): """Whether or not the Global is being watched.""" lib.EnvSetDefglobalWatch(self._env, int(flag), self._glb)
Whether or not the Global is being watched.
entailment
def undefine(self): """Undefine the Global. Python equivalent of the CLIPS undefglobal command. The object becomes unusable after this method has been called. """ if lib.EnvUndefglobal(self._env, self._glb) != 1: raise CLIPSError(self._env) self._env = None
Undefine the Global. Python equivalent of the CLIPS undefglobal command. The object becomes unusable after this method has been called.
entailment
def name(self): """Module name.""" return ffi.string( lib.EnvGetDefmoduleName(self._env, self._mdl)).decode()
Module name.
entailment
def functions(self): """Iterates over the defined Functions.""" deffunction = lib.EnvGetNextDeffunction(self._env, ffi.NULL) while deffunction != ffi.NULL: yield Function(self._env, deffunction) deffunction = lib.EnvGetNextDeffunction(self._env, deffunction)
Iterates over the defined Functions.
entailment
def find_function(self, name): """Find the Function by its name.""" deffunction = lib.EnvFindDeffunction(self._env, name.encode()) if deffunction == ffi.NULL: raise LookupError("Function '%s' not found" % name) return Function(self._env, deffunction)
Find the Function by its name.
entailment
def generics(self): """Iterates over the defined Generics.""" defgeneric = lib.EnvGetNextDefgeneric(self._env, ffi.NULL) while defgeneric != ffi.NULL: yield Generic(self._env, defgeneric) defgeneric = lib.EnvGetNextDefgeneric(self._env, defgeneric)
Iterates over the defined Generics.
entailment
def find_generic(self, name): """Find the Generic by its name.""" defgeneric = lib.EnvFindDefgeneric(self._env, name.encode()) if defgeneric == ffi.NULL: raise LookupError("Generic '%s' not found" % name) return Generic(self._env, defgeneric)
Find the Generic by its name.
entailment
def name(self): """Function name.""" return ffi.string( lib.EnvGetDeffunctionName(self._env, self._fnc)).decode()
Function name.
entailment
def module(self): """The module in which the Function is defined. Python equivalent of the CLIPS deffunction-module command. """ modname = ffi.string(lib.EnvDeffunctionModule(self._env, self._fnc)) defmodule = lib.EnvFindDefmodule(self._env, modname) return Module(self._env, defmodule)
The module in which the Function is defined. Python equivalent of the CLIPS deffunction-module command.
entailment
def watch(self, flag): """Whether or not the Function is being watched.""" lib.EnvSetDeffunctionWatch(self._env, int(flag), self._fnc)
Whether or not the Function is being watched.
entailment
def undefine(self): """Undefine the Function. Python equivalent of the CLIPS undeffunction command. The object becomes unusable after this method has been called. """ if lib.EnvUndeffunction(self._env, self._fnc) != 1: raise CLIPSError(self._env) self._env = None
Undefine the Function. Python equivalent of the CLIPS undeffunction command. The object becomes unusable after this method has been called.
entailment
def undefine(self): """Undefine the Generic. Python equivalent of the CLIPS undefgeneric command. The object becomes unusable after this method has been called. """ if lib.EnvUndefgeneric(self._env, self._gnc) != 1: raise CLIPSError(self._env) self._env = None
Undefine the Generic. Python equivalent of the CLIPS undefgeneric command. The object becomes unusable after this method has been called.
entailment
def undefine(self): """Undefine the Method. Python equivalent of the CLIPS undefmethod command. The object becomes unusable after this method has been called. """ if lib.EnvUndefmethod(self._env, self._gnc, self._idx) != 1: raise CLIPSError(self._env) self._env = None
Undefine the Method. Python equivalent of the CLIPS undefmethod command. The object becomes unusable after this method has been called.
entailment
def _get_deviation_of_mean(self, series, multiplier=3): """ Return the count and percentage of values deviating from the mean, i.e. larger than `multiplier` * `std`. :param series: pandas Series to inspect. :param multiplier: number of standard deviations used as the cap. :return: tuple of (count, percentage). """ capped_series = np.minimum( series, series.mean() + multiplier * series.std()) count = pd.value_counts(series != capped_series) count = count[True] if True in count else 0 perc = self._percent(count / self.length) return count, perc
Return the count and percentage of values deviating from the mean, i.e. larger than `multiplier` * `std`. :param series: pandas Series to inspect. :param multiplier: number of standard deviations used as the cap. :return: tuple of (count, percentage).
entailment
def _get_median_absolute_deviation(self, series, multiplier=3): """ Return the count and percentage of values larger than `multiplier` * `mad` (median absolute deviation). :param series: pandas Series to inspect. :param multiplier: number of MADs used as the cap. :return: tuple of (count, percentage). """ capped_series = np.minimum( series, series.median() + multiplier * series.mad()) count = pd.value_counts(series != capped_series) count = count[True] if True in count else 0 perc = self._percent(count / self.length) return count, perc
Return the count and percentage of values larger than `multiplier` * `mad` (median absolute deviation). :param series: pandas Series to inspect. :param multiplier: number of MADs used as the cap. :return: tuple of (count, percentage).
entailment
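Both helpers share the same pattern: cap the series at `center + multiplier * spread`, then count how many values the cap changed. A self-contained pandas illustration of the mean/std variant, using `multiplier=2` so the single extreme value clears the cap in this six-point series (the helpers default to 3)::

    import numpy as np
    import pandas as pd

    series = pd.Series([1, 2, 3, 2, 1, 50])   # one obvious outlier
    multiplier = 2
    capped = np.minimum(series, series.mean() + multiplier * series.std())
    print(int((series != capped).sum()))      # -> 1: only the 50 was capped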
def get_columns(self, df, usage, columns=None): """ Returns a subset of `df.columns` selected according to `usage`. :param df: dataframe to select columns from :param usage: one of [ALL, INCLUDE, EXCLUDE]; only meaningful when `columns` is also set, otherwise use the default ALL. :param columns: * if `usage` is ALL, this value is not used. * if `usage` is INCLUDE, the result is restricted to the intersection between `columns` and `df.columns` * if `usage` is EXCLUDE, the result is `df.columns` minus these `columns` :return: the `df` columns, included/excluded according to `usage`. """ columns_excluded = pd.Index([]) columns_included = df.columns if usage == self.INCLUDE: try: columns_included = columns_included.intersection(pd.Index(columns)) except TypeError: pass elif usage == self.EXCLUDE: try: columns_excluded = columns_excluded.union(pd.Index(columns)) except TypeError: pass columns_included = columns_included.difference(columns_excluded) return columns_included.intersection(df.columns)
Returns a subset of `df.columns` selected according to `usage`. :param df: dataframe to select columns from :param usage: one of [ALL, INCLUDE, EXCLUDE]; only meaningful when `columns` is also set, otherwise use the default ALL. :param columns: * if `usage` is ALL, this value is not used. * if `usage` is INCLUDE, the result is restricted to the intersection between `columns` and `df.columns` * if `usage` is EXCLUDE, the result is `df.columns` minus these `columns` :return: the `df` columns, included/excluded according to `usage`.
entailment
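A usage sketch for the column selector above; `explorer` is a hypothetical instance of the class that defines `get_columns` and the `INCLUDE`/`EXCLUDE` constants::

    import pandas as pd

    df = pd.DataFrame({'a': [1], 'b': [2], 'c': [3]})
    # explorer is hypothetical; get_columns/EXCLUDE come from the code above.
    cols = explorer.get_columns(df, usage=explorer.EXCLUDE, columns=['b'])
    print(list(cols))   # -> ['a', 'c']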
def clustering_gmm(data, n_clusters, tol=1e-7, min_covar=None, scale='logicle'): """ Find clusters in an array using a Gaussian Mixture Model. Before clustering, `data` can be automatically rescaled as specified by the `scale` argument. Parameters ---------- data : FCSData or array_like Data to cluster. n_clusters : int Number of clusters to find. tol : float, optional Tolerance for convergence. Directly passed to either ``GaussianMixture`` or ``GMM``, depending on ``scikit-learn``'s version. min_covar : float, optional The minimum trace that the initial covariance matrix will have. If ``scikit-learn``'s version is older than 0.18, `min_covar` is also passed directly to ``GMM``. scale : str, optional Rescaling applied to `data` before performing clustering. Can be either ``linear`` (no rescaling), ``log``, or ``logicle``. Returns ------- labels : array Nx1 array with labels for each element in `data`, assigning ``data[i]`` to cluster ``labels[i]``. Notes ----- A Gaussian Mixture Model finds clusters by fitting a linear combination of `n_clusters` Gaussian probability density functions (pdf) to `data` using Expectation Maximization (EM). This method can be fairly sensitive to the initial parameter choice. To generate a reasonable set of initial conditions, `clustering_gmm` first divides all points in `data` into `n_clusters` groups of the same size based on their Euclidean distance to the minimum value. Then, for each group, the 50% samples farther away from the mean are discarded. The mean and covariance are calculated from the remaining samples of each group, and used as initial conditions for the GMM EM algorithm. `clustering_gmm` internally uses a `GaussianMixture` object from the ``scikit-learn`` library (``GMM`` if ``scikit-learn``'s version is lower than 0.18), with full covariance matrices for each cluster. For more information, consult ``scikit-learn``'s documentation. """ # Initialize min_covar parameter # Parameter is initialized differently depending on scikit's version if min_covar is None: if packaging.version.parse(sklearn.__version__) \ >= packaging.version.parse('0.18'): min_covar = 1e-3 else: min_covar = 5e-5 # Copy events before rescaling data = data.copy() # Apply rescaling if scale=='linear': # No rescaling pass elif scale=='log': # Logarithm of zero and negatives is undefined. Therefore, saturate # any non-positives to a small positive value. # The machine epsilon `eps` is the smallest number such that # `1.0 + eps != eps`. For a 64-bit floating point, `eps ~= 1e-15`. data[data < 1e-15] = 1e-15 # Rescale data = np.log10(data) elif scale=='logicle': # Use the logicle transform class in the plot module, and transform # data one channel at a time. for ch in range(data.shape[1]): # We need a transformation from "data value" to "display scale" # units. To do so, we use an inverse logicle transformation. t = FlowCal.plot._LogicleTransform(data=data, channel=ch).inverted() data[:,ch] = t.transform_non_affine(data[:,ch], mask_out_of_range=False) else: raise ValueError("scale {} not supported".format(scale)) ### # Parameter initialization ### weights = np.tile(1.0 / n_clusters, n_clusters) means = [] covars = [] # Calculate distance to minimum value. Then, sort based on this distance. dist = np.sum((data - np.min(data, axis=0))**2., axis=1) sorted_idx = np.argsort(dist) # Expected number of elements per cluster n_per_cluster = data.shape[0]/float(n_clusters) # Get means and covariances per cluster # We will just use a fraction of ``1 - discard_frac`` of the data. 
# Data at the edges that actually corresponds to another cluster can # really mess up the final result. discard_frac = 0.5 for i in range(n_clusters): il = int((i + discard_frac/2)*n_per_cluster) ih = int((i + 1 - discard_frac/2)*n_per_cluster) sorted_idx_cluster = sorted_idx[il:ih] data_cluster = data[sorted_idx_cluster] # Calculate means and covariances means.append(np.mean(data_cluster, axis=0)) if data.shape[1] == 1: cov = np.cov(data_cluster.T).reshape(1,1) else: cov = np.cov(data_cluster.T) # Add small number to diagonal to avoid near-singular covariances cov += np.eye(data.shape[1]) * min_covar covars.append(cov) # Means should be an array means = np.array(means) ### # Run Gaussian Mixture Model Clustering ### if packaging.version.parse(sklearn.__version__) \ >= packaging.version.parse('0.18'): # GaussianMixture uses precisions, the inverse of covariances. # To get the inverse, we solve the linear equation C*P = I. We also # use the fact that C is positive definite. precisions = [scipy.linalg.solve(c, np.eye(c.shape[0]), assume_a='pos') for c in covars] precisions = np.array(precisions) # Initialize GaussianMixture object gmm = GaussianMixture(n_components=n_clusters, tol=tol, covariance_type='full', weights_init=weights, means_init=means, precisions_init=precisions, max_iter=500) else: # Initialize GMM object gmm = GMM(n_components=n_clusters, tol=tol, min_covar=min_covar, covariance_type='full', params='mc', init_params='') # Set initial parameters gmm.weight_ = weights gmm.means_ = means gmm.covars_ = covars # Fit gmm.fit(data) # Get labels by sampling from the responsibilities # This avoids the complete elimination of a cluster if two or more # clusters have very similar means. resp = gmm.predict_proba(data) labels = [np.random.choice(range(n_clusters), p=ri) for ri in resp] return labels
Find clusters in an array using a Gaussian Mixture Model. Before clustering, `data` can be automatically rescaled as specified by the `scale` argument. Parameters ---------- data : FCSData or array_like Data to cluster. n_clusters : int Number of clusters to find. tol : float, optional Tolerance for convergence. Directly passed to either ``GaussianMixture`` or ``GMM``, depending on ``scikit-learn``'s version. min_covar : float, optional The minimum trace that the initial covariance matrix will have. If ``scikit-learn``'s version is older than 0.18, `min_covar` is also passed directly to ``GMM``. scale : str, optional Rescaling applied to `data` before performing clustering. Can be either ``linear`` (no rescaling), ``log``, or ``logicle``. Returns ------- labels : array Nx1 array with labels for each element in `data`, assigning ``data[i]`` to cluster ``labels[i]``. Notes ----- A Gaussian Mixture Model finds clusters by fitting a linear combination of `n_clusters` Gaussian probability density functions (pdf) to `data` using Expectation Maximization (EM). This method can be fairly sensitive to the initial parameter choice. To generate a reasonable set of initial conditions, `clustering_gmm` first divides all points in `data` into `n_clusters` groups of the same size based on their Euclidean distance to the minimum value. Then, for each group, the 50% samples farther away from the mean are discarded. The mean and covariance are calculated from the remaining samples of each group, and used as initial conditions for the GMM EM algorithm. `clustering_gmm` internally uses a `GaussianMixture` object from the ``scikit-learn`` library (``GMM`` if ``scikit-learn``'s version is lower than 0.18), with full covariance matrices for each cluster. For more information, consult ``scikit-learn``'s documentation.
entailment
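A quick smoke test of `clustering_gmm` on synthetic data, assuming the function above is importable in scope; `scale='linear'` skips the FCS-specific rescaling so a plain NumPy array works::

    import numpy as np

    rng = np.random.default_rng(0)
    data = np.vstack([rng.normal(0, 1, size=(500, 2)),
                      rng.normal(10, 1, size=(500, 2))])  # two separated blobs
    labels = clustering_gmm(data, n_clusters=2, scale='linear')
    print(len(set(labels)))   # -> 2 clusters recovered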
def selection_std(populations, low=None, high=None, n_std_low=2.5, n_std_high=2.5, scale='logicle'): """ Select populations if most of their elements are between two values. This function selects populations from `populations` if their means are more than `n_std_low` standard deviations greater than `low` and `n_std_high` standard deviations lower than `high`. Optionally, all elements in `populations` can be rescaled as specified by the `scale` argument before calculating means and standard deviations. Parameters ---------- populations : list of 1D arrays or 1-channel FCSData objects Populations to select or discard. low, high : int or float Low and high thresholds. Required if the elements in `populations` are numpy arrays. If not specified, and the elements in `populations` are FCSData objects, use 1.5% and 98.5% of the range in ``populations[0].range``. n_std_low, n_std_high : float, optional Number of standard deviations from `low` and `high`, respectively, that a population's mean has to be closer than to be discarded. scale : str, optional Rescaling applied to `populations` before calculating means and standard deviations. Can be either ``linear`` (no rescaling), ``log``, or ``logicle``. Returns ------- selected_mask : boolean array Flags indicating whether a population has been selected. """ # Generate scaling functions if scale == 'linear': # Identity function sf = lambda x: x elif scale == 'log': sf = np.log10 elif scale == 'logicle': # We need a transformation from "data value" to "display scale" # units. To do so, we use an inverse logicle transformation. t = FlowCal.plot._LogicleTransform(data=populations[0], channel=0).inverted() sf = lambda x: t.transform_non_affine(x, mask_out_of_range=False) else: raise ValueError("scale {} not supported".format(scale)) # If thresholds were provided, apply scaling function. Else, obtain and # rescale thresholds from range. if low is None: if hasattr(populations[0], 'hist_bins'): # Obtain default thresholds from range r = populations[0].range(channels=0) # If using log scale and the lower limit is non-positive, change to # a very small positive number. # The machine epsilon `eps` is the smallest number such that # `1.0 + eps != eps`. For a 64-bit floating point, `eps ~= 1e-15`. if scale == 'log' and r[0] <= 0: r[0] = 1e-15 low = sf(r[0]) + 0.015*(sf(r[1]) - sf(r[0])) else: raise TypeError("argument 'low' not specified") else: low = sf(low) if high is None: if hasattr(populations[0], 'hist_bins'): # Obtain default thresholds from range r = populations[0].range(channels=0) # If using log scale and the lower limit is non-positive, change to # a very small positive number. # The machine epsilon `eps` is the smallest number such that # `1.0 + eps != eps`. For a 64-bit floating point, `eps ~= 1e-15`. if scale == 'log' and r[0] <= 0: r[0] = 1e-15 high = sf(r[0]) + 0.985*(sf(r[1]) - sf(r[0])) else: raise TypeError("argument 'high' not specified") else: high = sf(high) # Copy events for i in range(len(populations)): populations[i] = populations[i].copy() # For log scaling, logarithm of zero and negatives is undefined. Therefore, # saturate any non-positives to a small positive value. # The machine epsilon `eps` is the smallest number such that # `1.0 + eps != eps`. For a 64-bit floating point, `eps ~= 1e-15`. 
if scale == 'log': for p in populations: p[p < 1e-15] = 1e-15 # Rescale events for i in range(len(populations)): populations[i] = sf(populations[i]) # Calculate means and standard deviations pop_mean = np.array([FlowCal.stats.mean(p) for p in populations]) pop_std = np.array([FlowCal.stats.std(p) for p in populations]) # Some populations, especially the highest ones when they are near # saturation, tend to aggregate mostly on one bin and give a standard # deviation of almost zero. This is an effect of the finite bin resolution # and probably gives a bad estimate of the standard deviation. We choose # to be conservative and overestimate the standard deviation in these # cases. Therefore, we set the minimum standard deviation to 0.005. min_std = 0.005 pop_std[pop_std < min_std] = min_std # Return populations that don't cross either threshold selected_mask = np.logical_and( (pop_mean - n_std_low*pop_std) > low, (pop_mean + n_std_high*pop_std) < high) return selected_mask
Select populations if most of their elements are between two values. This function selects populations from `populations` if their means are more than `n_std_low` standard deviations greater than `low` and `n_std_high` standard deviations lower than `high`. Optionally, all elements in `populations` can be rescaled as specified by the `scale` argument before calculating means and standard deviations. Parameters ---------- populations : list of 1D arrays or 1-channel FCSData objects Populations to select or discard. low, high : int or float Low and high thresholds. Required if the elements in `populations` are numpy arrays. If not specified, and the elements in `populations` are FCSData objects, use 1.5% and 98.5% of the range in ``populations[0].range``. n_std_low, n_std_high : float, optional Number of standard deviations from `low` and `high`, respectively, that a population's mean has to be closer than to be discarded. scale : str, optional Rescaling applied to `populations` before calculating means and standard deviations. Can be either ``linear`` (no rescaling), ``log``, or ``logicle``. Returns ------- selected_mask : boolean array Flags indicating whether a population has been selected.
entailment
def fit_beads_autofluorescence(fl_rfi, fl_mef): """ Fit a standard curve using a beads model with autofluorescence. Parameters ---------- fl_rfi : array Fluorescence values of bead populations in units of Relative Fluorescence Intensity (RFI). fl_mef : array Fluorescence values of bead populations in MEF units. Returns ------- std_crv : function Standard curve that transforms fluorescence values from RFI to MEF units. This function has the signature ``y = std_crv(x)``, where `x` is some fluorescence value in RFI and `y` is the same fluorescence expressed in MEF units. beads_model : function Fluorescence model of calibration beads. This function has the signature ``y = beads_model(x)``, where `x` is the fluorescence of some bead population in RFI units and `y` is the same fluorescence expressed in MEF units, without autofluorescence. beads_params : array Fitted parameters of the bead fluorescence model: ``[m, b, fl_mef_auto]``. beads_model_str : str String representation of the beads model used. beads_params_names : list of str Names of the parameters in a list, in the same order as they are given in `beads_params`. Notes ----- The following model is used to describe bead fluorescence:: m*log(fl_rfi[i]) + b = log(fl_mef_auto + fl_mef[i]) where ``fl_rfi[i]`` is the fluorescence of bead subpopulation ``i`` in RFI units and ``fl_mef[i]`` is the corresponding fluorescence in MEF units. The model includes 3 parameters: ``m`` (slope), ``b`` (intercept), and ``fl_mef_auto`` (bead autofluorescence). The last term is constrained to be greater or equal to zero. The bead fluorescence model is fit in log space using nonlinear least squares regression. In our experience, fitting in log space weights the residuals more evenly, whereas fitting in linear space vastly overvalues the brighter beads. A standard curve is constructed by solving for ``fl_mef``. As cell samples may not have the same autofluorescence as beads, the bead autofluorescence term (``fl_mef_auto``) is omitted from the standard curve; the user is expected to use an appropriate white cell sample to account for cellular autofluorescence if necessary. The returned standard curve mapping fluorescence in RFI units to MEF units is thus of the following form:: fl_mef = exp(m*log(fl_rfi) + b) This is equivalent to:: fl_mef = exp(b) * (fl_rfi**m) This works for positive ``fl_rfi`` values, but it is undefined for ``fl_rfi < 0`` and non-integer ``m`` (general case). To extend this standard curve to negative values of ``fl_rfi``, we define ``s(fl_rfi)`` to be equal to the standard curve above when ``fl_rfi >= 0``. Next, we require this function to be odd, that is, ``s(fl_rfi) = - s(-fl_rfi)``. This extends the domain to negative ``fl_rfi`` values and results in ``s(fl_rfi) < 0`` for any negative ``fl_rfi``. Finally, we make ``fl_mef = s(fl_rfi)`` our new standard curve. In this way,:: s(fl_rfi) = exp(b) * ( fl_rfi **m), fl_rfi >= 0 - exp(b) * ((-fl_rfi)**m), fl_rfi < 0 This satisfies the definition of an odd function. In addition, ``s(0) = 0``, and ``s(fl_rfi)`` converges to zero when ``fl_rfi -> 0`` from both sides. Therefore, the function is continuous at ``fl_rfi = 0``. The definition of ``s(fl_rfi)`` can be expressed more conveniently as:: s(fl_rfi) = sign(fl_rfi) * exp(b) * (abs(fl_rfi)**m) This is the equation implemented. 
""" # Check that the input data has consistent dimensions if len(fl_rfi) != len(fl_mef): raise ValueError("fl_rfi and fl_mef have different lengths") # Check that we have at least three points if len(fl_rfi) <= 2: raise ValueError("standard curve model requires at least three " "values") # Initialize parameters params = np.zeros(3) # Initial guesses: # 0: slope found by putting a line through the highest two points. # 1: y-intercept found by putting a line through highest two points. # 2: bead autofluorescence initialized using the first point. params[0] = (np.log(fl_mef[-1]) - np.log(fl_mef[-2])) / \ (np.log(fl_rfi[-1]) - np.log(fl_rfi[-2])) params[1] = np.log(fl_mef[-1]) - params[0] * np.log(fl_rfi[-1]) params[2] = np.exp(params[0]*np.log(fl_rfi[0]) + params[1]) - fl_mef[0] # Error function def err_fun(p, x, y): return np.sum((np.log(y + p[2]) - ( p[0] * np.log(x) + p[1] ))**2) # Bead model function def fit_fun(p,x): return np.exp(p[0] * np.log(x) + p[1]) - p[2] # RFI-to-MEF standard curve transformation function def sc_fun(p,x): return np.sign(x) * np.exp(p[1]) * (np.abs(x)**p[0]) # Fit parameters err_par = lambda p: err_fun(p, fl_rfi, fl_mef) res = minimize(err_par, params, bounds=((None, None), (None, None), (0, None)), options = {'gtol': 1e-10, 'ftol': 1e-10}) # Separate parameters beads_params = res.x # Beads model function beads_model = lambda x: fit_fun(beads_params, x) # Standard curve function std_crv = lambda x: sc_fun(beads_params, x) # Model string representation beads_model_str = 'm*log(fl_rfi) + b = log(fl_mef_auto + fl_mef)' # Parameter names beads_params_names = ['m', 'b', 'fl_mef_auto'] return (std_crv, beads_model, beads_params, beads_model_str, beads_params_names)
Fit a standard curve using a beads model with autofluorescence. Parameters ---------- fl_rfi : array Fluorescence values of bead populations in units of Relative Fluorescence Intensity (RFI). fl_mef : array Fluorescence values of bead populations in MEF units. Returns ------- std_crv : function Standard curve that transforms fluorescence values from RFI to MEF units. This function has the signature ``y = std_crv(x)``, where `x` is some fluorescence value in RFI and `y` is the same fluorescence expressed in MEF units. beads_model : function Fluorescence model of calibration beads. This function has the signature ``y = beads_model(x)``, where `x` is the fluorescence of some bead population in RFI units and `y` is the same fluorescence expressed in MEF units, without autofluorescence. beads_params : array Fitted parameters of the bead fluorescence model: ``[m, b, fl_mef_auto]``. beads_model_str : str String representation of the beads model used. beads_params_names : list of str Names of the parameters in a list, in the same order as they are given in `beads_params`. Notes ----- The following model is used to describe bead fluorescence:: m*log(fl_rfi[i]) + b = log(fl_mef_auto + fl_mef[i]) where ``fl_rfi[i]`` is the fluorescence of bead subpopulation ``i`` in RFI units and ``fl_mef[i]`` is the corresponding fluorescence in MEF units. The model includes 3 parameters: ``m`` (slope), ``b`` (intercept), and ``fl_mef_auto`` (bead autofluorescence). The last term is constrained to be greater or equal to zero. The bead fluorescence model is fit in log space using nonlinear least squares regression. In our experience, fitting in log space weights the residuals more evenly, whereas fitting in linear space vastly overvalues the brighter beads. A standard curve is constructed by solving for ``fl_mef``. As cell samples may not have the same autofluorescence as beads, the bead autofluorescence term (``fl_mef_auto``) is omitted from the standard curve; the user is expected to use an appropriate white cell sample to account for cellular autofluorescence if necessary. The returned standard curve mapping fluorescence in RFI units to MEF units is thus of the following form:: fl_mef = exp(m*log(fl_rfi) + b) This is equivalent to:: fl_mef = exp(b) * (fl_rfi**m) This works for positive ``fl_rfi`` values, but it is undefined for ``fl_rfi < 0`` and non-integer ``m`` (general case). To extend this standard curve to negative values of ``fl_rfi``, we define ``s(fl_rfi)`` to be equal to the standard curve above when ``fl_rfi >= 0``. Next, we require this function to be odd, that is, ``s(fl_rfi) = - s(-fl_rfi)``. This extends the domain to negative ``fl_rfi`` values and results in ``s(fl_rfi) < 0`` for any negative ``fl_rfi``. Finally, we make ``fl_mef = s(fl_rfi)`` our new standard curve. In this way,:: s(fl_rfi) = exp(b) * ( fl_rfi **m), fl_rfi >= 0 - exp(b) * ((-fl_rfi)**m), fl_rfi < 0 This satisfies the definition of an odd function. In addition, ``s(0) = 0``, and ``s(fl_rfi)`` converges to zero when ``fl_rfi -> 0`` from both sides. Therefore, the function is continuous at ``fl_rfi = 0``. The definition of ``s(fl_rfi)`` can be expressed more conveniently as:: s(fl_rfi) = sign(fl_rfi) * exp(b) * (abs(fl_rfi)**m) This is the equation implemented.
entailment
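The odd-function extension described in the notes can be checked numerically; for any `m` and `b`, the implemented curve satisfies `s(-x) == -s(x)` and passes through the origin::

    import numpy as np

    m, b = 1.1, 2.0   # illustrative slope and intercept
    s = lambda x: np.sign(x) * np.exp(b) * (np.abs(x) ** m)
    print(s(100.0), s(-100.0), s(0.0))   # s(-x) == -s(x), s(0) == 0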
def plot_standard_curve(fl_rfi, fl_mef, beads_model, std_crv, xscale='linear', yscale='linear', xlim=None, ylim=(1.,1e8)): """ Plot a standard curve with fluorescence of calibration beads. Parameters ---------- fl_rfi : array_like Fluorescence of the calibration beads' subpopulations, in RFI units. fl_mef : array_like Fluorescence of the calibration beads' subpopulations, in MEF units. beads_model : function Fluorescence model of the calibration beads. std_crv : function The standard curve, mapping relative fluorescence (RFI) units to MEF units. Other Parameters ---------------- xscale : str, optional Scale of the x axis, either ``linear`` or ``log``. yscale : str, optional Scale of the y axis, either ``linear`` or ``log``. xlim : tuple, optional Limits for the x axis. ylim : tuple, optional Limits for the y axis. """ # Plot fluorescence of beads populations plt.plot(fl_rfi, fl_mef, 'o', label='Beads', color=standard_curve_colors[0]) # Generate points in x axis to plot beads model and standard curve. if xlim is None: xlim = plt.xlim() if xscale=='linear': xdata = np.linspace(xlim[0], xlim[1], 200) elif xscale=='log': xdata = np.logspace(np.log10(xlim[0]), np.log10(xlim[1]), 200) # Plot beads model and standard curve plt.plot(xdata, beads_model(xdata), label='Beads model', color=standard_curve_colors[1]) plt.plot(xdata, std_crv(xdata), label='Standard curve', color=standard_curve_colors[2]) plt.xscale(xscale) plt.yscale(yscale) plt.xlim(xlim) plt.ylim(ylim) plt.grid(True) plt.legend(loc = 'best')
Plot a standard curve with fluorescence of calibration beads. Parameters ---------- fl_rfi : array_like Fluorescence of the calibration beads' subpopulations, in RFI units. fl_mef : array_like Fluorescence of the calibration beads' subpopulations, in MEF units. beads_model : function Fluorescence model of the calibration beads. std_crv : function The standard curve, mapping relative fluorescence (RFI) units to MEF units. Other Parameters ---------------- xscale : str, optional Scale of the x axis, either ``linear`` or ``log``. yscale : str, optional Scale of the y axis, either ``linear`` or ``log``. xlim : tuple, optional Limits for the x axis. ylim : tuple, optional Limits for the y axis.
entailment
def get_transform_fxn(data_beads, mef_values, mef_channels, clustering_fxn=clustering_gmm, clustering_params={}, clustering_channels=None, statistic_fxn=FlowCal.stats.median, statistic_params={}, selection_fxn=selection_std, selection_params={}, fitting_fxn=fit_beads_autofluorescence, fitting_params={}, verbose=False, plot=False, plot_dir=None, plot_filename=None, full_output=False): """ Get a transformation function to convert flow cytometry data to MEF. Parameters ---------- data_beads : FCSData object Flow cytometry data describing calibration beads. mef_values : sequence of sequences Known MEF values for the calibration bead subpopulations, for each channel specified in `mef_channels`. The innermost sequences must have the same length (the same number of bead subpopulations must exist for each channel). mef_channels : int, or str, or list of int, or list of str Channels for which to generate transformation functions. verbose : bool, optional Flag specifying whether to print information about step completion and warnings. plot : bool, optional Flag specifying whether to produce diagnostic plots. plot_dir : str, optional Directory where to save diagnostics plots. Ignored if `plot` is False. If ``plot==True`` and ``plot_dir is None``, plot without saving. plot_filename : str, optional Name to use for plot files. If None, use ``str(data_beads)``. full_output : bool, optional Flag specifying whether to include intermediate results in the output. If `full_output` is True, the function returns a `MEFOutput` ``namedtuple`` with fields as described below. If `full_output` is False, the function only returns the calculated transformation function. Returns ------- transform_fxn : function Transformation function to convert flow cytometry data from RFI units to MEF. This function has the following signature:: data_mef = transform_fxn(data_rfi, channels) mef_channels : int, or str, or list, only if ``full_output==True`` Channels on which the transformation function has been generated. Directly copied from the `mef_channels` argument. clustering : dict, only if ``full_output==True`` Results of the clustering step. The structure of this dictionary is:: clustering = {"labels": np.array} A description of each ``"key": value`` is given below. "labels" : array Array of length ``N``, where ``N`` is the number of events in `data_beads`. This array contains labels indicating which subpopulation each event has been assigned to by the clustering algorithm. Labels range from ``0`` to ``M - 1``, where ``M`` is the number of MEF values specified, and therefore the number of subpopulations identified by the clustering algorithm. statistic : dict, only if ``full_output==True`` Results of the calculation of bead subpopulations' fluorescence. The structure of this dictionary is:: statistic = {"values": [np.array, ...]} A description of each ``"key": value`` is given below. "values" : list of arrays Each array contains the representative fluorescence values of all subpopulations, for a specific fluorescence channel from `mef_channels`. Therefore, each array has a length equal to the number of subpopulations, and the outer list has as many arrays as the number of channels in `mef_channels`. selection : dict, only if ``full_output==True`` Results of the subpopulation selection step. The structure of this dictionary is:: selection = {"rfi": [np.array, ...], "mef": [np.array, ...]} A description of each ``"key": value`` is given below. 
"rfi" : list of arrays Each array contains the fluorescence values of each selected subpopulation in RFI units, for a specific fluorescence channel from `mef_channels`. The outer list has as many arrays as the number of channels in `mef_channels`. Because the selection step may discard subpopulations, each array has a length less than or equal to the total number of subpopulations. Furthermore, different arrays in this list may not have the same length. However, the length of each array is consistent with the corresponding array in ``selection["mef"]`` (see below). "mef" : list of arrays Each array contains the fluorescence values of each selected subpopulation in MEF units, for a specific fluorescence channel from `mef_channels`. The outer list has as many arrays as the number of channels in `mef_channels`. Because the selection step may discard subpopulations, each array has a length less than or equal to the total number of subpopulations. Furthermore, different arrays in this list may not have the same length. However, the length of each array is consistent with the corresponding array in ``selection["rfi"]`` (see above). fitting : dict, only if ``full_output==True`` Results of the model fitting step. The structure of this dictionary is:: selection = {"std_crv": [func, ...], "beads_model": [func, ...], "beads_params": [np.array, ...], "beads_model_str": [str, ...], "beads_params_names": [[], ...]} A description of each ``"key": value`` is given below. "std_crv" : list of functions Functions encoding the fitted standard curves, for each channel in `mef_channels`. Each element of this list is the ``std_crv`` output of the fitting function (see required signature of the ``fitting_fxn`` optional parameter), after applying it to the MEF and RFI fluorescence values of a specific channel from `mef_channels` . "beads_model" : list of functions Functions encoding the fluorescence model of the calibration beads, for each channel in `mef_channels`. Each element of this list is the ``beads_model`` output of the fitting function (see required signature of the ``fitting_fxn`` optional parameter), after applying it to the MEF and RFI fluorescence values of a specific channel from `mef_channels` . "beads_params" : list of arrays Fitted parameter values of the bead fluorescence model, for each channel in `mef_chanels`. Each element of this list is the ``beads_params`` output of the fitting function (see required signature of the ``fitting_fxn`` optional parameter), after applying it to the MEF and RFI fluorescence values of a specific channel from `mef_channels`. "beads_model_str" : list of str String representation of the bead models used, for each channel in `mef_channels`. Each element of this list is the ``beads_model_str`` output of the fitting function (see required signature of the ``fitting_fxn`` optional parameter), after applying it to the MEF and RFI fluorescence values of a specific channel from `mef_channels` . "beads_params_names" : list of list Names of the parameters given in `beads_params`, for each channel in `mef_channels`. Each element of this list is the ``beads_params_names`` output of the fitting function (see required signature of the ``fitting_fxn`` optional parameter), after applying it to the MEF and RFI fluorescence values of a specific channel from `mef_channels` . Other parameters ---------------- clustering_fxn : function, optional Function used for clustering, or identification of subpopulations. 
Must have the following signature:: labels = clustering_fxn(data, n_clusters, **clustering_params) where `data` is a NxD FCSData object or numpy array, `n_clusters` is the expected number of bead subpopulations, and `labels` is a 1D numpy array of length N, assigning each event in `data` to one subpopulation. clustering_params : dict, optional Additional keyword parameters to pass to `clustering_fxn`. clustering_channels : list, optional Channels used for clustering. If not specified, use `mef_channels`. If more than three channels are specified and `plot` is True, only a 3D scatter plot will be produced using the first three channels. statistic_fxn : function, optional Function used to calculate the representative fluorescence of each subpopulation. Must have the following signature:: s = statistic_fxn(data, **statistic_params) where `data` is a 1D FCSData object or numpy array, and `s` is a float. Statistical functions from numpy, scipy, or FlowCal.stats are valid options. statistic_params : dict, optional Additional keyword parameters to pass to `statistic_fxn`. selection_fxn : function, optional Function to use for bead population selection. Must have the following signature:: selected_mask = selection_fxn(data_list, **selection_params) where `data_list` is a list of FCSData objects, each one containing the events of one population, and `selected_mask` is a boolean array indicating whether the population has been selected (True) or discarded (False). If None, don't use a population selection procedure. selection_params : dict, optional Additional keyword parameters to pass to `selection_fxn`. fitting_fxn : function, optional Function used to fit the beads fluorescence model and obtain a standard curve. Must have the following signature:: std_crv, beads_model, beads_params, \\ beads_model_str, beads_params_names = fitting_fxn( fl_rfi, fl_mef, **fitting_params) where `std_crv` is a function implementing the standard curve, `beads_model` is a function implementing the beads fluorescence model, `beads_params` is an array containing the fitted parameters of the beads model, `beads_model_str` is a string representation of the beads model used, `beads_params_names` is a list with the parameter names in the same order as they are given in `beads_params`, and `fl_rfi` and `fl_mef` are the fluorescence values of the beads in RFI units and MEF units, respectively. Note that the standard curve and the fitted beads model are not necessarily the same. fitting_params : dict, optional Additional keyword parameters to pass to `fitting_fxn`. Notes ----- The steps involved in generating the MEF transformation function are: 1. The individual subpopulations of beads are first identified using a clustering method of choice. Clustering is performed in all specified channels simultaneously. 2. The fluorescence of each subpopulation is calculated, for each channel in `mef_channels`. 3. Some subpopulations are then discarded if they are close to either the minimum or the maximum channel range limits. In addition, if the MEF value of some subpopulation is unknown (represented as a ``np.nan`` in `mef_values`), the whole subpopulation is also discarded. 4. The measured fluorescence of each subpopulation is compared with the known MEF values in `mef_values`, and a standard curve function is generated using the appropriate MEF model. At the end, a transformation function is generated using the calculated standard curves, `mef_channels`, and ``FlowCal.transform.to_mef()``. 
Note that applying the resulting transformation function to other flow cytometry samples only yields correct results if they have been taken at the same settings as the calibration beads, for all channels in `mef_channels`. Examples -------- Here is a simple application of this function: >>> transform_fxn = FlowCal.mef.get_transform_fxn( ... beads_data, ... mef_channels=['FL1', 'FL3'], ... mef_values=[np.array([ 0, 646, 1704, 4827, ... 15991, 47609, 135896, 273006]), ... np.array([ 0, 1614, 4035, 12025, ... 31896, 95682, 353225, 1077421])], ... ) >>> sample_mef = transform_fxn(data=sample_rfi, ... channels=['FL1', 'FL3']) Here, we first generate ``transform_fxn`` from flow cytometry data contained in ``FCSData`` object ``beads_data``, for channels FL1 and FL3, using provided MEF values for each one of these channels. In the next line, we use the resulting transformation function to transform cell sample data from RFI to MEF. More data about intermediate steps can be obtained with the option ``full_output=True``: >>> get_transform_output = FlowCal.mef.get_transform_fxn( ... beads_data, ... mef_channels=['FL1', 'FL3'], ... mef_values=[np.array([ 0, 646, 1704, 4827, ... 15991, 47609, 135896, 273006]), ... np.array([ 0, 1614, 4035, 12025, ... 31896, 95682, 353225, 1077421])], ... full_output=True) In this case, the output ``get_transform_output`` will be a `MEFOutput` ``namedtuple`` similar to the following:: FlowCal.mef.MEFOutput( transform_fxn=<functools.partial object>, mef_channels=['FL1', 'FL3'], clustering={ 'labels' : [7, 2, 2, ... 4, 3, 5] }, statistic={ 'values' : [np.array([ 101, 150, 231, 433, 1241, 3106, 7774, 9306]), np.array([ 3, 30, 71, 204, 704, 2054, 6732, 9912])] }, selection={ 'rfi' : [np.array([ 101, 150, 231, 433, 1241, 3106, 7774]), np.array([ 30, 71, 204, 704, 2054, 6732])], 'mef' : [np.array([ 0, 646, 1704, 4827, 15991, 47609, 135896]), np.array([ 1614, 4035, 12025, 31896, 95682, 353225])] }, fitting={ 'std_crv' : [<function <lambda>>, <function <lambda>>], 'beads_model' : [<function <lambda>>, <function <lambda>>], 'beads_params' : [np.array([ 1.09e0, 2.02e0, 1.15e3]), np.array([9.66e-1, 4.17e0, 6.63e1])], 'beads_model_str' : ['m*log(fl_rfi) + b = log(fl_mef_auto + fl_mef)', 'm*log(fl_rfi) + b = log(fl_mef_auto + fl_mef)'], 'beads_params_names' : [['m', 'b', 'fl_mef_auto'], ['m', 'b', 'fl_mef_auto']] }, ) """ if verbose: prev_precision = np.get_printoptions()['precision'] np.set_printoptions(precision=2) # Create directory if plot is True if plot and plot_dir is not None: if not os.path.exists(plot_dir): os.makedirs(plot_dir) # Default plot filename if plot_filename is None: plot_filename = str(data_beads) # mef_channels and mef_values should be iterables. if hasattr(mef_channels, '__iter__') \ and not isinstance(mef_channels, six.string_types): mef_channels = list(mef_channels) else: mef_channels = [mef_channels] mef_values = [mef_values] # Ensure matching number of `mef_values` for all channels (this implies # that the calibration beads have the same number of subpopulations for # all channels). if not np.all([len(mef_values_channel)==len(mef_values[0]) for mef_values_channel in mef_values]): msg = "innermost sequences of mef_values must have the same length" msg += " (same number of bead subpopulations must exist for each" msg += " channel)" raise ValueError(msg) ### # 1.
Clustering ### # If clustering channels not specified, use channels in mef_channels if clustering_channels is None: clustering_channels = mef_channels # Get number of clusters from number of specified MEF values n_clusters = len(mef_values[0]) # Run clustering function labels = clustering_fxn(data_beads[:, clustering_channels], n_clusters, **clustering_params) # Separate events corresponding to each cluster unique_labels = np.array(list(set(labels))) populations = [data_beads[labels == i] for i in unique_labels] # Sort populations based on distance to the origin population_dist = [np.sum((np.mean(population[:,clustering_channels], axis=0))**2) for population in populations] population_sorted_idx = np.argsort(population_dist) populations = [populations[i] for i in population_sorted_idx] # Print information if verbose: # Calculate and display percentage of events on each population population_count = np.array([population.shape[0] for population in populations]) population_perc = population_count * 100.0 / population_count.sum() # Print information print("Step 1: Clustering") print(" Number of populations to find: {}".format(n_clusters)) print(" Percentage of events in each population:") print(" " + str(population_perc)) # Plot if plot: if plot_dir is not None: savefig = '{}/clustering_{}.png'.format(plot_dir, plot_filename) else: savefig = None # If used one channel for clustering, make histogram if len(clustering_channels) == 1: plt.figure(figsize=(8,4)) FlowCal.plot.hist1d( populations, channel=clustering_channels[0], xscale='logicle', bins=256, alpha=0.75, savefig=savefig) # If used two channels for clustering, make 2D scatter plot elif len(clustering_channels) == 2: plt.figure(figsize=(6,4)) FlowCal.plot.scatter2d( populations, channels=clustering_channels, xscale='logicle', yscale='logicle', savefig=savefig) # If used three channels or more for clustering, make 3D scatter plot # with the first three. elif len(clustering_channels) >= 3: plt.figure(figsize=(8,6)) FlowCal.plot.scatter3d_and_projections( populations, channels=clustering_channels[:3], xscale='logicle', yscale='logicle', zscale='logicle', savefig=savefig) if plot_dir is not None: plt.close() # Initialize lists to acumulate results std_crv_res = [] if full_output: stats_values_res = [] selected_rfi_res = [] selected_mef_res = [] beads_model_res = [] beads_params_res =[] beads_model_str_res =[] beads_params_names_res =[] # Iterate through each mef channel for mef_channel, mef_values_channel in zip(mef_channels, mef_values): populations_channel = [population[:, mef_channel] for population in populations] ### # 2. Calculate statistics in each subpopulation. ### # Calculate statistics stats_values = [statistic_fxn(population, **statistic_params) for population in populations_channel] stats_values = np.array(stats_values) # Accumulate results if full_output: stats_values_res.append(stats_values) # Print information if verbose: print("({}) Step 2: Population Statistic".format(mef_channel)) print(" Fluorescence of each population (RFI):") print(" " + str(stats_values)) ### # 3. 
Select populations to be used for fitting ### # Select populations based on selection_fxn if selection_fxn is not None: selected_mask = selection_fxn( [population for population in populations_channel], **selection_params) else: selected_mask = np.ones(n_clusters, dtype=bool) # Discard values specified as nan in mef_values_channel selected_mask = np.logical_and(selected_mask, ~np.isnan(mef_values_channel)) # Get selected rfi and mef values selected_rfi = stats_values[selected_mask] selected_mef = mef_values_channel[selected_mask] # Accumulate results if full_output: selected_rfi_res.append(selected_rfi) selected_mef_res.append(selected_mef) # Print information if verbose: print("({}) Step 3: Population Selection".format(mef_channel)) print(" {} populations selected.".format(len(selected_rfi))) print(" Fluorescence of selected populations (RFI):") print(" " + str(selected_rfi)) print(" Fluorescence of selected populations (MEF):") print(" " + str(selected_mef)) # Plot if plot: # Get colors for each population. Colors are taken from the default # colormap in FlowCal.plot, if the population has been selected. # Otherwise, the population is displayed in gray. color_levels = np.linspace(0, 1, n_clusters) colors = [FlowCal.plot.cmap_default(level) if selected else (0.6, 0.6, 0.6) for selected, level in zip(selected_mask, color_levels)] # Plot histograms plt.figure(figsize=(8,4)) FlowCal.plot.hist1d(populations, channel=mef_channel, xscale='logicle', bins=256, alpha=0.75, facecolor=colors) # Plot a vertical line for each population, with an x coordinate # corresponding to their statistic value. ylim = plt.ylim() for val, color in zip(stats_values, colors): plt.plot([val, val], [ylim[0], ylim[1]], color=color) plt.ylim(ylim) # Save and close if plot_dir is not None: plt.tight_layout() plt.savefig('{}/populations_{}_{}.png'.format(plot_dir, mef_channel, plot_filename), dpi=FlowCal.plot.savefig_dpi) plt.close() ### # 4. Get standard curve ### # Fit fitting_output = fitting_fxn(selected_rfi, selected_mef, **fitting_params) std_crv = fitting_output[0] beads_model = fitting_output[1] beads_params = fitting_output[2] beads_model_str = fitting_output[3] beads_params_names = fitting_output[4] # Accumulate results std_crv_res.append(std_crv) if full_output: beads_model_res.append(beads_model) beads_params_res.append(beads_params) beads_model_str_res.append(beads_model_str) beads_params_names_res.append(beads_params_names) # Print information if verbose: print("({}) Step 4: Standard Curve Fitting".format(mef_channel)) print(" Parameters of bead fluorescence model:") print(" " + str(beads_params)) # Plot if plot: # Get channel range xlim = populations[0].range(channels=mef_channel) # The plot will be made in log scale. If the lower limit of the # range is zero or less, replace by one or some lower value, such # that the range covers at least five decades. 
if xlim[0] <= 0: xlim[0] = min(1., xlim[1]/1e5) # Plot standard curve plt.figure(figsize=(6,4)) plot_standard_curve(selected_rfi, selected_mef, beads_model, std_crv, xscale='log', yscale='log', xlim=xlim) plt.xlabel('{} (a.u.)'.format(mef_channel)) plt.ylabel('{} (MEF)'.format(mef_channel)) # Save if required if plot_dir is not None: plt.tight_layout() plt.savefig('{}/std_crv_{}_{}.png'.format(plot_dir, mef_channel, plot_filename), dpi=FlowCal.plot.savefig_dpi) plt.close() # Make output transformation function transform_fxn = functools.partial(FlowCal.transform.to_mef, sc_list=std_crv_res, sc_channels=mef_channels) if verbose: np.set_printoptions(precision=prev_precision) if full_output: # Clustering results clustering_res = {} clustering_res['labels'] = labels # Population stats results statistic_res = {} statistic_res['values'] = stats_values_res # Population selection results selection_res = {} selection_res['rfi'] = selected_rfi_res selection_res['mef'] = selected_mef_res # Fitting results fitting_res = {} fitting_res['std_crv'] = std_crv_res fitting_res['beads_model'] = beads_model_res fitting_res['beads_params'] = beads_params_res fitting_res['beads_model_str'] = beads_model_str_res fitting_res['beads_params_names'] = beads_params_names_res # Make namedtuple fields = ['mef_channels', 'transform_fxn', 'clustering', 'statistic', 'selection', 'fitting'] MEFOutput = collections.namedtuple('MEFOutput', fields) out = MEFOutput(mef_channels=mef_channels, transform_fxn=transform_fxn, clustering=clustering_res, statistic=statistic_res, selection=selection_res, fitting=fitting_res) return out else: return transform_fxn
Get a transformation function to convert flow cytometry data to MEF. Parameters ---------- data_beads : FCSData object Flow cytometry data describing calibration beads. mef_values : sequence of sequences Known MEF values for the calibration bead subpopulations, for each channel specified in `mef_channels`. The innermost sequences must have the same length (the same number of bead subpopulations must exist for each channel). mef_channels : int, or str, or list of int, or list of str Channels for which to generate transformation functions. verbose : bool, optional Flag specifying whether to print information about step completion and warnings. plot : bool, optional Flag specifying whether to produce diagnostic plots. plot_dir : str, optional Directory where to save diagnostics plots. Ignored if `plot` is False. If ``plot==True`` and ``plot_dir is None``, plot without saving. plot_filename : str, optional Name to use for plot files. If None, use ``str(data_beads)``. full_output : bool, optional Flag specifying whether to include intermediate results in the output. If `full_output` is True, the function returns a `MEFOutput` ``namedtuple`` with fields as described below. If `full_output` is False, the function only returns the calculated transformation function. Returns ------- transform_fxn : function Transformation function to convert flow cytometry data from RFI units to MEF. This function has the following signature:: data_mef = transform_fxn(data_rfi, channels) mef_channels : int, or str, or list, only if ``full_output==True`` Channels on which the transformation function has been generated. Directly copied from the `mef_channels` argument. clustering : dict, only if ``full_output==True`` Results of the clustering step. The structure of this dictionary is:: clustering = {"labels": np.array} A description of each ``"key": value`` is given below. "labels" : array Array of length ``N``, where ``N`` is the number of events in `data_beads`. This array contains labels indicating which subpopulation each event has been assigned to by the clustering algorithm. Labels range from ``0`` to ``M - 1``, where ``M`` is the number of MEF values specified, and therefore the number of subpopulations identified by the clustering algorithm. statistic : dict, only if ``full_output==True`` Results of the calculation of bead subpopulations' fluorescence. The structure of this dictionary is:: statistic = {"values": [np.array, ...]} A description of each ``"key": value`` is given below. "values" : list of arrays Each array contains the representative fluorescence values of all subpopulations, for a specific fluorescence channel from `mef_channels`. Therefore, each array has a length equal to the number of subpopulations, and the outer list has as many arrays as the number of channels in `mef_channels`. selection : dict, only if ``full_output==True`` Results of the subpopulation selection step. The structure of this dictionary is:: selection = {"rfi": [np.array, ...], "mef": [np.array, ...]} A description of each ``"key": value`` is given below. "rfi" : list of arrays Each array contains the fluorescence values of each selected subpopulation in RFI units, for a specific fluorescence channel from `mef_channels`. The outer list has as many arrays as the number of channels in `mef_channels`. Because the selection step may discard subpopulations, each array has a length less than or equal to the total number of subpopulations. Furthermore, different arrays in this list may not have the same length. 
However, the length of each array is consistent with the corresponding array in ``selection["mef"]`` (see below). "mef" : list of arrays Each array contains the fluorescence values of each selected subpopulation in MEF units, for a specific fluorescence channel from `mef_channels`. The outer list has as many arrays as the number of channels in `mef_channels`. Because the selection step may discard subpopulations, each array has a length less than or equal to the total number of subpopulations. Furthermore, different arrays in this list may not have the same length. However, the length of each array is consistent with the corresponding array in ``selection["rfi"]`` (see above). fitting : dict, only if ``full_output==True`` Results of the model fitting step. The structure of this dictionary is:: fitting = {"std_crv": [func, ...], "beads_model": [func, ...], "beads_params": [np.array, ...], "beads_model_str": [str, ...], "beads_params_names": [[], ...]} A description of each ``"key": value`` is given below. "std_crv" : list of functions Functions encoding the fitted standard curves, for each channel in `mef_channels`. Each element of this list is the ``std_crv`` output of the fitting function (see required signature of the ``fitting_fxn`` optional parameter), after applying it to the MEF and RFI fluorescence values of a specific channel from `mef_channels`. "beads_model" : list of functions Functions encoding the fluorescence model of the calibration beads, for each channel in `mef_channels`. Each element of this list is the ``beads_model`` output of the fitting function (see required signature of the ``fitting_fxn`` optional parameter), after applying it to the MEF and RFI fluorescence values of a specific channel from `mef_channels`. "beads_params" : list of arrays Fitted parameter values of the bead fluorescence model, for each channel in `mef_channels`. Each element of this list is the ``beads_params`` output of the fitting function (see required signature of the ``fitting_fxn`` optional parameter), after applying it to the MEF and RFI fluorescence values of a specific channel from `mef_channels`. "beads_model_str" : list of str String representation of the bead models used, for each channel in `mef_channels`. Each element of this list is the ``beads_model_str`` output of the fitting function (see required signature of the ``fitting_fxn`` optional parameter), after applying it to the MEF and RFI fluorescence values of a specific channel from `mef_channels`. "beads_params_names" : list of list Names of the parameters given in `beads_params`, for each channel in `mef_channels`. Each element of this list is the ``beads_params_names`` output of the fitting function (see required signature of the ``fitting_fxn`` optional parameter), after applying it to the MEF and RFI fluorescence values of a specific channel from `mef_channels`. Other parameters ---------------- clustering_fxn : function, optional Function used for clustering, or identification of subpopulations. Must have the following signature:: labels = clustering_fxn(data, n_clusters, **clustering_params) where `data` is a NxD FCSData object or numpy array, `n_clusters` is the expected number of bead subpopulations, and `labels` is a 1D numpy array of length N, assigning each event in `data` to one subpopulation. clustering_params : dict, optional Additional keyword parameters to pass to `clustering_fxn`. clustering_channels : list, optional Channels used for clustering. If not specified, use `mef_channels`.
If more than three channels are specified and `plot` is True, only a 3D scatter plot will be produced using the first three channels. statistic_fxn : function, optional Function used to calculate the representative fluorescence of each subpopulation. Must have the following signature:: s = statistic_fxn(data, **statistic_params) where `data` is a 1D FCSData object or numpy array, and `s` is a float. Statistical functions from numpy, scipy, or FlowCal.stats are valid options. statistic_params : dict, optional Additional keyword parameters to pass to `statistic_fxn`. selection_fxn : function, optional Function to use for bead population selection. Must have the following signature:: selected_mask = selection_fxn(data_list, **selection_params) where `data_list` is a list of FCSData objects, each one containing the events of one population, and `selected_mask` is a boolean array indicating whether the population has been selected (True) or discarded (False). If None, don't use a population selection procedure. selection_params : dict, optional Additional keyword parameters to pass to `selection_fxn`. fitting_fxn : function, optional Function used to fit the beads fluorescence model and obtain a standard curve. Must have the following signature:: std_crv, beads_model, beads_params, \\ beads_model_str, beads_params_names = fitting_fxn( fl_rfi, fl_mef, **fitting_params) where `std_crv` is a function implementing the standard curve, `beads_model` is a function implementing the beads fluorescence model, `beads_params` is an array containing the fitted parameters of the beads model, `beads_model_str` is a string representation of the beads model used, `beads_params_names` is a list with the parameter names in the same order as they are given in `beads_params`, and `fl_rfi` and `fl_mef` are the fluorescence values of the beads in RFI units and MEF units, respectively. Note that the standard curve and the fitted beads model are not necessarily the same. fitting_params : dict, optional Additional keyword parameters to pass to `fitting_fxn`. Notes ----- The steps involved in generating the MEF transformation function are: 1. The individual subpopulations of beads are first identified using a clustering method of choice. Clustering is performed in all specified channels simultaneously. 2. The fluorescence of each subpopulation is calculated, for each channel in `mef_channels`. 3. Some subpopulations are then discarded if they are close to either the minimum or the maximum channel range limits. In addition, if the MEF value of some subpopulation is unknown (represented as a ``np.nan`` in `mef_values`), the whole subpopulation is also discarded. 4. The measured fluorescence of each subpopulation is compared with the known MEF values in `mef_values`, and a standard curve function is generated using the appropriate MEF model. At the end, a transformation function is generated using the calculated standard curves, `mef_channels`, and ``FlowCal.transform.to_mef()``. Note that applying the resulting transformation function to other flow cytometry samples only yields correct results if they have been taken at the same settings as the calibration beads, for all channels in `mef_channels`. Examples -------- Here is a simple application of this function: >>> transform_fxn = FlowCal.mef.get_transform_fxn( ... beads_data, ... mef_channels=['FL1', 'FL3'], ... mef_values=[np.array([ 0, 646, 1704, 4827, ... 15991, 47609, 135896, 273006]), ... np.array([ 0, 1614, 4035, 12025, ... 31896, 95682, 353225, 1077421])], ... ) >>> sample_mef = transform_fxn(data=sample_rfi, ... channels=['FL1', 'FL3']) Here, we first generate ``transform_fxn`` from flow cytometry data contained in ``FCSData`` object ``beads_data``, for channels FL1 and FL3, using provided MEF values for each one of these channels. In the next line, we use the resulting transformation function to transform cell sample data from RFI to MEF. More data about intermediate steps can be obtained with the option ``full_output=True``: >>> get_transform_output = FlowCal.mef.get_transform_fxn( ... beads_data, ... mef_channels=['FL1', 'FL3'], ... mef_values=[np.array([ 0, 646, 1704, 4827, ... 15991, 47609, 135896, 273006]), ... np.array([ 0, 1614, 4035, 12025, ... 31896, 95682, 353225, 1077421])], ... full_output=True) In this case, the output ``get_transform_output`` will be a `MEFOutput` ``namedtuple`` similar to the following:: FlowCal.mef.MEFOutput( transform_fxn=<functools.partial object>, mef_channels=['FL1', 'FL3'], clustering={ 'labels' : [7, 2, 2, ... 4, 3, 5] }, statistic={ 'values' : [np.array([ 101, 150, 231, 433, 1241, 3106, 7774, 9306]), np.array([ 3, 30, 71, 204, 704, 2054, 6732, 9912])] }, selection={ 'rfi' : [np.array([ 101, 150, 231, 433, 1241, 3106, 7774]), np.array([ 30, 71, 204, 704, 2054, 6732])], 'mef' : [np.array([ 0, 646, 1704, 4827, 15991, 47609, 135896]), np.array([ 1614, 4035, 12025, 31896, 95682, 353225])] }, fitting={ 'std_crv' : [<function <lambda>>, <function <lambda>>], 'beads_model' : [<function <lambda>>, <function <lambda>>], 'beads_params' : [np.array([ 1.09e0, 2.02e0, 1.15e3]), np.array([9.66e-1, 4.17e0, 6.63e1])], 'beads_model_str' : ['m*log(fl_rfi) + b = log(fl_mef_auto + fl_mef)', 'm*log(fl_rfi) + b = log(fl_mef_auto + fl_mef)'], 'beads_params_names' : [['m', 'b', 'fl_mef_auto'], ['m', 'b', 'fl_mef_auto']] }, )
entailment
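The ``fitting_fxn`` hook described above only has to honor the documented five-output signature. As an illustration, here is a minimal sketch of a drop-in fitting function that fits a straight line in log-log space with ``numpy.polyfit``. The name ``linear_log_fit`` and its internals are hypothetical (not part of FlowCal), and the sketch assumes all selected RFI and MEF values are strictly positive; FlowCal's default fitting function is more elaborate and also models bead autofluorescence.

import numpy as np

def linear_log_fit(fl_rfi, fl_mef, **fitting_params):
    # Least-squares fit of log10(fl_mef) = m*log10(fl_rfi) + b.
    m, b = np.polyfit(np.log10(fl_rfi), np.log10(fl_mef), deg=1)

    # Standard curve: maps RFI values to MEF values.
    def std_crv(x):
        return 10**(m * np.log10(x) + b)

    # In this simplified sketch the beads model and the standard curve
    # coincide; FlowCal's default model distinguishes them because it
    # includes a bead autofluorescence term.
    beads_model = std_crv
    beads_params = np.array([m, b])
    beads_model_str = "log(fl_mef) = m*log(fl_rfi) + b"
    beads_params_names = ['m', 'b']

    return (std_crv, beads_model, beads_params,
            beads_model_str, beads_params_names)

Such a function would be passed as ``fitting_fxn=linear_log_fit`` to ``get_transform_fxn()``.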
def read_table(filename, sheetname, index_col=None): """ Return the contents of an Excel table as a pandas DataFrame. Parameters ---------- filename : str Name of the Excel file to read. sheetname : str or int Name or index of the sheet inside the Excel file to read. index_col : str, optional Column name or index to be used as row labels of the DataFrame. If None, the default index will be used. Returns ------- table : DataFrame A DataFrame containing the data in the specified Excel table. If `index_col` is not None, rows whose `index_col` field is empty will not be present in `table`. Raises ------ ValueError If `index_col` is specified and two rows contain the same `index_col` field. """ # Catch sheetname as list or None if sheetname is None or \ (hasattr(sheetname, '__iter__') \ and not isinstance(sheetname, six.string_types)): raise TypeError("sheetname should specify a single sheet") # Load Excel table using pandas. # The parameter specifying the sheet name differs slightly depending on # pandas' version. if packaging.version.parse(pd.__version__) \ < packaging.version.parse('0.21'): table = pd.read_excel(filename, sheetname=sheetname, index_col=index_col) else: table = pd.read_excel(filename, sheet_name=sheetname, index_col=index_col) # Eliminate rows whose index is null if index_col is not None: table = table[pd.notnull(table.index)] # Check for duplicated rows if table.index.has_duplicates: raise ValueError("sheet {} on file {} contains duplicated values " "for column {}".format(sheetname, filename, index_col)) return table
Return the contents of an Excel table as a pandas DataFrame. Parameters ---------- filename : str Name of the Excel file to read. sheetname : str or int Name or index of the sheet inside the Excel file to read. index_col : str, optional Column name or index to be used as row labels of the DataFrame. If None, the default index will be used. Returns ------- table : DataFrame A DataFrame containing the data in the specified Excel table. If `index_col` is not None, rows whose `index_col` field is empty will not be present in `table`. Raises ------ ValueError If `index_col` is specified and two rows contain the same `index_col` field.
entailment
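Since ``read_table`` is a thin wrapper around ``pandas.read_excel`` with duplicate checking, a typical call reads one sheet keyed by an ID column. A usage sketch follows; the file name, sheet names, and column name are hypothetical.

instruments_table = read_table("experiment.xlsx",
                               sheetname="Instruments",
                               index_col="ID")
beads_table = read_table("experiment.xlsx",
                         sheetname="Beads",
                         index_col="ID")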
def write_workbook(filename, table_list, column_width=None): """ Write an Excel workbook from a list of tables. Parameters ---------- filename : str Name of the Excel file to write. table_list : list of ``(str, DataFrame)`` tuples Tables to be saved as individual sheets in the Excel file. Each tuple contains two values: the name of the sheet to be saved as a string, and the contents of the table as a DataFrame. column_width : int, optional The column width to use when saving the spreadsheet. If None, calculate width automatically from the maximum number of characters in each column. """ # Modify default header format # Pandas' default header format is bold text with thin borders. Here we # use bold text only, without borders. # The header style structure is in pd.core.format in pandas<=0.18.0, # pd.formats.format in 0.18.1<=pandas<0.20, and pd.io.formats.excel in # pandas>=0.20. # Also, wrap in a try-except block in case the style structure is not # found. format_module_found = False try: # Get format module if packaging.version.parse(pd.__version__) \ <= packaging.version.parse('0.18'): format_module = pd.core.format elif packaging.version.parse(pd.__version__) \ < packaging.version.parse('0.20'): format_module = pd.formats.format else: import pandas.io.formats.excel as format_module # Save previous style, replace, and indicate that previous style should # be restored at the end old_header_style = format_module.header_style format_module.header_style = {"font": {"bold": True}} format_module_found = True except AttributeError: pass # Generate output writer object writer = pd.ExcelWriter(filename, engine='xlsxwriter') # Write tables for sheet_name, df in table_list: # Convert index names to regular columns df = df.reset_index() # Write to an Excel sheet df.to_excel(writer, sheet_name=sheet_name, index=False) # Set column width if column_width is None: for i, (col_name, column) in enumerate(six.iteritems(df)): # Get the maximum number of characters in a column max_chars_col = column.astype(str).str.len().max() max_chars_col = max(len(col_name), max_chars_col) # Write width writer.sheets[sheet_name].set_column( i, i, width=1.*max_chars_col) else: writer.sheets[sheet_name].set_column( 0, len(df.columns) - 1, width=column_width) # Write Excel file writer.save() # Restore previous header format if format_module_found: format_module.header_style = old_header_style
Write an Excel workbook from a list of tables. Parameters ---------- filename : str Name of the Excel file to write. table_list : list of ``(str, DataFrame)`` tuples Tables to be saved as individual sheets in the Excel file. Each tuple contains two values: the name of the sheet to be saved as a string, and the contents of the table as a DataFrame. column_width : int, optional The column width to use when saving the spreadsheet. If None, calculate width automatically from the maximum number of characters in each column.
entailment
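A brief usage sketch for ``write_workbook``: each sheet is a ``(name, DataFrame)`` pair, and the function requires the ``xlsxwriter`` engine to be installed. The table contents below are made up.

import pandas as pd

summary = pd.DataFrame({"Sample": ["S1", "S2"],
                        "FL1 Mean": [1041.2, 980.7]}).set_index("Sample")
write_workbook("output.xlsx", [("Samples", summary)], column_width=15)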
def process_beads_table(beads_table, instruments_table, base_dir=".", verbose=False, plot=False, plot_dir=None, full_output=False, get_transform_fxn_kwargs={}): """ Process calibration bead samples, as specified by an input table. This function processes the entries in `beads_table`. For each row, the function does the following: - Load the FCS file specified in the field "File Path". - Transform the forward scatter/side scatter and fluorescence channels to RFI. - Remove the first 250 and last 100 events. - Remove saturated events in the forward scatter and side scatter channels. - Apply density gating on the forward scatter/side scatter channels. - Generate a standard curve transformation function, for each fluorescence channel in which the associated MEF values are specified. - Generate forward/side scatter density plots and fluorescence histograms, and plots of the clustering and fitting steps of standard curve generation, if `plot` is True. Names of forward/side scatter and fluorescence channels are taken from `instruments_table`. Parameters ---------- beads_table : DataFrame Table specifying beads samples to be processed. For more information about the fields required in this table, please consult the module's documentation. instruments_table : DataFrame Table specifying instruments. For more information about the fields required in this table, please consult the module's documentation. base_dir : str, optional Directory from where all the other paths are specified. verbose : bool, optional Whether to print information messages during the execution of this function. plot : bool, optional Whether to generate and save density/histogram plots of each sample, and each beads sample. plot_dir : str, optional Directory relative to `base_dir` into which plots are saved. If `plot` is False, this parameter is ignored. If ``plot==True`` and ``plot_dir is None``, plot without saving. full_output : bool, optional Flag indicating whether to include an additional output, containing intermediate results from the generation of the MEF transformation functions. get_transform_fxn_kwargs : dict, optional Additional parameters passed directly to internal ``mef.get_transform_fxn()`` function call. Returns ------- beads_samples : list of FCSData objects A list of processed, gated, and transformed samples, as specified in `beads_table`, in the order of ``beads_table.index``. mef_transform_fxns : OrderedDict A dictionary of MEF transformation functions, indexed by ``beads_table.index``. mef_outputs : list, only if ``full_output==True`` A list with intermediate results of the generation of the MEF transformation functions. For every entry in `beads_table`, :func:`FlowCal.mef.get_transform_fxn()` is called on the corresponding processed and gated beads sample with ``full_output=True``, and the full output (a `MEFOutput` ``namedtuple``) is appended to `mef_outputs`. Please refer to the output section of :func:`FlowCal.mef.get_transform_fxn()`'s documentation for more information.
""" # Initialize output variables beads_samples = [] mef_transform_fxns = collections.OrderedDict() mef_outputs = [] # Return empty structures if beads table is empty if beads_table.empty: if full_output: return beads_samples, mef_transform_fxns, mef_outputs else: return beads_samples, mef_transform_fxns if verbose: msg = "Processing Beads table ({} entries)".format(len(beads_table)) print("") print(msg) print("="*len(msg)) # Check that plotting directory exist, create otherwise if plot and plot_dir is not None \ and not os.path.exists(os.path.join(base_dir, plot_dir)): os.makedirs(os.path.join(base_dir, plot_dir)) # Extract header and channel names for which MEF values are specified. headers = list(beads_table.columns) mef_headers_all = [h for h in headers if re_mef_values.match(h)] mef_channels_all = [re_mef_values.match(h).group(1) for h in mef_headers_all] # Iterate through table # We will look for a ExcelUIException on each iteration. If an exception # is caught, it will be stored in beads_samples. for beads_id, beads_row in beads_table.iterrows(): try: ### # Instrument Data ### # Get the appropriate row in the instrument table instruments_row = instruments_table.loc[beads_row['Instrument ID']] # Scatter channels: Foward Scatter, Side Scatter sc_channels = [instruments_row['Forward Scatter Channel'], instruments_row['Side Scatter Channel'], ] # Fluorescence channels is a comma-separated list fl_channels = instruments_row['Fluorescence Channels'].split(',') fl_channels = [s.strip() for s in fl_channels] ### # Beads Data ### if verbose: print("\nBeads ID {}...".format(beads_id)) print("Loading file \"{}\"...".format(beads_row['File Path'])) # Attempt to open file filename = os.path.join(base_dir, beads_row['File Path']) try: beads_sample = FlowCal.io.FCSData(filename) except IOError: raise ExcelUIException("file \"{}\" not found".format( beads_row['File Path'])) # Check that the number of events is greater than 400 if beads_sample.shape[0] < 400: raise ExcelUIException("number of events is lower than 400") ### # Transform ### if verbose: print("Performing data transformation...") # Transform FSC/SSC and fluorescence channels to linear scale beads_sample = FlowCal.transform.to_rfi(beads_sample, sc_channels + fl_channels) # Parse clustering channels data cluster_channels = beads_row['Clustering Channels'].split(',') cluster_channels = [cc.strip() for cc in cluster_channels] ### # Gate ### if verbose: print("Performing gating...") # Remove first and last events. Transients in fluidics can make the # first few and last events slightly different from the rest. beads_sample_gated = FlowCal.gate.start_end(beads_sample, num_start=250, num_end=100) # Remove saturating events in forward/side scatter, if the FCS data # type is integer. The value of a saturating event is taken # automatically from `beads_sample_gated.range`. 
if beads_sample_gated.data_type == 'I': beads_sample_gated = FlowCal.gate.high_low( beads_sample_gated, channels=sc_channels) # Density gating try: beads_sample_gated, __, gate_contour = FlowCal.gate.density2d( data=beads_sample_gated, channels=sc_channels, gate_fraction=beads_row['Gate Fraction'], xscale='logicle', yscale='logicle', sigma=5., full_output=True) except ValueError as ve: raise ExcelUIException(ve.message) # Plot forward/side scatter density plot and fluorescence histograms if plot: if verbose: print("Plotting density plot and histogram...") # Density plot parameters density_params = {} density_params['mode'] = 'scatter' density_params["title"] = "{} ({:.1f}% retained)".format( beads_id, beads_sample_gated.shape[0] * 100. / beads_sample.shape[0]) density_params['xscale'] = 'logicle' density_params['yscale'] = 'logicle' # Beads have a tight distribution, so axis limits will be set # from 0.75 decades below the 5th percentile to 0.75 decades # above the 95th percentile. density_params['xlim'] = \ (np.percentile(beads_sample_gated[:, sc_channels[0]], 5) / (10**0.75), np.percentile(beads_sample_gated[:, sc_channels[0]], 95) * (10**0.75), ) density_params['ylim'] = \ (np.percentile(beads_sample_gated[:, sc_channels[1]], 5) / (10**0.75), np.percentile(beads_sample_gated[:, sc_channels[1]], 95) * (10**0.75), ) # Beads have a tight distribution, so less smoothing should be # applied for visualization density_params['sigma'] = 5. # Histogram plot parameters hist_params = {'xscale': 'logicle'} # Plot if plot_dir is not None: figname = os.path.join( base_dir, plot_dir, "density_hist_{}.png".format(beads_id)) else: figname = None plt.figure(figsize=(6,4)) FlowCal.plot.density_and_hist( beads_sample, beads_sample_gated, density_channels=sc_channels, hist_channels=cluster_channels, gate_contour=gate_contour, density_params=density_params, hist_params=hist_params, savefig=figname) ### # Process MEF values ### # For each fluorescence channel, check whether a list of known MEF # values of the bead subpopulations is provided in `beads_row`. This # involves checking that a column named "[channel] MEF Values" # exists and is not empty. If so, store the name of the channel in # `mef_channels`, and the specified MEF values in `mef_values`. ### mef_values = [] mef_channels = [] for fl_channel in fl_channels: if fl_channel in mef_channels_all: # Get header from channel name mef_header = \ mef_headers_all[mef_channels_all.index(fl_channel)] # Extract text. If empty, ignore. mef_str = beads_row[mef_header] if pd.isnull(mef_str): continue # Save channel name mef_channels.append(fl_channel) # Parse list of values mef = mef_str.split(',') mef = [int(e) if e.strip().isdigit() else np.nan for e in mef] mef_values.append(mef) # Ensure matching number of `mef_values` for all channels (this # implies that the calibration beads have the same number of # subpopulations for all channels). if mef_values: if not np.all([len(mef_values_channel)==len(mef_values[0]) for mef_values_channel in mef_values]): raise ExcelUIException("Must specify the same number of" + " MEF Values for each channel." + " Use 'None' to instruct FlowCal" + " to ignore a detected" + " subpopulation.") mef_values = np.array(mef_values) # Obtain standard curve transformation if mef_channels: if verbose: if len(mef_channels) == 1: print("Calculating standard curve for channel {}..." \ .format(mef_channels[0])) else: print("Calculating standard curve for channels {}..." 
\ .format(", ".join(mef_channels))) mef_output = FlowCal.mef.get_transform_fxn( beads_sample_gated, mef_values, mef_channels=mef_channels, clustering_channels=cluster_channels, verbose=False, plot=plot, plot_filename=beads_id, plot_dir=os.path.join(base_dir, plot_dir) \ if plot_dir is not None else None, full_output=full_output, **get_transform_fxn_kwargs) if full_output: mef_transform_fxn = mef_output.transform_fxn else: mef_transform_fxn = mef_output else: mef_transform_fxn = None mef_output = None except ExcelUIException as e: # Print Exception message if verbose: print("ERROR: {}".format(str(e))) # Append exception to beads_samples array, and None to everything # else beads_samples.append(e) mef_transform_fxns[beads_id] = None if full_output: mef_outputs.append(None) else: # If no errors were found, store results beads_samples.append(beads_sample_gated) mef_transform_fxns[beads_id] = mef_transform_fxn if full_output: mef_outputs.append(mef_output) if full_output: return beads_samples, mef_transform_fxns, mef_outputs else: return beads_samples, mef_transform_fxns
Process calibration bead samples, as specified by an input table. This function processes the entries in `beads_table`. For each row, the function does the following: - Load the FCS file specified in the field "File Path". - Transform the forward scatter/side scatter and fluorescence channels to RFI. - Remove the first 250 and last 100 events. - Remove saturated events in the forward scatter and side scatter channels. - Apply density gating on the forward scatter/side scatter channels. - Generate a standard curve transformation function, for each fluorescence channel in which the associated MEF values are specified. - Generate forward/side scatter density plots and fluorescence histograms, and plots of the clustering and fitting steps of standard curve generation, if `plot` is True. Names of forward/side scatter and fluorescence channels are taken from `instruments_table`. Parameters ---------- beads_table : DataFrame Table specifying beads samples to be processed. For more information about the fields required in this table, please consult the module's documentation. instruments_table : DataFrame Table specifying instruments. For more information about the fields required in this table, please consult the module's documentation. base_dir : str, optional Directory from where all the other paths are specified. verbose : bool, optional Whether to print information messages during the execution of this function. plot : bool, optional Whether to generate and save density/histogram plots of each sample, and each beads sample. plot_dir : str, optional Directory relative to `base_dir` into which plots are saved. If `plot` is False, this parameter is ignored. If ``plot==True`` and ``plot_dir is None``, plot without saving. full_output : bool, optional Flag indicating whether to include an additional output, containing intermediate results from the generation of the MEF transformation functions. get_transform_fxn_kwargs : dict, optional Additional parameters passed directly to internal ``mef.get_transform_fxn()`` function call. Returns ------- beads_samples : list of FCSData objects A list of processed, gated, and transformed samples, as specified in `beads_table`, in the order of ``beads_table.index``. mef_transform_fxns : OrderedDict A dictionary of MEF transformation functions, indexed by ``beads_table.index``. mef_outputs : list, only if ``full_output==True`` A list with intermediate results of the generation of the MEF transformation functions. For every entry in `beads_table`, :func:`FlowCal.mef.get_transform_fxn()` is called on the corresponding processed and gated beads sample with ``full_output=True``, and the full output (a `MEFOutput` ``namedtuple``) is appended to `mef_outputs`. Please refer to the output section of :func:`FlowCal.mef.get_transform_fxn()`'s documentation for more information.
entailment
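Putting the pieces together, a plausible call chain is sketched below (assuming the tables follow the module's documented column layout; file and directory names are hypothetical).

instruments_table = read_table("experiment.xlsx", "Instruments", index_col="ID")
beads_table = read_table("experiment.xlsx", "Beads", index_col="ID")
beads_samples, mef_transform_fxns = process_beads_table(
    beads_table,
    instruments_table,
    verbose=True,
    plot=True,
    plot_dir="plot_beads")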
def process_samples_table(samples_table, instruments_table, mef_transform_fxns=None, beads_table=None, base_dir=".", verbose=False, plot=False, plot_dir=None): """ Process flow cytometry samples, as specified by an input table. The function processes each entry in `samples_table`, and does the following: - Load the FCS file specified in the field "File Path". - Transform the forward scatter/side scatter to RFI. - Transform the fluorescence channels to the units specified in the column "<Channel name> Units". - Remove the first 250 and last 100 events. - Remove saturated events in the forward scatter and side scatter channels. - Apply density gating on the forward scatter/side scatter channels. - Plot combined forward/side scatter density plots and fluorescence histograms, if `plot` is True. Names of forward/side scatter and fluorescence channels are taken from `instruments_table`. Parameters ---------- samples_table : DataFrame Table specifying samples to be processed. For more information about the fields required in this table, please consult the module's documentation. instruments_table : DataFrame Table specifying instruments. For more information about the fields required in this table, please consult the module's documentation. mef_transform_fxns : dict or OrderedDict, optional Dictionary containing MEF transformation functions. If any entry in `samples_table` requires transformation to MEF, a key: value pair must exist in mef_transform_fxns, with the key being equal to the contents of field "Beads ID". beads_table : DataFrame, optional Table specifying beads samples used to generate `mef_transform_fxns`. This is used to check if a beads sample was taken at the same acquisition settings as a sample to be transformed to MEF. For any beads sample and channel for which a MEF transformation function has been generated, the following fields should be populated: ``<channel> Amp. Type`` and ``<channel> Detector Volt``. If `beads_table` is not specified, no checking will be performed. base_dir : str, optional Directory from where all the other paths are specified. verbose : bool, optional Whether to print information messages during the execution of this function. plot : bool, optional Whether to generate and save density/histogram plots of each sample, and each beads sample. plot_dir : str, optional Directory relative to `base_dir` into which plots are saved. If `plot` is False, this parameter is ignored. If ``plot==True`` and ``plot_dir is None``, plot without saving. Returns ------- samples : list of FCSData objects A list of processed, gated, and transformed samples, as specified in `samples_table`, in the order of ``samples_table.index``. """ # Initialize output variable samples = [] # Return empty list if samples table is empty if samples_table.empty: return samples if verbose: msg = "Processing Samples table ({} entries)".format(len(samples_table)) print("") print(msg) print("="*len(msg)) # Check that plotting directory exists, create otherwise if plot and plot_dir is not None \ and not os.path.exists(os.path.join(base_dir, plot_dir)): os.makedirs(os.path.join(base_dir, plot_dir)) # Extract header and channel names for which units are specified. headers = list(samples_table.columns) report_headers_all = [h for h in headers if re_units.match(h)] report_channels_all = [re_units.match(h).group(1) for h in report_headers_all] # Iterate through table # We will look for an ExcelUIException on each iteration. If an exception # is caught, it will be stored in samples.
for sample_id, sample_row in samples_table.iterrows(): try: ### # Instrument Data ### # Get the appropriate row in the instrument table instruments_row = instruments_table.loc[sample_row['Instrument ID']] # Scatter channels: Foward Scatter, Side Scatter sc_channels = [instruments_row['Forward Scatter Channel'], instruments_row['Side Scatter Channel'], ] # Fluorescence channels is a comma-separated list fl_channels = instruments_row['Fluorescence Channels'].split(',') fl_channels = [s.strip() for s in fl_channels] ### # Sample Data ### if verbose: print("\nSample ID {}...".format(sample_id)) print("Loading file \"{}\"...".format(sample_row['File Path'])) # Attempt to open file filename = os.path.join(base_dir, sample_row['File Path']) try: sample = FlowCal.io.FCSData(filename) except IOError: raise ExcelUIException("file \"{}\" not found".format( sample_row['File Path'])) # Check that the number of events is greater than 400 if sample.shape[0] < 400: raise ExcelUIException("number of events is lower than 400") ### # Transform ### if verbose: print("Performing data transformation...") # Transform FSC/SSC to linear scale sample = FlowCal.transform.to_rfi(sample, sc_channels) # Parse fluorescence channels in which to transform report_channels = [] report_units = [] for fl_channel in fl_channels: if fl_channel in report_channels_all: # Get header from channel name report_header = report_headers_all[ report_channels_all.index(fl_channel)] # Extract text. If empty, ignore. units_str = sample_row[report_header] if pd.isnull(units_str): continue # Decide what transformation to perform units = units_str.strip() if units.lower() == 'channel': units_label = "Channel Number" elif units.lower() == 'rfi': units_label = "Relative Fluorescence Intensity, RFI" sample = FlowCal.transform.to_rfi(sample, fl_channel) elif units.lower() == 'a.u.' or units.lower() == 'au': units_label = "Arbitrary Units, a.u." sample = FlowCal.transform.to_rfi(sample, fl_channel) elif units.lower() == 'mef': units_label = "Molecules of Equivalent Fluorophore, MEF" # Check if transformation function is available if mef_transform_fxns[sample_row['Beads ID']] is None: raise ExcelUIException("MEF transformation " "function not available") # If beads_table is available, check if the same # settings have been used to acquire the corresponding # beads sample if beads_table is not None: beads_row = beads_table.loc[sample_row['Beads ID']] # Instrument beads_iid = beads_row['Instrument ID'] if beads_iid != sample_row['Instrument ID']: raise ExcelUIException("Instruments for " "acquisition of beads and samples are not " "the same (beads {}'s instrument: {}, " "sample's instrument: {})".format( sample_row['Beads ID'], beads_iid, sample_row['Instrument ID'])) # Amplification type beads_at = beads_row['{} Amp. Type'. \ format(fl_channel)] if sample.amplification_type(fl_channel)[0]: sample_at = "Log" else: sample_at = "Linear" if beads_at != sample_at: raise ExcelUIException("Amplification type for " "acquisition of beads and samples in " "channel {} are not the same (beads {}'s " "amplification: {}, sample's " "amplification: {})".format( fl_channel, sample_row['Beads ID'], beads_at, sample_at)) # Detector voltage beads_dv = beads_row['{} Detector Volt.'. 
\ format(fl_channel)] if sample.detector_voltage(fl_channel) is not None \ and beads_dv != sample.detector_voltage( fl_channel): raise ExcelUIException("Detector voltage for " "acquisition of beads and samples in " "channel {} are not the same (beads {}'s " "detector voltage: {}, sample's " "detector voltage: {})".format( fl_channel, sample_row['Beads ID'], beads_dv, sample.detector_voltage(fl_channel))) # First, transform to RFI sample = FlowCal.transform.to_rfi(sample, fl_channel) # Attempt to transform to MEF # Transformation function raises a ValueError if a # standard curve does not exist for a channel try: sample = mef_transform_fxns[sample_row['Beads ID']]( sample, fl_channel) except ValueError: raise ExcelUIException("no standard curve for " "channel {}".format(fl_channel)) else: raise ExcelUIException("units \"{}\" not recognized". \ format(units, sample_id)) # Register that reporting in this channel must be done report_channels.append(fl_channel) report_units.append(units_label) ### # Gate ### if verbose: print("Performing gating...") # Remove first and last events. Transients in fluidics can make the # first few and last events slightly different from the rest. sample_gated = FlowCal.gate.start_end(sample, num_start=250, num_end=100) # Remove saturating events in forward/side scatter, and fluorescent # channels to report, if the FCS data type is integer. The value of # a saturating event is taken automatically from # `sample_gated.range`. if sample_gated.data_type == 'I': sample_gated = FlowCal.gate.high_low( sample_gated, sc_channels + report_channels) # Density gating try: sample_gated, __, gate_contour = FlowCal.gate.density2d( data=sample_gated, channels=sc_channels, gate_fraction=sample_row['Gate Fraction'], xscale='logicle', yscale='logicle', full_output=True) except ValueError as ve: raise ExcelUIException(ve.message) # Plot forward/side scatter density plot and fluorescence histograms if plot: if verbose: print("Plotting density plot and histogram...") # Density plot parameters density_params = {} density_params['mode'] = 'scatter' density_params["title"] = "{} ({:.1f}% retained)".format( sample_id, sample_gated.shape[0] * 100. / sample.shape[0]) density_params['xscale'] = 'logicle' density_params['yscale'] = 'logicle' # Histogram plot parameters hist_params = [] for rc, ru in zip(report_channels, report_units): param = {} param['xlabel'] = '{} ({})'.format(rc, ru) # Only channel numbers are plotted in linear scale if (ru == 'Channel Number'): param['xscale'] = 'linear' else: param['xscale'] = 'logicle' hist_params.append(param) # Plot if plot_dir is not None: figname = os.path.join( base_dir, plot_dir, "{}.png".format(sample_id)) else: figname = None FlowCal.plot.density_and_hist( sample, sample_gated, gate_contour=gate_contour, density_channels=sc_channels, density_params=density_params, hist_channels=report_channels, hist_params=hist_params, savefig=figname) except ExcelUIException as e: # Print Exception message if verbose: print("ERROR: {}".format(str(e))) # Append exception to samples array samples.append(e) else: # If no errors were found, store results samples.append(sample_gated) return samples
Process flow cytometry samples, as specified by an input table. The function processes each entry in `samples_table`, and does the following: - Load the FCS file specified in the field "File Path". - Transform the forward scatter/side scatter to RFI. - Transform the fluorescence channels to the units specified in the column "<Channel name> Units". - Remove the first 250 and last 100 events. - Remove saturated events in the forward scatter and side scatter channels. - Apply density gating on the forward scatter/side scatter channels. - Plot combined forward/side scatter density plots and fluorescence histograms, if `plot` is True. Names of forward/side scatter and fluorescence channels are taken from `instruments_table`. Parameters ---------- samples_table : DataFrame Table specifying samples to be processed. For more information about the fields required in this table, please consult the module's documentation. instruments_table : DataFrame Table specifying instruments. For more information about the fields required in this table, please consult the module's documentation. mef_transform_fxns : dict or OrderedDict, optional Dictionary containing MEF transformation functions. If any entry in `samples_table` requires transformation to MEF, a key: value pair must exist in mef_transform_fxns, with the key being equal to the contents of field "Beads ID". beads_table : DataFrame, optional Table specifying beads samples used to generate `mef_transform_fxns`. This is used to check if a beads sample was taken at the same acquisition settings as a sample to be transformed to MEF. For any beads sample and channel for which a MEF transformation function has been generated, the following fields should be populated: ``<channel> Amp. Type`` and ``<channel> Detector Volt``. If `beads_table` is not specified, no checking will be performed. base_dir : str, optional Directory from where all the other paths are specified. verbose : bool, optional Whether to print information messages during the execution of this function. plot : bool, optional Whether to generate and save density/histogram plots of each sample, and each beads sample. plot_dir : str, optional Directory relative to `base_dir` into which plots are saved. If `plot` is False, this parameter is ignored. If ``plot==True`` and ``plot_dir is None``, plot without saving. Returns ------- samples : list of FCSData objects A list of processed, gated, and transformed samples, as specified in `samples_table`, in the order of ``samples_table.index``.
entailment
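Continuing the sketch from the beads step, the MEF transformation functions feed directly into the samples step; file and directory names remain hypothetical.

samples_table = read_table("experiment.xlsx", "Samples", index_col="ID")
samples = process_samples_table(
    samples_table,
    instruments_table,
    mef_transform_fxns=mef_transform_fxns,
    beads_table=beads_table,
    verbose=True,
    plot=True,
    plot_dir="plot_samples")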
def add_beads_stats(beads_table, beads_samples, mef_outputs=None): """ Add stats fields to beads table. The following information is added to each row: - Notes (warnings, errors) resulting from the analysis - Number of Events - Acquisition Time (s) The following information is added for each row, for each channel in which MEF values have been specified: - Detector voltage (gain) - Amplification type - Bead model fitted parameters Parameters ---------- beads_table : DataFrame Table specifying bead samples to analyze. For more information about the fields required in this table, please consult the module's documentation. beads_samples : list FCSData objects from which to calculate statistics. ``beads_samples[i]`` should correspond to ``beads_table.iloc[i]``. mef_outputs : list, optional A list with the intermediate results of the generation of the MEF transformation functions, as given by ``mef.get_transform_fxn()``. This is used to populate the fields ``<channel> Beads Model``, ``<channel> Beads Params. Names``, and ``<channel> Beads Params. Values``. If specified, ``mef_outputs[i]`` should correspond to ``beads_table.iloc[i]``. """ # The index name is not preserved if beads_table is empty. # Save the index name for later beads_table_index_name = beads_table.index.name # Add per-row info notes = [] n_events = [] acq_time = [] for beads_sample in beads_samples: # Check if sample is an exception, otherwise assume it's an FCSData if isinstance(beads_sample, ExcelUIException): # Print error message notes.append("ERROR: {}".format(str(beads_sample))) n_events.append(np.nan) acq_time.append(np.nan) else: notes.append('') n_events.append(beads_sample.shape[0]) acq_time.append(beads_sample.acquisition_time) beads_table['Analysis Notes'] = notes beads_table['Number of Events'] = n_events beads_table['Acquisition Time (s)'] = acq_time # List of channels that require stats columns headers = list(beads_table.columns) stats_headers = [h for h in headers if re_mef_values.match(h)] stats_channels = [re_mef_values.match(h).group(1) for h in stats_headers] # Iterate through channels for header, channel in zip(stats_headers, stats_channels): # Add empty columns to table beads_table[channel + ' Detector Volt.'] = np.nan beads_table[channel + ' Amp. Type'] = "" if mef_outputs: beads_table[channel + ' Beads Model'] = "" beads_table[channel + ' Beads Params. Names'] = "" beads_table[channel + ' Beads Params. Values'] = "" # Iterate through rows for i, row_id in enumerate(beads_table.index): # If error, skip if isinstance(beads_samples[i], ExcelUIException): continue # If MEF values are specified, calculate stats. If not, leave empty. if pd.notnull(beads_table[header][row_id]): # Detector voltage # Dataframes, such as beads_table, are modified differently # depending on pandas' version. if packaging.version.parse(pd.__version__) \ < packaging.version.parse('0.21'): beads_table.set_value( row_id, channel + ' Detector Volt.', beads_samples[i].detector_voltage(channel)) else: beads_table.at[row_id, channel + ' Detector Volt.'] = \ beads_samples[i].detector_voltage(channel) # Amplification type if beads_samples[i].amplification_type(channel)[0]: amplification_type = "Log" else: amplification_type = "Linear" # Dataframes, such as beads_table, are modified differently # depending on pandas' version. if packaging.version.parse(pd.__version__) \ < packaging.version.parse('0.21'): beads_table.set_value(row_id, channel + ' Amp. Type', amplification_type) else: beads_table.at[row_id, channel + ' Amp. Type'] = \ amplification_type # Bead model and parameters # Only populate if mef_outputs has been provided if mef_outputs: # Try to find the current channel among the mef'd channels. # If successful, extract bead fitted parameters. try: mef_channel_index = mef_outputs[i]. \ mef_channels.index(channel) except ValueError: pass else: # Bead model beads_model_str = mef_outputs[i]. \ fitting['beads_model_str'][mef_channel_index] # Dataframes, such as beads_table, are modified # differently depending on pandas' version. if packaging.version.parse(pd.__version__) \ < packaging.version.parse('0.21'): beads_table.set_value(row_id, channel + ' Beads Model', beads_model_str) else: beads_table.at[row_id, channel + ' Beads Model'] = \ beads_model_str # Bead parameter names params_names = mef_outputs[i]. \ fitting['beads_params_names'][mef_channel_index] params_names_str = ", ".join([str(p) for p in params_names]) # Dataframes, such as beads_table, are modified # differently depending on pandas' version. if packaging.version.parse(pd.__version__) \ < packaging.version.parse('0.21'): beads_table.set_value( row_id, channel + ' Beads Params. Names', params_names_str) else: beads_table.at[ row_id, channel + ' Beads Params. Names'] = \ params_names_str # Bead parameter values params = mef_outputs[i]. \ fitting['beads_params'][mef_channel_index] params_str = ", ".join([str(p) for p in params]) # Dataframes, such as beads_table, are modified # differently depending on pandas' version. if packaging.version.parse(pd.__version__) \ < packaging.version.parse('0.21'): beads_table.set_value( row_id, channel + ' Beads Params. Values', params_str) else: beads_table.at[ row_id, channel + ' Beads Params. Values'] = \ params_str # Restore index name if table is empty if len(beads_table) == 0: beads_table.index.name = beads_table_index_name
Add stats fields to beads table. The following information is added to each row: - Notes (warnings, errors) resulting from the analysis - Number of Events - Acquisition Time (s) The following information is added for each row, for each channel in which MEF values have been specified: - Detector voltage (gain) - Amplification type - Bead model fitted parameters Parameters ---------- beads_table : DataFrame Table specifying bead samples to analyze. For more information about the fields required in this table, please consult the module's documentation. beads_samples : list FCSData objects from which to calculate statistics. ``beads_samples[i]`` should correspond to ``beads_table.iloc[i]``. mef_outputs : list, optional A list with the intermediate results of the generation of the MEF transformation functions, as given by ``mef.get_transform_fxn()``. This is used to populate the fields ``<channel> Beads Model``, ``<channel> Beads Params. Names``, and ``<channel> Beads Params. Values``. If specified, ``mef_outputs[i]`` should correspond to ``beads_table.iloc[i]``.
entailment
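A note on the pandas version guard that recurs in the function above: DataFrame.set_value() was deprecated in pandas 0.21 and later removed, so newer pandas requires the DataFrame.at accessor. A self-contained sketch of the same pattern, using a hypothetical row ID and column name:

import packaging.version
import numpy as np
import pandas as pd

df = pd.DataFrame({'FL1 Detector Volt.': [np.nan]}, index=['B0001'])
if packaging.version.parse(pd.__version__) < packaging.version.parse('0.21'):
    # Old pandas: set_value() writes a single cell
    df.set_value('B0001', 'FL1 Detector Volt.', 650.0)
else:
    # Modern pandas: .at is the single-cell accessor
    df.at['B0001', 'FL1 Detector Volt.'] = 650.0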
def add_samples_stats(samples_table, samples): """ Add stats fields to samples table. The following information is added to each row: - Notes (warnings, errors) resulting from the analysis - Number of Events - Acquisition Time (s) The following information is added for each row, for each channel in which fluorescence units have been specified: - Detector voltage (gain) - Amplification type - Mean - Geometric Mean - Median - Mode - Standard Deviation - Coefficient of Variation (CV) - Geometric Standard Deviation - Geometric Coefficient of Variation - Inter-Quartile Range - Robust Coefficient of Variation (RCV) Parameters ---------- samples_table : DataFrame Table specifying samples to analyze. For more information about the fields required in this table, please consult the module's documentation. samples : list FCSData objects from which to calculate statistics. ``samples[i]`` should correspond to ``samples_table.iloc[i]``. Notes ----- Geometric statistics (geometric mean, geometric standard deviation, and geometric coefficient of variation) are defined only for positive data. If there are non-positive events in any relevant channel of any member of `samples`, geometric statistics will only be calculated on the positive events, and a warning message will be written to the "Analysis Notes" field. """ # The index name is not preserved if samples_table is empty. # Save the index name for later samples_table_index_name = samples_table.index.name # Add per-row info notes = [] n_events = [] acq_time = [] for sample in samples: # Check if sample is an exception, otherwise assume it's an FCSData if isinstance(sample, ExcelUIException): # Print error message notes.append("ERROR: {}".format(str(sample))) n_events.append(np.nan) acq_time.append(np.nan) else: notes.append('') n_events.append(sample.shape[0]) acq_time.append(sample.acquisition_time) samples_table['Analysis Notes'] = notes samples_table['Number of Events'] = n_events samples_table['Acquisition Time (s)'] = acq_time # List of channels that require stats columns headers = list(samples_table.columns) stats_headers = [h for h in headers if re_units.match(h)] stats_channels = [re_units.match(h).group(1) for h in stats_headers] # Iterate through channels for header, channel in zip(stats_headers, stats_channels): # Add empty columns to table samples_table[channel + ' Detector Volt.'] = np.nan samples_table[channel + ' Amp. Type'] = "" samples_table[channel + ' Mean'] = np.nan samples_table[channel + ' Geom. Mean'] = np.nan samples_table[channel + ' Median'] = np.nan samples_table[channel + ' Mode'] = np.nan samples_table[channel + ' Std'] = np.nan samples_table[channel + ' CV'] = np.nan samples_table[channel + ' Geom. Std'] = np.nan samples_table[channel + ' Geom. CV'] = np.nan samples_table[channel + ' IQR'] = np.nan samples_table[channel + ' RCV'] = np.nan for row_id, sample in zip(samples_table.index, samples): # If error, skip if isinstance(sample, ExcelUIException): continue # If units are specified, calculate stats. If not, leave empty. if pd.notnull(samples_table[header][row_id]): # Acquisition settings # Detector voltage # Dataframes, such as samples_table, are modified # differently depending on pandas' version. if packaging.version.parse(pd.__version__) \ < packaging.version.parse('0.21'): samples_table.set_value(row_id, channel + ' Detector Volt.', sample.detector_voltage(channel)) else: samples_table.at[row_id, channel + ' Detector Volt.'] = \ sample.detector_voltage(channel) # Amplification type if sample.amplification_type(channel)[0]: amplification_type = "Log" else: amplification_type = "Linear" # Dataframes, such as samples_table, are modified # differently depending on pandas' version. if packaging.version.parse(pd.__version__) \ < packaging.version.parse('0.21'): samples_table.set_value(row_id, channel + ' Amp. Type', amplification_type) else: samples_table.at[row_id, channel + ' Amp. Type'] = \ amplification_type # Statistics from event list # Dataframes, such as samples_table, are modified # differently depending on pandas' version. if packaging.version.parse(pd.__version__) \ < packaging.version.parse('0.21'): samples_table.set_value(row_id, channel + ' Mean', FlowCal.stats.mean(sample, channel)) samples_table.set_value(row_id, channel + ' Median', FlowCal.stats.median(sample, channel)) samples_table.set_value(row_id, channel + ' Mode', FlowCal.stats.mode(sample, channel)) samples_table.set_value(row_id, channel + ' Std', FlowCal.stats.std(sample, channel)) samples_table.set_value(row_id, channel + ' CV', FlowCal.stats.cv(sample, channel)) samples_table.set_value(row_id, channel + ' IQR', FlowCal.stats.iqr(sample, channel)) samples_table.set_value(row_id, channel + ' RCV', FlowCal.stats.rcv(sample, channel)) else: samples_table.at[row_id, channel + ' Mean'] = \ FlowCal.stats.mean(sample, channel) samples_table.at[row_id, channel + ' Median'] = \ FlowCal.stats.median(sample, channel) samples_table.at[row_id, channel + ' Mode'] = \ FlowCal.stats.mode(sample, channel) samples_table.at[row_id, channel + ' Std'] = \ FlowCal.stats.std(sample, channel) samples_table.at[row_id, channel + ' CV'] = \ FlowCal.stats.cv(sample, channel) samples_table.at[row_id, channel + ' IQR'] = \ FlowCal.stats.iqr(sample, channel) samples_table.at[row_id, channel + ' RCV'] = \ FlowCal.stats.rcv(sample, channel) # For geometric statistics, first check for non-positive events. # If found, throw a warning and calculate statistics on positive # events only. if np.any(sample[:, channel] <= 0): # Separate positive events sample_positive = sample[sample[:, channel] > 0] # Throw warning msg = "Geometric statistics for channel" + \ " {} calculated on positive events".format(channel) + \ " only ({:.1f}%). ".format( 100.*sample_positive.shape[0]/sample.shape[0]) warnings.warn("On sample {}: {}".format(row_id, msg)) # Write warning message to table if samples_table.loc[row_id, 'Analysis Notes']: msg = samples_table.loc[row_id, 'Analysis Notes'] + msg # Dataframes, such as samples_table, are modified # differently depending on pandas' version. if packaging.version.parse(pd.__version__) \ < packaging.version.parse('0.21'): samples_table.set_value(row_id, 'Analysis Notes', msg) else: samples_table.at[row_id, 'Analysis Notes'] = msg else: sample_positive = sample # Calculate and write geometric statistics # Dataframes, such as samples_table, are modified # differently depending on pandas' version. if packaging.version.parse(pd.__version__) \ < packaging.version.parse('0.21'): samples_table.set_value( row_id, channel + ' Geom. Mean', FlowCal.stats.gmean(sample_positive, channel)) samples_table.set_value( row_id, channel + ' Geom. Std', FlowCal.stats.gstd(sample_positive, channel)) samples_table.set_value( row_id, channel + ' Geom. CV', FlowCal.stats.gcv(sample_positive, channel)) else: samples_table.at[row_id, channel + ' Geom. Mean'] = \ FlowCal.stats.gmean(sample_positive, channel) samples_table.at[row_id, channel + ' Geom. Std'] = \ FlowCal.stats.gstd(sample_positive, channel) samples_table.at[row_id, channel + ' Geom. CV'] = \ FlowCal.stats.gcv(sample_positive, channel) # Restore index name if table is empty if len(samples_table) == 0: samples_table.index.name = samples_table_index_name
Add stats fields to samples table. The following information is added to each row: - Notes (warnings, errors) resulting from the analysis - Number of Events - Acquisition Time (s) The following information is added for each row, for each channel in which fluorescence units have been specified: - Detector voltage (gain) - Amplification type - Mean - Geometric Mean - Median - Mode - Standard Deviation - Coefficient of Variation (CV) - Geometric Standard Deviation - Geometric Coefficient of Variation - Inter-Quartile Range - Robust Coefficient of Variation (RCV) Parameters ---------- samples_table : DataFrame Table specifying samples to analyze. For more information about the fields required in this table, please consult the module's documentation. samples : list FCSData objects from which to calculate statistics. ``samples[i]`` should correspond to ``samples_table.iloc[i]``. Notes ----- Geometric statistics (geometric mean, geometric standard deviation, and geometric coefficient of variation) are defined only for positive data. If there are non-positive events in any relevant channel of any member of `samples`, geometric statistics will only be calculated on the positive events, and a warning message will be written to the "Analysis Notes" field.
entailment
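The Notes section above explains that geometric statistics are restricted to positive events. A plain-numpy sketch of that filtering (FlowCal.stats.gmean operates on FCSData slices; an ordinary array stands in here):

import numpy as np

events = np.array([-5.0, 0.0, 10.0, 100.0, 1000.0])
positive = events[events > 0]                            # drop non-positive events
retained = 100. * positive.shape[0] / events.shape[0]    # 60.0% of events kept
geom_mean = np.exp(np.mean(np.log(positive)))            # exactly 100.0 here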
def generate_histograms_table(samples_table, samples, max_bins=1024): """ Generate a table of histograms as a DataFrame. Parameters ---------- samples_table : DataFrame Table specifying samples to analyze. For more information about the fields required in this table, please consult the module's documentation. samples : list FCSData objects from which to calculate histograms. ``samples[i]`` should correspond to ``samples_table.iloc[i]``. max_bins : int, optional Maximum number of bins to use. Returns ------- hist_table : DataFrame A multi-indexed DataFrame. Rows contain the histogram bins and counts for every sample and channel specified in samples_table. `hist_table` is indexed by the sample's ID, the channel name, and whether the row corresponds to bins or counts. """ # Extract channels that require stats histograms headers = list(samples_table.columns) hist_headers = [h for h in headers if re_units.match(h)] hist_channels = [re_units.match(h).group(1) for h in hist_headers] # The number of columns in the DataFrame has to be set to the maximum # number of bins of any of the histograms about to be generated. # The following iterates through these histograms and finds the # largest. n_columns = 0 for sample_id, sample in zip(samples_table.index, samples): if isinstance(sample, ExcelUIException): continue for header, channel in zip(hist_headers, hist_channels): if pd.notnull(samples_table[header][sample_id]): if n_columns < sample.resolution(channel): n_columns = sample.resolution(channel) # Saturate at max_bins if n_columns > max_bins: n_columns = max_bins # Declare multi-indexed DataFrame index = pd.MultiIndex.from_arrays([[],[],[]], names = ['Sample ID', 'Channel', '']) columns = ['Bin {}'.format(i + 1) for i in range(n_columns)] hist_table = pd.DataFrame([], index=index, columns=columns) # Generate histograms for sample_id, sample in zip(samples_table.index, samples): if isinstance(sample, ExcelUIException): continue for header, channel in zip(hist_headers, hist_channels): if pd.notnull(samples_table[header][sample_id]): # Get units in which bins are being reported unit = samples_table[header][sample_id] # Decide which scale to use # Channel units result in linear scale. Otherwise, use logicle. if unit == 'Channel': scale = 'linear' else: scale = 'logicle' # Define number of bins nbins = min(sample.resolution(channel), max_bins) # Calculate bin edges and centers # We generate twice the necessary number of bins. We then take # every other value as the proper bin edges, and the remaining # values as the bin centers. bins_extended = sample.hist_bins(channel, 2*nbins, scale) bin_edges = bins_extended[::2] bin_centers = bins_extended[1::2] # Store bin centers hist_table.loc[(sample_id, channel, 'Bin Centers ({})'.format(unit)), columns[0:len(bin_centers)]] = bin_centers # Calculate and store histogram counts hist, __ = np.histogram(sample[:,channel], bins=bin_edges) hist_table.loc[(sample_id, channel, 'Counts'), columns[0:len(bin_centers)]] = hist return hist_table
Generate a table of histograms as a DataFrame. Parameters ---------- samples_table : DataFrame Table specifying samples to analyze. For more information about the fields required in this table, please consult the module's documentation. samples : list FCSData objects from which to calculate histograms. ``samples[i]`` should correspond to ``samples_table.iloc[i]``. max_bins : int, optional Maximum number of bins to use. Returns ------- hist_table : DataFrame A multi-indexed DataFrame. Rows contain the histogram bins and counts for every sample and channel specified in samples_table. `hist_table` is indexed by the sample's ID, the channel name, and whether the row corresponds to bins or counts.
entailment
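The bin edge/center computation above requests twice the needed number of bins and de-interleaves the result. A sketch with np.linspace standing in for sample.hist_bins() (assuming hist_bins(channel, n, scale) returns n+1 boundary values):

import numpy as np

nbins = 4
bins_extended = np.linspace(0, 8, 2*nbins + 1)  # 9 values: 0, 1, ..., 8
bin_edges = bins_extended[::2]      # [0., 2., 4., 6., 8.] -> nbins+1 edges
bin_centers = bins_extended[1::2]   # [1., 3., 5., 7.]     -> nbins centers

counts, _ = np.histogram([0.5, 1.5, 3.2, 6.8], bins=bin_edges)  # [2, 1, 0, 1]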
def generate_about_table(extra_info={}): """ Make a table with information about FlowCal and the current analysis. Parameters ---------- extra_info : dict, optional Additional keyword:value pairs to include in the table. Returns ------- about_table : DataFrame Table with information about FlowCal and the current analysis, as keyword:value pairs. The following keywords are included: FlowCal version, and date and time of analysis. Keywords and values from `extra_info` are also included. """ # Make keyword and value arrays keywords = [] values = [] # FlowCal version keywords.append('FlowCal version') values.append(FlowCal.__version__) # Analysis date and time keywords.append('Date of analysis') values.append(time.strftime("%Y/%m/%d")) keywords.append('Time of analysis') values.append(time.strftime("%I:%M:%S%p")) # Add additional keyword:value pairs for k, v in six.iteritems(extra_info): keywords.append(k) values.append(v) # Make table as data frame about_table = pd.DataFrame(values, index=keywords) # Set column names about_table.columns = ['Value'] about_table.index.name = 'Keyword' return about_table
Make a table with information about FlowCal and the current analysis. Parameters ---------- extra_info : dict, optional Additional keyword:value pairs to include in the table. Returns ------- about_table : DataFrame Table with information about FlowCal and the current analysis, as keyword:value pairs. The following keywords are included: FlowCal version, and date and time of analysis. Keywords and values from `extra_info` are also included.
entailment
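Usage sketch for the function above; the version string and timestamps shown are illustrative only:

about_table = generate_about_table({'Input file path': 'experiment.xlsx'})
print(about_table)
#                               Value
# Keyword
# FlowCal version               x.y.z
# Date of analysis         2024/01/01
# Time of analysis         10:30:00AM
# Input file path     experiment.xlsx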
def show_open_file_dialog(filetypes): """ Show an open file dialog and return the path of the file selected. Parameters ---------- filetypes : list of tuples Types of file to show on the dialog. Each tuple on the list must have two elements associated with a filetype: the first element is a description, and the second is the associated extension. Returns ------- filename : str The path of the file selected, or an empty string if no file was chosen. """ # The following line ensures that Tk's main window is not shown Tk().withdraw() # OSX ONLY: Set an application default through the shell to prevent the # file select window from sticking after use. if platform.system() == 'Darwin': subprocess.call("defaults write org.python.python " + "ApplePersistenceIgnoreState YES", shell=True) filename = askopenfilename(filetypes=filetypes) subprocess.call("defaults write org.python.python " + "ApplePersistenceIgnoreState NO", shell=True) else: filename = askopenfilename(filetypes=filetypes) return filename
Show an open file dialog and return the path of the file selected. Parameters ---------- filetypes : list of tuples Types of file to show on the dialog. Each tuple on the list must have two elements associated with a filetype: the first element is a description, and the second is the associated extension. Returns ------- filename : str The path of the file selected, or an empty string if no file was chosen.
entailment
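Usage sketch, mirroring how run() below invokes this dialog:

input_path = show_open_file_dialog(filetypes=[('Excel files', '*.xlsx')])
if not input_path:
    print("No input file selected.")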
def run(input_path=None, output_path=None, verbose=True, plot=True, hist_sheet=False): """ Run the MS Excel User Interface. This function performs the following: 1. If `input_path` is not specified, show a dialog to choose an input Excel file. 2. Extract data from the Instruments, Beads, and Samples tables. 3. Process all the bead samples specified in the Beads table. 4. Generate statistics for each bead sample. 5. Process all the cell samples in the Samples table. 6. Generate statistics for each sample. 7. If requested, generate a histogram table for each fluorescent channel specified for each sample. 8. Generate a table with run time, date, FlowCal version, and other information. 9. Save statistics and (if requested) histograms in an output Excel file. Parameters ---------- input_path : str, optional Path to the Excel file to use as input. If None, show a dialog to select an input file. output_path : str, optional Path to which to save the output Excel file. If None, use "<input_path>_output". verbose : bool, optional Whether to print information messages during the execution of this function. plot : bool, optional Whether to generate and save density/histogram plots of each sample, and each beads sample. hist_sheet : bool, optional Whether to generate a sheet in the output Excel file specifying histogram bin information. """ # If input file has not been specified, show open file dialog if input_path is None: input_path = show_open_file_dialog(filetypes=[('Excel files', '*.xlsx')]) if not input_path: if verbose: print("No input file selected.") return # Extract directory, filename, and filename with no extension from path input_dir, input_filename = os.path.split(input_path) input_filename_no_ext, __ = os.path.splitext(input_filename) # Read relevant tables from workbook if verbose: print("Reading {}...".format(input_filename)) instruments_table = read_table(input_path, sheetname='Instruments', index_col='ID') beads_table = read_table(input_path, sheetname='Beads', index_col='ID') samples_table = read_table(input_path, sheetname='Samples', index_col='ID') # Process beads samples beads_samples, mef_transform_fxns, mef_outputs = process_beads_table( beads_table, instruments_table, base_dir=input_dir, verbose=verbose, plot=plot, plot_dir='plot_beads', full_output=True) # Add stats to beads table if verbose: print("") print("Calculating statistics for beads...") add_beads_stats(beads_table, beads_samples, mef_outputs) # Process samples samples = process_samples_table( samples_table, instruments_table, mef_transform_fxns=mef_transform_fxns, beads_table=beads_table, base_dir=input_dir, verbose=verbose, plot=plot, plot_dir='plot_samples') # Add stats to samples table if verbose: print("") print("Calculating statistics for all samples...") add_samples_stats(samples_table, samples) # Generate histograms if hist_sheet: if verbose: print("Generating histograms table...") histograms_table = generate_histograms_table(samples_table, samples) # Generate about table about_table = generate_about_table({'Input file path': input_path}) # Generate list of tables to save table_list = [] table_list.append(('Instruments', instruments_table)) table_list.append(('Beads', beads_table)) table_list.append(('Samples', samples_table)) if hist_sheet: table_list.append(('Histograms', histograms_table)) table_list.append(('About Analysis', about_table)) # Write output excel file if verbose: print("Saving output Excel file...") if output_path is None: output_filename = "{}_output.xlsx".format(input_filename_no_ext) output_path = os.path.join(input_dir, output_filename) write_workbook(output_path, table_list) if verbose: print("\nDone.")
Run the MS Excel User Interface. This function performs the following: 1. If `input_path` is not specified, show a dialog to choose an input Excel file. 2. Extract data from the Instruments, Beads, and Samples tables. 3. Process all the bead samples specified in the Beads table. 4. Generate statistics for each bead sample. 5. Process all the cell samples in the Samples table. 6. Generate statistics for each sample. 7. If requested, generate a histogram table for each fluorescent channel specified for each sample. 8. Generate a table with run time, date, FlowCal version, and other information. 9. Save statistics and (if requested) histograms in an output Excel file. Parameters ---------- input_path : str, optional Path to the Excel file to use as input. If None, show a dialog to select an input file. output_path : str, optional Path to which to save the output Excel file. If None, use "<input_path>_output". verbose : bool, optional Whether to print information messages during the execution of this function. plot : bool, optional Whether to generate and save density/histogram plots of each sample, and each beads sample. hist_sheet : bool, optional Whether to generate a sheet in the output Excel file specifying histogram bin information.
entailment
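A non-interactive usage sketch of run(); 'experiment.xlsx' is a hypothetical file, and the output lands next to it as 'experiment_output.xlsx':

run(input_path='experiment.xlsx', verbose=True, plot=False, hist_sheet=True)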
def run_command_line(args=None): """ Entry point for the FlowCal and flowcal console scripts. Parameters ---------- args: list of strings, optional Command line arguments. If None or not specified, get arguments from ``sys.argv``. See Also ---------- FlowCal.excel_ui.run() http://amir.rachum.com/blog/2017/07/28/python-entry-points/ """ # Get arguments from ``sys.argv`` if necessary. # ``sys.argv`` has the name of the script as its first element. We remove # this element because it will break ``parser.parse_args()`` later. In fact, # ``parser.parse_args()``, if provided with no arguments, will also use # ``sys.argv`` after removing the first element. if args is None: args = sys.argv[1:] import argparse # Read command line arguments parser = argparse.ArgumentParser( description="process flow cytometry files with FlowCal's Excel UI.") parser.add_argument( "-i", "--inputpath", type=str, nargs='?', help="input Excel file name. If not specified, show open file window") parser.add_argument( "-o", "--outputpath", type=str, nargs='?', help="output Excel file name. If not specified, use [INPUTPATH]_output") parser.add_argument( "-v", "--verbose", action="store_true", help="print information about individual processing steps") parser.add_argument( "-p", "--plot", action="store_true", help="generate and save density plots/histograms of beads and samples") parser.add_argument( "-H", "--histogram-sheet", action="store_true", help="generate sheet in output Excel file specifying histogram bins") args = parser.parse_args(args=args) # Run Excel UI run(input_path=args.inputpath, output_path=args.outputpath, verbose=args.verbose, plot=args.plot, hist_sheet=args.histogram_sheet)
Entry point for the FlowCal and flowcal console scripts. Parameters ---------- args: list of strings, optional Command line arguments. If None or not specified, get arguments from ``sys.argv``. See Also ---------- FlowCal.excel_ui.run() http://amir.rachum.com/blog/2017/07/28/python-entry-points/
entailment
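Usage sketch: the console entry point can also be exercised directly from Python, equivalent to ``flowcal -i experiment.xlsx -v -p -H`` on the command line (the file name is hypothetical):

run_command_line(['-i', 'experiment.xlsx', '-v', '-p', '-H'])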
def read_fcs_header_segment(buf, begin=0): """ Read HEADER segment of FCS file. Parameters ---------- buf : file-like object Buffer containing data to interpret as HEADER segment. begin : int Offset (in bytes) to first byte of HEADER segment in `buf`. Returns ------- header : namedtuple Version information and byte offset values of other FCS segments (see FCS standards for more information) in the following order: - version : str - text_begin : int - text_end : int - data_begin : int - data_end : int - analysis_begin : int - analysis_end : int Notes ----- Blank ANALYSIS segment offsets are converted to zeros. OTHER segment offsets are ignored (see [1]_, [2]_, and [3]_). References ---------- .. [1] P.N. Dean, C.B. Bagwell, T. Lindmo, R.F. Murphy, G.C. Salzman, "Data file standard for flow cytometry. Data File Standards Committee of the Society for Analytical Cytology," Cytometry vol 11, pp 323-332, 1990, PMID 2340769. .. [2] L.C. Seamer, C.B. Bagwell, L. Barden, D. Redelman, G.C. Salzman, J.C. Wood, R.F. Murphy, "Proposed new data file standard for flow cytometry, version FCS 3.0," Cytometry vol 28, pp 118-122, 1997, PMID 9181300. .. [3] J. Spidlen, et al, "Data File Standard for Flow Cytometry, version FCS 3.1," Cytometry A vol 77A, pp 97-100, 2009, PMID 19937951. """ fields = [ 'version', 'text_begin', 'text_end', 'data_begin', 'data_end', 'analysis_begin', 'analysis_end'] FCSHeader = collections.namedtuple('FCSHeader', fields) field_values = [] buf.seek(begin) field_values.append(buf.read(10).decode(encoding).rstrip()) # version field_values.append(int(buf.read(8))) # text_begin field_values.append(int(buf.read(8))) # text_end field_values.append(int(buf.read(8))) # data_begin field_values.append(int(buf.read(8))) # data_end fv = buf.read(8).decode(encoding) # analysis_begin field_values.append(0 if fv == ' '*8 else int(fv)) fv = buf.read(8).decode(encoding) # analysis_end field_values.append(0 if fv == ' '*8 else int(fv)) header = FCSHeader._make(field_values) return header
Read HEADER segment of FCS file. Parameters ---------- buf : file-like object Buffer containing data to interpret as HEADER segment. begin : int Offset (in bytes) to first byte of HEADER segment in `buf`. Returns ------- header : namedtuple Version information and byte offset values of other FCS segments (see FCS standards for more information) in the following order: - version : str - text_begin : int - text_end : int - data_begin : int - data_end : int - analysis_begin : int - analysis_end : int Notes ----- Blank ANALYSIS segment offsets are converted to zeros. OTHER segment offsets are ignored (see [1]_, [2]_, and [3]_). References ---------- .. [1] P.N. Dean, C.B. Bagwell, T. Lindmo, R.F. Murphy, G.C. Salzman, "Data file standard for flow cytometry. Data File Standards Committee of the Society for Analytical Cytology," Cytometry vol 11, pp 323-332, 1990, PMID 2340769. .. [2] L.C. Seamer, C.B. Bagwell, L. Barden, D. Redelman, G.C. Salzman, J.C. Wood, R.F. Murphy, "Proposed new data file standard for flow cytometry, version FCS 3.0," Cytometry vol 28, pp 118-122, 1997, PMID 9181300. .. [3] J. Spidlen, et al, "Data File Standard for Flow Cytometry, version FCS 3.1," Cytometry A vol 77A, pp 97-100, 2009, PMID 19937951.
entailment
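A sketch parsing a synthetic 58-byte HEADER from an in-memory buffer; the offsets are illustrative, and the blank ANALYSIS offsets demonstrate the blank-to-zero conversion noted above:

import io

raw = (b'FCS3.0    '   # 10-byte version string
       b'      58'      # text_begin
       b'    1023'      # text_end
       b'    1024'      # data_begin
       b'    9999'      # data_end
       b'        '      # analysis_begin (blank -> 0)
       b'        ')     # analysis_end   (blank -> 0)
header = read_fcs_header_segment(io.BytesIO(raw))
assert header.version == 'FCS3.0'
assert header.analysis_begin == 0 and header.analysis_end == 0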
def read_fcs_text_segment(buf, begin, end, delim=None, supplemental=False): """ Read TEXT segment of FCS file. Parameters ---------- buf : file-like object Buffer containing data to interpret as TEXT segment. begin : int Offset (in bytes) to first byte of TEXT segment in `buf`. end : int Offset (in bytes) to last byte of TEXT segment in `buf`. delim : str, optional 1-byte delimiter character which delimits key-value entries of TEXT segment. If None and ``supplemental==False``, will extract delimiter as first byte of TEXT segment. supplemental : bool, optional Flag specifying that segment is a supplemental TEXT segment (see FCS3.0 and FCS3.1), in which case a delimiter (``delim``) must be specified. Returns ------- text : dict Dictionary of key-value entries extracted from TEXT segment. delim : str or None String containing delimiter or None if TEXT segment is empty. Raises ------ ValueError If supplemental TEXT segment (``supplemental==True``) but ``delim`` is not specified. ValueError If primary TEXT segment (``supplemental==False``) does not start with delimiter. ValueError If first keyword starts with delimiter (e.g. a primary TEXT segment with the following contents: ///k1/v1/k2/v2/). ValueError If odd number of keys + values detected (indicating an unpaired key or value). ValueError If TEXT segment is ill-formed (unable to be parsed according to the FCS standards). Notes ----- ANALYSIS segments and supplemental TEXT segments are parsed the same way, so this function can also be used to parse ANALYSIS segments. This function does *not* automatically parse and accumulate additional TEXT-like segments (e.g. supplemental TEXT segments or ANALYSIS segments) referenced in the originally specified TEXT segment. References ---------- .. [1] P.N. Dean, C.B. Bagwell, T. Lindmo, R.F. Murphy, G.C. Salzman, "Data file standard for flow cytometry. Data File Standards Committee of the Society for Analytical Cytology," Cytometry vol 11, pp 323-332, 1990, PMID 2340769. .. [2] L.C. Seamer, C.B. Bagwell, L. Barden, D. Redelman, G.C. Salzman, J.C. Wood, R.F. Murphy, "Proposed new data file standard for flow cytometry, version FCS 3.0," Cytometry vol 28, pp 118-122, 1997, PMID 9181300. .. [3] J. Spidlen, et al, "Data File Standard for Flow Cytometry, version FCS 3.1," Cytometry A vol 77A, pp 97-100, 2009, PMID 19937951. """ if delim is None: if supplemental: raise ValueError("must specify ``delim`` if reading supplemental" + " TEXT segment") else: buf.seek(begin) delim = buf.read(1).decode(encoding) # The offsets are inclusive (meaning they specify first and last byte # WITHIN segment) and seeking is inclusive (read() after seek() reads the # byte which was seeked to). This means the length of the segment is # ((end+1) - begin). buf.seek(begin) raw = buf.read((end+1)-begin).decode(encoding) # If segment is empty, return empty dictionary as text if not raw: return {}, None if not supplemental: # Check that the first character of the TEXT segment is equal to the # delimiter. if raw[0] != delim: raise ValueError("primary TEXT segment should start with" + " delimiter") # The FCS standards indicate that keyword values must be flanked by the # delimiter character, but they do not require that the TEXT segment end # with the delimiter. As such, look for the last instance of the delimiter # in the segment and retain everything before it (potentially removing # TEXT segment characters which occur after the last instance of the # delimiter). end_index = raw.rfind(delim) if supplemental and end_index == -1: # Delimiter was not found. This should only be permitted for an empty # supplemental TEXT segment (primary TEXT segment should fail above # if first character doesn't match delimiter). return {}, delim else: raw = raw[:end_index] pairs_list = raw.split(delim) ### # Reconstruct Keys and Values By Aggregating Escaped Delimiters ### # According to the FCS standards, delimiter characters are permitted in # keywords and keyword values as long as they are escaped by being # immediately repeated. Delimiter characters are not permitted as the # first character of a keyword or keyword value, however. Null (zero # length) keywords or keyword values are also not permitted. According to # these restrictions, a delimiter character should manifest itself as an # empty element in ``pairs_list``. As such, scan through ``pairs_list`` # looking for empty elements and reconstruct keywords or keyword values # containing delimiters. ### # Start scanning from the end of the list since the end of the list is # well defined (i.e. doesn't depend on whether the TEXT segment is a # primary segment, which MUST start with the delimiter, or a supplemental # segment, which is not required to start with the delimiter). reconstructed_KV_accumulator = [] idx = len(pairs_list) - 1 while idx >= 0: if pairs_list[idx] == '': # Count the number of consecutive empty elements to determine how # many escaped delimiters exist and whether or not a true boundary # delimiter exists. num_empty_elements = 1 idx = idx - 1 while idx >=0 and pairs_list[idx] == '': num_empty_elements = num_empty_elements + 1 idx = idx - 1 # Need to differentiate between rolling off the bottom of the list # and hitting a non-empty element. pairs_list[-1] can still be # evaluated (by wrapping around the list, which is not what we # want to check), so check whether we've finished scanning the # list first. if idx < 0: # We rolled off the bottom of the list. if num_empty_elements == 1: # If we only hit 1 empty element before rolling off the # list, then there were no escaped delimiters and the # segment started with a delimiter. break elif (num_empty_elements % 2) == 0: # Even number of empty elements. # # If this is a supplemental TEXT segment, this can be # interpreted as *not* starting the TEXT segment with the # delimiter (which is permitted for supplemental TEXT # segments) and starting the first keyword with one or # more delimiters, which is prohibited. if supplemental: raise ValueError("starting a TEXT segment keyword" + " with a delimiter is prohibited") # If this is a primary TEXT segment, this is an ill-formed # segment. Rationale: 1 empty element will always be # consumed as the initial delimiter which a primary TEXT # segment is required to start with. After that delimiter # is accounted for, you now have either an unescaped # delimiter, which is prohibited, or a boundary delimiter, # which would imply that the entire first keyword was # composed of delimiters, which is prohibited because # keywords are not allowed to start with the delimiter. raise ValueError("ill-formed TEXT segment") else: # Odd number of empty elements. This can be interpreted as # starting the segment with a delimiter and then having # one or more delimiters starting the first keyword, which # is prohibited. raise ValueError("starting a TEXT segment keyword with a" + " delimiter is prohibited") else: # We encountered a non-empty element. Calculate the number of # escaped delimiters and whether or not a true boundary # delimiter is present. num_delim = (num_empty_elements+1)//2 boundary_delim = (num_empty_elements % 2) == 0 if boundary_delim: # A boundary delimiter exists. We know that the boundary # has to be on the right side (end of the sequence), # because keywords and keyword values are prohibited from # starting with a delimiter, which would be the case if the # boundary delimiter was anywhere BUT the last character. # This means we need to postpend the appropriate number of # delim characters to the end of the non-empty list # element that ``idx`` currently points to. pairs_list[idx] = pairs_list[idx] + (num_delim*delim) # We can now add the reconstructed keyword or keyword # value to the accumulator and move on. It's possible that # this keyword or keyword value is incompletely # reconstructed (e.g. /key//1///value1/ # => {'key/1/':'value1'}; there are other delimiters in # this keyword or keyword value), but that case is now # handled independently of this case and just like any # other instance of escaped delimiters *without* a # boundary delimiter. reconstructed_KV_accumulator.append(pairs_list[idx]) idx = idx - 1 else: # No boundary delimiters exist, so we need to glue the # list elements before and after this sequence of # consecutive delimiters together with the appropriate # number of delimiters. if len(reconstructed_KV_accumulator) == 0: # Edge Case: The accumulator should always have at # least 1 element in it at this point. If it doesn't, # the last value ends with the delimiter, which will # result in two consecutive empty elements, which # won't fall into this case. Only 1 empty element # indicates an ill-formed TEXT segment with an # unpaired non-boundary delimiter (e.g. /k1/v1//), # which is not permitted. The ill-formed TEXT segment # implied by 1 empty element is recoverable, though, # and a use case which is known to exist, so throw a # warning and ignore the 2nd copy of the delimiter. warnings.warn("detected ill-formed TEXT segment (ends" + " with two delimiter characters)." + " Ignoring last delimiter character") reconstructed_KV_accumulator.append(pairs_list[idx]) else: reconstructed_KV_accumulator[-1] = pairs_list[idx] + \ (num_delim*delim) + reconstructed_KV_accumulator[-1] idx = idx - 1 else: # Non-empty element, just append reconstructed_KV_accumulator.append(pairs_list[idx]) idx = idx - 1 pairs_list_reconstructed = list(reversed(reconstructed_KV_accumulator)) # List length should be even since all key-value entries should be pairs if len(pairs_list_reconstructed) % 2 != 0: raise ValueError("odd # of (keys + values); unpaired key or value") text = dict(zip(pairs_list_reconstructed[0::2], pairs_list_reconstructed[1::2])) return text, delim
Read TEXT segment of FCS file. Parameters ---------- buf : file-like object Buffer containing data to interpret as TEXT segment. begin : int Offset (in bytes) to first byte of TEXT segment in `buf`. end : int Offset (in bytes) to last byte of TEXT segment in `buf`. delim : str, optional 1-byte delimiter character which delimits key-value entries of TEXT segment. If None and ``supplemental==False``, will extract delimiter as first byte of TEXT segment. supplemental : bool, optional Flag specifying that segment is a supplemental TEXT segment (see FCS3.0 and FCS3.1), in which case a delimiter (``delim``) must be specified. Returns ------- text : dict Dictionary of key-value entries extracted from TEXT segment. delim : str or None String containing delimiter or None if TEXT segment is empty. Raises ------ ValueError If supplemental TEXT segment (``supplemental==True``) but ``delim`` is not specified. ValueError If primary TEXT segment (``supplemental==False``) does not start with delimiter. ValueError If first keyword starts with delimiter (e.g. a primary TEXT segment with the following contents: ///k1/v1/k2/v2/). ValueError If odd number of keys + values detected (indicating an unpaired key or value). ValueError If TEXT segment is ill-formed (unable to be parsed according to the FCS standards). Notes ----- ANALYSIS segments and supplemental TEXT segments are parsed the same way, so this function can also be used to parse ANALYSIS segments. This function does *not* automatically parse and accumulate additional TEXT-like segments (e.g. supplemental TEXT segments or ANALYSIS segments) referenced in the originally specified TEXT segment. References ---------- .. [1] P.N. Dean, C.B. Bagwell, T. Lindmo, R.F. Murphy, G.C. Salzman, "Data file standard for flow cytometry. Data File Standards Committee of the Society for Analytical Cytology," Cytometry vol 11, pp 323-332, 1990, PMID 2340769. .. [2] L.C. Seamer, C.B. Bagwell, L. Barden, D. Redelman, G.C. Salzman, J.C. Wood, R.F. Murphy, "Proposed new data file standard for flow cytometry, version FCS 3.0," Cytometry vol 28, pp 118-122, 1997, PMID 9181300. .. [3] J. Spidlen, et al, "Data File Standard for Flow Cytometry, version FCS 3.1," Cytometry A vol 77A, pp 97-100, 2009, PMID 19937951.
entailment
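A sketch of the escaped-delimiter reconstruction described above: a doubled delimiter inside a keyword collapses back to a single delimiter character:

import io

raw = b'/key1/value1/key//2/value2/'
text, delim = read_fcs_text_segment(io.BytesIO(raw), begin=0, end=len(raw)-1)
assert delim == '/'
assert text == {'key1': 'value1', 'key/2': 'value2'}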
def read_fcs_data_segment(buf, begin, end, datatype, num_events, param_bit_widths, big_endian, param_ranges=None): """ Read DATA segment of FCS file. Parameters ---------- buf : file-like object Buffer containing data to interpret as DATA segment. begin : int Offset (in bytes) to first byte of DATA segment in `buf`. end : int Offset (in bytes) to last byte of DATA segment in `buf`. datatype : {'I', 'F', 'D', 'A'} String specifying FCS file datatype (see $DATATYPE keyword from FCS standards). Supported datatypes include 'I' (unsigned binary integer), 'F' (single precision floating point), and 'D' (double precision floating point). 'A' (ASCII) is recognized but not supported. num_events : int Total number of events (see $TOT keyword from FCS standards). param_bit_widths : array-like Array specifying parameter (aka channel) bit width for each parameter (see $PnB keywords from FCS standards). The length of `param_bit_widths` should match the $PAR keyword value from the FCS standards (which indicates the total number of parameters). If `datatype` is 'I', data must be byte aligned (i.e. all parameter bit widths should be divisible by 8), and data are upcast to the nearest uint8, uint16, uint32, or uint64 data type. Bit widths larger than 64 bits are not supported. big_endian : bool Endianness of computer used to acquire data (see $BYTEORD keyword from FCS standards). True implies big endian; False implies little endian. param_ranges : array-like, optional Array specifying parameter (aka channel) range for each parameter (see $PnR keywords from FCS standards). Used to ensure erroneous values are not read from DATA segment by applying a bit mask to remove unused bits. The length of `param_ranges` should match the $PAR keyword value from the FCS standards (which indicates the total number of parameters). If None, no masking is performed. Returns ------- data : numpy array NxD numpy array describing N cytometry events observing D data dimensions. Raises ------ ValueError If lengths of `param_bit_widths` and `param_ranges` don't match. ValueError If calculated DATA segment size (as determined from the number of events, the number of parameters, and the number of bytes per data point) does not match size specified by `begin` and `end`. ValueError If `param_bit_widths` doesn't agree with `datatype` for single precision or double precision floating point (i.e. they should all be 32 or 64, respectively). ValueError If `datatype` is unrecognized. NotImplementedError If `datatype` is 'A'. NotImplementedError If `datatype` is 'I' but data is not byte aligned. References ---------- .. [1] P.N. Dean, C.B. Bagwell, T. Lindmo, R.F. Murphy, G.C. Salzman, "Data file standard for flow cytometry. Data File Standards Committee of the Society for Analytical Cytology," Cytometry vol 11, pp 323-332, 1990, PMID 2340769. .. [2] L.C. Seamer, C.B. Bagwell, L. Barden, D. Redelman, G.C. Salzman, J.C. Wood, R.F. Murphy, "Proposed new data file standard for flow cytometry, version FCS 3.0," Cytometry vol 28, pp 118-122, 1997, PMID 9181300. .. [3] J. Spidlen, et al, "Data File Standard for Flow Cytometry, version FCS 3.1," Cytometry A vol 77A, pp 97-100, 2009, PMID 19937951. """ num_params = len(param_bit_widths) if (param_ranges is not None and len(param_ranges) != num_params): raise ValueError("param_bit_widths and param_ranges must have same" + " length") shape = (int(num_events), num_params) if datatype == 'I': # Check if all parameters fit into preexisting data type if (all(bw == 8 for bw in param_bit_widths) or all(bw == 16 for bw in param_bit_widths) or all(bw == 32 for bw in param_bit_widths) or all(bw == 64 for bw in param_bit_widths)): num_bits = param_bit_widths[0] # Sanity check that the total # of bytes that we're about to # interpret is exactly the # of bytes in the DATA segment. # In some FCS files, the offset to the last byte (end) actually # points to the first byte of the next segment, in which case the # # of bytes specified in the header exceeds the # of bytes that we # should read by one. if (shape[0]*shape[1]*(num_bits//8)) != ((end+1)-begin) and \ (shape[0]*shape[1]*(num_bits//8)) != (end-begin): raise ValueError("DATA size does not match expected array" + " size (array size =" + " {0} bytes,".format(shape[0]*shape[1]*(num_bits//8)) + " DATA segment size = {0} bytes)".format((end+1)-begin)) dtype = np.dtype('{0}u{1}'.format('>' if big_endian else '<', num_bits//8)) data = np.memmap( buf, dtype=dtype, mode='r', offset=begin, shape=shape, order='C') # Cast memmap object to regular numpy array stored in memory (as # opposed to being backed by disk) data = np.array(data) else: # The FCS standards technically allow for parameters to NOT be # byte aligned, but parsing a DATA segment which is not byte # aligned requires significantly more computation (and probably an # external library which exposes bit level resolution to a block # of memory). I don't think this is a common use case, so I'm just # going to detect it and raise an error. if (not all(bw % 8 == 0 for bw in param_bit_widths) or any(bw > 64 for bw in param_bit_widths)): raise NotImplementedError("only byte aligned parameter bit" + " widths (bw % 8 = 0) <= 64 are supported" + " (param_bit_widths={0})".format(param_bit_widths)) # Read data in as a byte array byte_shape = (int(num_events), np.sum(np.array(param_bit_widths)//8)) # Sanity check that the total # of bytes that we're about to # interpret is exactly the # of bytes in the DATA segment. # In some FCS files, the offset to the last byte (end) actually # points to the first byte of the next segment, in which case the # # of bytes specified in the header exceeds the # of bytes that we # should read by one. if (byte_shape[0]*byte_shape[1]) != ((end+1)-begin) and \ (byte_shape[0]*byte_shape[1]) != (end-begin): raise ValueError("DATA size does not match expected array" + " size (array size =" + " {0} bytes,".format(byte_shape[0]*byte_shape[1]) + " DATA segment size = {0} bytes)".format((end+1)-begin)) byte_data = np.memmap( buf, dtype='uint8', # endianness doesn't matter for 1 byte mode='r', offset=begin, shape=byte_shape, order='C') # Upcast all data to fit nearest supported data type of largest # bit width upcast_bw = int(2**np.max(np.ceil(np.log2(param_bit_widths)))) # Create new array of upcast data type and use byte data to # populate it. The new array will have endianness native to user's # machine; does not preserve endianness of stored FCS data. upcast_dtype = 'u{0}'.format(upcast_bw//8) data = np.zeros(shape,dtype=upcast_dtype) # Array mapping each column of data to first corresponding column # in byte_data byte_boundaries = np.roll(np.cumsum(param_bit_widths)//8,1) byte_boundaries[0] = 0 # Reconstitute columns of data by bit shifting appropriate columns # in byte_data and accumulating them for col in range(data.shape[1]): num_bytes = param_bit_widths[col]//8 for b in range(num_bytes): byte_data_col = byte_boundaries[col] + b byteshift = (num_bytes-b-1) if big_endian else b if byteshift > 0: # byte_data must be upcast or else bit shift fails data[:,col] += \ byte_data[:,byte_data_col].astype(upcast_dtype) \ << (byteshift*8) else: data[:,col] += byte_data[:,byte_data_col] if param_ranges is not None: # To strictly follow the FCS standards, mask off the unused high bits # as specified by param_ranges. for col in range(data.shape[1]): # bits_used should be related to resolution of cytometer ADC bits_used = int(np.ceil(np.log2(param_ranges[col]))) # Create a bit mask to mask off all but the lowest bits_used bits. # bitmask is a native python int type which does not have an # underlying size. The int type is effectively left-padded with # 0s (infinitely), and the '&' operation preserves the datatype of # the array, so this shouldn't be an issue. bitmask = ~((~0) << bits_used) data[:,col] &= bitmask elif datatype in ('F','D'): num_bits = 32 if datatype == 'F' else 64 # Confirm that bit widths are consistent with data type if not all(bw == num_bits for bw in param_bit_widths): raise ValueError("all param_bit_widths should be" + " {0} if datatype =".format(num_bits) + " \'{0}\' (param_bit_widths=".format(datatype) + "{0})".format(param_bit_widths)) # Sanity check that the total # of bytes that we're about to interpret # is exactly the # of bytes in the DATA segment. # In some FCS files, the offset to the last byte (end) actually points # to the first byte of the next segment, in which case the # of bytes # specified in the header exceeds the # of bytes that we should read by # one. if (shape[0]*shape[1]*(num_bits//8)) != ((end+1)-begin) and \ (shape[0]*shape[1]*(num_bits//8)) != (end-begin): raise ValueError("DATA size does not match expected array size" + " (array size = {0}".format(shape[0]*shape[1]*(num_bits//8)) + " bytes, DATA segment size =" + " {0} bytes)".format((end+1)-begin)) dtype = np.dtype('{0}f{1}'.format('>' if big_endian else '<', num_bits//8)) data = np.memmap( buf, dtype=dtype, mode='r', offset=begin, shape=shape, order='C') # Cast memmap object to regular numpy array stored in memory (as # opposed to being backed by disk) data = np.array(data) elif datatype == 'A': raise NotImplementedError("only \'I\' (unsigned binary integer)," + " \'F\' (single precision floating point), and \'D\' (double" + " precision floating point) data types are supported (detected" + " datatype=\'{0}\')".format(datatype)) else: raise ValueError("unrecognized datatype (detected datatype=" + "\'{0}\')".format(datatype)) return data
Read DATA segment of FCS file. Parameters ---------- buf : file-like object Buffer containing data to interpret as DATA segment. begin : int Offset (in bytes) to first byte of DATA segment in `buf`. end : int Offset (in bytes) to last byte of DATA segment in `buf`. datatype : {'I', 'F', 'D', 'A'} String specifying FCS file datatype (see $DATATYPE keyword from FCS standards). Supported datatypes include 'I' (unsigned binary integer), 'F' (single precision floating point), and 'D' (double precision floating point). 'A' (ASCII) is recognized but not supported. num_events : int Total number of events (see $TOT keyword from FCS standards). param_bit_widths : array-like Array specifying parameter (aka channel) bit width for each parameter (see $PnB keywords from FCS standards). The length of `param_bit_widths` should match the $PAR keyword value from the FCS standards (which indicates the total number of parameters). If `datatype` is 'I', data must be byte aligned (i.e. all parameter bit widths should be divisible by 8), and data are upcast to the nearest uint8, uint16, uint32, or uint64 data type. Bit widths larger than 64 bits are not supported. big_endian : bool Endianness of computer used to acquire data (see $BYTEORD keyword from FCS standards). True implies big endian; False implies little endian. param_ranges : array-like, optional Array specifying parameter (aka channel) range for each parameter (see $PnR keywords from FCS standards). Used to ensure erroneous values are not read from DATA segment by applying a bit mask to remove unused bits. The length of `param_ranges` should match the $PAR keyword value from the FCS standards (which indicates the total number of parameters). If None, no masking is performed. Returns ------- data : numpy array NxD numpy array describing N cytometry events observing D data dimensions. Raises ------ ValueError If lengths of `param_bit_widths` and `param_ranges` don't match. ValueError If calculated DATA segment size (as determined from the number of events, the number of parameters, and the number of bytes per data point) does not match size specified by `begin` and `end`. ValueError If `param_bit_widths` doesn't agree with `datatype` for single precision or double precision floating point (i.e. they should all be 32 or 64, respectively). ValueError If `datatype` is unrecognized. NotImplementedError If `datatype` is 'A'. NotImplementedError If `datatype` is 'I' but data is not byte aligned. References ---------- .. [1] P.N. Dean, C.B. Bagwell, T. Lindmo, R.F. Murphy, G.C. Salzman, "Data file standard for flow cytometry. Data File Standards Committee of the Society for Analytical Cytology," Cytometry vol 11, pp 323-332, 1990, PMID 2340769. .. [2] L.C. Seamer, C.B. Bagwell, L. Barden, D. Redelman, G.C. Salzman, J.C. Wood, R.F. Murphy, "Proposed new data file standard for flow cytometry, version FCS 3.0," Cytometry vol 28, pp 118-122, 1997, PMID 9181300. .. [3] J. Spidlen, et al, "Data File Standard for Flow Cytometry, version FCS 3.1," Cytometry A vol 77A, pp 97-100, 2009, PMID 19937951.
entailment
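A sketch of the $PnR bit mask applied above: a parameter range of 1024 implies a 10-bit ADC, so only the low 10 bits of each stored value are kept:

import numpy as np

param_range = 1024
bits_used = int(np.ceil(np.log2(param_range)))  # 10
bitmask = ~((~0) << bits_used)                  # 0b1111111111 == 1023

values = np.array([1023, 1024, 70000], dtype=np.uint32)
masked = values & bitmask                       # [1023, 0, 368]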
def acquisition_time(self): """ Acquisition time, in seconds. The acquisition time is calculated using the 'time' channel by default (channel name matching is case-insensitive). If the 'time' channel is not available, the acquisition_start_time and acquisition_end_time, extracted from the $BTIM and $ETIM keyword parameters, will be used. If these are not found, None will be returned. """ # Get time channel indices time_channel_idx = [idx for idx, channel in enumerate(self.channels) if channel.lower() == 'time'] if len(time_channel_idx) > 1: raise KeyError("more than one time channel in data") # Check if the time channel is available elif len(time_channel_idx) == 1: # Use the event list time_channel = self.channels[time_channel_idx[0]] return (self[-1, time_channel] - self[0, time_channel]) \ * self.time_step elif (self._acquisition_start_time is not None and self._acquisition_end_time is not None): # Use start_time and end_time: dt = (self._acquisition_end_time - self._acquisition_start_time) return dt.total_seconds() else: return None
Acquisition time, in seconds. The acquisition time is calculated using the 'time' channel by default (channel name matching is case-insensitive). If the 'time' channel is not available, the acquisition_start_time and acquisition_end_time, extracted from the $BTIM and $ETIM keyword parameters, will be used. If these are not found, None will be returned.
entailment
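A sketch of the time-channel arithmetic used above; all values are illustrative stand-ins for the raw 'time' channel and the $TIMESTEP-derived time_step attribute:

time_step = 0.1          # seconds per time-channel unit
first_event_time = 12.0  # raw 'time' value of the first event
last_event_time = 912.0  # raw 'time' value of the last event
acq_seconds = (last_event_time - first_event_time) * time_step  # 90.0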
def amplification_type(self, channels=None): """ Get the amplification type used for the specified channel(s). Each channel uses one of two amplification types: linear or logarithmic. This function returns, for each channel, a tuple of two numbers, in which the first number indicates the number of decades covered by the logarithmic amplifier, and the second indicates the linear value corresponding to the channel value zero. If the first value is zero, the amplifier used is linear. The amplification type for channel "n" is extracted from the required $PnE parameter. Parameters ---------- channels : int, str, list of int, list of str Channel(s) for which to get the amplification type. If None, return a list with the amplification type of all channels, in the order of ``FCSData.channels``. Returns ------- tuple, or list of tuples The amplification type of the specified channel(s). This is reported as a tuple, in which the first element indicates how many decades the logarithmic amplifier covers, and the second indicates the linear value that corresponds to a channel value of zero. If the first element is zero, the amplification type is linear. """ # Check default if channels is None: channels = self._channels # Get numerical indices of channels channels = self._name_to_index(channels) # Get amplification type of the specified channels if hasattr(channels, '__iter__') \ and not isinstance(channels, six.string_types): return [self._amplification_type[ch] for ch in channels] else: return self._amplification_type[channels]
Get the amplification type used for the specified channel(s). Each channel uses one of two amplification types: linear or logarithmic. This function returns, for each channel, a tuple of two numbers, in which the first number indicates the number of decades covered by the logarithmic amplifier, and the second indicates the linear value corresponding to the channel value zero. If the first value is zero, the amplifier used is linear. The amplification type for channel "n" is extracted from the required $PnE parameter. Parameters ---------- channels : int, str, list of int, list of str Channel(s) for which to get the amplification type. If None, return a list with the amplification type of all channels, in the order of ``FCSData.channels``. Returns ------- tuple, or list of tuples The amplification type of the specified channel(s). This is reported as a tuple, in which the first element indicates how many decades the logarithmic amplifier covers, and the second indicates the linear value that corresponds to a channel value of zero. If the first element is zero, the amplification type is linear.
entailment
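A sketch interpreting the amplification tuple described above; the values are illustrative:

amp = (4.0, 1.0)       # 4-decade log amplifier; channel 0 maps to linear value 1.0
is_log = bool(amp[0])  # nonzero first element -> logarithmic
linear_amp = (0, 0)    # zero first element -> linear amplification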
def detector_voltage(self, channels=None): """ Get the detector voltage used for the specified channel(s). The detector voltage for channel "n" is extracted from the $PnV parameter, if available. Parameters ---------- channels : int, str, list of int, list of str Channel(s) for which to get the detector voltage. If None, return a list with the detector voltage of all channels, in the order of ``FCSData.channels``. Returns ------- float or list of float The detector voltage of the specified channel(s). If no information about the detector voltage is found for a channel, return None. """ # Check default if channels is None: channels = self._channels # Get numerical indices of channels channels = self._name_to_index(channels) # Get detector voltage of the specified channels if hasattr(channels, '__iter__') \ and not isinstance(channels, six.string_types): return [self._detector_voltage[ch] for ch in channels] else: return self._detector_voltage[channels]
Get the detector voltage used for the specified channel(s). The detector voltage for channel "n" is extracted from the $PnV parameter, if available. Parameters ---------- channels : int, str, list of int, list of str Channel(s) for which to get the detector voltage. If None, return a list with the detector voltage of all channels, in the order of ``FCSData.channels``. Returns ------- float or list of float The detector voltage of the specified channel(s). If no information about the detector voltage is found for a channel, return None.
entailment