def amplifier_gain(self, channels=None):
"""
Get the amplifier gain used for the specified channel(s).
The amplifier gain for channel "n" is extracted from the $PnG
parameter, if available.
Parameters
----------
channels : int, str, list of int, list of str
Channel(s) for which to get the amplifier gain. If None,
return a list with the amplifier gain of all channels, in the
order of ``FCSData.channels``.
    Returns
    -------
float or list of float
The amplifier gain of the specified channel(s). If no
information about the amplifier gain is found for a channel,
return None.
"""
# Check default
if channels is None:
channels = self._channels
# Get numerical indices of channels
channels = self._name_to_index(channels)
    # Get amplifier gain of the specified channels
if hasattr(channels, '__iter__') \
and not isinstance(channels, six.string_types):
return [self._amplifier_gain[ch] for ch in channels]
else:
return self._amplifier_gain[channels]
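
# Example (sketch): reading the gain of one or several channels. Assumes an
# FCSData object loaded with FlowCal.io.FCSData; the file and channel names
# ('sample.fcs', 'FSC-H', 'FL1-H') are hypothetical.
import FlowCal
d = FlowCal.io.FCSData('sample.fcs')
print(d.amplifier_gain('FL1-H'))             # gain from $PnG, or None
print(d.amplifier_gain(['FSC-H', 'FL1-H']))  # one gain per channel
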
def range(self, channels=None):
"""
Get the range of the specified channel(s).
The range is a two-element list specifying the smallest and largest
values that an event in a channel should have. Note that with
floating point data, some events could have values outside the
range in either direction due to instrument compensation.
The range should be transformed along with the data when passed
through a transformation function.
The range of channel "n" is extracted from the $PnR parameter as
``[0, $PnR - 1]``.
Parameters
----------
channels : int, str, list of int, list of str
Channel(s) for which to get the range. If None, return a list
with the range of all channels, in the order of
``FCSData.channels``.
    Returns
    -------
array or list of arrays
The range of the specified channel(s).
"""
# Check default
if channels is None:
channels = self._channels
# Get numerical indices of channels
channels = self._name_to_index(channels)
# Get the range of the specified channels
if hasattr(channels, '__iter__') \
and not isinstance(channels, six.string_types):
return [self._range[ch] for ch in channels]
else:
return self._range[channels]
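
# Example (sketch): continuing with the hypothetical FCSData object ``d``
# above. For a 10-bit channel with $PnR == 1024, the range is [0, 1023].
print(d.range('FL1-H'))   # e.g. [0, 1023]
print(d.range())          # one [min, max] pair per channel
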
def resolution(self, channels=None):
"""
Get the resolution of the specified channel(s).
The resolution specifies the number of different values that the
events can take. The resolution is directly obtained from the $PnR
parameter.
Parameters
----------
channels : int, str, list of int, list of str
Channel(s) for which to get the resolution. If None, return a
list with the resolution of all channels, in the order of
``FCSData.channels``.
    Returns
    -------
int or list of ints
Resolution of the specified channel(s).
"""
# Check default
if channels is None:
channels = self._channels
# Get numerical indices of channels
channels = self._name_to_index(channels)
# Get resolution of the specified channels
if hasattr(channels, '__iter__') \
and not isinstance(channels, six.string_types):
return [self._resolution[ch] for ch in channels]
else:
return self._resolution[channels]
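
# Example (sketch): unlike ``range``, ``resolution`` returns $PnR itself.
# Continuing with the hypothetical ``d`` above:
print(d.resolution('FL1-H'))   # e.g. 1024 for a 10-bit ADC
print(d.resolution())          # one int per channel
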
def hist_bins(self, channels=None, nbins=None, scale='logicle', **kwargs):
"""
Get histogram bin edges for the specified channel(s).
These cover the range specified in ``FCSData.range(channels)`` with
a number of bins `nbins`, with linear, logarithmic, or logicle
spacing.
Parameters
----------
channels : int, str, list of int, list of str
Channel(s) for which to generate histogram bins. If None,
return a list with bins for all channels, in the order of
``FCSData.channels``.
nbins : int or list of ints, optional
The number of bins to calculate. If `channels` specifies a list
of channels, `nbins` should be a list of integers. If `nbins`
is None, use ``FCSData.resolution(channel)``.
scale : str, optional
Scale in which to generate bins. Can be either ``linear``,
``log``, or ``logicle``.
kwargs : optional
Keyword arguments specific to the selected bin scaling. Linear
and logarithmic scaling do not use additional arguments.
For logicle scaling, the following parameters can be provided:
T : float, optional
Maximum range of data. If not provided, use ``range[1]``.
M : float, optional
(Asymptotic) number of decades in scaled units. If not
provided, calculate from the following::
max(4.5, 4.5 / np.log10(262144) * np.log10(T))
W : float, optional
Width of linear range in scaled units. If not provided,
calculate using the following relationship::
W = (M - log10(T / abs(r))) / 2
Where ``r`` is the minimum negative event. If no negative
events are present, W is set to zero.
    Returns
    -------
array or list of arrays
Histogram bin edges for the specified channel(s).
Notes
-----
    If ``range[0]`` is less than or equal to zero and `scale` is ``log``,
    the lower limit of the range is replaced with one or a smaller value,
    such that the range covers at least five decades.
Logicle scaling uses the LogicleTransform class in the plot module.
References
----------
.. [1] D.R. Parks, M. Roederer, W.A. Moore, "A New Logicle Display
Method Avoids Deceptive Effects of Logarithmic Scaling for Low
Signals and Compensated Data," Cytometry Part A 69A:541-551, 2006,
PMID 16604519.
"""
# Default: all channels
if channels is None:
channels = list(self._channels)
# Get numerical indices of channels
channels = self._name_to_index(channels)
# Convert to list if necessary
channel_list = channels
if not isinstance(channel_list, list):
channel_list = [channel_list]
if not isinstance(nbins, list):
nbins = [nbins]*len(channel_list)
if not isinstance(scale, list):
scale = [scale]*len(channel_list)
# Iterate
bins = []
for channel, nbins_channel, scale_channel in \
zip(channel_list, nbins, scale):
# Get channel resolution
res_channel = self.resolution(channel)
# Get default nbins
if nbins_channel is None:
nbins_channel = res_channel
# Get range of channel
range_channel = self.range(channel)
# Generate bins according to specified scale
if scale_channel == 'linear':
# We will now generate ``nbins`` uniformly spaced bins centered
# at ``linspace(range_channel[0], range_channel[1], nbins)``. To
# do so, we need to generate ``nbins + 1`` uniformly spaced
# points.
delta_res = (range_channel[1] - range_channel[0]) / \
(res_channel - 1)
bins_channel = np.linspace(range_channel[0] - delta_res/2,
range_channel[1] + delta_res/2,
nbins_channel + 1)
elif scale_channel == 'log':
# Check if the lower limit is equal or less than zero. If so,
# change the lower limit to one or some lower value, such that
# the range covers at least five decades.
if range_channel[0] <= 0:
range_channel[0] = min(1., range_channel[1]/1e5)
# Log range
range_channel = [np.log10(range_channel[0]),
np.log10(range_channel[1])]
# We will now generate ``nbins`` uniformly spaced bins centered
# at ``linspace(range_channel[0], range_channel[1], nbins)``. To
# do so, we need to generate ``nbins + 1`` uniformly spaced
# points.
delta_res = (range_channel[1] - range_channel[0]) / \
(res_channel - 1)
bins_channel = np.linspace(range_channel[0] - delta_res/2,
range_channel[1] + delta_res/2,
nbins_channel + 1)
# Exponentiate bins
bins_channel = 10**(bins_channel)
elif scale_channel == 'logicle':
# Create transform class
# Use the LogicleTransform class from the plot module
t = FlowCal.plot._LogicleTransform(data=self,
channel=channel,
**kwargs)
# We now generate ``nbins`` uniformly spaced bins centered at
# ``linspace(0, M, nbins)``. To do so, we need to generate
# ``nbins + 1`` uniformly spaced points.
delta_res = float(t.M) / (res_channel - 1)
s = np.linspace(- delta_res/2.,
t.M + delta_res/2.,
nbins_channel + 1)
# Finally, apply the logicle transformation to generate bins
bins_channel = t.transform_non_affine(s)
else:
# Scale not supported
raise ValueError('scale "{}" not supported'.format(
scale_channel))
# Accumulate
bins.append(bins_channel)
# Extract from list if channels was not a list
if not isinstance(channels, list):
bins = bins[0]
return bins
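
# Example (sketch): generated edges plug directly into NumPy histogramming.
# Continuing with the hypothetical ``d`` above; note that ``nbins`` bins
# require ``nbins + 1`` edges.
import numpy as np
edges = d.hist_bins(channels='FL1-H', nbins=128, scale='log')
counts, _ = np.histogram(d[:, 'FL1-H'], bins=edges)
assert len(edges) == 129
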
def _parse_time_string(time_str):
"""
Get a datetime.time object from a string time representation.
The start and end of acquisition are stored in the optional keyword
parameters $BTIM and $ETIM. The following formats are used
according to the FCS standard:
- FCS 2.0: 'hh:mm:ss'
- FCS 3.0: 'hh:mm:ss[:tt]', where 'tt' is optional, and
represents fractional seconds in 1/60ths.
- FCS 3.1: 'hh:mm:ss[.cc]', where 'cc' is optional, and
represents fractional seconds in 1/100ths.
This function attempts to transform these formats to
'hh:mm:ss:ffffff', where 'ffffff' is in microseconds, and then
parse it using the datetime module.
    Parameters
    ----------
time_str : str, or None
String representation of time, or None.
    Returns
    -------
t : datetime.time, or None
Time parsed from `time_str`. If parsing was not possible,
        return None. If `time_str` is None, return None.
"""
# If input is None, return None
if time_str is None:
return None
time_l = time_str.split(':')
if len(time_l) == 3:
# Either 'hh:mm:ss' or 'hh:mm:ss.cc'
if '.' in time_l[2]:
# 'hh:mm:ss.cc' format
time_str = time_str.replace('.', ':')
else:
# 'hh:mm:ss' format
time_str = time_str + ':0'
# Attempt to parse string, return None if not possible
try:
t = datetime.datetime.strptime(time_str, '%H:%M:%S:%f').time()
        except ValueError:
t = None
elif len(time_l) == 4:
# 'hh:mm:ss:tt' format
time_l[3] = '{:06d}'.format(int(float(time_l[3])*1e6/60))
time_str = ':'.join(time_l)
# Attempt to parse string, return None if not possible
try:
t = datetime.datetime.strptime(time_str, '%H:%M:%S:%f').time()
        except ValueError:
t = None
else:
# Unknown format
t = None
return t
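
# Example: the FCS 3.0 fractional-second conversion performed above, shown
# step by step for the made-up timestamp '14:22:03:30' (30/60 s = 500000 us).
import datetime
frac = '{:06d}'.format(int(float('30') * 1e6 / 60))   # '500000'
t = datetime.datetime.strptime('14:22:03:' + frac, '%H:%M:%S:%f').time()
print(t)                                              # 14:22:03.500000
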
def _parse_date_string(date_str):
"""
    Get a datetime.datetime object from a string date representation.
The FCS standard includes an optional keyword parameter $DATE in
    which the acquisition date is stored. In FCS 2.0, the date is saved
as 'dd-mmm-yy', whereas in FCS 3.0 and 3.1 the date is saved as
'dd-mmm-yyyy'.
This function attempts to parse these formats, along with a couple
of nonstandard ones, using the datetime module.
    Parameters
    ----------
date_str : str, or None
String representation of date, or None.
    Returns
    -------
t : datetime.datetime, or None
Date parsed from `date_str`. If parsing was not possible,
        return None. If `date_str` is None, return None.
"""
# If input is None, return None
if date_str is None:
return None
# Standard format for FCS2.0
try:
return datetime.datetime.strptime(date_str, '%d-%b-%y')
except ValueError:
pass
# Standard format for FCS3.0
try:
return datetime.datetime.strptime(date_str, '%d-%b-%Y')
except ValueError:
pass
# Nonstandard format 1
try:
return datetime.datetime.strptime(date_str, '%y-%b-%d')
except ValueError:
pass
# Nonstandard format 2
try:
return datetime.datetime.strptime(date_str, '%Y-%b-%d')
except ValueError:
pass
# If none of these formats work, return None
return None
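
# Example: the two standard formats handled above, checked directly with
# strptime on made-up dates.
import datetime
print(datetime.datetime.strptime('23-Apr-15', '%d-%b-%y'))    # FCS 2.0, 'dd-mmm-yy'
print(datetime.datetime.strptime('23-Apr-2015', '%d-%b-%Y'))  # FCS 3.0/3.1, 'dd-mmm-yyyy'
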
def _name_to_index(self, channels):
"""
Return the channel indices for the specified channel names.
    Integers contained in `channels` are returned unmodified, provided
    they are within the range of valid indices for ``self.channels``.
Parameters
----------
channels : int or str or list of int or list of str
Name(s) of the channel(s) of interest.
Returns
-------
int or list of int
Numerical index(ces) of the specified channels.
"""
# Check if list, then run recursively
if hasattr(channels, '__iter__') \
and not isinstance(channels, six.string_types):
return [self._name_to_index(ch) for ch in channels]
if isinstance(channels, six.string_types):
# channels is a string containing a channel name
if channels in self.channels:
return self.channels.index(channels)
else:
raise ValueError("{} is not a valid channel name."
.format(channels))
if isinstance(channels, int):
if (channels < len(self.channels)
and channels >= -len(self.channels)):
return channels
else:
raise ValueError("index out of range")
else:
raise TypeError("input argument should be an integer, string or "
"list of integers or strings")
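
# Example (sketch): although private, the contract is simple. Assumes a
# hypothetical FCSData object ``d`` with channels ('FSC-H', 'SSC-H', 'FL1-H').
print(d._name_to_index('FL1-H'))        # -> 2
print(d._name_to_index(['FSC-H', 1]))   # -> [0, 1]; ints pass through
# d._name_to_index('GFP') would raise ValueError: not a valid channel name
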
def find_version(file_path):
"""
Scrape version information from specified file path.
"""
with open(file_path, 'r') as f:
file_contents = f.read()
version_match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]",
file_contents, re.M)
if version_match:
return version_match.group(1)
else:
raise RuntimeError("unable to find version string")
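
# Example: exercising the version regex on made-up file contents.
import re
contents = "# package metadata\n__version__ = '1.4.0'\n"
m = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]", contents, re.M)
print(m.group(1))   # 1.4.0
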
def start_end(data, num_start=250, num_end=100, full_output=False):
"""
Gate out first and last events.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
num_start, num_end : int, optional
Number of events to gate out from beginning and end of `data`.
Ignored if less than 0.
full_output : bool, optional
Flag specifying to return additional outputs. If true, the outputs
are given as a namedtuple.
Returns
-------
gated_data : FCSData or numpy array
Gated flow cytometry data of the same format as `data`.
mask : numpy array of bool, only if ``full_output==True``
Boolean gate mask used to gate data such that ``gated_data =
data[mask]``.
Raises
------
ValueError
If the number of events to discard is greater than the total
number of events in `data`.
"""
if num_start < 0:
num_start = 0
if num_end < 0:
num_end = 0
if data.shape[0] < (num_start + num_end):
raise ValueError('Number of events to discard greater than total' +
' number of events.')
mask = np.ones(shape=data.shape[0],dtype=bool)
mask[:num_start] = False
if num_end > 0:
        # Guard against num_end == 0: mask[-0:] is mask[0:], which would
        # mask off all events.
mask[-num_end:] = False
gated_data = data[mask]
if full_output:
StartEndGateOutput = collections.namedtuple(
'StartEndGateOutput',
['gated_data', 'mask'])
return StartEndGateOutput(gated_data=gated_data, mask=mask)
else:
return gated_data
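
# Example (sketch): with 1000 events, discarding 250 from the start and 100
# from the end leaves 650. Assumes this module is importable as FlowCal.gate.
import numpy as np
import FlowCal
data = np.random.rand(1000, 3)
gated, mask = FlowCal.gate.start_end(data, num_start=250, num_end=100,
                                     full_output=True)
assert gated.shape == (650, 3) and mask.sum() == 650
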
def high_low(data, channels=None, high=None, low=None, full_output=False):
"""
Gate out high and low values across all specified channels.
Gate out events in `data` with values in the specified channels which
are larger than or equal to `high` or less than or equal to `low`.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int, str, list of int, list of str, optional
Channels on which to perform gating. If None, use all channels.
high, low : int, float, optional
High and low threshold values. If None, `high` and `low` will be
taken from ``data.range`` if available, otherwise
``np.inf`` and ``-np.inf`` will be used.
full_output : bool, optional
Flag specifying to return additional outputs. If true, the outputs
are given as a namedtuple.
Returns
-------
gated_data : FCSData or numpy array
Gated flow cytometry data of the same format as `data`.
mask : numpy array of bool, only if ``full_output==True``
Boolean gate mask used to gate data such that ``gated_data =
data[mask]``.
"""
# Extract channels in which to gate
if channels is None:
data_ch = data
else:
data_ch = data[:,channels]
if data_ch.ndim == 1:
data_ch = data_ch.reshape((-1,1))
# Default values for high and low
    if high is None:
        if hasattr(data_ch, 'range'):
            high = [np.inf if di is None else di[1] for di in data_ch.range()]
            high = np.array(high)
        else:
            high = np.inf
    if low is None:
        if hasattr(data_ch, 'range'):
            low = [-np.inf if di is None else di[0] for di in data_ch.range()]
            low = np.array(low)
        else:
            low = -np.inf
# Gate
mask = np.all((data_ch < high) & (data_ch > low), axis = 1)
gated_data = data[mask]
if full_output:
HighLowGateOutput = collections.namedtuple(
'HighLowGateOutput',
['gated_data', 'mask'])
return HighLowGateOutput(gated_data=gated_data, mask=mask)
else:
return gated_data
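
# Example (sketch): plain NumPy arrays have no ``range`` method, so without
# explicit thresholds the defaults are +/- infinity; here they are set
# explicitly.
import numpy as np
import FlowCal
data = np.random.rand(1000, 2)
gated = FlowCal.gate.high_low(data, channels=[0, 1], high=0.99, low=0.01)
# events with any coordinate >= 0.99 or <= 0.01 are discarded
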
def ellipse(data, channels,
center, a, b, theta=0,
log=False, full_output=False):
"""
Gate that preserves events inside an ellipse-shaped region.
Events are kept if they satisfy the following relationship::
(x/a)**2 + (y/b)**2 <= 1
where `x` and `y` are the coordinates of the event list, after
    subtracting `center` and rotating by -`theta`. This is mathematically
equivalent to maintaining the events inside an ellipse with major
axis `a`, minor axis `b`, center at `center`, and tilted by `theta`.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : list of int, list of str
Two channels on which to perform gating.
    center : array_like
        Coordinates of the center of the ellipse.
    a, b : float
        Major and minor axes of the ellipse.
    theta : float, optional
        Rotation angle of the ellipse, in radians.
log : bool, optional
Flag specifying that log10 transformation should be applied to
`data` before gating.
full_output : bool, optional
Flag specifying to return additional outputs. If true, the outputs
are given as a namedtuple.
Returns
-------
gated_data : FCSData or numpy array
Gated flow cytometry data of the same format as `data`.
mask : numpy array of bool, only if ``full_output==True``
Boolean gate mask used to gate data such that ``gated_data =
data[mask]``.
contour : list of 2D numpy arrays, only if ``full_output==True``
List of 2D numpy array(s) of x-y coordinates tracing out
the edge of the gated region.
Raises
------
ValueError
If more or less than 2 channels are specified.
"""
# Extract channels in which to gate
if len(channels) != 2:
raise ValueError('2 channels should be specified.')
data_ch = data[:,channels].view(np.ndarray)
# Log if necessary
if log:
data_ch = np.log10(data_ch)
# Center
center = np.array(center)
data_centered = data_ch - center
# Rotate
R = np.array([[np.cos(theta), np.sin(theta)],
[-np.sin(theta), np.cos(theta)]])
data_rotated = np.dot(data_centered, R.T)
# Generate mask
mask = ((data_rotated[:,0]/a)**2 + (data_rotated[:,1]/b)**2 <= 1)
# Gate
data_gated = data[mask]
if full_output:
# Calculate contour
t = np.linspace(0,1,100)*2*np.pi
ci = np.array([a*np.cos(t), b*np.sin(t)]).T
ci = np.dot(ci, R) + center
if log:
ci = 10**ci
cntr = [ci]
# Build output namedtuple
EllipseGateOutput = collections.namedtuple(
'EllipseGateOutput',
['gated_data', 'mask', 'contour'])
return EllipseGateOutput(
gated_data=data_gated, mask=mask, contour=cntr)
else:
return data_gated
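
# Example (sketch): keep events inside an ellipse tilted by 45 degrees.
# Note that `theta` is in radians.
import numpy as np
import FlowCal
data = np.random.randn(5000, 2)
gated = FlowCal.gate.ellipse(data, channels=[0, 1],
                             center=(0, 0), a=2.0, b=1.0, theta=np.pi/4)
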
def density2d(data,
channels=[0,1],
bins=1024,
gate_fraction=0.65,
xscale='logicle',
yscale='logicle',
sigma=10.0,
full_output=False):
"""
Gate that preserves events in the region with highest density.
Gate out all events in `data` but those near regions of highest
density for the two specified channels.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : list of int, list of str, optional
Two channels on which to perform gating.
bins : int or array_like or [int, int] or [array, array], optional
Bins used for gating:
- If None, use ``data.hist_bins`` to obtain bin edges for both
axes. None is not allowed if ``data.hist_bins`` is not
available.
- If int, `bins` specifies the number of bins to use for both
axes. If ``data.hist_bins`` exists, it will be used to generate
a number `bins` of bins.
- If array_like, `bins` directly specifies the bin edges to use
for both axes.
- If [int, int], each element of `bins` specifies the number of
bins for each axis. If ``data.hist_bins`` exists, use it to
generate ``bins[0]`` and ``bins[1]`` bin edges, respectively.
- If [array, array], each element of `bins` directly specifies
the bin edges to use for each axis.
- Any combination of the above, such as [int, array], [None,
int], or [array, int]. In this case, None indicates to generate
bin edges using ``data.hist_bins`` as above, int indicates the
number of bins to generate, and an array directly indicates the
bin edges. Note that None is not allowed if ``data.hist_bins``
does not exist.
gate_fraction : float, optional
Fraction of events to retain after gating. Should be between 0 and
1, inclusive.
xscale : str, optional
Scale of the bins generated for the x axis, either ``linear``,
        ``log``, or ``logicle``. `xscale` is ignored if `bins` is an array
or a list of arrays.
yscale : str, optional
Scale of the bins generated for the y axis, either ``linear``,
        ``log``, or ``logicle``. `yscale` is ignored if `bins` is an array
or a list of arrays.
sigma : scalar or sequence of scalars, optional
        Standard deviation for Gaussian kernel used by
        `scipy.ndimage.gaussian_filter` to smooth 2D histogram
        into a density.
full_output : bool, optional
Flag specifying to return additional outputs. If true, the outputs
are given as a namedtuple.
Returns
-------
gated_data : FCSData or numpy array
Gated flow cytometry data of the same format as `data`.
mask : numpy array of bool, only if ``full_output==True``
Boolean gate mask used to gate data such that ``gated_data =
data[mask]``.
contour : list of 2D numpy arrays, only if ``full_output==True``
List of 2D numpy array(s) of x-y coordinates tracing out
the edge of the gated region.
Raises
------
ValueError
If more or less than 2 channels are specified.
ValueError
If `data` has less than 2 dimensions or less than 2 events.
Exception
If an unrecognized matplotlib Path code is encountered when
attempting to generate contours.
Notes
-----
The algorithm for gating based on density works as follows:
1) Calculate 2D histogram of `data` in the specified channels.
2) Map each event from `data` to its histogram bin (implicitly
gating out any events which exist outside specified `bins`).
3) Use `gate_fraction` to determine number of events to retain
(rounded up). Only events which are not implicitly gated out
are considered.
4) Smooth 2D histogram using a 2D Gaussian filter.
5) Normalize smoothed histogram to obtain valid probability mass
function (PMF).
6) Sort bins by probability.
7) Accumulate events (starting with events belonging to bin with
highest probability ("densest") and proceeding to events
belonging to bins with lowest probability) until at least the
desired number of events is achieved. While the algorithm
attempts to get as close to `gate_fraction` fraction of events
as possible, more events may be retained based on how many
events fall into each histogram bin (since entire bins are
retained at a time, not individual events).
"""
# Extract channels in which to gate
if len(channels) != 2:
raise ValueError('2 channels should be specified')
data_ch = data[:,channels]
if data_ch.ndim == 1:
data_ch = data_ch.reshape((-1,1))
# Check gating fraction
if gate_fraction < 0 or gate_fraction > 1:
raise ValueError('gate fraction should be between 0 and 1, inclusive')
# Check dimensions
if data_ch.ndim < 2:
raise ValueError('data should have at least 2 dimensions')
if data_ch.shape[0] <= 1:
raise ValueError('data should have more than one event')
# Build output namedtuple if necessary
if full_output:
Density2dGateOutput = collections.namedtuple(
'Density2dGateOutput',
['gated_data', 'mask', 'contour'])
# If ``data_ch.hist_bins()`` exists, obtain bin edges from it if
# necessary.
if hasattr(data_ch, 'hist_bins') and \
hasattr(data_ch.hist_bins, '__call__'):
# Check whether `bins` contains information for one or two axes
if hasattr(bins, '__iter__') and len(bins)==2:
# `bins` contains separate information for both axes
# If bins for the X axis is not an iterable, get bin edges from
# ``data_ch.hist_bins()``.
if not hasattr(bins[0], '__iter__'):
bins[0] = data_ch.hist_bins(channels=0,
nbins=bins[0],
scale=xscale)
# If bins for the Y axis is not an iterable, get bin edges from
# ``data_ch.hist_bins()``.
if not hasattr(bins[1], '__iter__'):
bins[1] = data_ch.hist_bins(channels=1,
nbins=bins[1],
scale=yscale)
else:
# `bins` contains information for one axis, which will be used
# twice.
# If bins is not an iterable, get bin edges from
# ``data_ch.hist_bins()``.
if not hasattr(bins, '__iter__'):
bins = [data_ch.hist_bins(channels=0,
nbins=bins,
scale=xscale),
data_ch.hist_bins(channels=1,
nbins=bins,
scale=yscale)]
# Make 2D histogram
H,xe,ye = np.histogram2d(data_ch[:,0], data_ch[:,1], bins=bins)
# Map each event to its histogram bin by sorting events into a 2D array of
# lists which mimics the histogram.
#
# Use np.digitize to calculate the histogram bin index for each event
# given the histogram bin edges. Note that the index returned by
# np.digitize is such that bins[i-1] <= x < bins[i], whereas indexing the
# histogram will result in the following: hist[i,j] = bin corresponding to
# xedges[i] <= x < xedges[i+1] and yedges[i] <= y < yedges[i+1].
# Therefore, we need to subtract 1 from the np.digitize result to be able
# to index into the appropriate bin in the histogram.
event_indices = np.arange(data_ch.shape[0])
x_bin_indices = np.digitize(data_ch[:,0], bins=xe) - 1
y_bin_indices = np.digitize(data_ch[:,1], bins=ye) - 1
# In the current version of numpy, there exists a disparity in how
# np.histogram and np.digitize treat the rightmost bin edge (np.digitize
# is not the strict inverse of np.histogram). Specifically, np.histogram
# treats the rightmost bin interval as fully closed (rightmost bin edge is
# included in rightmost bin), whereas np.digitize treats all bins as
# half-open (you can specify which side is closed and which side is open;
# `right` parameter). The expected behavior for this gating function is to
# mimic np.histogram behavior, so we must reconcile this disparity.
x_bin_indices[data_ch[:,0] == xe[-1]] = len(xe)-2
y_bin_indices[data_ch[:,1] == ye[-1]] = len(ye)-2
# Ignore (gate out) events which exist outside specified bins.
# `np.digitize()-1` will assign events less than `bins` to bin "-1" and
# events greater than `bins` to len(bins)-1.
outlier_mask = (
(x_bin_indices == -1) |
(x_bin_indices == len(xe)-1) |
(y_bin_indices == -1) |
(y_bin_indices == len(ye)-1))
event_indices = event_indices[~outlier_mask]
x_bin_indices = x_bin_indices[~outlier_mask]
y_bin_indices = y_bin_indices[~outlier_mask]
# Create a 2D array of lists mimicking the histogram to accumulate events
# associated with each bin.
filler = np.frompyfunc(lambda x: list(), 1, 1)
    H_events = np.empty_like(H, dtype=object)  # np.object was removed in NumPy 1.24
filler(H_events, H_events)
for event_idx, x_bin_idx, y_bin_idx in \
zip(event_indices, x_bin_indices, y_bin_indices):
H_events[x_bin_idx, y_bin_idx].append(event_idx)
# Determine number of events to keep. Only consider events which have not
# been thrown out as outliers.
n = int(np.ceil(gate_fraction*float(len(event_indices))))
    # Handle the n == 0 edge case (e.g. gate_fraction == 0.0) here; the
    # accumulation code below would handle it incorrectly.
if n == 0:
mask = np.zeros(shape=data_ch.shape[0], dtype=bool)
gated_data = data[mask]
if full_output:
return Density2dGateOutput(
gated_data=gated_data, mask=mask, contour=[])
else:
return gated_data
# Smooth 2D histogram
    sH = scipy.ndimage.gaussian_filter(
H,
sigma=sigma,
order=0,
mode='constant',
cval=0.0,
truncate=6.0)
# Normalize smoothed histogram to make it a valid probability mass function
D = sH / np.sum(sH)
# Sort bins by density
vD = D.ravel()
vH = H.ravel()
sidx = np.argsort(vD)[::-1]
svH = vH[sidx] # linearized counts array sorted by density
# Find minimum number of accepted bins needed to reach specified number
# of events
csvH = np.cumsum(svH)
Nidx = np.nonzero(csvH >= n)[0][0] # we want to include this index
# Get indices of events to keep
vH_events = H_events.ravel()
accepted_indices = vH_events[sidx[:(Nidx+1)]]
accepted_indices = np.array([item # flatten list of lists
for sublist in accepted_indices
for item in sublist])
accepted_indices = np.sort(accepted_indices)
# Convert list of accepted indices to boolean mask array
mask = np.zeros(shape=data.shape[0], dtype=bool)
mask[accepted_indices] = True
gated_data = data[mask]
if full_output:
# Use scikit-image to find the contour of the gated region
#
# To find the contour of the gated region, values in the 2D probability
# mass function ``D`` are used to trace contours at the level of the
# probability associated with the last accepted bin, ``vD[sidx[Nidx]]``.
# find_contours() specifies contours as collections of row and column
# indices into the density matrix. The row or column index may be
# interpolated (i.e. non-integer) for greater precision.
contours_ij = skimage.measure.find_contours(D, vD[sidx[Nidx]])
# Map contours from indices into density matrix to histogram x and y
# coordinate spaces (assume values in the density matrix are associated
# with histogram bin centers).
xc = (xe[:-1] + xe[1:]) / 2.0 # x-axis bin centers
yc = (ye[:-1] + ye[1:]) / 2.0 # y-axis bin centers
contours = [np.array([np.interp(contour_ij[:,0],
np.arange(len(xc)),
xc),
np.interp(contour_ij[:,1],
np.arange(len(yc)),
yc)]).T
for contour_ij in contours_ij]
return Density2dGateOutput(
gated_data=gated_data, mask=mask, contour=contours)
else:
return gated_data
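
# Example (sketch): retain roughly the densest half of synthetic 2D data.
# Plain arrays lack ``hist_bins``, so `bins` is given as an int and the
# scale arguments are ignored.
import numpy as np
import FlowCal
data = np.random.randn(20000, 2)
gated, mask, contours = FlowCal.gate.density2d(
    data, channels=[0, 1], bins=128, gate_fraction=0.5, full_output=True)
print(mask.sum() / float(len(mask)))   # at least (and close to) 0.5
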
def transform(data, channels, transform_fxn, def_channels = None):
"""
Apply some transformation function to flow cytometry data.
This function is a template transformation function, intended to be
used by other specific transformation functions. It performs basic
checks on `channels` and `data`. It then applies `transform_fxn` to the
    specified channels. Finally, it rescales ``data.range`` if
    necessary.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int, str, list of int, list of str, optional
Channels on which to perform the transformation. If `channels` is
None, use def_channels.
transform_fxn : function
Function that performs the actual transformation.
def_channels : int, str, list of int, list of str, optional
Default set of channels in which to perform the transformation.
If `def_channels` is None, use all channels.
Returns
-------
data_t : FCSData or numpy array
NxD transformed flow cytometry data.
"""
# Copy data array
data_t = data.copy().astype(np.float64)
# Default
if channels is None:
if def_channels is None:
channels = range(data_t.shape[1])
else:
channels = def_channels
# Convert channels to iterable
if not (hasattr(channels, '__iter__') \
and not isinstance(channels, six.string_types)):
channels = [channels]
# Apply transformation
data_t[:,channels] = transform_fxn(data_t[:,channels])
# Apply transformation to ``data.range``
if hasattr(data_t, '_range'):
for channel in channels:
# Transform channel name to index if necessary
channel_idx = data_t._name_to_index(channel)
if data_t._range[channel_idx] is not None:
data_t._range[channel_idx] = \
transform_fxn(data_t._range[channel_idx])
return data_t
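
# Example (sketch): calling the template directly with a throwaway scaling
# function; it is intended to be wrapped by more specific transformations.
import numpy as np
import FlowCal
data = np.arange(12, dtype=float).reshape(4, 3)
data_t = FlowCal.transform.transform(data, channels=[0, 2],
                                     transform_fxn=lambda x: 10.0 * x)
# columns 0 and 2 are scaled by 10; column 1 is left untouched
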
def to_rfi(data,
channels=None,
amplification_type=None,
amplifier_gain=None,
resolution=None):
"""
Transform flow cytometry data to Relative Fluorescence Units (RFI).
If ``amplification_type[0]`` is different from zero, data has been
taken using a log amplifier. Therefore, to transform to RFI, the
following operation is applied::
y = a[1]*10^(a[0] * (x/r))
Where ``x`` and ``y`` are the original and transformed data,
respectively; ``a`` is `amplification_type` argument, and ``r`` is
`resolution`. This will transform flow cytometry data taken with a log
amplifier and an ADC of range ``r`` to linear RFIs, such
that it covers ``a[0]`` decades of signal with a minimum value of
``a[1]``.
If ``amplification_type[0]==0``, however, a linear amplifier has been
used and the following operation is applied instead::
y = x/g
Where ``g`` is `amplifier_gain`. This will transform flow cytometry
data taken with a linear amplifier of gain ``g`` back to RFIs.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int, str, list of int, list of str, optional
Channels on which to perform the transformation. If `channels` is
None, perform transformation in all channels.
amplification_type : tuple or list of tuple
The amplification type of the specified channel(s). This should be
reported as a tuple, in which the first element indicates how many
decades the logarithmic amplifier covers, and the second indicates
the linear value that corresponds to a channel value of zero. If
the first element is zero, the amplification type is linear. This
is similar to the $PnE keyword from the FCS standard. If None, take
`amplification_type` from ``data.amplification_type(channel)``.
amplifier_gain : float or list of floats, optional
The linear amplifier gain of the specified channel(s). Only used if
``amplification_type[0]==0`` (linear amplifier). If None,
take `amplifier_gain` from ``data.amplifier_gain(channel)``. If
`data` does not contain ``amplifier_gain()``, use 1.0.
resolution : int, float, or list of int or float, optional
Maximum range, for each specified channel. Only needed if
``amplification_type[0]!=0`` (log amplifier). If None, take
        `resolution` from ``data.resolution(channel)``.
Returns
-------
FCSData or numpy array
NxD transformed flow cytometry data.
"""
# Default: all channels
if channels is None:
channels = range(data.shape[1])
if not (hasattr(channels, '__iter__') \
and not isinstance(channels, six.string_types)):
# If channels is not an iterable, convert it, along with resolution,
# amplification_type, and amplifier_gain.
channels = [channels]
amplification_type = [amplification_type]
amplifier_gain = [amplifier_gain]
resolution = [resolution]
else:
# If channels is an iterable, check that the other attributes are either
# None, or iterables of the same length.
if amplification_type is None:
# If None, propagate None for all channels
amplification_type = [None]*len(channels)
elif hasattr(amplification_type, '__iter__'):
# If it's a list, it should be the same length as channels
if len(amplification_type) != len(channels):
raise ValueError("channels and amplification_type should have "
"the same length")
else:
# If it's not a list or None, raise error
raise ValueError("channels and amplification_type should have the "
"same length")
if amplifier_gain is None:
# If None, propagate None for all channels
amplifier_gain = [None]*len(channels)
elif hasattr(amplifier_gain, '__iter__'):
# If it's a list, it should be the same length as channels
if len(amplifier_gain) != len(channels):
raise ValueError("channels and amplifier_gain should have "
"the same length")
else:
# If it's not a list or None, raise error
raise ValueError("channels and amplifier_gain should have the "
"same length")
if resolution is None:
# If None, propagate None for all channels
resolution = [None]*len(channels)
elif hasattr(resolution, '__iter__'):
# If it's a list, it should be the same length as channels
if len(resolution) != len(channels):
raise ValueError("channels and resolution should have "
"the same length")
else:
# If it's not a list or None, raise error
raise ValueError("channels and resolution should have the "
"same length")
# Convert channels to integers
if hasattr(data, '_name_to_index'):
channels = data._name_to_index(channels)
else:
channels = channels
# Copy data array
data_t = data.copy().astype(np.float64)
# Iterate over channels
for channel, r, at, ag in \
zip(channels, resolution, amplification_type, amplifier_gain):
# If amplification type is None, try to obtain from data
if at is None:
if hasattr(data, 'amplification_type'):
at = data.amplification_type(channel)
else:
raise ValueError('amplification_type should be specified')
# Define transformation, depending on at[0]
if at[0]==0:
# Linear amplifier
# If no amplifier gain has been specified, try to obtain from data,
# otherwise assume one
if ag is None:
if hasattr(data, 'amplifier_gain') and \
hasattr(data.amplifier_gain, '__call__'):
ag = data.amplifier_gain(channel)
# If the linear gain has not been specified, it should be
# assumed to be one.
if ag is None:
ag = 1.
else:
ag = 1.
tf = lambda x: x/ag
else:
# Log amplifier
# If no range has been specified, try to obtain from data.
if r is None:
if hasattr(data, 'resolution'):
r = data.resolution(channel)
else:
                    raise ValueError('resolution should be specified')
tf = lambda x: at[1] * 10**(at[0]/float(r) * x)
# Apply transformation to event list
data_t[:,channel] = tf(data_t[:,channel])
# Apply transformation to range
if hasattr(data_t, '_range') and data_t._range[channel] is not None:
data_t._range[channel] = [tf(data_t._range[channel][0]),
tf(data_t._range[channel][1])]
return data_t
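
# Example (sketch): the log-amplifier branch with explicit parameters, a
# hypothetical 10-bit ADC (resolution 1024) covering 4 decades with a
# minimum of 1 RFI. Channel value 0 maps to 1 RFI; 512 maps to 10**2 = 100.
import numpy as np
import FlowCal
x = np.array([[0.], [512.], [1023.]])
rfi = FlowCal.transform.to_rfi(x, channels=0,
                               amplification_type=(4, 1),
                               resolution=1024)
# rfi[:, 0] is approximately [1, 100, 9910]
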
def to_mef(data, channels, sc_list, sc_channels = None):
"""
Transform flow cytometry data using a standard curve function.
This function accepts a list of standard curves (`sc_list`) and a list
of channels to which those standard curves should be applied
(`sc_channels`). `to_mef` automatically checks whether a standard curve
is available for each channel specified in `channels`, and throws an
error otherwise.
This function is intended to be reduced to the following signature::
to_mef_reduced(data, channels)
by using ``functools.partial`` once a list of standard curves and their
respective channels is available.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int, str, list of int, list of str
Channels on which to perform the transformation. If `channels` is
        None, perform transformation in all channels specified in
`sc_channels`.
sc_list : list of functions
Functions implementing the standard curves for each channel in
`sc_channels`.
sc_channels : list of int or list of str, optional
List of channels corresponding to each function in `sc_list`. If
None, use all channels in `data`.
Returns
-------
FCSData or numpy array
NxD transformed flow cytometry data.
Raises
------
ValueError
If any channel specified in `channels` is not in `sc_channels`.
"""
# Default sc_channels
if sc_channels is None:
if data.ndim == 1:
sc_channels = range(data.shape[0])
else:
sc_channels = range(data.shape[1])
# Check that sc_channels and sc_list have the same length
if len(sc_channels) != len(sc_list):
raise ValueError("sc_channels and sc_list should have the same length")
# Convert sc_channels to indices
if hasattr(data, '_name_to_index'):
sc_channels = data._name_to_index(sc_channels)
# Default channels
if channels is None:
channels = sc_channels
# Convert channels to iterable
if not (hasattr(channels, '__iter__') \
and not isinstance(channels, six.string_types)):
channels = [channels]
# Convert channels to index
if hasattr(data, '_name_to_index'):
channels_ind = data._name_to_index(channels)
else:
channels_ind = channels
# Check if every channel is in sc_channels
for chi, chs in zip(channels_ind, channels):
if chi not in sc_channels:
raise ValueError("no standard curve for channel {}".format(chs))
# Copy data array
data_t = data.copy().astype(np.float64)
# Iterate over channels
for chi, sc in zip(sc_channels, sc_list):
if chi not in channels_ind:
continue
# Apply transformation
data_t[:,chi] = sc(data_t[:,chi])
# Apply transformation to range
if hasattr(data_t, '_range') and data_t._range[chi] is not None:
data_t._range[chi] = [sc(data_t._range[chi][0]),
sc(data_t._range[chi][1])]
return data_t
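
# Example (sketch): in practice the standard-curve functions come from MEF
# calibration (e.g. FlowCal.mef.get_transform_fxn); a made-up linear curve
# stands in here.
import numpy as np
import FlowCal
sc = lambda x: 2.0 * x + 5.0                 # hypothetical RFI -> MEF curve
data = np.random.rand(100, 2) * 1000.0
data_mef = FlowCal.transform.to_mef(data, channels=0, sc_list=[sc, sc])
# only column 0 is transformed; column 1's curve exists but is not requested
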
def hist1d(data_list,
channel=0,
xscale='logicle',
bins=256,
histtype='stepfilled',
normed_area=False,
normed_height=False,
xlabel=None,
ylabel=None,
xlim=None,
ylim=None,
title=None,
legend=False,
legend_loc='best',
legend_fontsize='medium',
legend_labels=None,
facecolor=None,
edgecolor=None,
savefig=None,
**kwargs):
"""
Plot one 1D histogram from one or more flow cytometry data sets.
Parameters
----------
data_list : FCSData or numpy array or list of FCSData or numpy array
Flow cytometry data to plot.
channel : int or str, optional
        Channel from which to take the events to plot. If ndim == 1,
channel is ignored. String channel specifications are only
supported for data types which support string-based indexing
(e.g. FCSData).
xscale : str, optional
Scale of the x axis, either ``linear``, ``log``, or ``logicle``.
bins : int or array_like, optional
If `bins` is an integer, it specifies the number of bins to use.
If `bins` is an array, it specifies the bin edges to use. If `bins`
is None or an integer, `hist1d` will attempt to use
``data.hist_bins`` to generate the bins automatically.
histtype : {'bar', 'barstacked', 'step', 'stepfilled'}, str, optional
Histogram type. Directly passed to ``plt.hist``.
normed_area : bool, optional
Flag indicating whether to normalize the histogram such that the
area under the curve is equal to one. The resulting plot is
equivalent to a probability density function.
normed_height : bool, optional
Flag indicating whether to normalize the histogram such that the
sum of all bins' heights is equal to one. The resulting plot is
equivalent to a probability mass function. `normed_height` is
ignored if `normed_area` is True.
savefig : str, optional
The name of the file to save the figure to. If None, do not save.
Other parameters
----------------
xlabel : str, optional
Label to use on the x axis. If None, attempts to extract channel
name from last data object.
ylabel : str, optional
Label to use on the y axis. If None and ``normed_area==True``, use
'Probability'. If None, ``normed_area==False``, and
``normed_height==True``, use 'Counts (normalized)'. If None,
``normed_area==False``, and ``normed_height==False``, use 'Counts'.
xlim : tuple, optional
Limits for the x axis. If not specified and `bins` exists, use
the lowest and highest values of `bins`.
ylim : tuple, optional
Limits for the y axis.
title : str, optional
Plot title.
legend : bool, optional
Flag specifying whether to include a legend. If `legend` is True,
the legend labels will be taken from `legend_labels` if present,
else they will be taken from ``str(data_list[i])``.
legend_loc : str, optional
Location of the legend.
legend_fontsize : int or str, optional
Font size for the legend.
legend_labels : list, optional
Labels to use for the legend.
facecolor : matplotlib color or list of matplotlib colors, optional
The histogram's facecolor. It can be a list with the same length as
`data_list`. If `edgecolor` and `facecolor` are not specified, and
``histtype == 'stepfilled'``, the facecolor will be taken from the
module-level variable `cmap_default`.
edgecolor : matplotlib color or list of matplotlib colors, optional
The histogram's edgecolor. It can be a list with the same length as
`data_list`. If `edgecolor` and `facecolor` are not specified, and
``histtype == 'step'``, the edgecolor will be taken from the
module-level variable `cmap_default`.
kwargs : dict, optional
Additional parameters passed directly to matplotlib's ``hist``.
Notes
-----
`hist1d` calls matplotlib's ``hist`` function for each object in
`data_list`. `histtype`, the type of histogram to draw, is directly
passed to ``plt.hist``. Additional keyword arguments provided to
`hist1d` are passed directly to ``plt.hist``.
If `normed_area` is set to True, `hist1d` calls ``plt.hist`` with
``density`` (or ``normed``, if matplotlib's version is older than
2.2.0) set to True. There is a bug in matplotlib 2.1.0 that
produces an incorrect plot in these conditions. We do not recommend
using matplotlib 2.1.0 if `normed_area` is expected to be used.
"""
# Using `normed_area` with matplotlib 2.1.0 causes an incorrect plot to be
# produced. Raise warning in these conditions.
if normed_area and packaging.version.parse(matplotlib.__version__) \
== packaging.version.parse('2.1.0'):
warnings.warn("bug in matplotlib 2.1.0 will result in an incorrect plot"
" when normed_area is set to True")
# Convert to list if necessary
if not isinstance(data_list, list):
data_list = [data_list]
# Default colors
if histtype == 'stepfilled':
if facecolor is None:
facecolor = [cmap_default(i)
for i in np.linspace(0, 1, len(data_list))]
if edgecolor is None:
edgecolor = ['black']*len(data_list)
elif histtype == 'step':
if edgecolor is None:
edgecolor = [cmap_default(i)
for i in np.linspace(0, 1, len(data_list))]
# Convert colors to lists if necessary
if not isinstance(edgecolor, list):
edgecolor = [edgecolor]*len(data_list)
if not isinstance(facecolor, list):
facecolor = [facecolor]*len(data_list)
# Collect scale parameters that depend on all elements in data_list
xscale_kwargs = {}
if xscale=='logicle':
t = _LogicleTransform(data=data_list, channel=channel)
xscale_kwargs['T'] = t.T
xscale_kwargs['M'] = t.M
xscale_kwargs['W'] = t.W
# Iterate through data_list
for i, data in enumerate(data_list):
# Extract channel
if data.ndim > 1:
y = data[:, channel]
else:
y = data
# If ``y.hist_bins()`` exists, obtain bin edges from it if
# necessary. If it does not exist, do not modify ``bins``.
if hasattr(y, 'hist_bins') and hasattr(y.hist_bins, '__call__'):
# If bins is None or an integer, get bin edges from
# ``y.hist_bins()``.
if bins is None or isinstance(bins, int):
bins = y.hist_bins(channels=0,
nbins=bins,
scale=xscale,
**xscale_kwargs)
# Decide whether to normalize
if normed_height and not normed_area:
weights = np.ones_like(y)/float(len(y))
else:
weights = None
# Actually plot
if packaging.version.parse(matplotlib.__version__) \
>= packaging.version.parse('2.2'):
if bins is not None:
n, edges, patches = plt.hist(y,
bins,
weights=weights,
density=normed_area,
histtype=histtype,
edgecolor=edgecolor[i],
facecolor=facecolor[i],
**kwargs)
else:
n, edges, patches = plt.hist(y,
weights=weights,
density=normed_area,
histtype=histtype,
edgecolor=edgecolor[i],
facecolor=facecolor[i],
**kwargs)
else:
if bins is not None:
n, edges, patches = plt.hist(y,
bins,
weights=weights,
normed=normed_area,
histtype=histtype,
edgecolor=edgecolor[i],
facecolor=facecolor[i],
**kwargs)
else:
n, edges, patches = plt.hist(y,
weights=weights,
normed=normed_area,
histtype=histtype,
edgecolor=edgecolor[i],
facecolor=facecolor[i],
**kwargs)
# Set scale of x axis
if xscale=='logicle':
plt.gca().set_xscale(xscale, data=data_list, channel=channel)
else:
plt.gca().set_xscale(xscale)
###
# Final configuration
###
# x and y labels
if xlabel is not None:
# Highest priority is user-provided label
plt.xlabel(xlabel)
elif hasattr(y, 'channels'):
# Attempt to use channel name
plt.xlabel(y.channels[0])
if ylabel is not None:
# Highest priority is user-provided label
plt.ylabel(ylabel)
elif normed_area:
plt.ylabel('Probability')
elif normed_height:
plt.ylabel('Counts (normalized)')
else:
# Default is "Counts"
plt.ylabel('Counts')
# x and y limits
if xlim is not None:
# Highest priority is user-provided limits
plt.xlim(xlim)
elif bins is not None:
# Use bins if specified
plt.xlim((edges[0], edges[-1]))
if ylim is not None:
plt.ylim(ylim)
# Title
if title is not None:
plt.title(title)
# Legend
if legend:
if legend_labels is None:
legend_labels = [str(data) for data in data_list]
plt.legend(legend_labels,
loc=legend_loc,
prop={'size': legend_fontsize})
# Save if necessary
if savefig is not None:
plt.tight_layout()
plt.savefig(savefig, dpi=savefig_dpi)
plt.close()
|
Plot one 1D histogram from one or more flow cytometry data sets.
Parameters
----------
data_list : FCSData or numpy array or list of FCSData or numpy array
Flow cytometry data to plot.
channel : int or str, optional
Channel from where to take the events to plot. If ndim == 1,
channel is ignored. String channel specifications are only
supported for data types which support string-based indexing
(e.g. FCSData).
xscale : str, optional
Scale of the x axis, either ``linear``, ``log``, or ``logicle``.
bins : int or array_like, optional
If `bins` is an integer, it specifies the number of bins to use.
If `bins` is an array, it specifies the bin edges to use. If `bins`
is None or an integer, `hist1d` will attempt to use
``data.hist_bins`` to generate the bins automatically.
histtype : {'bar', 'barstacked', 'step', 'stepfilled'}, str, optional
Histogram type. Directly passed to ``plt.hist``.
normed_area : bool, optional
Flag indicating whether to normalize the histogram such that the
area under the curve is equal to one. The resulting plot is
equivalent to a probability density function.
normed_height : bool, optional
Flag indicating whether to normalize the histogram such that the
sum of all bins' heights is equal to one. The resulting plot is
equivalent to a probability mass function. `normed_height` is
ignored if `normed_area` is True.
savefig : str, optional
The name of the file to save the figure to. If None, do not save.
Other parameters
----------------
xlabel : str, optional
Label to use on the x axis. If None, attempts to extract channel
name from last data object.
ylabel : str, optional
Label to use on the y axis. If None and ``normed_area==True``, use
'Probability'. If None, ``normed_area==False``, and
``normed_height==True``, use 'Counts (normalized)'. If None,
``normed_area==False``, and ``normed_height==False``, use 'Counts'.
xlim : tuple, optional
Limits for the x axis. If not specified and `bins` exists, use
the lowest and highest values of `bins`.
ylim : tuple, optional
Limits for the y axis.
title : str, optional
Plot title.
legend : bool, optional
Flag specifying whether to include a legend. If `legend` is True,
the legend labels will be taken from `legend_labels` if present,
else they will be taken from ``str(data_list[i])``.
legend_loc : str, optional
Location of the legend.
legend_fontsize : int or str, optional
Font size for the legend.
legend_labels : list, optional
Labels to use for the legend.
facecolor : matplotlib color or list of matplotlib colors, optional
The histogram's facecolor. It can be a list with the same length as
`data_list`. If `edgecolor` and `facecolor` are not specified, and
``histtype == 'stepfilled'``, the facecolor will be taken from the
module-level variable `cmap_default`.
edgecolor : matplotlib color or list of matplotlib colors, optional
The histogram's edgecolor. It can be a list with the same length as
`data_list`. If `edgecolor` and `facecolor` are not specified, and
``histtype == 'step'``, the edgecolor will be taken from the
module-level variable `cmap_default`.
kwargs : dict, optional
Additional parameters passed directly to matplotlib's ``hist``.
Notes
-----
`hist1d` calls matplotlib's ``hist`` function for each object in
`data_list`. `histtype`, the type of histogram to draw, is directly
passed to ``plt.hist``. Additional keyword arguments provided to
`hist1d` are passed directly to ``plt.hist``.
If `normed_area` is set to True, `hist1d` calls ``plt.hist`` with
``density`` (or ``normed``, if matplotlib's version is older than
2.2.0) set to True. There is a bug in matplotlib 2.1.0 that
produces an incorrect plot in these conditions. We do not recommend
using matplotlib 2.1.0 if `normed_area` is expected to be used.
|
entailment
|
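A runnable sketch of `hist1d` (not from the source): the samples are synthetic, and `hist1d` above is assumed to be in scope.

import numpy as np
import matplotlib.pyplot as plt

sample_a = np.random.lognormal(mean=5, sigma=1, size=10000)
sample_b = np.random.lognormal(mean=6, sigma=1, size=10000)

# Plain numpy arrays have no .hist_bins, so pass explicit bin edges.
hist1d([sample_a, sample_b],
       xscale='log',
       bins=np.logspace(0, 5, 128),
       legend=True,
       legend_labels=['Sample A', 'Sample B'])
plt.show()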
def density2d(data,
channels=[0,1],
bins=1024,
mode='mesh',
normed=False,
smooth=True,
sigma=10.0,
colorbar=False,
xscale='logicle',
yscale='logicle',
xlabel=None,
ylabel=None,
xlim=None,
ylim=None,
title=None,
savefig=None,
**kwargs):
"""
Plot a 2D density plot from two channels of a flow cytometry data set.
`density2d` has two plotting modes which are selected using the `mode`
argument. With ``mode=='mesh'``, this function plots the data as a true
2D histogram, in which a plane is divided into bins and the color of
each bin is directly related to the number of elements therein. With
``mode=='scatter'``, this function also calculates a 2D histogram,
but it plots a 2D scatter plot in which each dot corresponds to a bin,
colored according to the number of elements therein. The most important
difference is that the ``scatter`` mode does not color regions
corresponding to empty bins. This allows for easy identification of
regions with a low number of events. For both modes, the calculated
histogram can be smoothed using a Gaussian kernel by specifying
``smooth=True``. The width of the kernel is, in this case, given by
`sigma`.
Parameters
----------
data : FCSData or numpy array
Flow cytometry data to plot.
channels : list of int, list of str, optional
Two channels to use for the plot.
bins : int or array_like or [int, int] or [array, array], optional
Bins used for plotting:
- If None, use ``data.hist_bins`` to obtain bin edges for both
axes. None is not allowed if ``data.hist_bins`` is not
available.
- If int, `bins` specifies the number of bins to use for both
axes. If ``data.hist_bins`` exists, it will be used to generate
a number `bins` of bins.
- If array_like, `bins` directly specifies the bin edges to use
for both axes.
- If [int, int], each element of `bins` specifies the number of
bins for each axis. If ``data.hist_bins`` exists, use it to
generate ``bins[0]`` and ``bins[1]`` bin edges, respectively.
- If [array, array], each element of `bins` directly specifies
the bin edges to use for each axis.
- Any combination of the above, such as [int, array], [None,
int], or [array, int]. In this case, None indicates to generate
bin edges using ``data.hist_bins`` as above, int indicates the
number of bins to generate, and an array directly indicates the
bin edges. Note that None is not allowed if ``data.hist_bins``
does not exist.
mode : {'mesh', 'scatter'}, str, optional
Plotting mode. 'mesh' produces a 2D-histogram whereas 'scatter'
produces a scatterplot colored by histogram bin value.
normed : bool, optional
Flag indicating whether to plot a normed histogram (probability
mass function instead of a counts-based histogram).
smooth : bool, optional
Flag indicating whether to apply Gaussian smoothing to the
histogram.
colorbar : bool, optional
Flag indicating whether to add a colorbar to the plot.
savefig : str, optional
The name of the file to save the figure to. If None, do not save.
Other parameters
----------------
sigma : float, optional
The sigma parameter for the Gaussian kernel to use when smoothing.
xscale : str, optional
Scale of the x axis, either ``linear``, ``log``, or ``logicle``.
yscale : str, optional
Scale of the y axis, either ``linear``, ``log``, or ``logicle``.
xlabel : str, optional
Label to use on the x axis. If None, attempts to extract channel
name from `data`.
ylabel : str, optional
Label to use on the y axis. If None, attempts to extract channel
name from `data`.
xlim : tuple, optional
Limits for the x axis. If not specified and `bins` exists, use
the lowest and highest values of `bins`.
ylim : tuple, optional
Limits for the y axis. If not specified and `bins` exists, use
the lowest and highest values of `bins`.
title : str, optional
Plot title.
kwargs : dict, optional
Additional parameters passed directly to the underlying matplotlib
functions: ``plt.scatter`` if ``mode=='scatter'``, and
``plt.pcolormesh`` if ``mode=='mesh'``.
"""
# Extract channels to plot
if len(channels) != 2:
raise ValueError('two channels need to be specified')
data_plot = data[:, channels]
# If ``data_plot.hist_bins()`` exists, obtain bin edges from it if
# necessary.
if hasattr(data_plot, 'hist_bins') and \
hasattr(data_plot.hist_bins, '__call__'):
# Check whether `bins` contains information for one or two axes
if hasattr(bins, '__iter__') and len(bins)==2:
# `bins` contains separate information for both axes
# If bins for the X axis is not an iterable, get bin edges from
# ``data_plot.hist_bins()``.
if not hasattr(bins[0], '__iter__'):
bins[0] = data_plot.hist_bins(channels=0,
nbins=bins[0],
scale=xscale)
# If bins for the Y axis is not an iterable, get bin edges from
# ``data_plot.hist_bins()``.
if not hasattr(bins[1], '__iter__'):
bins[1] = data_plot.hist_bins(channels=1,
nbins=bins[1],
scale=yscale)
else:
# `bins` contains information for one axis, which will be used
# twice.
# If bins is not an iterable, get bin edges from
# ``data_plot.hist_bins()``.
if not hasattr(bins, '__iter__'):
bins = [data_plot.hist_bins(channels=0,
nbins=bins,
scale=xscale),
data_plot.hist_bins(channels=1,
nbins=bins,
scale=yscale)]
else:
# Check if ``bins`` is None and raise error
if bins is None:
raise ValueError("bins should be specified")
# If colormap is not specified, use the default of this module
if 'cmap' not in kwargs:
kwargs['cmap'] = cmap_default
# Calculate histogram
H,xe,ye = np.histogram2d(data_plot[:,0], data_plot[:,1], bins=bins)
# Smooth
if smooth:
sH = scipy.ndimage.gaussian_filter(
H,
sigma=sigma,
order=0,
mode='constant',
cval=0.0)
else:
sH = None
# Normalize
if normed:
H = H / np.sum(H)
sH = sH / np.sum(sH) if sH is not None else None
###
# Plot
###
# numpy histograms are organized such that the 1st dimension (e.g. FSC) =
# rows (1st index) and the 2nd dimension (e.g. SSC) = columns (2nd index).
# Visualized as is, this results in x-axis = SSC and y-axis = FSC, which
# is not what we're used to. Transpose the histogram array to fix the
# axes.
H = H.T
sH = sH.T if sH is not None else None
if mode == 'scatter':
Hind = np.ravel(H)
xc = (xe[:-1] + xe[1:]) / 2.0 # x-axis bin centers
yc = (ye[:-1] + ye[1:]) / 2.0 # y-axis bin centers
xv, yv = np.meshgrid(xc, yc)
x = np.ravel(xv)[Hind != 0]
y = np.ravel(yv)[Hind != 0]
z = np.ravel(H if sH is None else sH)[Hind != 0]
plt.scatter(x, y, s=1.5, edgecolor='none', c=z, **kwargs)
elif mode == 'mesh':
plt.pcolormesh(xe, ye, H if sH is None else sH, **kwargs)
else:
raise ValueError("mode {} not recognized".format(mode))
if colorbar:
cbar = plt.colorbar()
if normed:
cbar.ax.set_ylabel('Probability')
else:
cbar.ax.set_ylabel('Counts')
# Set scale of axes
if xscale=='logicle':
plt.gca().set_xscale(xscale, data=data_plot, channel=0)
else:
plt.gca().set_xscale(xscale)
if yscale=='logicle':
plt.gca().set_yscale(yscale, data=data_plot, channel=1)
else:
plt.gca().set_yscale(yscale)
# x and y limits
if xlim is not None:
# Highest priority is user-provided limits
plt.xlim(xlim)
else:
# Use histogram edges
plt.xlim((xe[0], xe[-1]))
if ylim is not None:
# Highest priority is user-provided limits
plt.ylim(ylim)
else:
# Use histogram edges
plt.ylim((ye[0], ye[-1]))
# x and y labels
if xlabel is not None:
# Highest priority is user-provided label
plt.xlabel(xlabel)
elif hasattr(data_plot, 'channels'):
# Attempt to use channel name
plt.xlabel(data_plot.channels[0])
if ylabel is not None:
# Highest priority is user-provided label
plt.ylabel(ylabel)
elif hasattr(data_plot, 'channels'):
# Attempt to use channel name
plt.ylabel(data_plot.channels[1])
# title
if title is not None:
plt.title(title)
# Save if necessary
if savefig is not None:
plt.tight_layout()
plt.savefig(savefig, dpi=savefig_dpi)
plt.close()
|
Plot a 2D density plot from two channels of a flow cytometry data set.
`density2d` has two plotting modes which are selected using the `mode`
argument. With ``mode=='mesh'``, this function plots the data as a true
2D histogram, in which a plane is divided into bins and the color of
each bin is directly related to the number of elements therein. With
``mode=='scatter'``, this function also calculates a 2D histogram,
but it plots a 2D scatter plot in which each dot corresponds to a bin,
colored according to the number of elements therein. The most important
difference is that the ``scatter`` mode does not color regions
corresponding to empty bins. This allows for easy identification of
regions with a low number of events. For both modes, the calculated
histogram can be smoothed using a Gaussian kernel by specifying
``smooth=True``. The width of the kernel is, in this case, given by
`sigma`.
Parameters
----------
data : FCSData or numpy array
Flow cytometry data to plot.
channels : list of int, list of str, optional
Two channels to use for the plot.
bins : int or array_like or [int, int] or [array, array], optional
Bins used for plotting:
- If None, use ``data.hist_bins`` to obtain bin edges for both
axes. None is not allowed if ``data.hist_bins`` is not
available.
- If int, `bins` specifies the number of bins to use for both
axes. If ``data.hist_bins`` exists, it will be used to generate
a number `bins` of bins.
- If array_like, `bins` directly specifies the bin edges to use
for both axes.
- If [int, int], each element of `bins` specifies the number of
bins for each axis. If ``data.hist_bins`` exists, use it to
generate ``bins[0]`` and ``bins[1]`` bin edges, respectively.
- If [array, array], each element of `bins` directly specifies
the bin edges to use for each axis.
- Any combination of the above, such as [int, array], [None,
int], or [array, int]. In this case, None indicates to generate
bin edges using ``data.hist_bins`` as above, int indicates the
number of bins to generate, and an array directly indicates the
bin edges. Note that None is not allowed if ``data.hist_bins``
does not exist.
mode : {'mesh', 'scatter'}, str, optional
Plotting mode. 'mesh' produces a 2D-histogram whereas 'scatter'
produces a scatterplot colored by histogram bin value.
normed : bool, optional
Flag indicating whether to plot a normed histogram (probability
mass function instead of a counts-based histogram).
smooth : bool, optional
Flag indicating whether to apply Gaussian smoothing to the
histogram.
colorbar : bool, optional
Flag indicating whether to add a colorbar to the plot.
savefig : str, optional
The name of the file to save the figure to. If None, do not save.
Other parameters
----------------
sigma : float, optional
The sigma parameter for the Gaussian kernel to use when smoothing.
xscale : str, optional
Scale of the x axis, either ``linear``, ``log``, or ``logicle``.
yscale : str, optional
Scale of the y axis, either ``linear``, ``log``, or ``logicle``.
xlabel : str, optional
Label to use on the x axis. If None, attempts to extract channel
name from `data`.
ylabel : str, optional
Label to use on the y axis. If None, attempts to extract channel
name from `data`.
xlim : tuple, optional
Limits for the x axis. If not specified and `bins` exists, use
the lowest and highest values of `bins`.
ylim : tuple, optional
Limits for the y axis. If not specified and `bins` exists, use
the lowest and highest values of `bins`.
title : str, optional
Plot title.
kwargs : dict, optional
Additional parameters passed directly to the underlying matplotlib
functions: ``plt.scatter`` if ``mode=='scatter'``, and
``plt.pcolormesh`` if ``mode=='mesh'``.
|
entailment
|
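An illustrative `density2d` call (not from the source): two correlated synthetic channels with explicit logarithmic bin edges, assuming `density2d` above is in scope.

import numpy as np
import matplotlib.pyplot as plt

x = np.random.lognormal(3, 0.5, 50000)
y = x * np.random.lognormal(0, 0.2, 50000)
data = np.column_stack([x, y])

# Plain numpy arrays have no .hist_bins, so bins must be given explicitly.
density2d(data,
          channels=[0, 1],
          bins=[np.logspace(0, 3, 256), np.logspace(0, 3, 256)],
          mode='mesh',
          xscale='log',
          yscale='log',
          colorbar=True)
plt.show()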
def scatter2d(data_list,
channels=[0,1],
xscale='logicle',
yscale='logicle',
xlabel=None,
ylabel=None,
xlim=None,
ylim=None,
title=None,
color=None,
savefig=None,
**kwargs):
"""
Plot 2D scatter plot from one or more FCSData objects or numpy arrays.
Parameters
----------
data_list : array or FCSData or list of array or list of FCSData
Flow cytometry data to plot.
channels : list of int, list of str
Two channels to use for the plot.
savefig : str, optional
The name of the file to save the figure to. If None, do not save.
Other parameters
----------------
xscale : str, optional
Scale of the x axis, either ``linear``, ``log``, or ``logicle``.
yscale : str, optional
Scale of the y axis, either ``linear``, ``log``, or ``logicle``.
xlabel : str, optional
Label to use on the x axis. If None, attempts to extract channel
name from last data object.
ylabel : str, optional
Label to use on the y axis. If None, attempts to extract channel
name from last data object.
xlim : tuple, optional
Limits for the x axis. If None, attempts to extract limits from the
range of the last data object.
ylim : tuple, optional
Limits for the y axis. If None, attempts to extract limits from the
range of the last data object.
title : str, optional
Plot title.
color : matplotlib color or list of matplotlib colors, optional
Color for the scatter plot. It can be a list with the same length
as `data_list`. If `color` is not specified, elements from
`data_list` are plotted with colors taken from the module-level
variable `cmap_default`.
kwargs : dict, optional
Additional parameters passed directly to matplotlib's ``scatter``.
Notes
-----
`scatter2d` calls matplotlib's ``scatter`` function for each object in
data_list. Additional keyword arguments provided to `scatter2d` are
passed directly to ``plt.scatter``.
"""
# Check appropriate number of channels
if len(channels) != 2:
raise ValueError('two channels need to be specified')
# Convert to list if necessary
if not isinstance(data_list, list):
data_list = [data_list]
# Default colors
if color is None:
color = [cmap_default(i) for i in np.linspace(0, 1, len(data_list))]
# Convert color to list, if necessary
if not isinstance(color, list):
color = [color]*len(data_list)
# Iterate through data_list
for i, data in enumerate(data_list):
# Get channels to plot
data_plot = data[:, channels]
# Make scatter plot
plt.scatter(data_plot[:,0],
data_plot[:,1],
s=5,
alpha=0.25,
color=color[i],
**kwargs)
# Set labels if specified, else try to extract channel names
if xlabel is not None:
plt.xlabel(xlabel)
elif hasattr(data_plot, 'channels'):
plt.xlabel(data_plot.channels[0])
if ylabel is not None:
plt.ylabel(ylabel)
elif hasattr(data_plot, 'channels'):
plt.ylabel(data_plot.channels[1])
# Set scale of axes
if xscale=='logicle':
plt.gca().set_xscale(xscale, data=data_list, channel=channels[0])
else:
plt.gca().set_xscale(xscale)
if yscale=='logicle':
plt.gca().set_yscale(yscale, data=data_list, channel=channels[1])
else:
plt.gca().set_yscale(yscale)
# Set plot limits if specified, else extract range from data_list.
# ``.hist_bins`` with one bin works better for visualization than
# ``.range``, because it deals with two issues. First, it automatically
# deals with range values that are outside the domain of the current scaling
# (e.g. when the lower range value is zero and the scaling is logarithmic).
# Second, it takes into account events that are outside the limits specified
# by .range (e.g. negative events will be shown with logicle scaling, even
# when the lower range is zero).
if xlim is None:
xlim = [np.inf, -np.inf]
for data in data_list:
if hasattr(data, 'hist_bins') and \
hasattr(data.hist_bins, '__call__'):
xlim_data = data.hist_bins(channels=channels[0],
nbins=1,
scale=xscale)
xlim[0] = xlim_data[0] if xlim_data[0] < xlim[0] else xlim[0]
xlim[1] = xlim_data[1] if xlim_data[1] > xlim[1] else xlim[1]
plt.xlim(xlim)
if ylim is None:
ylim = [np.inf, -np.inf]
for data in data_list:
if hasattr(data, 'hist_bins') and \
hasattr(data.hist_bins, '__call__'):
ylim_data = data.hist_bins(channels=channels[1],
nbins=1,
scale=yscale)
ylim[0] = ylim_data[0] if ylim_data[0] < ylim[0] else ylim[0]
ylim[1] = ylim_data[1] if ylim_data[1] > ylim[1] else ylim[1]
plt.ylim(ylim)
# Title
if title is not None:
plt.title(title)
# Save if necessary
if savefig is not None:
plt.tight_layout()
plt.savefig(savefig, dpi=savefig_dpi)
plt.close()
|
Plot 2D scatter plot from one or more FCSData objects or numpy arrays.
Parameters
----------
data_list : array or FCSData or list of array or list of FCSData
Flow cytometry data to plot.
channels : list of int, list of str
Two channels to use for the plot.
savefig : str, optional
The name of the file to save the figure to. If None, do not save.
Other parameters
----------------
xscale : str, optional
Scale of the x axis, either ``linear``, ``log``, or ``logicle``.
yscale : str, optional
Scale of the y axis, either ``linear``, ``log``, or ``logicle``.
xlabel : str, optional
Label to use on the x axis. If None, attempts to extract channel
name from last data object.
ylabel : str, optional
Label to use on the y axis. If None, attempts to extract channel
name from last data object.
xlim : tuple, optional
Limits for the x axis. If None, attempts to extract limits from the
range of the last data object.
ylim : tuple, optional
Limits for the y axis. If None, attempts to extract limits from the
range of the last data object.
title : str, optional
Plot title.
color : matplotlib color or list of matplotlib colors, optional
Color for the scatter plot. It can be a list with the same length
as `data_list`. If `color` is not specified, elements from
`data_list` are plotted with colors taken from the module-level
variable `cmap_default`.
kwargs : dict, optional
Additional parameters passed directly to matplotlib's ``scatter``.
Notes
-----
`scatter2d` calls matplotlib's ``scatter`` function for each object in
data_list. Additional keyword arguments provided to `scatter2d` are
passed directly to ``plt.scatter``.
|
entailment
|
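An illustrative `scatter2d` call (not from the source): two synthetic samples overlaid on log axes with explicit limits, assuming `scatter2d` above is in scope. The axis labels are placeholders.

import numpy as np
import matplotlib.pyplot as plt

sample_a = np.random.lognormal(3, 0.5, size=(5000, 2))
sample_b = np.random.lognormal(4, 0.5, size=(5000, 2))

# Explicit limits avoid the .hist_bins-based limit search, which plain
# numpy arrays do not support.
scatter2d([sample_a, sample_b],
          channels=[0, 1],
          xscale='log',
          yscale='log',
          xlim=(1, 1e4),
          ylim=(1, 1e4),
          xlabel='FSC (a.u.)',
          ylabel='SSC (a.u.)')
plt.show()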
def scatter3d(data_list,
channels=[0,1,2],
xscale='logicle',
yscale='logicle',
zscale='logicle',
xlabel=None,
ylabel=None,
zlabel=None,
xlim=None,
ylim=None,
zlim=None,
title=None,
color=None,
savefig=None,
**kwargs):
"""
Plot 3D scatter plot from one or more FCSData objects or numpy arrays.
Parameters
----------
data_list : array or FCSData or list of array or list of FCSData
Flow cytometry data to plot.
channels : list of int, list of str
Three channels to use for the plot.
savefig : str, optional
The name of the file to save the figure to. If None, do not save.
Other parameters
----------------
xscale : str, optional
Scale of the x axis, either ``linear``, ``log``, or ``logicle``.
yscale : str, optional
Scale of the y axis, either ``linear``, ``log``, or ``logicle``.
zscale : str, optional
Scale of the z axis, either ``linear``, ``log``, or ``logicle``.
xlabel : str, optional
Label to use on the x axis. If None, attempts to extract channel
name from last data object.
ylabel : str, optional
Label to use on the y axis. If None, attempts to extract channel
name from last data object.
zlabel : str, optional
Label to use on the z axis. If None, attempts to extract channel
name from last data object.
xlim : tuple, optional
Limits for the x axis. If None, attempts to extract limits from the
range of the last data object.
ylim : tuple, optional
Limits for the y axis. If None, attempts to extract limits from the
range of the last data object.
zlim : tuple, optional
Limits for the z axis. If None, attempts to extract limits from the
range of the last data object.
title : str, optional
Plot title.
color : matplotlib color or list of matplotlib colors, optional
Color for the scatter plot. It can be a list with the same length
as `data_list`. If `color` is not specified, elements from
`data_list` are plotted with colors taken from the module-level
variable `cmap_default`.
kwargs : dict, optional
Additional parameters passed directly to matplotlib's ``scatter``.
Notes
-----
`scatter3d` uses matplotlib's ``scatter`` with a 3D projection.
Additional keyword arguments provided to `scatter3d` are passed
directly to ``scatter``.
"""
# Check appropriate number of channels
if len(channels) != 3:
raise ValueError('three channels need to be specified')
# Convert to list if necessary
if not isinstance(data_list, list):
data_list = [data_list]
# Default colors
if color is None:
color = [cmap_default(i) for i in np.linspace(0, 1, len(data_list))]
# Convert color to list, if necessary
if not isinstance(color, list):
color = [color]*len(data_list)
# Get transformation functions for each axis
# Explicit rescaling is required for non-linear scales because mplot3d does
# not natively support anything but linear scale.
if xscale == 'linear':
xscale_transform = lambda x: x
elif xscale == 'log':
xscale_transform = np.log10
elif xscale == 'logicle':
t = _LogicleTransform(data=data_list, channel=channels[0])
it = _InterpolatedInverseTransform(t, 0, t.M)
xscale_transform = it.transform_non_affine
else:
raise ValueError('scale {} not supported'.format(xscale))
if yscale == 'linear':
yscale_transform = lambda x: x
elif yscale == 'log':
yscale_transform = np.log10
elif yscale == 'logicle':
t = _LogicleTransform(data=data_list, channel=channels[1])
it = _InterpolatedInverseTransform(t, 0, t.M)
yscale_transform = it.transform_non_affine
else:
raise ValueError('scale {} not supported'.format(yscale))
if zscale == 'linear':
zscale_transform = lambda x: x
elif zscale == 'log':
zscale_transform = np.log10
elif zscale == 'logicle':
t = _LogicleTransform(data=data_list, channel=channels[2])
it = _InterpolatedInverseTransform(t, 0, t.M)
zscale_transform = it.transform_non_affine
else:
raise ValueError('scale {} not supported'.format(zscale))
# Make 3d axis if necessary
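# Note: passing `projection` to plt.gca() was deprecated in matplotlib 3.4
# and removed in recent versions; newer matplotlib requires an explicit 3D
# subplot instead, e.g. plt.gcf().add_subplot(projection='3d').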
ax_3d = plt.gca(projection='3d')
# Iterate through data_list
for i, data in enumerate(data_list):
# Get channels to plot
data_plot = data[:, channels]
# Make scatter plot
ax_3d.scatter(xscale_transform(data_plot[:, 0]),
yscale_transform(data_plot[:, 1]),
zscale_transform(data_plot[:, 2]),
marker='o',
alpha=0.1,
color=color[i],
**kwargs)
# Remove tick labels
ax_3d.xaxis.set_ticklabels([])
ax_3d.yaxis.set_ticklabels([])
ax_3d.zaxis.set_ticklabels([])
# Set labels if specified, else try to extract channel names
if xlabel is not None:
ax_3d.set_xlabel(xlabel)
elif hasattr(data_plot, 'channels'):
ax_3d.set_xlabel(data_plot.channels[0])
if ylabel is not None:
ax_3d.set_ylabel(ylabel)
elif hasattr(data_plot, 'channels'):
ax_3d.set_ylabel(data_plot.channels[1])
if zlabel is not None:
ax_3d.set_zlabel(zlabel)
elif hasattr(data_plot, 'channels'):
ax_3d.set_zlabel(data_plot.channels[2])
# Set plot limits if specified, else extract range from data_plot
# ``.hist_bins`` with one bin works better for visualization than
# ``.range``, because it deals with two issues. First, it automatically
# deals with range values that are outside the domain of the current scaling
# (e.g. when the lower range value is zero and the scaling is logarithmic).
# Second, it takes into account events that are outside the limits specified
# by .range (e.g. negative events will be shown with logicle scaling, even
# when the lower range is zero).
if xlim is None:
xlim = np.array([np.inf, -np.inf])
for data in data_list:
if hasattr(data, 'hist_bins') and \
hasattr(data.hist_bins, '__call__'):
xlim_data = data.hist_bins(channels=channels[0],
nbins=1,
scale=xscale)
xlim[0] = xlim_data[0] if xlim_data[0] < xlim[0] else xlim[0]
xlim[1] = xlim_data[1] if xlim_data[1] > xlim[1] else xlim[1]
xlim = xscale_transform(xlim)
ax_3d.set_xlim(xlim)
if ylim is None:
ylim = np.array([np.inf, -np.inf])
for data in data_list:
if hasattr(data, 'hist_bins') and \
hasattr(data.hist_bins, '__call__'):
ylim_data = data.hist_bins(channels=channels[1],
nbins=1,
scale=yscale)
ylim[0] = ylim_data[0] if ylim_data[0] < ylim[0] else ylim[0]
ylim[1] = ylim_data[1] if ylim_data[1] > ylim[1] else ylim[1]
ylim = yscale_transform(ylim)
ax_3d.set_ylim(ylim)
if zlim is None:
zlim = np.array([np.inf, -np.inf])
for data in data_list:
if hasattr(data, 'hist_bins') and \
hasattr(data.hist_bins, '__call__'):
zlim_data = data.hist_bins(channels=channels[2],
nbins=1,
scale=zscale)
zlim[0] = zlim_data[0] if zlim_data[0] < zlim[0] else zlim[0]
zlim[1] = zlim_data[1] if zlim_data[1] > zlim[1] else zlim[1]
zlim = zscale_transform(zlim)
ax_3d.set_zlim(zlim)
# Title
if title is not None:
plt.title(title)
# Save if necessary
if savefig is not None:
plt.tight_layout()
plt.savefig(savefig, dpi=savefig_dpi)
plt.close()
|
Plot 3D scatter plot from one or more FCSData objects or numpy arrays.
Parameters
----------
data_list : array or FCSData or list of array or list of FCSData
Flow cytometry data to plot.
channels : list of int, list of str
Three channels to use for the plot.
savefig : str, optional
The name of the file to save the figure to. If None, do not save.
Other parameters
----------------
xscale : str, optional
Scale of the x axis, either ``linear``, ``log``, or ``logicle``.
yscale : str, optional
Scale of the y axis, either ``linear``, ``log``, or ``logicle``.
zscale : str, optional
Scale of the z axis, either ``linear``, ``log``, or ``logicle``.
xlabel : str, optional
Label to use on the x axis. If None, attempts to extract channel
name from last data object.
ylabel : str, optional
Label to use on the y axis. If None, attempts to extract channel
name from last data object.
zlabel : str, optional
Label to use on the z axis. If None, attempts to extract channel
name from last data object.
xlim : tuple, optional
Limits for the x axis. If None, attempts to extract limits from the
range of the last data object.
ylim : tuple, optional
Limits for the y axis. If None, attempts to extract limits from the
range of the last data object.
zlim : tuple, optional
Limits for the z axis. If None, attempts to extract limits from the
range of the last data object.
title : str, optional
Plot title.
color : matplotlib color or list of matplotlib colors, optional
Color for the scatter plot. It can be a list with the same length
as `data_list`. If `color` is not specified, elements from
`data_list` are plotted with colors taken from the module-level
variable `cmap_default`.
kwargs : dict, optional
Additional parameters passed directly to matplotlib's ``scatter``.
Notes
-----
`scatter3d` uses matplotlib's ``scatter`` with a 3D projection.
Additional keyword arguments provided to `scatter3d` are passed
directly to ``scatter``.
|
entailment
|
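An illustrative `scatter3d` call (not from the source): one synthetic dataset on three log-scaled axes, assuming `scatter3d` above is in scope. Limits are given in data units; the function rescales them internally.

import numpy as np
import matplotlib.pyplot as plt

data = np.random.lognormal(3, 0.5, size=(5000, 3))

plt.figure()
scatter3d(data,
          channels=[0, 1, 2],
          xscale='log',
          yscale='log',
          zscale='log',
          xlim=(1, 1e4),
          ylim=(1, 1e4),
          zlim=(1, 1e4))
plt.show()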
def density_and_hist(data,
gated_data=None,
gate_contour=None,
density_channels=None,
density_params={},
hist_channels=None,
hist_params={},
figsize=None,
savefig=None):
"""
Make a combined density/histogram plot of a FCSData object.
This function calls `hist1d` and `density2d` to plot a density diagram
and a number of histograms in different subplots of the same plot using
one single function call. Setting `density_channels` to None will not
produce a density diagram, and setting `hist_channels` to None will not
produce any histograms. Setting both to None will raise an error.
Additional parameters can be provided to `density2d` and `hist1d` by
using `density_params` and `hist_params`.
If `gated_data` is provided, this function will plot the histograms
corresponding to `gated_data` on top of `data`'s histograms, with some
transparency on `data`. In addition, a legend will be added with the
labels 'Ungated' and 'Gated'. If `gate_contour` is provided and it
contains a valid list of 2D curves, these will be plotted on top of the
density plot.
Parameters
----------
data : FCSData object
Flow cytometry data object to plot.
gated_data : FCSData object, optional
Flow cytometry data object. If `gated_data` is specified, the
histograms of `data` are plotted with an alpha value of 0.5, and
the histograms of `gated_data` are plotted on top of those with
an alpha value of 1.0.
gate_contour : list, optional
List of Nx2 curves, representing a gate contour to be plotted in
the density diagram.
density_channels : list
Two channels to use for the density plot. If `density_channels` is
None, do not plot a density plot.
density_params : dict, optional
Parameters to pass to `density2d`.
hist_channels : list
Channels to use for each histogram. If `hist_channels` is None,
do not plot histograms.
hist_params : list, optional
List of dictionaries with the parameters to pass to each call of
`hist1d`.
savefig : str, optional
The name of the file to save the figure to. If None, do not save.
Other parameters
----------------
figsize : tuple, optional
Figure size. If None, calculate a default based on the number of
subplots.
Raises
------
ValueError
If both `density_channels` and `hist_channels` are None.
"""
# Check number of plots
if density_channels is None and hist_channels is None:
raise ValueError("density_channels and hist_channels cannot be both "
"None")
# Change hist_channels to iterable if necessary
if not hasattr(hist_channels, "__iter__") \
or isinstance(hist_channels, six.string_types):
hist_channels = [hist_channels]
if isinstance(hist_params, dict):
hist_params = [hist_params]*len(hist_channels)
plot_density = density_channels is not None
n_plots = plot_density + len(hist_channels)
# Calculate plot size if necessary
if figsize is None:
height = 0.315 + 2.935*n_plots
figsize = (6, height)
# Create plot
plt.figure(figsize=figsize)
# Density plot
if plot_density:
plt.subplot(n_plots, 1, 1)
# Plot density diagram
density2d(data, channels=density_channels, **density_params)
# Plot gate contour
if gate_contour is not None:
for g in gate_contour:
plt.plot(g[:,0], g[:,1], color='k', linewidth=1.25)
# Add title
if 'title' not in density_params:
if gated_data is not None:
ret = gated_data.shape[0] * 100. / data.shape[0]
title = "{} ({:.1f}% retained)".format(str(data), ret)
else:
title = str(data)
plt.title(title)
# Colors
n_colors = n_plots - 1
colors = [cmap_default(i) for i in np.linspace(0, 1, n_colors)]
# Histogram
for i, hist_channel in enumerate(hist_channels):
# Define subplot
plt.subplot(n_plots, 1, plot_density + i + 1)
# Default colors
hist_params_i = hist_params[i].copy()
if 'facecolor' not in hist_params_i:
hist_params_i['facecolor'] = colors[i]
# Plots
if gated_data is not None:
hist1d(data,
channel=hist_channel,
alpha=0.5,
**hist_params_i)
hist1d(gated_data,
channel=hist_channel,
alpha=1.0,
**hist_params_i)
plt.legend(['Ungated', 'Gated'], loc='best', fontsize='medium')
else:
hist1d(data, channel=hist_channel, **hist_params_i)
# Save if necessary
if savefig is not None:
plt.tight_layout()
plt.savefig(savefig, dpi=savefig_dpi)
plt.close()
|
Make a combined density/histogram plot of a FCSData object.
This function calls `hist1d` and `density2d` to plot a density diagram
and a number of histograms in different subplots of the same plot using
one single function call. Setting `density_channels` to None will not
produce a density diagram, and setting `hist_channels` to None will not
produce any histograms. Setting both to None will raise an error.
Additional parameters can be provided to `density2d` and `hist1d` by
using `density_params` and `hist_params`.
If `gated_data` is provided, this function will plot the histograms
corresponding to `gated_data` on top of `data`'s histograms, with some
transparency on `data`. In addition, a legend will be added with the
labels 'Ungated' and 'Gated'. If `gate_contour` is provided and it
contains a valid list of 2D curves, these will be plotted on top of the
density plot.
Parameters
----------
data : FCSData object
Flow cytometry data object to plot.
gated_data : FCSData object, optional
Flow cytometry data object. If `gated_data` is specified, the
histograms of `data` are plotted with an alpha value of 0.5, and
the histograms of `gated_data` are plotted on top of those with
an alpha value of 1.0.
gate_contour : list, optional
List of Nx2 curves, representing a gate contour to be plotted in
the density diagram.
density_channels : list
Two channels to use for the density plot. If `density_channels` is
None, do not plot a density plot.
density_params : dict, optional
Parameters to pass to `density2d`.
hist_channels : list
Channels to use for each histogram. If `hist_channels` is None,
do not plot histograms.
hist_params : list, optional
List of dictionaries with the parameters to pass to each call of
`hist1d`.
savefig : str, optional
The name of the file to save the figure to. If None, do not save.
Other parameters
----------------
figsize : tuple, optional
Figure size. If None, calculate a default based on the number of
subplots.
Raises
------
ValueError
If both `density_channels` and `hist_channels` are None.
|
entailment
|
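A runnable sketch of `density_and_hist` (not from the source): the data is synthetic, and `density_and_hist`, `density2d`, and `hist1d` above are assumed to be in scope.

import numpy as np

x = np.random.lognormal(3, 0.5, 20000)
data = np.column_stack([x,
                        x * np.random.lognormal(0, 0.2, 20000),
                        np.random.lognormal(2, 0.7, 20000)])

density_and_hist(data,
                 density_channels=[0, 1],
                 density_params={'bins': 128,   # plain arrays need explicit bins
                                 'mode': 'mesh',
                                 'xscale': 'log',
                                 'yscale': 'log',
                                 'title': 'Synthetic sample'},
                 hist_channels=[2],
                 hist_params=[{'bins': np.logspace(0, 3, 128),
                               'xscale': 'log'}])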
def scatter3d_and_projections(data_list,
channels=[0,1,2],
xscale='logicle',
yscale='logicle',
zscale='logicle',
xlabel=None,
ylabel=None,
zlabel=None,
xlim=None,
ylim=None,
zlim=None,
color=None,
figsize=None,
savefig=None,
**kwargs):
"""
Plot a 3D scatter plot and 2D projections from FCSData objects.
`scatter3d_and_projections` creates a 3D scatter plot and three 2D
projected scatter plots in four different axes for each FCSData object
in `data_list`, in the same figure.
Parameters
----------
data_list : FCSData object, or list of FCSData objects
Flow cytometry data to plot.
channels : list of int, list of str
Three channels to use for the plot.
savefig : str, optional
The name of the file to save the figure to. If None, do not save.
Other parameters
----------------
xscale : str, optional
Scale of the x axis, either ``linear``, ``log``, or ``logicle``.
yscale : str, optional
Scale of the y axis, either ``linear``, ``log``, or ``logicle``.
zscale : str, optional
Scale of the z axis, either ``linear``, ``log``, or ``logicle``.
xlabel : str, optional
Label to use on the x axis. If None, attempts to extract channel
name from last data object.
ylabel : str, optional
Label to use on the y axis. If None, attempts to extract channel
name from last data object.
zlabel : str, optional
Label to use on the z axis. If None, attempts to extract channel
name from last data object.
xlim : tuple, optional
Limits for the x axis. If None, attempts to extract limits from the
range of the last data object.
ylim : tuple, optional
Limits for the y axis. If None, attempts to extract limits from the
range of the last data object.
zlim : tuple, optional
Limits for the z axis. If None, attempts to extract limits from the
range of the last data object.
color : matplotlib color or list of matplotlib colors, optional
Color for the scatter plot. It can be a list with the same length
as `data_list`. If `color` is not specified, elements from
`data_list` are plotted with colors taken from the module-level
variable `cmap_default`.
figsize : tuple, optional
Figure size. If None, use matplotlib's default.
kwargs : dict, optional
Additional parameters passed directly to matplotlib's ``scatter``.
Notes
-----
`scatter3d_and_projections` uses matplotlib's ``scatter``, with the 3D
scatter plot using a 3D projection. Additional keyword arguments
provided to `scatter3d_and_projections` are passed directly to
``scatter``.
"""
# Check appropriate number of channels
if len(channels) != 3:
raise ValueError('three channels need to be specified')
# Create figure
plt.figure(figsize=figsize)
# Axis 1: channel 0 vs channel 2
plt.subplot(221)
scatter2d(data_list,
channels=[channels[0], channels[2]],
xscale=xscale,
yscale=zscale,
xlabel=xlabel,
ylabel=zlabel,
xlim=xlim,
ylim=zlim,
color=color,
**kwargs)
# Axis 2: 3d plot
ax_3d = plt.gcf().add_subplot(222, projection='3d')
scatter3d(data_list,
channels=channels,
xscale=xscale,
yscale=yscale,
zscale=zscale,
xlabel=xlabel,
ylabel=ylabel,
zlabel=zlabel,
xlim=xlim,
ylim=ylim,
zlim=zlim,
color=color,
**kwargs)
# Axis 3: channel 0 vs channel 1
plt.subplot(223)
scatter2d(data_list,
channels=[channels[0], channels[1]],
xscale=xscale,
yscale=yscale,
xlabel=xlabel,
ylabel=ylabel,
xlim=xlim,
ylim=ylim,
color=color,
**kwargs)
# Axis 4: channel 2 vs channel 1
plt.subplot(224)
scatter2d(data_list,
channels=[channels[2], channels[1]],
xscale=zscale,
yscale=yscale,
xlabel=zlabel,
ylabel=ylabel,
xlim=zlim,
ylim=ylim,
color=color,
**kwargs)
# Save if necessary
if savefig is not None:
plt.tight_layout()
plt.savefig(savefig, dpi=savefig_dpi)
plt.close()
|
Plot a 3D scatter plot and 2D projections from FCSData objects.
`scatter3d_and_projections` creates a 3D scatter plot and three 2D
projected scatter plots in four different axes for each FCSData object
in `data_list`, in the same figure.
Parameters
----------
data_list : FCSData object, or list of FCSData objects
Flow cytometry data to plot.
channels : list of int, list of str
Three channels to use for the plot.
savefig : str, optional
The name of the file to save the figure to. If None, do not save.
Other parameters
----------------
xscale : str, optional
Scale of the x axis, either ``linear``, ``log``, or ``logicle``.
yscale : str, optional
Scale of the y axis, either ``linear``, ``log``, or ``logicle``.
zscale : str, optional
Scale of the z axis, either ``linear``, ``log``, or ``logicle``.
xlabel : str, optional
Label to use on the x axis. If None, attempts to extract channel
name from last data object.
ylabel : str, optional
Label to use on the y axis. If None, attempts to extract channel
name from last data object.
zlabel : str, optional
Label to use on the z axis. If None, attempts to extract channel
name from last data object.
xlim : tuple, optional
Limits for the x axis. If None, attempts to extract limits from the
range of the last data object.
ylim : tuple, optional
Limits for the y axis. If None, attempts to extract limits from the
range of the last data object.
zlim : tuple, optional
Limits for the z axis. If None, attempts to extract limits from the
range of the last data object.
color : matplotlib color or list of matplotlib colors, optional
Color for the scatter plot. It can be a list with the same length
as `data_list`. If `color` is not specified, elements from
`data_list` are plotted with colors taken from the module-level
variable `cmap_default`.
figsize : tuple, optional
Figure size. If None, use matplotlib's default.
kwargs : dict, optional
Additional parameters passed directly to matplotlib's ``scatter``.
Notes
-----
`scatter3d_and_projections` uses matplotlib's ``scatter``, with the 3D
scatter plot using a 3D projection. Additional keyword arguments
provided to `scatter3d_and_projections` are passed directly to
``scatter``.
|
entailment
|
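An illustrative `scatter3d_and_projections` call (not from the source): one synthetic dataset, assuming the function above is in scope. Explicit limits are passed down to the 2D projections and the 3D plot alike.

import numpy as np

data = np.random.lognormal(3, 0.5, size=(5000, 3))

scatter3d_and_projections(data,
                          channels=[0, 1, 2],
                          xscale='log',
                          yscale='log',
                          zscale='log',
                          xlim=(1, 1e4),
                          ylim=(1, 1e4),
                          zlim=(1, 1e4),
                          figsize=(8, 8))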
def transform_non_affine(self, x, mask_out_of_range=True):
"""
Transform a Nx1 numpy array.
Parameters
----------
x : array
Data to be transformed.
mask_out_of_range : bool, optional
Whether to mask input values out of range.
Return
------
array or masked array
Transformed data.
"""
# Mask out-of-range values
if mask_out_of_range:
x_masked = np.ma.masked_where((x < self._xmin) | (x > self._xmax),
x)
else:
x_masked = x
# Calculate s and return
return np.interp(x_masked, self._x_range, self._s_range)
|
Transform a Nx1 numpy array.
Parameters
----------
x : array
Data to be transformed.
mask_out_of_range : bool, optional
Whether to mask input values out of range.
Return
------
array or masked array
Transformed data.
|
entailment
|
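The masking step above can be seen in isolation (standalone illustration with assumed bounds and an assumed lookup table):

import numpy as np

x = np.array([-1.0, 0.5, 2.0])
xmin, xmax = 0.0, 1.0
x_masked = np.ma.masked_where((x < xmin) | (x > xmax), x)
# -> masked_array(data=[--, 0.5, --], mask=[True, False, True])

# np.interp then maps the in-range value through the lookup table;
# here 0.5 on [0, 1] -> 5.0 on [0, 10].
np.interp(0.5, [0.0, 1.0], [0.0, 10.0])   # 5.0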
def transform_non_affine(self, s):
"""
Apply transformation to a Nx1 numpy array.
Parameters
----------
s : array
Data to be transformed in display scale units.
Return
------
array or masked array
Transformed data, in data value units.
"""
T = self._T
M = self._M
W = self._W
p = self._p
# Calculate x
return T * 10**(-(M-W)) * (10**(s-W) - (p**2)*10**(-(s-W)/p) + p**2 - 1)
|
Apply transformation to a Nx1 numpy array.
Parameters
----------
s : array
Data to be transformed in display scale units.
Return
------
array or masked array
Transformed data, in data value units.
|
entailment
|
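Two sanity properties of this inverse follow directly from the formula and can be checked numerically (the parameter values below are assumed, typical-looking choices, not taken from the source): s = W gives x = 0 for any p because the two p**2 terms cancel, and s = M gives approximately x = T.

import numpy as np

T, M, W, p = 262144.0, 4.5, 0.5, 2.0   # assumed; p is normally derived from W

def logicle_inverse(s):
    return T * 10**(-(M-W)) * (10**(s-W) - (p**2)*10**(-(s-W)/p) + p**2 - 1)

logicle_inverse(np.array([W]))   # [0.]        : exact cancellation
logicle_inverse(np.array([M]))   # [~262222.]  : ~T, up to small correction terms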
def set_params(self, subs=None, numticks=None):
"""
Set parameters within this locator.
Parameters
----------
subs : array, optional
Subtick values, as multiples of the main ticks.
numticks : array, optional
Number of ticks.
"""
if numticks is not None:
self.numticks = numticks
if subs is not None:
self._subs = subs
|
Set parameters within this locator.
Parameters
----------
subs : array, optional
Subtick values, as multiples of the main ticks.
numticks : array, optional
Number of ticks.
|
entailment
|
def tick_values(self, vmin, vmax):
"""
Get a set of tick values properly spaced for logicle axis.
"""
# Extract base from transform object
b = self._transform.base
# The logicle domain is divided into two regions: A "linear" region,
# which may include negative numbers, and a "logarithmic" region, which
# only includes positive numbers. These two regions are separated by a
# value t, given by the logicle equations. An illustration is given
# below.
#
# -t ==0== t ========>
# lin log
#
# vmin and vmax can be anywhere in this domain, meaning that both should
# be greater than -t.
#
# The logarithmic region will only have major ticks at integral log
# positions. The linear region will have a major tick at zero, and one
# major tick at the largest absolute integral log value in screen
# inside this region. Subticks will be added at multiples of the
# integral log positions.
# If the linear range is too small, create new transformation object
# with slightly wider linear range. Otherwise, the number of decades
# below will be infinite
if self._transform.W == 0 or \
self._transform.M / self._transform.W > self.numticks:
self._transform = _LogicleTransform(
T=self._transform.T,
M=self._transform.M,
W=self._transform.M / self.numticks)
# Calculate t
t = - self._transform.transform_non_affine(0)
# Swap vmin and vmax if necessary
if vmax < vmin:
vmin, vmax = vmax, vmin
# Calculate minimum and maximum limits in scale units
vmins = self._transform.inverted().transform_non_affine(vmin)
vmaxs = self._transform.inverted().transform_non_affine(vmax)
# Check whether linear or log regions are present
has_linear = has_log = False
if vmin <= t:
has_linear = True
if vmax > t:
has_log = True
else:
has_log = True
# Calculate number of ticks in linear and log regions
# The number of ticks is distributed by the fraction that each region
# occupies in scale units
if has_linear:
fraction_linear = (min(vmaxs, 2*self._transform.W) - vmins) / \
(vmaxs - vmins)
numticks_linear = np.round(self.numticks*fraction_linear)
else:
numticks_linear = 0
if has_log:
fraction_log = (vmaxs - max(vmins, 2*self._transform.W)) / \
(vmaxs - vmins)
numticks_log = np.round(self.numticks*fraction_log)
else:
numticks_log = 0
# Calculate extended ranges and step size for tick location
# Extended ranges take into account discretization.
if has_log:
# The logarithmic region's range will include from the decade
# immediately below the lower end of the region to the decade
# immediately above the upper end.
# Note that this may extend the logarithmic region to the left.
log_ext_range = [np.floor(np.log(max(vmin, t)) / np.log(b)),
np.ceil(np.log(vmax) / np.log(b))]
# Since a major tick will be located at the lower end of the
# extended range, make sure that it is not too close to zero.
if vmin <= 0:
zero_s = self._transform.inverted().\
transform_non_affine(0)
min_tick_space = 1./self.numticks
while True:
min_tick_s = self._transform.inverted().\
transform_non_affine(b**log_ext_range[0])
if (min_tick_s - zero_s)/(vmaxs - vmins) < min_tick_space \
and ((log_ext_range[0] + 1) < log_ext_range[1]):
log_ext_range[0] += 1
else:
break
# Number of decades in the extended region
log_decades = log_ext_range[1] - log_ext_range[0]
# The step is at least one decade.
if numticks_log > 1:
log_step = max(np.floor(float(log_decades)/(numticks_log-1)), 1)
else:
log_step = 1
else:
# Linear region only
linear_range = [vmin, vmax]
# Initial step size will be one decade below the maximum whole
# decade in the range
linear_step = matplotlib.ticker.decade_down(
linear_range[1] - linear_range[0], b) / b
# Reduce the step size according to specified number of ticks
while (linear_range[1] - linear_range[0])/linear_step > \
numticks_linear:
linear_step *= b
# Get extended range by discretizing the region limits
vmin_ext = np.floor(linear_range[0]/linear_step)*linear_step
vmax_ext = np.ceil(linear_range[1]/linear_step)*linear_step
linear_range_ext = [vmin_ext, vmax_ext]
# Calculate major tick positions
major_ticklocs = []
if has_log:
# Logarithmic region present
# If a linear region is present, add the negative of the lower limit
# of the extended log region and zero. Then, add ticks for each
# logarithmic step as calculated above.
if has_linear:
major_ticklocs.append(- b**log_ext_range[0])
major_ticklocs.append(0)
# Use nextafter to pick the next floating point number, and try to
# include the upper limit in the generated range.
major_ticklocs.extend(b ** (np.arange(
log_ext_range[0],
np.nextafter(log_ext_range[1], np.inf),
log_step)))
else:
# Only linear region present
# Draw ticks according to linear step calculated above.
# Use nextafter to pick the next floating point number, and try to
# include the upper limit in the generated range.
major_ticklocs.extend(np.arange(
linear_range_ext[0],
np.nextafter(linear_range_ext[1], np.inf),
linear_step))
major_ticklocs = np.array(major_ticklocs)
# Add subticks if requested
subs = self._subs
if (subs is not None) and (len(subs) > 1 or subs[0] != 1.0):
ticklocs = []
if has_log:
# Subticks for each major tickloc present
for major_tickloc in major_ticklocs:
ticklocs.extend(subs * major_tickloc)
# Subticks from one decade below the lowest
major_ticklocs_pos = major_ticklocs[major_ticklocs > 0]
if len(major_ticklocs_pos):
tickloc_next_low = np.min(major_ticklocs_pos)/b
ticklocs.append(tickloc_next_low)
ticklocs.extend(subs * tickloc_next_low)
# Subticks for the negative linear range
if vmin < 0:
ticklocs.extend([(-ti) for ti in ticklocs if ti < -vmin ])
else:
ticklocs = list(major_ticklocs)
# If zero is present, add ticks from a decade below the lowest
if (vmin < 0) and (vmax > 0):
major_ticklocs_nonzero = major_ticklocs[
np.nonzero(major_ticklocs)]
tickloc_next_low = np.min(np.abs(major_ticklocs_nonzero))/b
ticklocs.append(tickloc_next_low)
ticklocs.extend(subs * tickloc_next_low)
ticklocs.append(-tickloc_next_low)
ticklocs.extend(subs * - tickloc_next_low)
else:
# Subticks not requested
ticklocs = major_ticklocs
return self.raise_if_exceeds(np.array(ticklocs))
|
Get a set of tick values properly spaced for logicle axis.
|
entailment
|
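The endpoint trick used for major-tick generation above can be shown standalone (assumed example values):

import numpy as np

b = 10.0
log_ext_range = [0.0, 4.0]   # decades 10**0 .. 10**4
log_step = 1.0

# np.nextafter nudges the stop value just past 4.0 so np.arange includes
# the upper decade; a plain stop of 4.0 would drop 10**4.
ticks = b ** np.arange(log_ext_range[0],
                       np.nextafter(log_ext_range[1], np.inf),
                       log_step)
# ticks -> [1., 10., 100., 1000., 10000.]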
def view_limits(self, vmin, vmax):
"""
Try to choose the view limits intelligently.
"""
b = self._transform.base
if vmax < vmin:
vmin, vmax = vmax, vmin
if not matplotlib.ticker.is_decade(abs(vmin), b):
if vmin < 0:
vmin = -matplotlib.ticker.decade_up(-vmin, b)
else:
vmin = matplotlib.ticker.decade_down(vmin, b)
if not matplotlib.ticker.is_decade(abs(vmax), b):
if vmax < 0:
vmax = -matplotlib.ticker.decade_down(-vmax, b)
else:
vmax = matplotlib.ticker.decade_up(vmax, b)
if vmin == vmax:
if vmin < 0:
vmin = -matplotlib.ticker.decade_up(-vmin, b)
vmax = -matplotlib.ticker.decade_down(-vmax, b)
else:
vmin = matplotlib.ticker.decade_down(vmin, b)
vmax = matplotlib.ticker.decade_up(vmax, b)
result = matplotlib.transforms.nonsingular(vmin, vmax)
return result
|
Try to choose the view limits intelligently.
|
entailment
|
def get_transform(self):
"""
Get a new object to perform the scaling transformation.
"""
return _InterpolatedInverseTransform(transform=self._transform,
smin=0,
smax=self._transform._M)
|
Get a new object to perform the scaling transformation.
|
entailment
|
def set_default_locators_and_formatters(self, axis):
"""
Set up the locators and formatters for the scale.
Parameters
----------
axis: matplotlib.axis
Axis for which to set locators and formatters.
"""
axis.set_major_locator(_LogicleLocator(self._transform))
axis.set_minor_locator(_LogicleLocator(self._transform,
subs=np.arange(2.0, 10.)))
axis.set_major_formatter(matplotlib.ticker.LogFormatterSciNotation(
labelOnlyBase=True))
|
Set up the locators and formatters for the scale.
Parameters
----------
axis: matplotlib.axis
Axis for which to set locators and formatters.
|
entailment
|
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Return minimum and maximum bounds for the logicle axis.
Parameters
----------
vmin : float
Minimum data value.
vmax : float
Maximum data value.
minpos : float
Minimum positive value in the data. Ignored by this function.
Return
------
float
Minimum axis bound.
float
Maximum axis bound.
"""
vmin_bound = self._transform.transform_non_affine(0)
vmax_bound = self._transform.transform_non_affine(self._transform.M)
vmin = max(vmin, vmin_bound)
vmax = min(vmax, vmax_bound)
return vmin, vmax
|
Return minimum and maximum bounds for the logicle axis.
Parameters
----------
vmin : float
Minimum data value.
vmax : float
Maximum data value.
minpos : float
Minimum positive value in the data. Ignored by this function.
Return
------
float
Minimum axis bound.
float
Maximum axis bound.
|
entailment
|
def mean(data, channels=None):
"""
Calculate the mean of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The mean of the events in the specified channels of `data`.
"""
# Slice data to take statistics from
if channels is None:
data_stats = data
else:
data_stats = data[:, channels]
# Calculate and return statistic
return np.mean(data_stats, axis=0)
|
Calculate the mean of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The mean of the events in the specified channels of `data`.
|
entailment
|
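These statistics helpers accept either an FCSData object or a bare numpy
array; with a bare array, `channels` must be integer column indices, since
there are no channel names. A minimal sketch of the slicing behaviour
(assumes `mean` from this module is in scope):

import numpy as np

data = np.arange(15, dtype=float).reshape(5, 3)   # 5 events x 3 channels
print(mean(data))                    # [6. 7. 8.] - one value per channel
print(mean(data, channels=[0, 2]))   # [6. 8.]    - selected channels only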
def gmean(data, channels=None):
"""
Calculate the geometric mean of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The geometric mean of the events in the specified channels of
`data`.
"""
# Slice data to take statistics from
if channels is None:
data_stats = data
else:
data_stats = data[:, channels]
# Calculate and return statistic
return scipy.stats.gmean(data_stats, axis=0)
|
Calculate the geometric mean of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The geometric mean of the events in the specified channels of
`data`.
|
entailment
|
def median(data, channels=None):
"""
Calculate the median of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The median of the events in the specified channels of `data`.
"""
# Slice data to take statistics from
if channels is None:
data_stats = data
else:
data_stats = data[:, channels]
# Calculate and return statistic
return np.median(data_stats, axis=0)
|
Calculate the median of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The median of the events in the specified channels of `data`.
|
entailment
|
def mode(data, channels=None):
"""
Calculate the mode of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The mode of the events in the specified channels of `data`.
"""
# Slice data to take statistics from
if channels is None:
data_stats = data
else:
data_stats = data[:, channels]
# Calculate and return statistic
# scipy.stats.mode returns two outputs, the first of which is an array
# containing the modal values. This array has the same number of
# dimensions as the input, and with only one element in the first
    # dimension. We extract this first element to make it match the other
# functions in this module.
return scipy.stats.mode(data_stats, axis=0)[0][0]
|
Calculate the mode of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The mode of the events in the specified channels of `data`.
|
entailment
|
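The shape gymnastics in the comment above can be seen directly; note that
recent SciPy releases changed the default output shape of
`scipy.stats.mode` (keepdims), so this sketch assumes an older SciPy where
the modal values come back with a leading singleton dimension:

import numpy as np
import scipy.stats

data = np.array([[1, 2],
                 [1, 3],
                 [4, 3]])
modal = scipy.stats.mode(data, axis=0)[0]   # modal values, shape (1, 2)
print(modal)      # [[1 3]]
print(modal[0])   # [1 3] - the flattened row that mode() returns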
def std(data, channels=None):
"""
Calculate the standard deviation of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The standard deviation of the events in the specified channels of
`data`.
"""
# Slice data to take statistics from
if channels is None:
data_stats = data
else:
data_stats = data[:, channels]
# Calculate and return statistic
return np.std(data_stats, axis=0)
|
Calculate the standard deviation of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The standard deviation of the events in the specified channels of
`data`.
|
entailment
|
def cv(data, channels=None):
"""
Calculate the Coeff. of Variation of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The Coefficient of Variation of the events in the specified
channels of `data`.
Notes
-----
The Coefficient of Variation (CV) of a dataset is defined as the
    standard deviation divided by the mean of the dataset.
"""
# Slice data to take statistics from
if channels is None:
data_stats = data
else:
data_stats = data[:, channels]
# Calculate and return statistic
return np.std(data_stats, axis=0) / np.mean(data_stats, axis=0)
|
Calculate the Coeff. of Variation of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The Coefficient of Variation of the events in the specified
channels of `data`.
Notes
-----
The Coefficient of Variation (CV) of a dataset is defined as the
standard deviation divided by the mean of the dataset.
|
entailment
|
def gstd(data, channels=None):
"""
Calculate the geometric std. dev. of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The geometric standard deviation of the events in the specified
channels of `data`.
"""
# Slice data to take statistics from
if channels is None:
data_stats = data
else:
data_stats = data[:, channels]
# Calculate and return statistic
return np.exp(np.std(np.log(data_stats), axis=0))
|
Calculate the geometric std. dev. of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The geometric standard deviation of the events in the specified
channels of `data`.
|
entailment
|
def gcv(data, channels=None):
"""
Calculate the geometric CV of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The geometric coefficient of variation of the events in the
specified channels of `data`.
"""
# Slice data to take statistics from
if channels is None:
data_stats = data
else:
data_stats = data[:, channels]
# Calculate and return statistic
return np.sqrt(np.exp(np.std(np.log(data_stats), axis=0)**2) - 1)
|
Calculate the geometric CV of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The geometric coefficient of variation of the events in the
specified channels of `data`.
|
entailment
|
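Since `gstd` and `gcv` operate on the log-transformed events, they can be
sanity-checked against log-normal samples, where the expected values have
closed forms. A hedged sketch (assumes `gstd` and `gcv` from this module
are in scope; printed values are approximate):

import numpy as np

np.random.seed(0)
# log(data) ~ Normal(mu=2, sigma=0.5), as a 100000x1 "event" array
data = np.random.lognormal(mean=2.0, sigma=0.5, size=(100000, 1))
print(gstd(data))   # ~ exp(0.5)            = 1.6487...
print(gcv(data))    # ~ sqrt(exp(0.25) - 1) = 0.5329...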
def iqr(data, channels=None):
"""
Calculate the Interquartile Range of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The Interquartile Range of the events in the specified channels of
`data`.
Notes
-----
    The Interquartile Range (IQR) of a dataset is defined as the difference
    between the 75th and the 25th percentiles of the dataset.
"""
# Slice data to take statistics from
if channels is None:
data_stats = data
else:
data_stats = data[:, channels]
# Calculate and return statistic
    q75, q25 = np.percentile(data_stats, [75, 25], axis=0)
return q75 - q25
|
Calculate the Interquartile Range of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The Interquartile Range of the events in the specified channels of
`data`.
Notes
-----
The Interquartile Range (IQR) of a dataset is defined as the difference
between the 75th and the 25th percentiles of the dataset.
|
entailment
|
def rcv(data, channels=None):
"""
Calculate the RCV of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The Robust Coefficient of Variation of the events in the specified
channels of `data`.
Notes
-----
The Robust Coefficient of Variation (RCV) of a dataset is defined as
    the Interquartile Range (IQR) divided by the median of the dataset.
"""
# Slice data to take statistics from
if channels is None:
data_stats = data
else:
data_stats = data[:, channels]
# Calculate and return statistic
    q75, q25 = np.percentile(data_stats, [75, 25], axis=0)
return (q75 - q25)/np.median(data_stats, axis=0)
|
Calculate the RCV of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The Robust Coefficient of Variation of the events in the specified
channels of `data`.
Notes
-----
The Robust Coefficient of Variation (RCV) of a dataset is defined as
the Interquartile Range (IQR) divided by the median of the dataset.
|
entailment
|
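The robust statistics are far less sensitive to outliers than their
classical counterparts, which is the reason to prefer them on heavy-tailed
flow cytometry data. A small sketch (assumes `cv` and `rcv` from this
module are in scope):

import numpy as np

np.random.seed(0)
data = np.random.normal(loc=100.0, scale=10.0, size=(10000, 1))
data[0] = 1e6   # one extreme outlier
print(cv(data))    # blown up by the single outlier (~50 instead of ~0.1)
print(rcv(data))   # ~0.135; for a Gaussian, IQR/median ~ 1.349*sigma/mu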
def dados_qrcode(cfe):
"""Compila os dados que compõem o QRCode do CF-e-SAT, conforme a
documentação técnica oficial **Guia para Geração do QRCode pelo Aplicativo
Comercial**, a partir de uma instância de ``ElementTree`` que represente a
árvore do XML do CF-e-SAT.
:param cfe: Instância de :py:mod:`xml.etree.ElementTree.ElementTree`.
:return: String contendo a massa de dados para ser usada ao gerar o QRCode.
:rtype: str
Por exemplo, para gerar a imagem do QRCode [#qrcode]_:
.. sourcecode:: python
import xml.etree.ElementTree as ET
import qrcode
with open('CFe_1.xml', 'r') as fp:
tree = ET.parse(fp)
imagem = qrcode.make(dados_qrcode(tree))
.. [#qrcode] https://pypi.python.org/pypi/qrcode
"""
infCFe = cfe.getroot().find('./infCFe')
cnpjcpf_consumidor = infCFe.findtext('dest/CNPJ') or \
infCFe.findtext('dest/CPF') or ''
return '|'.join([
        infCFe.attrib['Id'][3:],  # strip the "CFe" prefix
'{}{}'.format(
infCFe.findtext('ide/dEmi'),
infCFe.findtext('ide/hEmi')),
infCFe.findtext('total/vCFe'),
cnpjcpf_consumidor,
infCFe.findtext('ide/assinaturaQRCODE'),])
|
Compiles the data that makes up the CF-e-SAT QRCode, as specified in
the official technical documentation **Guia para Geração do QRCode pelo
Aplicativo Comercial**, from an ``ElementTree`` instance representing
the CF-e-SAT XML tree.
:param cfe: Instance of :py:mod:`xml.etree.ElementTree.ElementTree`.
:return: String containing the data payload to be used when generating
    the QRCode.
:rtype: str
For example, to generate the QRCode image [#qrcode]_:
.. sourcecode:: python
import xml.etree.ElementTree as ET
import qrcode
with open('CFe_1.xml', 'r') as fp:
tree = ET.parse(fp)
imagem = qrcode.make(dados_qrcode(tree))
.. [#qrcode] https://pypi.python.org/pypi/qrcode
|
entailment
|
def partes(self, num_partes=11):
"""Particiona a chave do CF-e-SAT em uma lista de *n* segmentos.
:param int num_partes: O número de segmentos (partes) em que os digitos
da chave do CF-e-SAT serão particionados. **Esse número deverá
resultar em uma divisão inteira por 44 (o comprimento da chave)**.
Se não for informado, assume ``11`` partes, comumente utilizado
para apresentar a chave do CF-e-SAT no extrato.
:return: Lista de strings contendo a chave do CF-e-SAT particionada.
:rtype: list
"""
assert 44 % num_partes == 0, 'O numero de partes nao produz um '\
'resultado inteiro (partes por 44 digitos): '\
'num_partes=%s' % num_partes
salto = 44 // num_partes
return [self._campos[n:(n + salto)] for n in range(0, 44, salto)]
|
Partitions the CF-e-SAT key into a list of *n* segments.
:param int num_partes: The number of segments (parts) into which the
    digits of the CF-e-SAT key will be partitioned. **This number must
    divide 44 (the length of the key) evenly**. If not given, assumes
    ``11`` parts, which is commonly used to present the CF-e-SAT key
    on the receipt.
:return: List of strings containing the partitioned CF-e-SAT key.
:rtype: list
|
entailment
|
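Since partes() simply slices the digit string held in ``self._campos``,
the underlying logic can be previewed on a bare 44-character string (a
hedged sketch with a made-up key, not a real access key):

chave = '1234567890' * 4 + '1234'   # stand-in for a 44-digit key
salto = 44 // 11                    # 4 digits per segment
print([chave[n:n + salto] for n in range(0, 44, salto)])
# ['1234', '5678', '9012', '3456', '7890', '1234', '5678', '9012',
#  '3456', '7890', '1234']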
def texto_decimal(valor, remover_zeros=True):
"""Converte um valor :py:class:`decimal.Decimal` para texto, com a opção de
remover os zeros à direita não significativos. A conversão para texto irá
considerar o :py:module:`locale` para converter o texto pronto para
apresentação.
:param decimal.Decimal valor: Valor a converter para texto.
:param bool remover_zeros: *Opcional* Indica se os zeros à direita não
significativos devem ser removidos do texto, o que irá incluir o
separador decimal se for o caso.
"""
texto = '{:n}'.format(valor)
if remover_zeros:
dp = locale.localeconv().get('decimal_point')
texto = texto.rstrip('0').rstrip(dp) if dp in texto else texto
return texto
|
Converts a :py:class:`decimal.Decimal` value to text, optionally
removing non-significant trailing zeros. The conversion takes the
:py:mod:`locale` into account, so the resulting text is ready for
presentation.
:param decimal.Decimal valor: Value to convert to text.
:param bool remover_zeros: *Optional* Indicates whether non-significant
    trailing zeros should be removed from the text, which also removes
    the decimal separator when applicable.
|
entailment
|
def modulo11(base):
"""Calcula o dígito verificador (DV) para o argumento usando "Módulo 11".
:param str base: String contendo os dígitos sobre os quais o DV será
calculado, assumindo que o DV não está incluído no argumento.
:return: O dígito verificador calculado.
:rtype: int
"""
pesos = '23456789' * ((len(base) // 8) + 1)
acumulado = sum([int(a) * int(b) for a, b in zip(base[::-1], pesos)])
digito = 11 - (acumulado % 11)
return 0 if digito >= 10 else digito
|
Computes the check digit (DV) for the argument using "Modulo 11".
:param str base: String containing the digits over which the DV will be
    computed, assuming the DV itself is not included in the argument.
:return: The computed check digit.
:rtype: int
|
entailment
|
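A worked example of the "Modulo 11" weighting: the weights 2..9 are
applied to the digits from right to left, repeating as needed (assumes
modulo11() from above is in scope):

# Digits of '2357' reversed: 7, 5, 3, 2; paired weights: 2, 3, 4, 5
# Weighted sum: 7*2 + 5*3 + 3*4 + 2*5 = 51; 51 % 11 = 7; DV = 11 - 7 = 4
print(modulo11('2357'))   # 4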
def validar_casas_decimais(valor, minimo=1, maximo=2):
"""Valida o número de casas decimais. Se o número de casas decimais não
estiver dentro do mínimo e máximo, será lançada uma exceção do tipo
:py:exc:`ValueError`.
:param valor: Um objeto :py:class:`~decimal.Decimal`.
:param minimo: Valor inteiro maior ou igual a zero indicando o número
mínimo de casas decimais. Se não informado, ``1`` é o mínimo.
:param maximo: Valor inteiro maior ou igual a zero indicando o número
máximo de casas decimais. Se não informado, ``2`` é o máximo.
:raises ValueError: Se o valor possuir um número de casas decimais fora dos
limites mínimo e máximo informados.
"""
atributos = valor.as_tuple()
if not (minimo <= abs(atributos.exponent) <= maximo):
raise ValueError('Numero de casas decimais fora dos limites esperados '
'(valor={!r}, minimo={!r}, maximo={!r}): {!r}'.format(
valor, minimo, maximo, atributos))
|
Validates the number of decimal places. If the number of decimal
places is not within the minimum and maximum, a :py:exc:`ValueError`
exception will be raised.
:param valor: A :py:class:`~decimal.Decimal` object.
:param minimo: Integer greater than or equal to zero indicating the
    minimum number of decimal places. If not given, the minimum is ``1``.
:param maximo: Integer greater than or equal to zero indicating the
    maximum number of decimal places. If not given, the maximum is ``2``.
:raises ValueError: If the value has a number of decimal places outside
    the given minimum and maximum limits.
|
entailment
|
def uf(sigla):
"""
    Validates a Federative Unit (state) abbreviation. If it is not a valid
    UF abbreviation, an :exc:`UnidadeFederativaError` exception is raised.
"""
    if sigla not in [s for s, i, n, r in UNIDADES_FEDERACAO]:
raise UnidadeFederativaError('Estado (sigla) UF "%s" '
'inexistente' % sigla)
|
Validates a Federative Unit (state) abbreviation. If it is not a valid
UF abbreviation, an :exc:`UnidadeFederativaError` exception is raised.
|
entailment
|
def uf_pelo_codigo(codigo_ibge):
"""Retorna a UF para o código do IBGE informado."""
idx = [i for s, i, n, r in UNIDADES_FEDERACAO].index(codigo_ibge)
return UNIDADES_FEDERACAO[idx][_UF_SIGLA]
|
Returns the UF for the given IBGE code.
|
entailment
|
def codigo_ibge_uf(sigla):
"""Retorna o código do IBGE para a UF informada."""
idx = [s for s, i, n, r in UNIDADES_FEDERACAO].index(sigla)
return UNIDADES_FEDERACAO[idx][_UF_CODIGO_IBGE]
|
Returns the IBGE code for the given UF.
|
entailment
|
def cnpj(numero):
"""Valida um número de CNPJ. O número deverá ser informado como uma string
contendo 14 dígitos numéricos. Se o número informado for inválido será
lançada a exceção :exc:`NumeroCNPJError`. Esta implementação da validação
foi delicadamente copiada de `python-sped <http://git.io/vfuGW>`."""
_digitos = [int(c) for c in numero if c.isdigit()]
if len(_digitos) != 14 or len(numero) != 14:
raise NumeroCNPJError('Nao possui 14 digitos: {!r}'.format(numero))
if numero == numero[0] * 14:
raise NumeroCNPJError('Todos os digitos iguais: {!r}'.format(numero))
multiplicadores = [6, 5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]
soma1 = sum([_digitos[i] * multiplicadores[i+1] for i in range(12)])
soma2 = sum([_digitos[i] * multiplicadores[i] for i in range(13)])
digito1 = 11 - (soma1 % 11)
digito2 = 11 - (soma2 % 11)
if digito1 >= 10:
digito1 = 0
if digito2 >= 10:
digito2 = 0
if _digitos[12] != digito1 or _digitos[13] != digito2:
raise NumeroCNPJError('Digitos verificadores invalidos: {!r}'.format(numero))
|
Validates a CNPJ number. The number must be given as a string of 14
numeric digits. If the given number is invalid, a :exc:`NumeroCNPJError`
exception will be raised. This implementation of the validation was
graciously copied from `python-sped <http://git.io/vfuGW>`.
|
entailment
|
def is_cnpj(numero, estrito=False):
"""Uma versão conveniente para usar em testes condicionais. Apenas retorna
verdadeiro ou falso, conforme o argumento é validado.
:param bool estrito: Padrão ``False``, indica se apenas os dígitos do
número deverão ser considerados. Se verdadeiro, potenciais caracteres
que formam a máscara serão removidos antes da validação ser realizada.
"""
try:
cnpj(digitos(numero) if not estrito else numero)
return True
except NumeroCNPJError:
pass
return False
|
A convenience version for use in conditional tests. Simply returns
True or False, according to whether the argument validates.
:param bool estrito: Defaults to ``False``. When ``False``, potential
    mask characters are stripped so that only the digits of the number
    are considered; when ``True``, the number is validated as given.
|
entailment
|
def as_cnpj(numero):
"""Formata um número de CNPJ. Se o número não for um CNPJ válido apenas
retorna o argumento sem qualquer modificação.
"""
_num = digitos(numero)
if is_cnpj(_num):
return '{}.{}.{}/{}-{}'.format(
_num[:2], _num[2:5], _num[5:8], _num[8:12], _num[12:])
return numero
|
Formats a CNPJ number. If the number is not a valid CNPJ, the
argument is returned without any modification.
|
entailment
|
def cpf(numero):
"""Valida um número de CPF. O número deverá ser informado como uma string
contendo 11 dígitos numéricos. Se o número informado for inválido será
lançada a exceção :exc:`NumeroCPFError`. Esta implementação da validação
foi delicadamente copiada de `python-sped <http://git.io/vfuGW>`.
"""
_digitos = [int(c) for c in numero if c.isdigit()]
if len(_digitos) != 11 or len(numero) != 11:
raise NumeroCPFError('Nao possui 11 digitos: {!r}'.format(numero))
if numero == numero[0] * 11:
raise NumeroCPFError('Todos os digitos iguais: {!r}'.format(numero))
multiplicadores = [11, 10, 9, 8, 7, 6, 5, 4, 3, 2]
soma1 = sum([_digitos[i] * multiplicadores[i+1] for i in range(9)])
soma2 = sum([_digitos[i] * multiplicadores[i] for i in range(10)])
digito1 = 11 - (soma1 % 11)
digito2 = 11 - (soma2 % 11)
if digito1 >= 10:
digito1 = 0
if digito2 >= 10:
digito2 = 0
if _digitos[9] != digito1 or _digitos[10] != digito2:
raise NumeroCPFError('Digitos verificadores invalidos: {!r}'.format(numero))
|
Validates a CPF number. The number must be given as a string of 11
numeric digits. If the given number is invalid, a :exc:`NumeroCPFError`
exception will be raised. This implementation of the validation was
graciously copied from `python-sped <http://git.io/vfuGW>`.
|
entailment
|
def is_cpf(numero, estrito=False):
"""Uma versão conveniente para usar em testes condicionais. Apenas retorna
verdadeiro ou falso, conforme o argumento é validado.
:param bool estrito: Padrão ``False``, indica se apenas os dígitos do
número deverão ser considerados. Se verdadeiro, potenciais caracteres
que formam a máscara serão removidos antes da validação ser realizada.
"""
try:
cpf(digitos(numero) if not estrito else numero)
return True
except NumeroCPFError:
pass
return False
|
A convenience version for use in conditional tests. Simply returns
True or False, according to whether the argument validates.
:param bool estrito: Defaults to ``False``. When ``False``, potential
    mask characters are stripped so that only the digits of the number
    are considered; when ``True``, the number is validated as given.
|
entailment
|
def as_cpf(numero):
"""Formata um número de CPF. Se o número não for um CPF válido apenas
retorna o argumento sem qualquer modificação.
"""
_num = digitos(numero)
if is_cpf(_num):
return '{}.{}.{}-{}'.format(_num[:3], _num[3:6], _num[6:9], _num[9:])
return numero
|
Formats a CPF number. If the number is not a valid CPF, the
argument is returned without any modification.
|
entailment
|
def cnpjcpf(numero):
"""Valida um número de CNPJ ou CPF. Veja :func:`cnpj` e/ou :func:`cpf`."""
try:
cnpj(numero)
except NumeroCNPJError:
try:
cpf(numero)
except NumeroCPFError:
raise NumeroCNPJCPFError('numero "%s" nao valida como '
'CNPJ nem como CPF' % numero)
|
Validates a CNPJ or CPF number. See :func:`cnpj` and/or :func:`cpf`.
|
entailment
|
def is_cnpjcpf(numero, estrito=False):
"""Uma versão conveniente para usar em testes condicionais. Apenas retorna
verdadeiro ou falso, conforme o argumento é validado.
:param bool estrito: Padrão ``False``, indica se apenas os dígitos do
número deverão ser considerados. Se verdadeiro, potenciais caracteres
que formam a máscara serão removidos antes da validação ser realizada.
"""
_numero = digitos(numero) if not estrito else numero
try:
cnpj(_numero)
return True
except NumeroCNPJError:
try:
cpf(_numero)
return True
except NumeroCPFError:
pass
return False
|
A convenience version for use in conditional tests. Simply returns
True or False, according to whether the argument validates.
:param bool estrito: Defaults to ``False``. When ``False``, potential
    mask characters are stripped so that only the digits of the number
    are considered; when ``True``, the number is validated as given.
|
entailment
|
def as_cnpjcpf(numero):
"""Formata um número de CNPJ ou CPF. Se o número não for um CNPJ ou CPF
válidos apenas retorna o argumento sem qualquer modificação.
"""
if is_cnpj(numero):
return as_cnpj(numero)
elif is_cpf(numero):
return as_cpf(numero)
return numero
|
Formats a CNPJ or CPF number. If the number is neither a valid CNPJ
nor a valid CPF, the argument is returned without any modification.
|
entailment
|
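A few hedged usage examples of the validators and formatters above. The
sample numbers are synthetic values that happen to satisfy the check-digit
rules, not real registrations:

print(is_cnpj('11.444.777/0001-61'))                 # True (mask stripped)
print(is_cnpj('11.444.777/0001-61', estrito=True))   # False (mask kept)
print(as_cnpjcpf('11444777000161'))   # '11.444.777/0001-61'
print(as_cnpjcpf('52998224725'))      # '529.982.247-25'
print(as_cnpjcpf('12345'))            # '12345' (returned unchanged)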
def cep(numero):
"""Valida um número de CEP. O número deverá ser informado como uma string
contendo 8 dígitos numéricos. Se o número informado for inválido será
lançada a exceção :exc:`NumeroCEPError`.
.. warning::
Qualquer string que contenha 8 dígitos será considerada como um CEP
válido, desde que os dígitos não sejam todos iguais.
"""
_digitos = digitos(numero)
if len(_digitos) != 8 or len(numero) != 8:
raise NumeroCEPError('CEP "%s" nao possui 8 digitos' % numero)
elif _digitos[0] * 8 == _digitos:
raise NumeroCEPError('CEP "%s" considerado invalido' % numero)
|
Validates a CEP number. The number must be given as a string
containing 8 numeric digits. If the given number is invalid, a
:exc:`NumeroCEPError` exception will be raised.
.. warning::
Any string containing 8 digits will be considered a valid CEP,
as long as the digits are not all the same.
|
entailment
|
def is_cep(numero, estrito=False):
"""Uma versão conveniente para usar em testes condicionais. Apenas retorna
verdadeiro ou falso, conforme o argumento é validado.
:param bool estrito: Padrão ``False``, indica se apenas os dígitos do
número deverão ser considerados. Se verdadeiro, potenciais caracteres
que formam a máscara serão removidos antes da validação ser realizada.
"""
try:
cep(digitos(numero) if not estrito else numero)
return True
except NumeroCEPError:
pass
return False
|
A convenience version for use in conditional tests. Simply returns
True or False, according to whether the argument validates.
:param bool estrito: Defaults to ``False``. When ``False``, potential
    mask characters are stripped so that only the digits of the number
    are considered; when ``True``, the number is validated as given.
|
entailment
|
def as_cep(numero):
"""Formata um número de CEP. Se o argumento não for um CEP válido apenas
retorna o argumento sem qualquer modificação.
"""
_numero = digitos(numero)
if is_cep(_numero):
return '{}-{}'.format(_numero[:5], _numero[5:])
return numero
|
Formats a CEP number. If the argument is not a valid CEP, it is
returned without any modification.
|
entailment
|
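The CEP helpers follow the same pattern (a short sketch; '01310100' is
just a sample 8-digit code):

print(is_cep('01310-100'))   # True (mask stripped by default)
print(as_cep('01310100'))    # '01310-100'
print(is_cep('11111111'))    # False - all digits equal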
def convert_tree(message, config, indent=0, wrap_alternative=True, charset=None):
"""Recursively convert a potentially-multipart tree.
Returns a tuple of (the converted tree, whether any markdown was found)
"""
ct = message.get_content_type()
cs = message.get_content_subtype()
if charset is None:
charset = get_charset_from_message_fragment(message)
if not message.is_multipart():
# we're on a leaf
converted = None
disposition = message.get('Content-Disposition', 'inline')
if disposition == 'inline' and ct in ('text/plain', 'text/markdown'):
converted = convert_one(message, config, charset)
if converted is not None:
if wrap_alternative:
new_tree = MIMEMultipart('alternative')
_move_headers(message, new_tree)
new_tree.attach(message)
new_tree.attach(converted)
return new_tree, True
else:
return converted, True
return message, False
else:
if ct == 'multipart/signed':
# if this is a multipart/signed message, then let's just
# recurse into the non-signature part
new_root = MIMEMultipart('alternative')
if message.preamble:
new_root.preamble = message.preamble
_move_headers(message, new_root)
converted = None
for part in message.get_payload():
if part.get_content_type() != 'application/pgp-signature':
converted, did_conversion = convert_tree(part, config, indent=indent + 1,
wrap_alternative=False,
charset=charset)
if did_conversion:
new_root.attach(converted)
new_root.attach(message)
return new_root, did_conversion
else:
did_conversion = False
new_root = MIMEMultipart(cs, message.get_charset())
if message.preamble:
new_root.preamble = message.preamble
_move_headers(message, new_root)
for part in message.get_payload():
part, did_this_conversion = convert_tree(part, config, indent=indent + 1, charset=charset)
did_conversion |= did_this_conversion
new_root.attach(part)
return new_root, did_conversion
|
Recursively convert a potentially-multipart tree.
Returns a tuple of (the converted tree, whether any markdown was found)
|
entailment
|
def smtp_connection(c):
"""Create an SMTP connection from a Config object"""
if c.smtp_ssl:
klass = smtplib.SMTP_SSL
else:
klass = smtplib.SMTP
conn = klass(c.smtp_host, c.smtp_port, timeout=c.smtp_timeout)
if not c.smtp_ssl:
conn.ehlo()
conn.starttls()
conn.ehlo()
if c.smtp_username:
conn.login(c.smtp_username, c.smtp_password)
return conn
|
Create an SMTP connection from a Config object
|
entailment
|
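smtp_connection() only needs an object exposing the attributes it reads,
so it can be exercised with a stand-in namespace. A minimal sketch; the
host and credentials below are hypothetical:

from types import SimpleNamespace

config = SimpleNamespace(
    smtp_ssl=False,               # False -> plain SMTP, then STARTTLS
    smtp_host='smtp.example.com',
    smtp_port=587,
    smtp_timeout=10,
    smtp_username='user@example.com',
    smtp_password='secret',
)
conn = smtp_connection(config)    # EHLO, STARTTLS, EHLO, then login
conn.quit()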
def issuers(self):
"""Return the list of available issuers for this payment method."""
issuers = self._get_property('issuers') or []
result = {
'_embedded': {
'issuers': issuers,
},
'count': len(issuers),
}
return List(result, Issuer)
|
Return the list of available issuers for this payment method.
|
entailment
|
def delete(self, payment_id, data=None):
"""Cancel payment and return the payment object.
Deleting a payment causes the payment status to change to canceled.
The updated payment object is returned.
"""
if not payment_id or not payment_id.startswith(self.RESOURCE_ID_PREFIX):
raise IdentifierError(
"Invalid payment ID: '{id}'. A payment ID should start with '{prefix}'.".format(
id=payment_id, prefix=self.RESOURCE_ID_PREFIX)
)
result = super(Payments, self).delete(payment_id, data)
return self.get_resource_object(result)
|
Cancel payment and return the payment object.
Deleting a payment causes the payment status to change to canceled.
The updated payment object is returned.
|
entailment
|
def mandate(self):
"""Return the mandate for this payment."""
return self.client.customer_mandates.with_parent_id(self.customer_id).get(self.mandate_id)
|
Return the mandate for this payment.
|
entailment
|
def subscription(self):
"""Return the subscription for this payment."""
return self.client.customer_subscriptions.with_parent_id(self.customer_id).get(self.subscription_id)
|
Return the subscription for this payment.
|
entailment
|
def order(self):
"""Return the order for this payment. """
from ..resources.orders import Order
url = self._get_link('order')
if url:
resp = self.client.orders.perform_api_call(self.client.orders.REST_READ, url)
return Order(resp, self.client)
|
Return the order for this payment.
|
entailment
|
def get_version():
"""
Read the version from a file (mollie/api/version.py) in the repository.
We can't import here since we might import from an installed version.
"""
try:
        version_file = open(os.path.join(ROOT_DIR, 'mollie', 'api', 'version.py'), encoding='utf-8')
except TypeError:
# support python 2
version_file = open(os.path.join(ROOT_DIR, 'mollie', 'api', 'version.py'))
contents = version_file.read()
match = re.search(r'VERSION = [\'"]([^\'"]+)', contents)
if match:
return match.group(1)
else:
raise RuntimeError("Can't determine package version")
|
Read the version from a file (mollie/api/version.py) in the repository.
We can't import here since we might import from an installed version.
|
entailment
|
def create_refund(self, data=None, **params):
"""Create a refund for the order. When no data arg is given, a refund for all order lines is assumed."""
if data is None:
data = {'lines': []}
refund = OrderRefunds(self.client).on(self).create(data, **params)
return refund
|
Create a refund for the order. When no data arg is given, a refund for all order lines is assumed.
|
entailment
|
def cancel_lines(self, data=None):
"""Cancel the lines given. When no lines are given, cancel all the lines.
Canceling an order line causes the order line status to change to canceled.
An empty dictionary will be returned.
"""
from ..resources.order_lines import OrderLines
if data is None:
data = {'lines': []}
canceled = OrderLines(self.client).on(self).delete(data)
return canceled
|
Cancel the lines given. When no lines are given, cancel all the lines.
Canceling an order line causes the order line status to change to canceled.
An empty dictionary will be returned.
|
entailment
|
def update_line(self, resource_id, data):
"""Update a line for an order."""
return OrderLines(self.client).on(self).update(resource_id, data)
|
Update a line for an order.
|
entailment
|
def create_shipment(self, data=None):
"""Create a shipment for an order. When no data arg is given, a shipment for all order lines is assumed."""
if data is None:
data = {'lines': []}
return Shipments(self.client).on(self).create(data)
|
Create a shipment for an order. When no data arg is given, a shipment for all order lines is assumed.
|
entailment
|
def get_shipment(self, resource_id):
"""Retrieve a single shipment by a shipment's ID."""
return Shipments(self.client).on(self).get(resource_id)
|
Retrieve a single shipment by a shipment's ID.
|
entailment
|
def update_shipment(self, resource_id, data):
"""Update the tracking information of a shipment."""
return Shipments(self.client).on(self).update(resource_id, data)
|
Update the tracking information of a shipment.
|
entailment
|
def create_payment(self, data):
""" Creates a new payment object for an order. """
return OrderPayments(self.client).on(self).create(data)
|
Creates a new payment object for an order.
|
entailment
|
def delete(self, data, *args):
"""
Custom handling for deleting orderlines.
Orderlines are deleted by issuing a DELETE on the orders/*/lines endpoint,
with the orderline IDs and quantities in the request body.
"""
path = self.get_resource_name()
result = self.perform_api_call(self.REST_DELETE, path, data=data)
return result
|
Custom handling for deleting orderlines.
Orderlines are deleted by issuing a DELETE on the orders/*/lines endpoint,
with the orderline IDs and quantities in the request body.
|
entailment
|
def update(self, resource_id, data=None, **params):
"""
Custom handling for updating orderlines.
The API returns an Order object. Since we are sending the request through an orderline object, it makes more
    sense to convert the returned object to the updated orderline object.
If you wish to retrieve the order object, you can do so by using the order_id property of the orderline.
"""
path = self.get_resource_name() + '/' + str(resource_id)
result = self.perform_api_call(self.REST_UPDATE, path, data=data)
for line in result['lines']:
if line['id'] == resource_id:
return self.get_resource_object(line)
raise DataConsistencyError('Line id {resource_id} not found in response.'.format(resource_id=resource_id))
|
Custom handling for updating orderlines.
The API returns an Order object. Since we are sending the request through an orderline object, it makes more
sense to convert the returned object to the updated orderline object.
If you wish to retrieve the order object, you can do so by using the order_id property of the orderline.
|
entailment
|
def get_next(self):
"""Return the next set of objects in a list"""
url = self._get_link('next')
resource = self.object_type.get_resource_class(self.client)
resp = resource.perform_api_call(resource.REST_READ, url)
return List(resp, self.object_type, self.client)
|
Return the next set of objects in a list
|
entailment
|
def delete(self, subscription_id, data=None):
"""Cancel subscription and return the subscription object.
    Deleting a subscription causes the subscription status to change to 'canceled'.
The updated subscription object is returned.
"""
if not subscription_id or not subscription_id.startswith(self.RESOURCE_ID_PREFIX):
raise IdentifierError(
"Invalid subscription ID: '{id}'. A subscription ID should start with '{prefix}'.".format(
id=subscription_id, prefix=self.RESOURCE_ID_PREFIX)
)
result = super(CustomerSubscriptions, self).delete(subscription_id, data)
return self.get_resource_object(result)
|
Cancel subscription and return the subscription object.
Deleting a subscription causes the subscription status to change to 'canceled'.
The updated subscription object is returned.
|
entailment
|
def get(self, chargeback_id, **params):
"""Verify the chargeback ID and retrieve the chargeback from the API."""
if not chargeback_id or not chargeback_id.startswith(self.RESOURCE_ID_PREFIX):
raise IdentifierError(
"Invalid chargeback ID: '{id}'. A chargeback ID should start with '{prefix}'.".format(
id=chargeback_id, prefix=self.RESOURCE_ID_PREFIX)
)
return super(Chargebacks, self).get(chargeback_id, **params)
|
Verify the chargeback ID and retrieve the chargeback from the API.
|
entailment
|
def generate_querystring(params):
"""
Generate a querystring suitable for use in the v2 api.
The Requests library doesn't know how to generate querystrings that encode dictionaries using square brackets:
https://api.mollie.com/v2/methods?amount[value]=100.00&amount[currency]=USD
Note: we use `sorted()` to work around a difference in iteration behaviour between Python 2 and 3.
This makes the output predictable, and ordering of querystring parameters shouldn't matter.
"""
if not params:
return None
parts = []
for param, value in sorted(params.items()):
if not isinstance(value, dict):
parts.append(urlencode({param: value}))
else:
# encode dictionary with square brackets
for key, sub_value in sorted(value.items()):
composed = '{param}[{key}]'.format(param=param, key=key)
parts.append(urlencode({composed: sub_value}))
if parts:
return '&'.join(parts)
|
Generate a querystring suitable for use in the v2 api.
The Requests library doesn't know how to generate querystrings that encode dictionaries using square brackets:
https://api.mollie.com/v2/methods?amount[value]=100.00&amount[currency]=USD
Note: we use `sorted()` to work around a difference in iteration behaviour between Python 2 and 3.
This makes the output predictable, and ordering of querystring parameters shouldn't matter.
|
entailment
|
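A quick check of the bracket encoding described above (assumes
generate_querystring() from above is in scope; note that urlencode()
percent-encodes the square brackets, which is the equivalent escaped form
on the wire):

params = {
    'amount': {'value': '100.00', 'currency': 'USD'},
    'limit': 10,
}
print(generate_querystring(params))
# amount%5Bcurrency%5D=USD&amount%5Bvalue%5D=100.00&limit=10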
def set_user_agent_component(self, key, value, sanitize=True):
"""Add or replace new user-agent component strings.
Given strings are formatted along the format agreed upon by Mollie and implementers:
    - keys and values are separated by a forward slash ("/").
- multiple key/values are separated by a space.
- keys are camel-cased, and cannot contain spaces.
- values cannot contain spaces.
    Note: When you set sanitize=False you need to make sure the formatting is correct yourself.
"""
if sanitize:
key = ''.join(_x.capitalize() for _x in re.findall(r'\S+', key))
if re.search(r'\s+', value):
value = '_'.join(re.findall(r'\S+', value))
self.user_agent_components[key] = value
|
Add or replace new user-agent component strings.
Given strings are formatted along the format agreed upon by Mollie and implementers:
- key and values are separated by a forward slash ("/").
- multiple key/values are separated by a space.
- keys are camel-cased, and cannot contain spaces.
- values cannot contain spaces.
Note: When you set sanitize=False you need to make sure the formatting is correct yourself.
|
entailment
|
def user_agent(self):
"""Return the formatted user agent string."""
components = ["/".join(x) for x in self.user_agent_components.items()]
return " ".join(components)
|
Return the formatted user agent string.
|
entailment
|
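The sanitisation rules can be traced by hand, independent of any client
object (a standalone sketch of the two regex steps used above):

import re

key, value = 'my shop', 'beta version'
key = ''.join(x.capitalize() for x in re.findall(r'\S+', key))   # 'MyShop'
if re.search(r'\s+', value):
    value = '_'.join(re.findall(r'\S+', value))                  # 'beta_version'
print('{}/{}'.format(key, value))   # MyShop/beta_version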
def factory(resp):
"""
Return a ResponseError subclass based on the API payload.
All errors are documented: https://docs.mollie.com/guides/handling-errors#all-possible-status-codes
More exceptions should be added here when appropriate, and when useful examples of API errors are available.
"""
status = resp['status']
if status == 401:
return UnauthorizedError(resp)
elif status == 404:
return NotFoundError(resp)
elif status == 422:
return UnprocessableEntityError(resp)
else:
# generic fallback
return ResponseError(resp)
|
Return a ResponseError subclass based on the API payload.
All errors are documented: https://docs.mollie.com/guides/handling-errors#all-possible-status-codes
More exceptions should be added here when appropriate, and when useful examples of API errors are available.
|
entailment
|
def delete(self, order_id, data=None):
"""Cancel order and return the order object.
Deleting an order causes the order status to change to canceled.
The updated order object is returned.
"""
if not order_id or not order_id.startswith(self.RESOURCE_ID_PREFIX):
raise IdentifierError(
"Invalid order ID: '{id}'. An order ID should start with '{prefix}'.".format(
id=order_id, prefix=self.RESOURCE_ID_PREFIX)
)
result = super(Orders, self).delete(order_id, data)
return self.get_resource_object(result)
|
Cancel order and return the order object.
Deleting an order causes the order status to change to canceled.
The updated order object is returned.
|
entailment
|
def customer(self):
"""Return the customer for this subscription."""
url = self._get_link('customer')
if url:
resp = self.client.customers.perform_api_call(self.client.customers.REST_READ, url)
return Customer(resp)
|
Return the customer for this subscription.
|
entailment
|
def payments(self):
"""Return a list of payments for this subscription."""
payments = self.client.subscription_payments.on(self).list()
return payments
|
Return a list of payments for this subscription.
|
entailment
|
def list_groups(self, filtr, url_prefix, auth, session, send_opts):
"""Get the groups the logged in user is a member of.
Optionally filter by 'member' or 'maintainer'.
Args:
filtr (string|None): ['member'|'maintainer'] or defaults to None.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Returns:
(list[string]): List of group names.
Raises:
requests.HTTPError on failure.
"""
req = self.get_group_request(
'GET', 'application/json', url_prefix, auth)
if filtr is not None:
            if filtr not in ('member', 'maintainer'):
raise RuntimeError(
'filtr must be either "member", "maintainer", or None.')
req.params = {'filter': filtr}
prep = session.prepare_request(req)
resp = session.send(prep, **send_opts)
if resp.status_code == 200:
resp_json = resp.json()
return resp_json['groups']
msg = ('List groups failed, got HTTP response: ({}) - {}'.format(
resp.status_code, resp.text))
raise HTTPError(msg, request = req, response = resp)
|
Get the groups the logged in user is a member of.
Optionally filter by 'member' or 'maintainer'.
Args:
filtr (string|None): ['member'|'maintainer'] or defaults to None.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Returns:
(list[string]): List of group names.
Raises:
requests.HTTPError on failure.
|
entailment
|
def delete_group(self, name, url_prefix, auth, session, send_opts):
"""Delete given group.
Args:
name (string): Name of group.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Raises:
requests.HTTPError on failure.
"""
req = self.get_group_request(
'DELETE', 'application/json', url_prefix, auth, name)
prep = session.prepare_request(req)
resp = session.send(prep, **send_opts)
if resp.status_code == 204:
return
msg = ('Delete failed for group {}, got HTTP response: ({}) - {}'.format(
name, resp.status_code, resp.text))
raise HTTPError(msg, request = req, response = resp)
|
Delete given group.
Args:
name (string): Name of group.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Raises:
requests.HTTPError on failure.
|
entailment
|
def list_group_members(self, name, url_prefix, auth, session, send_opts):
"""Get the members of a group (does not include maintainers).
Args:
name (string): Name of group to query.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Returns:
(list[string]): List of member names.
Raises:
requests.HTTPError on failure.
"""
req = self.get_group_members_request(
'GET', 'application/json', url_prefix, auth, name)
prep = session.prepare_request(req)
resp = session.send(prep, **send_opts)
if resp.status_code == 200:
resp_json = resp.json()
return resp_json['members']
msg = ('Failed getting members of group {}, got HTTP response: ({}) - {}'.format(
name, resp.status_code, resp.text))
raise HTTPError(msg, request = req, response = resp)
|
Get the members of a group (does not include maintainers).
Args:
name (string): Name of group to query.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Returns:
(list[string]): List of member names.
Raises:
requests.HTTPError on failure.
|
entailment
|
def list_permissions(self, group_name=None, resource=None,
url_prefix=None, auth=None, session=None, send_opts=None):
"""List the permission sets for the logged in user
Optionally filter by resource or group.
Args:
group_name (string): Name of group to filter on
resource (intern.resource.boss.BossResource): Identifies which data model object to filter on
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Returns:
(list[dict]): List of dictionaries of permission sets
"""
filter_params = {}
if group_name:
filter_params["group"] = group_name
if resource:
filter_params.update(resource.get_dict_route())
req = self.get_permission_request('GET', 'application/json',
url_prefix, auth, query_params=filter_params)
prep = session.prepare_request(req)
resp = session.send(prep, **send_opts)
if resp.status_code != 200:
msg = "Failed to get permission sets. "
if group_name:
msg = "{} Group: {}".format(msg, group_name)
if resource:
msg = "{} Resource: {}".format(msg, resource.name)
msg = '{}, got HTTP response: ({}) - {}'.format(msg, resp.status_code, resp.text)
raise HTTPError(msg, request=req, response=resp)
else:
return resp.json()["permission-sets"]
|
List the permission sets for the logged in user
Optionally filter by resource or group.
Args:
group_name (string): Name of group to filter on
resource (intern.resource.boss.BossResource): Identifies which data model object to filter on
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Returns:
(list[dict]): List of dictionaries of permission sets
|
entailment
|
def add_permissions(self, group_name, resource, permissions, url_prefix, auth, session, send_opts):
"""
Args:
group_name (string): Name of group.
resource (intern.resource.boss.BossResource): Identifies which data model object to operate on.
permissions (list): List of permissions to add to the given resource.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
"""
post_data = {"group": group_name,
"permissions": permissions,
}
post_data.update(resource.get_dict_route())
req = self.get_permission_request('POST', 'application/json',
url_prefix, auth, post_data=post_data)
prep = session.prepare_request(req)
resp = session.send(prep, **send_opts)
if resp.status_code != 201:
msg = ('Failed adding permissions to group {}, got HTTP response: ({}) - {}'.format(group_name,
resp.status_code,
resp.text))
raise HTTPError(msg, request=req, response=resp)
|
Args:
group_name (string): Name of group.
resource (intern.resource.boss.BossResource): Identifies which data model object to operate on.
permissions (list): List of permissions to add to the given resource.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
|
entailment
|
def delete_permissions(self, grp_name, resource, url_prefix, auth, session, send_opts):
"""
Args:
grp_name (string): Name of group.
resource (intern.resource.boss.BossResource): Identifies which data model object to operate on.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Raises:
requests.HTTPError on failure.
"""
filter_params = {"group": grp_name}
filter_params.update(resource.get_dict_route())
req = self.get_permission_request('DELETE', 'application/json',
url_prefix, auth, query_params=filter_params)
prep = session.prepare_request(req)
resp = session.send(prep, **send_opts)
if resp.status_code == 204:
return
        msg = ('Failed deleting permissions for group {}, got HTTP response: ({}) - {}'.format(
grp_name, resp.status_code, resp.text))
raise HTTPError(msg, request=req, response=resp)
|
Args:
grp_name (string): Name of group.
resource (intern.resource.boss.BossResource): Identifies which data model object to operate on.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Raises:
requests.HTTPError on failure.
|
entailment
|
def get_user_roles(self, user, url_prefix, auth, session, send_opts):
"""Get roles associated with the given user.
Args:
user (string): User name.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Returns:
(list): List of roles that user has.
Raises:
requests.HTTPError on failure.
"""
req = self.get_user_role_request(
'GET', 'application/json', url_prefix, auth,
user)
prep = session.prepare_request(req)
resp = session.send(prep, **send_opts)
if resp.status_code == 200:
return resp.json()
msg = (
'Failed getting roles for user: {}, got HTTP response: ({}) - {}'
.format(user, resp.status_code, resp.text))
raise HTTPError(msg, request = req, response = resp)
|
Get roles associated with the given user.
Args:
user (string): User name.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Returns:
(list): List of roles that user has.
Raises:
requests.HTTPError on failure.
|
entailment
|
def add_user(
        self, user, first_name, last_name, email, password,
        url_prefix, auth, session, send_opts):
    """Add a new user.
    Args:
        user (string): User name.
        first_name (string): User's first name.
        last_name (string): User's last name.
        email (string): User's email address.
        password (string): User's password.
        url_prefix (string): Protocol + host such as https://api.theboss.io
        auth (string): Token to send in the request header.
        session (requests.Session): HTTP session to use for request.
        send_opts (dictionary): Additional arguments to pass to session.send().
    Raises:
        requests.HTTPError on failure.
    """
    req = self.get_user_request(
        'POST', 'application/json', url_prefix, auth,
        user, first_name, last_name, email, password)
    prep = session.prepare_request(req)
    resp = session.send(prep, **send_opts)
    if resp.status_code == 201:
        return
    msg = (
        'Failed adding user: {}, got HTTP response: ({}) - {}'
        .format(user, resp.status_code, resp.text))
    raise HTTPError(msg, request=req, response=resp)
|
Add a new user.
Args:
user (string): User name.
first_name (string): User's first name.
last_name (string): User's last name.
email (string): User's email address.
password (string): User's password.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Raises:
requests.HTTPError on failure.
|
entailment
|
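The method's five user parameters map onto the body of a POST; 201 Created is the only success signal. A hedged sketch of that payload (the exact field names the Boss API expects are an assumption here):

payload = {
    'first_name': 'Jane',
    'last_name': 'Doe',
    'email': 'jane@example.com',
    'password': 's3cret',
}
# get_user_request() would attach this as the JSON body of a POST scoped
# to the new user name; anything other than 201 raises HTTPError.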
def list(self, resource, url_prefix, auth, session, send_opts):
    """List all resources of the same type as the given resource.
    Args:
        resource (intern.resource.boss.BossResource): List resources of the same type as this one.
        url_prefix (string): Protocol + host such as https://api.theboss.io
        auth (string): Token to send in the request header.
        session (requests.Session): HTTP session to use for request.
        send_opts (dictionary): Additional arguments to pass to session.send().
    Returns:
        (list): List of resources. Each resource is a dictionary.
    Raises:
        requests.HTTPError on failure.
    """
    req = self.get_request(
        resource, 'GET', 'application/json', url_prefix, auth,
        proj_list_req=True)
    prep = session.prepare_request(req)
    resp = session.send(prep, **send_opts)
    if resp.status_code == 200:
        return self._get_resource_list(resp.json())
    err = ('List failed on {}, got HTTP response: ({}) - {}'.format(
        resource.name, resp.status_code, resp.text))
    raise HTTPError(err, request=req, response=resp)
|
List all resources of the same type as the given resource.
Args:
resource (intern.resource.boss.BossResource): List resources of the same type as this one.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Returns:
(list): List of resources. Each resource is a dictionary.
Raises:
requests.HTTPError on failure.
|
entailment
|
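Usage sketch: the resource argument only selects the endpoint type, so a throwaway instance is enough. This assumes svc is an instance of the service class defining list() and that the caller already holds a token; both are stand-ins, since the class itself is not shown here.

import requests
from intern.resource.boss.resource import CollectionResource

def list_all_collections(svc, session):
    # svc: hypothetical instance of the project service class defining list().
    # The CollectionResource's name is ignored; only its type matters here.
    return svc.list(CollectionResource('ignored'), 'https://api.theboss.io',
                    'Token <token>', session, {})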
def update(self, resource_name, resource, url_prefix, auth, session, send_opts):
    """Updates an entity in the data model using the given resource.
    Args:
        resource_name (string): Current name of the resource (in case the update renames it).
        resource (intern.resource.boss.BossResource): New attributes for the resource.
        url_prefix (string): Protocol + host such as https://api.theboss.io
        auth (string): Token to send in the request header.
        session (requests.Session): HTTP session to use for request.
        send_opts (dictionary): Additional arguments to pass to session.send().
    Returns:
        (intern.resource.boss.BossResource): Returns updated resource of given type on success.
    Raises:
        requests.HTTPError on failure.
    """
    # Create a copy of the resource and change its name to resource_name
    # in case the update includes changing the name of a resource.
    old_resource = copy.deepcopy(resource)
    old_resource.name = resource_name
    json = self._get_resource_params(resource, for_update=True)
    req = self.get_request(old_resource, 'PUT', 'application/json', url_prefix, auth, json=json)
    prep = session.prepare_request(req)
    resp = session.send(prep, **send_opts)
    if resp.status_code == 200:
        return self._create_resource_from_dict(resource, resp.json())
    err = ('Update failed on {}, got HTTP response: ({}) - {}'.format(
        old_resource.name, resp.status_code, resp.text))
    raise HTTPError(err, request=req, response=resp)
|
Updates an entity in the data model using the given resource.
Args:
resource_name (string): Current name of the resource (in case the update renames it).
resource (intern.resource.boss.BossResource): New attributes for the resource.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Returns:
(intern.resource.boss.BossResource): Returns updated resource of given type on success.
Raises:
requests.HTTPError on failure.
|
entailment
|
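The deepcopy is what makes renames safe: the PUT is routed using the old name while the JSON body, built from the caller's resource, carries the new one. A standalone illustration with a stand-in class:

import copy

class Rsrc(object):
    # Stand-in for a BossResource subclass; only .name matters here.
    def __init__(self, name):
        self.name = name

new_attrs = Rsrc('renamed_channel')  # desired state, new name included
old = copy.deepcopy(new_attrs)
old.name = 'channel'                 # the PUT targets the existing name...
# ...while the body serialized from new_attrs renames it to 'renamed_channel'.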
def delete(self, resource, url_prefix, auth, session, send_opts):
    """Deletes the entity described by the given resource.
    Args:
        resource (intern.resource.boss.BossResource)
        url_prefix (string): Protocol + host such as https://api.theboss.io
        auth (string): Token to send in the request header.
        session (requests.Session): HTTP session to use for request.
        send_opts (dictionary): Additional arguments to pass to session.send().
    Raises:
        requests.HTTPError on failure.
    """
    req = self.get_request(
        resource, 'DELETE', 'application/json', url_prefix, auth)
    prep = session.prepare_request(req)
    resp = session.send(prep, **send_opts)
    if resp.status_code == 204:
        return
    err = ('Delete failed on {}, got HTTP response: ({}) - {}'.format(
        resource.name, resp.status_code, resp.text))
    raise HTTPError(err, request=req, response=resp)
|
Deletes the entity described by the given resource.
Args:
resource (intern.resource.boss.BossResource)
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Raises:
requests.HTTPError on failure.
|
entailment
|
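Success here is 204 No Content rather than 200, so the method pins that exact status instead of relying on resp.ok (which would also accept 200 and 201). The same check as a tiny helper (the name is illustrative):

import requests

def ensure_deleted(resp):
    # Return silently on 204 No Content; raise for anything else.
    if resp.status_code == 204:
        return
    raise requests.HTTPError(
        'Delete failed: ({}) - {}'.format(resp.status_code, resp.text),
        response=resp)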
def _get_resource_params(self, resource, for_update=False):
    """Get dictionary containing all parameters for the given resource.
    When getting params for a coordinate frame update, only name and
    description are returned because they are the only fields that can
    be updated.
    Args:
        resource (intern.resource.boss.resource.BossResource): A sub-class
            whose parameters will be extracted into a dictionary.
        for_update (bool): True if params will be used for an update.
    Returns:
        (dictionary): A dictionary containing the resource's parameters as
            required by the Boss API.
    Raises:
        TypeError if resource is not a supported class.
    """
    if isinstance(resource, CollectionResource):
        return self._get_collection_params(resource)
    if isinstance(resource, ExperimentResource):
        return self._get_experiment_params(resource, for_update)
    if isinstance(resource, CoordinateFrameResource):
        return self._get_coordinate_params(resource, for_update)
    if isinstance(resource, ChannelResource):
        return self._get_channel_params(resource, for_update)
    raise TypeError('resource is not supported type.')
|
Get dictionary containing all parameters for the given resource.
When getting params for a coordinate frame update, only name and
description are returned because they are the only fields that can
be updated.
Args:
resource (intern.resource.boss.resource.BossResource): A sub-class
whose parameters will be extracted into a dictionary.
for_update (bool): True if params will be used for an update.
Returns:
(dictionary): A dictionary containing the resource's parameters as
required by the Boss API.
Raises:
TypeError if resource is not a supported class.
|
entailment
|
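The isinstance chain is a type-dispatch table written out longhand. An equivalent table-driven sketch, with placeholder handlers standing in for the per-type _get_*_params helpers (which additionally take the for_update flag):

def params_for(resource, handlers):
    # handlers: ordered list of (resource_type, handler) pairs.
    for rsrc_type, handler in handlers:
        if isinstance(resource, rsrc_type):
            return handler(resource)
    raise TypeError('resource is not supported type.')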
def _create_resource_from_dict(self, resource, dict):
    """Create a resource instance populated from a Boss API response dictionary.
    Args:
        resource (intern.resource.boss.BossResource): Used to determine type of resource to create.
        dict (dictionary): JSON data returned by the Boss API.
    Returns:
        (intern.resource.boss.BossResource): Instance populated with values from dict.
    Raises:
        KeyError if dict missing required key.
        TypeError if resource is not a supported class.
    """
    if isinstance(resource, CollectionResource):
        return self._get_collection(dict)
    if isinstance(resource, ExperimentResource):
        return self._get_experiment(dict, resource.coll_name)
    if isinstance(resource, CoordinateFrameResource):
        return self._get_coordinate(dict)
    if isinstance(resource, ChannelResource):
        return self._get_channel(dict, resource.coll_name, resource.exp_name)
    raise TypeError('resource is not supported type.')
|
Create a resource instance populated from a Boss API response dictionary.
Args:
resource (intern.resource.boss.BossResource): Used to determine type of resource to create.
dict (dictionary): JSON data returned by the Boss API.
Returns:
(intern.resource.boss.BossResource): Instance populated with values from dict.
Raises:
KeyError if dict missing required key.
TypeError if resource is not a supported class.
|
entailment
|
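Note why the original resource is passed alongside the response body: it selects the subclass to build and supplies routing context (coll_name, exp_name) that the returned dictionary alone may not carry. A hedged sketch of that flow; the keys shown are illustrative, not the full response schema:

resp_json = {'name': 'renamed_channel', 'description': 'demo'}
# The caller's resource contributes the context the dict lacks:
#   updated = svc._create_resource_from_dict(channel_resource, resp_json)
# -> a ChannelResource rebuilt under channel_resource.coll_name / .exp_name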
def _get_resource_list(self, rsrc_dict):
    """Extracts list of resources from the HTTP response.
    Args:
        rsrc_dict (dict): HTTP response encoded in a dictionary.
    Returns:
        (list[string]): List of a type of resource (collections, experiments, etc).
    Raises:
        (RuntimeError): If rsrc_dict does not contain any known resources.
    """
    if 'collections' in rsrc_dict:
        return rsrc_dict['collections']
    if 'experiments' in rsrc_dict:
        return rsrc_dict['experiments']
    if 'channels' in rsrc_dict:
        return rsrc_dict['channels']
    if 'coords' in rsrc_dict:
        return rsrc_dict['coords']
    raise RuntimeError('Invalid list response received from Boss. No known resource type returned.')
|
Extracts list of resources from the HTTP response.
Args:
rsrc_dict (dict): HTTP response encoded in a dictionary.
Returns:
(list[string]): List of a type of resource (collections, experiments, etc).
Raises:
(RuntimeError): If rsrc_dict does not contain any known resources.
|
entailment
|
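The four membership tests probe for the single key a well-formed response should contain. The same logic as a loop, behavior-equivalent to the chain above:

def extract_list(rsrc_dict):
    for key in ('collections', 'experiments', 'channels', 'coords'):
        if key in rsrc_dict:
            return rsrc_dict[key]
    raise RuntimeError('Invalid list response received from Boss. '
                       'No known resource type returned.')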