partition
stringclasses 3
values | func_name
stringlengths 1
134
| docstring
stringlengths 1
46.9k
| path
stringlengths 4
223
| original_string
stringlengths 75
104k
| code
stringlengths 75
104k
| docstring_tokens
listlengths 1
1.97k
| repo
stringlengths 7
55
| language
stringclasses 1
value | url
stringlengths 87
315
| code_tokens
listlengths 19
28.4k
| sha
stringlengths 40
40
|
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
D.filter_trim
|
Remove points from the start and end of filter regions.
Parameters
----------
start, end : int
The number of points to remove from the start and end of
the specified filter.
filt : valid filter string or bool
Which filter to trim. If True, applies to currently active
filters.
|
latools/D_obj.py
|
def filter_trim(self, start=1, end=1, filt=True):
"""
Remove points from the start and end of filter regions.
Parameters
----------
start, end : int
The number of points to remove from the start and end of
the specified filter.
filt : valid filter string or bool
Which filter to trim. If True, applies to currently active
filters.
"""
params = locals()
del(params['self'])
f = self.filt.grab_filt(filt)
nf = filters.trim(f, start, end)
self.filt.add('trimmed_filter',
nf,
'Trimmed Filter ({:.0f} start, {:.0f} end)'.format(start, end),
params, setn=self.filt.maxset + 1)
|
def filter_trim(self, start=1, end=1, filt=True):
"""
Remove points from the start and end of filter regions.
Parameters
----------
start, end : int
The number of points to remove from the start and end of
the specified filter.
filt : valid filter string or bool
Which filter to trim. If True, applies to currently active
filters.
"""
params = locals()
del(params['self'])
f = self.filt.grab_filt(filt)
nf = filters.trim(f, start, end)
self.filt.add('trimmed_filter',
nf,
'Trimmed Filter ({:.0f} start, {:.0f} end)'.format(start, end),
params, setn=self.filt.maxset + 1)
|
[
"Remove",
"points",
"from",
"the",
"start",
"and",
"end",
"of",
"filter",
"regions",
".",
"Parameters",
"----------",
"start",
"end",
":",
"int",
"The",
"number",
"of",
"points",
"to",
"remove",
"from",
"the",
"start",
"and",
"end",
"of",
"the",
"specified",
"filter",
".",
"filt",
":",
"valid",
"filter",
"string",
"or",
"bool",
"Which",
"filter",
"to",
"trim",
".",
"If",
"True",
"applies",
"to",
"currently",
"active",
"filters",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L1099-L1121
|
[
"def",
"filter_trim",
"(",
"self",
",",
"start",
"=",
"1",
",",
"end",
"=",
"1",
",",
"filt",
"=",
"True",
")",
":",
"params",
"=",
"locals",
"(",
")",
"del",
"(",
"params",
"[",
"'self'",
"]",
")",
"f",
"=",
"self",
".",
"filt",
".",
"grab_filt",
"(",
"filt",
")",
"nf",
"=",
"filters",
".",
"trim",
"(",
"f",
",",
"start",
",",
"end",
")",
"self",
".",
"filt",
".",
"add",
"(",
"'trimmed_filter'",
",",
"nf",
",",
"'Trimmed Filter ({:.0f} start, {:.0f} end)'",
".",
"format",
"(",
"start",
",",
"end",
")",
",",
"params",
",",
"setn",
"=",
"self",
".",
"filt",
".",
"maxset",
"+",
"1",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
D.filter_exclude_downhole
|
Exclude all points down-hole (after) the first excluded data.
Parameters
----------
threhold : int
The minimum number of contiguous excluded data points
that must exist before downhole exclusion occurs.
file : valid filter string or bool
Which filter to consider. If True, applies to currently active
filters.
|
latools/D_obj.py
|
def filter_exclude_downhole(self, threshold, filt=True):
"""
Exclude all points down-hole (after) the first excluded data.
Parameters
----------
threhold : int
The minimum number of contiguous excluded data points
that must exist before downhole exclusion occurs.
file : valid filter string or bool
Which filter to consider. If True, applies to currently active
filters.
"""
f = self.filt.grab_filt(filt)
if self.n == 1:
nfilt = filters.exclude_downhole(f, threshold)
else:
nfilt = []
for i in range(self.n):
nf = self.ns == i + 1
nfilt.append(filters.exclude_downhole(f & nf, threshold))
nfilt = np.apply_along_axis(any, 0, nfilt)
self.filt.add(name='downhole_excl_{:.0f}'.format(threshold),
filt=nfilt,
info='Exclude data downhole of {:.0f} consecutive filtered points.'.format(threshold),
params=(threshold, filt))
|
def filter_exclude_downhole(self, threshold, filt=True):
"""
Exclude all points down-hole (after) the first excluded data.
Parameters
----------
threhold : int
The minimum number of contiguous excluded data points
that must exist before downhole exclusion occurs.
file : valid filter string or bool
Which filter to consider. If True, applies to currently active
filters.
"""
f = self.filt.grab_filt(filt)
if self.n == 1:
nfilt = filters.exclude_downhole(f, threshold)
else:
nfilt = []
for i in range(self.n):
nf = self.ns == i + 1
nfilt.append(filters.exclude_downhole(f & nf, threshold))
nfilt = np.apply_along_axis(any, 0, nfilt)
self.filt.add(name='downhole_excl_{:.0f}'.format(threshold),
filt=nfilt,
info='Exclude data downhole of {:.0f} consecutive filtered points.'.format(threshold),
params=(threshold, filt))
|
[
"Exclude",
"all",
"points",
"down",
"-",
"hole",
"(",
"after",
")",
"the",
"first",
"excluded",
"data",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L1124-L1152
|
[
"def",
"filter_exclude_downhole",
"(",
"self",
",",
"threshold",
",",
"filt",
"=",
"True",
")",
":",
"f",
"=",
"self",
".",
"filt",
".",
"grab_filt",
"(",
"filt",
")",
"if",
"self",
".",
"n",
"==",
"1",
":",
"nfilt",
"=",
"filters",
".",
"exclude_downhole",
"(",
"f",
",",
"threshold",
")",
"else",
":",
"nfilt",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n",
")",
":",
"nf",
"=",
"self",
".",
"ns",
"==",
"i",
"+",
"1",
"nfilt",
".",
"append",
"(",
"filters",
".",
"exclude_downhole",
"(",
"f",
"&",
"nf",
",",
"threshold",
")",
")",
"nfilt",
"=",
"np",
".",
"apply_along_axis",
"(",
"any",
",",
"0",
",",
"nfilt",
")",
"self",
".",
"filt",
".",
"add",
"(",
"name",
"=",
"'downhole_excl_{:.0f}'",
".",
"format",
"(",
"threshold",
")",
",",
"filt",
"=",
"nfilt",
",",
"info",
"=",
"'Exclude data downhole of {:.0f} consecutive filtered points.'",
".",
"format",
"(",
"threshold",
")",
",",
"params",
"=",
"(",
"threshold",
",",
"filt",
")",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
D.signal_optimiser
|
Optimise data selection based on specified analytes.
Identifies the longest possible contiguous data region in
the signal where the relative standard deviation (std) and
concentration of all analytes is minimised.
Optimisation is performed via a grid search of all possible
contiguous data regions. For each region, the mean std and
mean scaled analyte concentration ('amplitude') are calculated.
The size and position of the optimal data region are identified
using threshold std and amplitude values. Thresholds are derived
from all calculated stds and amplitudes using the method specified
by `threshold_mode`. For example, using the 'kde_max' method, a
probability density function (PDF) is calculated for std and
amplitude values, and the threshold is set as the maximum of the
PDF. These thresholds are then used to identify the size and position
of the longest contiguous region where the std is below the threshold,
and the amplitude is either below the threshold.
All possible regions of the data that have at least
`min_points` are considered.
For a graphical demonstration of the action of signal_optimiser,
use `optimisation_plot`.
Parameters
----------
d : latools.D object
An latools data object.
analytes : str or array-like
Which analytes to consider.
min_points : int
The minimum number of contiguous points to
consider.
threshold_mode : str
The method used to calculate the optimisation
thresholds. Can be 'mean', 'median', 'kde_max'
or 'bayes_mvs', or a custom function. If a
function, must take a 1D array, and return a
single, real number.
weights : array-like of length len(analytes)
An array of numbers specifying the importance of
each analyte considered. Larger number makes the
analyte have a greater effect on the optimisation.
Default is None.
|
latools/D_obj.py
|
def signal_optimiser(self, analytes, min_points=5,
threshold_mode='kde_first_max',
threshold_mult=1., x_bias=0,
weights=None, filt=True, mode='minimise'):
"""
Optimise data selection based on specified analytes.
Identifies the longest possible contiguous data region in
the signal where the relative standard deviation (std) and
concentration of all analytes is minimised.
Optimisation is performed via a grid search of all possible
contiguous data regions. For each region, the mean std and
mean scaled analyte concentration ('amplitude') are calculated.
The size and position of the optimal data region are identified
using threshold std and amplitude values. Thresholds are derived
from all calculated stds and amplitudes using the method specified
by `threshold_mode`. For example, using the 'kde_max' method, a
probability density function (PDF) is calculated for std and
amplitude values, and the threshold is set as the maximum of the
PDF. These thresholds are then used to identify the size and position
of the longest contiguous region where the std is below the threshold,
and the amplitude is either below the threshold.
All possible regions of the data that have at least
`min_points` are considered.
For a graphical demonstration of the action of signal_optimiser,
use `optimisation_plot`.
Parameters
----------
d : latools.D object
An latools data object.
analytes : str or array-like
Which analytes to consider.
min_points : int
The minimum number of contiguous points to
consider.
threshold_mode : str
The method used to calculate the optimisation
thresholds. Can be 'mean', 'median', 'kde_max'
or 'bayes_mvs', or a custom function. If a
function, must take a 1D array, and return a
single, real number.
weights : array-like of length len(analytes)
An array of numbers specifying the importance of
each analyte considered. Larger number makes the
analyte have a greater effect on the optimisation.
Default is None.
"""
params = locals()
del(params['self'])
setn = self.filt.maxset + 1
if isinstance(analytes, str):
analytes = [analytes]
# get filter
if filt is not False:
ind = (self.filt.grab_filt(filt, analytes))
else:
ind = np.full(self.Time.shape, True)
errmsg = []
ofilt = []
self.opt = {}
for i in range(self.n):
nind = ind & (self.ns == i + 1)
self.opt[i + 1], err = signal_optimiser(self, analytes=analytes,
min_points=min_points,
threshold_mode=threshold_mode,
threshold_mult=threshold_mult,
weights=weights,
ind=nind, x_bias=x_bias,
mode=mode)
if err == '':
ofilt.append(self.opt[i + 1].filt)
else:
errmsg.append(self.sample + '_{:.0f}: '.format(i + 1) + err)
if len(ofilt) > 0:
ofilt = np.apply_along_axis(any, 0, ofilt)
name = 'optimise_' + '_'.join(analytes)
self.filt.add(name=name,
filt=ofilt,
info="Optimisation filter to minimise " + ', '.join(analytes),
params=params, setn=setn)
if len(errmsg) > 0:
return '\n'.join(errmsg)
else:
return ''
|
def signal_optimiser(self, analytes, min_points=5,
threshold_mode='kde_first_max',
threshold_mult=1., x_bias=0,
weights=None, filt=True, mode='minimise'):
"""
Optimise data selection based on specified analytes.
Identifies the longest possible contiguous data region in
the signal where the relative standard deviation (std) and
concentration of all analytes is minimised.
Optimisation is performed via a grid search of all possible
contiguous data regions. For each region, the mean std and
mean scaled analyte concentration ('amplitude') are calculated.
The size and position of the optimal data region are identified
using threshold std and amplitude values. Thresholds are derived
from all calculated stds and amplitudes using the method specified
by `threshold_mode`. For example, using the 'kde_max' method, a
probability density function (PDF) is calculated for std and
amplitude values, and the threshold is set as the maximum of the
PDF. These thresholds are then used to identify the size and position
of the longest contiguous region where the std is below the threshold,
and the amplitude is either below the threshold.
All possible regions of the data that have at least
`min_points` are considered.
For a graphical demonstration of the action of signal_optimiser,
use `optimisation_plot`.
Parameters
----------
d : latools.D object
An latools data object.
analytes : str or array-like
Which analytes to consider.
min_points : int
The minimum number of contiguous points to
consider.
threshold_mode : str
The method used to calculate the optimisation
thresholds. Can be 'mean', 'median', 'kde_max'
or 'bayes_mvs', or a custom function. If a
function, must take a 1D array, and return a
single, real number.
weights : array-like of length len(analytes)
An array of numbers specifying the importance of
each analyte considered. Larger number makes the
analyte have a greater effect on the optimisation.
Default is None.
"""
params = locals()
del(params['self'])
setn = self.filt.maxset + 1
if isinstance(analytes, str):
analytes = [analytes]
# get filter
if filt is not False:
ind = (self.filt.grab_filt(filt, analytes))
else:
ind = np.full(self.Time.shape, True)
errmsg = []
ofilt = []
self.opt = {}
for i in range(self.n):
nind = ind & (self.ns == i + 1)
self.opt[i + 1], err = signal_optimiser(self, analytes=analytes,
min_points=min_points,
threshold_mode=threshold_mode,
threshold_mult=threshold_mult,
weights=weights,
ind=nind, x_bias=x_bias,
mode=mode)
if err == '':
ofilt.append(self.opt[i + 1].filt)
else:
errmsg.append(self.sample + '_{:.0f}: '.format(i + 1) + err)
if len(ofilt) > 0:
ofilt = np.apply_along_axis(any, 0, ofilt)
name = 'optimise_' + '_'.join(analytes)
self.filt.add(name=name,
filt=ofilt,
info="Optimisation filter to minimise " + ', '.join(analytes),
params=params, setn=setn)
if len(errmsg) > 0:
return '\n'.join(errmsg)
else:
return ''
|
[
"Optimise",
"data",
"selection",
"based",
"on",
"specified",
"analytes",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L1156-L1252
|
[
"def",
"signal_optimiser",
"(",
"self",
",",
"analytes",
",",
"min_points",
"=",
"5",
",",
"threshold_mode",
"=",
"'kde_first_max'",
",",
"threshold_mult",
"=",
"1.",
",",
"x_bias",
"=",
"0",
",",
"weights",
"=",
"None",
",",
"filt",
"=",
"True",
",",
"mode",
"=",
"'minimise'",
")",
":",
"params",
"=",
"locals",
"(",
")",
"del",
"(",
"params",
"[",
"'self'",
"]",
")",
"setn",
"=",
"self",
".",
"filt",
".",
"maxset",
"+",
"1",
"if",
"isinstance",
"(",
"analytes",
",",
"str",
")",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"# get filter",
"if",
"filt",
"is",
"not",
"False",
":",
"ind",
"=",
"(",
"self",
".",
"filt",
".",
"grab_filt",
"(",
"filt",
",",
"analytes",
")",
")",
"else",
":",
"ind",
"=",
"np",
".",
"full",
"(",
"self",
".",
"Time",
".",
"shape",
",",
"True",
")",
"errmsg",
"=",
"[",
"]",
"ofilt",
"=",
"[",
"]",
"self",
".",
"opt",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n",
")",
":",
"nind",
"=",
"ind",
"&",
"(",
"self",
".",
"ns",
"==",
"i",
"+",
"1",
")",
"self",
".",
"opt",
"[",
"i",
"+",
"1",
"]",
",",
"err",
"=",
"signal_optimiser",
"(",
"self",
",",
"analytes",
"=",
"analytes",
",",
"min_points",
"=",
"min_points",
",",
"threshold_mode",
"=",
"threshold_mode",
",",
"threshold_mult",
"=",
"threshold_mult",
",",
"weights",
"=",
"weights",
",",
"ind",
"=",
"nind",
",",
"x_bias",
"=",
"x_bias",
",",
"mode",
"=",
"mode",
")",
"if",
"err",
"==",
"''",
":",
"ofilt",
".",
"append",
"(",
"self",
".",
"opt",
"[",
"i",
"+",
"1",
"]",
".",
"filt",
")",
"else",
":",
"errmsg",
".",
"append",
"(",
"self",
".",
"sample",
"+",
"'_{:.0f}: '",
".",
"format",
"(",
"i",
"+",
"1",
")",
"+",
"err",
")",
"if",
"len",
"(",
"ofilt",
")",
">",
"0",
":",
"ofilt",
"=",
"np",
".",
"apply_along_axis",
"(",
"any",
",",
"0",
",",
"ofilt",
")",
"name",
"=",
"'optimise_'",
"+",
"'_'",
".",
"join",
"(",
"analytes",
")",
"self",
".",
"filt",
".",
"add",
"(",
"name",
"=",
"name",
",",
"filt",
"=",
"ofilt",
",",
"info",
"=",
"\"Optimisation filter to minimise \"",
"+",
"', '",
".",
"join",
"(",
"analytes",
")",
",",
"params",
"=",
"params",
",",
"setn",
"=",
"setn",
")",
"if",
"len",
"(",
"errmsg",
")",
">",
"0",
":",
"return",
"'\\n'",
".",
"join",
"(",
"errmsg",
")",
"else",
":",
"return",
"''"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
D.tplot
|
Plot analytes as a function of Time.
Parameters
----------
analytes : array_like
list of strings containing names of analytes to plot.
None = all analytes.
figsize : tuple
size of final figure.
scale : str or None
'log' = plot data on log scale
filt : bool, str or dict
False: plot unfiltered data.
True: plot filtered data over unfiltered data.
str: apply filter key to all analytes
dict: apply key to each analyte in dict. Must contain all
analytes plotted. Can use self.filt.keydict.
ranges : bool
show signal/background regions.
stats : bool
plot average and error of each trace, as specified by `stat` and
`err`.
stat : str
average statistic to plot.
err : str
error statistic to plot.
Returns
-------
figure, axis
|
latools/D_obj.py
|
def tplot(self, analytes=None, figsize=[10, 4], scale='log', filt=None,
ranges=False, stats=False, stat='nanmean', err='nanstd',
focus_stage=None, err_envelope=False, ax=None):
"""
Plot analytes as a function of Time.
Parameters
----------
analytes : array_like
list of strings containing names of analytes to plot.
None = all analytes.
figsize : tuple
size of final figure.
scale : str or None
'log' = plot data on log scale
filt : bool, str or dict
False: plot unfiltered data.
True: plot filtered data over unfiltered data.
str: apply filter key to all analytes
dict: apply key to each analyte in dict. Must contain all
analytes plotted. Can use self.filt.keydict.
ranges : bool
show signal/background regions.
stats : bool
plot average and error of each trace, as specified by `stat` and
`err`.
stat : str
average statistic to plot.
err : str
error statistic to plot.
Returns
-------
figure, axis
"""
return plot.tplot(self=self, analytes=analytes, figsize=figsize, scale=scale, filt=filt,
ranges=ranges, stats=stats, stat=stat, err=err,
focus_stage=focus_stage, err_envelope=err_envelope, ax=ax)
|
def tplot(self, analytes=None, figsize=[10, 4], scale='log', filt=None,
ranges=False, stats=False, stat='nanmean', err='nanstd',
focus_stage=None, err_envelope=False, ax=None):
"""
Plot analytes as a function of Time.
Parameters
----------
analytes : array_like
list of strings containing names of analytes to plot.
None = all analytes.
figsize : tuple
size of final figure.
scale : str or None
'log' = plot data on log scale
filt : bool, str or dict
False: plot unfiltered data.
True: plot filtered data over unfiltered data.
str: apply filter key to all analytes
dict: apply key to each analyte in dict. Must contain all
analytes plotted. Can use self.filt.keydict.
ranges : bool
show signal/background regions.
stats : bool
plot average and error of each trace, as specified by `stat` and
`err`.
stat : str
average statistic to plot.
err : str
error statistic to plot.
Returns
-------
figure, axis
"""
return plot.tplot(self=self, analytes=analytes, figsize=figsize, scale=scale, filt=filt,
ranges=ranges, stats=stats, stat=stat, err=err,
focus_stage=focus_stage, err_envelope=err_envelope, ax=ax)
|
[
"Plot",
"analytes",
"as",
"a",
"function",
"of",
"Time",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L1274-L1312
|
[
"def",
"tplot",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"figsize",
"=",
"[",
"10",
",",
"4",
"]",
",",
"scale",
"=",
"'log'",
",",
"filt",
"=",
"None",
",",
"ranges",
"=",
"False",
",",
"stats",
"=",
"False",
",",
"stat",
"=",
"'nanmean'",
",",
"err",
"=",
"'nanstd'",
",",
"focus_stage",
"=",
"None",
",",
"err_envelope",
"=",
"False",
",",
"ax",
"=",
"None",
")",
":",
"return",
"plot",
".",
"tplot",
"(",
"self",
"=",
"self",
",",
"analytes",
"=",
"analytes",
",",
"figsize",
"=",
"figsize",
",",
"scale",
"=",
"scale",
",",
"filt",
"=",
"filt",
",",
"ranges",
"=",
"ranges",
",",
"stats",
"=",
"stats",
",",
"stat",
"=",
"stat",
",",
"err",
"=",
"err",
",",
"focus_stage",
"=",
"focus_stage",
",",
"err_envelope",
"=",
"err_envelope",
",",
"ax",
"=",
"ax",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
D.gplot
|
Plot analytes gradients as a function of Time.
Parameters
----------
analytes : array_like
list of strings containing names of analytes to plot.
None = all analytes.
win : int
The window over which to calculate the rolling gradient.
figsize : tuple
size of final figure.
ranges : bool
show signal/background regions.
Returns
-------
figure, axis
|
latools/D_obj.py
|
def gplot(self, analytes=None, win=5, figsize=[10, 4],
ranges=False, focus_stage=None, ax=None):
"""
Plot analytes gradients as a function of Time.
Parameters
----------
analytes : array_like
list of strings containing names of analytes to plot.
None = all analytes.
win : int
The window over which to calculate the rolling gradient.
figsize : tuple
size of final figure.
ranges : bool
show signal/background regions.
Returns
-------
figure, axis
"""
return plot.gplot(self=self, analytes=analytes, win=win, figsize=figsize,
ranges=ranges, focus_stage=focus_stage, ax=ax)
|
def gplot(self, analytes=None, win=5, figsize=[10, 4],
ranges=False, focus_stage=None, ax=None):
"""
Plot analytes gradients as a function of Time.
Parameters
----------
analytes : array_like
list of strings containing names of analytes to plot.
None = all analytes.
win : int
The window over which to calculate the rolling gradient.
figsize : tuple
size of final figure.
ranges : bool
show signal/background regions.
Returns
-------
figure, axis
"""
return plot.gplot(self=self, analytes=analytes, win=win, figsize=figsize,
ranges=ranges, focus_stage=focus_stage, ax=ax)
|
[
"Plot",
"analytes",
"gradients",
"as",
"a",
"function",
"of",
"Time",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L1315-L1338
|
[
"def",
"gplot",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"win",
"=",
"5",
",",
"figsize",
"=",
"[",
"10",
",",
"4",
"]",
",",
"ranges",
"=",
"False",
",",
"focus_stage",
"=",
"None",
",",
"ax",
"=",
"None",
")",
":",
"return",
"plot",
".",
"gplot",
"(",
"self",
"=",
"self",
",",
"analytes",
"=",
"analytes",
",",
"win",
"=",
"win",
",",
"figsize",
"=",
"figsize",
",",
"ranges",
"=",
"ranges",
",",
"focus_stage",
"=",
"focus_stage",
",",
"ax",
"=",
"ax",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
D.crossplot
|
Plot analytes against each other.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
lognorm : bool
Whether or not to log normalise the colour scale
of the 2D histogram.
bins : int
The number of bins in the 2D histogram.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
Returns
-------
(fig, axes)
|
latools/D_obj.py
|
def crossplot(self, analytes=None, bins=25, lognorm=True, filt=True, colourful=True, figsize=(12, 12)):
"""
Plot analytes against each other.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
lognorm : bool
Whether or not to log normalise the colour scale
of the 2D histogram.
bins : int
The number of bins in the 2D histogram.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
Returns
-------
(fig, axes)
"""
if analytes is None:
analytes = self.analytes
if self.focus_stage in ['ratio', 'calibrated']:
analytes = [a for a in analytes if self.internal_standard not in a]
if figsize[0] < 1.5 * len(analytes):
figsize = [1.5 * len(analytes)] * 2
numvars = len(analytes)
fig, axes = plt.subplots(nrows=numvars, ncols=numvars,
figsize=(12, 12))
fig.subplots_adjust(hspace=0.05, wspace=0.05)
for ax in axes.flat:
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
if ax.is_first_col():
ax.yaxis.set_ticks_position('left')
if ax.is_last_col():
ax.yaxis.set_ticks_position('right')
if ax.is_first_row():
ax.xaxis.set_ticks_position('top')
if ax.is_last_row():
ax.xaxis.set_ticks_position('bottom')
# set up colour scales
if colourful:
cmlist = ['Blues', 'BuGn', 'BuPu', 'GnBu',
'Greens', 'Greys', 'Oranges', 'OrRd',
'PuBu', 'PuBuGn', 'PuRd', 'Purples',
'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']
else:
cmlist = ['Greys']
while len(cmlist) < len(analytes):
cmlist *= 2
udict = {}
for i, j in zip(*np.triu_indices_from(axes, k=1)):
for x, y in [(i, j), (j, i)]:
# set unit multipliers
mx, ux = unitpicker(np.nanmean(self.focus[analytes[x]]),
denominator=self.internal_standard,
focus_stage=self.focus_stage)
my, uy = unitpicker(np.nanmean(self.focus[analytes[y]]),
denominator=self.internal_standard,
focus_stage=self.focus_stage)
udict[analytes[x]] = (x, ux)
# get filter
xd = nominal_values(self.focus[analytes[x]])
yd = nominal_values(self.focus[analytes[y]])
ind = (self.filt.grab_filt(filt, analytes[x]) &
self.filt.grab_filt(filt, analytes[y]) &
~np.isnan(xd) &
~np.isnan(yd))
# make plot
pi = xd[ind] * mx
pj = yd[ind] * my
# determine normalisation shceme
if lognorm:
norm = mpl.colors.LogNorm()
else:
norm = None
# draw plots
axes[i, j].hist2d(pj, pi, bins,
norm=norm,
cmap=plt.get_cmap(cmlist[i]))
axes[j, i].hist2d(pi, pj, bins,
norm=norm,
cmap=plt.get_cmap(cmlist[j]))
axes[x, y].set_ylim([pi.min(), pi.max()])
axes[x, y].set_xlim([pj.min(), pj.max()])
# diagonal labels
for a, (i, u) in udict.items():
axes[i, i].annotate(a + '\n' + u, (0.5, 0.5),
xycoords='axes fraction',
ha='center', va='center')
# switch on alternating axes
for i, j in zip(range(numvars), itertools.cycle((-1, 0))):
axes[j, i].xaxis.set_visible(True)
for label in axes[j, i].get_xticklabels():
label.set_rotation(90)
axes[i, j].yaxis.set_visible(True)
axes[0, 0].set_title(self.sample, weight='bold', x=0.05, ha='left')
return fig, axes
|
def crossplot(self, analytes=None, bins=25, lognorm=True, filt=True, colourful=True, figsize=(12, 12)):
"""
Plot analytes against each other.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
lognorm : bool
Whether or not to log normalise the colour scale
of the 2D histogram.
bins : int
The number of bins in the 2D histogram.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
Returns
-------
(fig, axes)
"""
if analytes is None:
analytes = self.analytes
if self.focus_stage in ['ratio', 'calibrated']:
analytes = [a for a in analytes if self.internal_standard not in a]
if figsize[0] < 1.5 * len(analytes):
figsize = [1.5 * len(analytes)] * 2
numvars = len(analytes)
fig, axes = plt.subplots(nrows=numvars, ncols=numvars,
figsize=(12, 12))
fig.subplots_adjust(hspace=0.05, wspace=0.05)
for ax in axes.flat:
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
if ax.is_first_col():
ax.yaxis.set_ticks_position('left')
if ax.is_last_col():
ax.yaxis.set_ticks_position('right')
if ax.is_first_row():
ax.xaxis.set_ticks_position('top')
if ax.is_last_row():
ax.xaxis.set_ticks_position('bottom')
# set up colour scales
if colourful:
cmlist = ['Blues', 'BuGn', 'BuPu', 'GnBu',
'Greens', 'Greys', 'Oranges', 'OrRd',
'PuBu', 'PuBuGn', 'PuRd', 'Purples',
'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']
else:
cmlist = ['Greys']
while len(cmlist) < len(analytes):
cmlist *= 2
udict = {}
for i, j in zip(*np.triu_indices_from(axes, k=1)):
for x, y in [(i, j), (j, i)]:
# set unit multipliers
mx, ux = unitpicker(np.nanmean(self.focus[analytes[x]]),
denominator=self.internal_standard,
focus_stage=self.focus_stage)
my, uy = unitpicker(np.nanmean(self.focus[analytes[y]]),
denominator=self.internal_standard,
focus_stage=self.focus_stage)
udict[analytes[x]] = (x, ux)
# get filter
xd = nominal_values(self.focus[analytes[x]])
yd = nominal_values(self.focus[analytes[y]])
ind = (self.filt.grab_filt(filt, analytes[x]) &
self.filt.grab_filt(filt, analytes[y]) &
~np.isnan(xd) &
~np.isnan(yd))
# make plot
pi = xd[ind] * mx
pj = yd[ind] * my
# determine normalisation shceme
if lognorm:
norm = mpl.colors.LogNorm()
else:
norm = None
# draw plots
axes[i, j].hist2d(pj, pi, bins,
norm=norm,
cmap=plt.get_cmap(cmlist[i]))
axes[j, i].hist2d(pi, pj, bins,
norm=norm,
cmap=plt.get_cmap(cmlist[j]))
axes[x, y].set_ylim([pi.min(), pi.max()])
axes[x, y].set_xlim([pj.min(), pj.max()])
# diagonal labels
for a, (i, u) in udict.items():
axes[i, i].annotate(a + '\n' + u, (0.5, 0.5),
xycoords='axes fraction',
ha='center', va='center')
# switch on alternating axes
for i, j in zip(range(numvars), itertools.cycle((-1, 0))):
axes[j, i].xaxis.set_visible(True)
for label in axes[j, i].get_xticklabels():
label.set_rotation(90)
axes[i, j].yaxis.set_visible(True)
axes[0, 0].set_title(self.sample, weight='bold', x=0.05, ha='left')
return fig, axes
|
[
"Plot",
"analytes",
"against",
"each",
"other",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L1392-L1507
|
[
"def",
"crossplot",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"bins",
"=",
"25",
",",
"lognorm",
"=",
"True",
",",
"filt",
"=",
"True",
",",
"colourful",
"=",
"True",
",",
"figsize",
"=",
"(",
"12",
",",
"12",
")",
")",
":",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"if",
"self",
".",
"focus_stage",
"in",
"[",
"'ratio'",
",",
"'calibrated'",
"]",
":",
"analytes",
"=",
"[",
"a",
"for",
"a",
"in",
"analytes",
"if",
"self",
".",
"internal_standard",
"not",
"in",
"a",
"]",
"if",
"figsize",
"[",
"0",
"]",
"<",
"1.5",
"*",
"len",
"(",
"analytes",
")",
":",
"figsize",
"=",
"[",
"1.5",
"*",
"len",
"(",
"analytes",
")",
"]",
"*",
"2",
"numvars",
"=",
"len",
"(",
"analytes",
")",
"fig",
",",
"axes",
"=",
"plt",
".",
"subplots",
"(",
"nrows",
"=",
"numvars",
",",
"ncols",
"=",
"numvars",
",",
"figsize",
"=",
"(",
"12",
",",
"12",
")",
")",
"fig",
".",
"subplots_adjust",
"(",
"hspace",
"=",
"0.05",
",",
"wspace",
"=",
"0.05",
")",
"for",
"ax",
"in",
"axes",
".",
"flat",
":",
"ax",
".",
"xaxis",
".",
"set_visible",
"(",
"False",
")",
"ax",
".",
"yaxis",
".",
"set_visible",
"(",
"False",
")",
"if",
"ax",
".",
"is_first_col",
"(",
")",
":",
"ax",
".",
"yaxis",
".",
"set_ticks_position",
"(",
"'left'",
")",
"if",
"ax",
".",
"is_last_col",
"(",
")",
":",
"ax",
".",
"yaxis",
".",
"set_ticks_position",
"(",
"'right'",
")",
"if",
"ax",
".",
"is_first_row",
"(",
")",
":",
"ax",
".",
"xaxis",
".",
"set_ticks_position",
"(",
"'top'",
")",
"if",
"ax",
".",
"is_last_row",
"(",
")",
":",
"ax",
".",
"xaxis",
".",
"set_ticks_position",
"(",
"'bottom'",
")",
"# set up colour scales",
"if",
"colourful",
":",
"cmlist",
"=",
"[",
"'Blues'",
",",
"'BuGn'",
",",
"'BuPu'",
",",
"'GnBu'",
",",
"'Greens'",
",",
"'Greys'",
",",
"'Oranges'",
",",
"'OrRd'",
",",
"'PuBu'",
",",
"'PuBuGn'",
",",
"'PuRd'",
",",
"'Purples'",
",",
"'RdPu'",
",",
"'Reds'",
",",
"'YlGn'",
",",
"'YlGnBu'",
",",
"'YlOrBr'",
",",
"'YlOrRd'",
"]",
"else",
":",
"cmlist",
"=",
"[",
"'Greys'",
"]",
"while",
"len",
"(",
"cmlist",
")",
"<",
"len",
"(",
"analytes",
")",
":",
"cmlist",
"*=",
"2",
"udict",
"=",
"{",
"}",
"for",
"i",
",",
"j",
"in",
"zip",
"(",
"*",
"np",
".",
"triu_indices_from",
"(",
"axes",
",",
"k",
"=",
"1",
")",
")",
":",
"for",
"x",
",",
"y",
"in",
"[",
"(",
"i",
",",
"j",
")",
",",
"(",
"j",
",",
"i",
")",
"]",
":",
"# set unit multipliers",
"mx",
",",
"ux",
"=",
"unitpicker",
"(",
"np",
".",
"nanmean",
"(",
"self",
".",
"focus",
"[",
"analytes",
"[",
"x",
"]",
"]",
")",
",",
"denominator",
"=",
"self",
".",
"internal_standard",
",",
"focus_stage",
"=",
"self",
".",
"focus_stage",
")",
"my",
",",
"uy",
"=",
"unitpicker",
"(",
"np",
".",
"nanmean",
"(",
"self",
".",
"focus",
"[",
"analytes",
"[",
"y",
"]",
"]",
")",
",",
"denominator",
"=",
"self",
".",
"internal_standard",
",",
"focus_stage",
"=",
"self",
".",
"focus_stage",
")",
"udict",
"[",
"analytes",
"[",
"x",
"]",
"]",
"=",
"(",
"x",
",",
"ux",
")",
"# get filter",
"xd",
"=",
"nominal_values",
"(",
"self",
".",
"focus",
"[",
"analytes",
"[",
"x",
"]",
"]",
")",
"yd",
"=",
"nominal_values",
"(",
"self",
".",
"focus",
"[",
"analytes",
"[",
"y",
"]",
"]",
")",
"ind",
"=",
"(",
"self",
".",
"filt",
".",
"grab_filt",
"(",
"filt",
",",
"analytes",
"[",
"x",
"]",
")",
"&",
"self",
".",
"filt",
".",
"grab_filt",
"(",
"filt",
",",
"analytes",
"[",
"y",
"]",
")",
"&",
"~",
"np",
".",
"isnan",
"(",
"xd",
")",
"&",
"~",
"np",
".",
"isnan",
"(",
"yd",
")",
")",
"# make plot",
"pi",
"=",
"xd",
"[",
"ind",
"]",
"*",
"mx",
"pj",
"=",
"yd",
"[",
"ind",
"]",
"*",
"my",
"# determine normalisation shceme",
"if",
"lognorm",
":",
"norm",
"=",
"mpl",
".",
"colors",
".",
"LogNorm",
"(",
")",
"else",
":",
"norm",
"=",
"None",
"# draw plots",
"axes",
"[",
"i",
",",
"j",
"]",
".",
"hist2d",
"(",
"pj",
",",
"pi",
",",
"bins",
",",
"norm",
"=",
"norm",
",",
"cmap",
"=",
"plt",
".",
"get_cmap",
"(",
"cmlist",
"[",
"i",
"]",
")",
")",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"hist2d",
"(",
"pi",
",",
"pj",
",",
"bins",
",",
"norm",
"=",
"norm",
",",
"cmap",
"=",
"plt",
".",
"get_cmap",
"(",
"cmlist",
"[",
"j",
"]",
")",
")",
"axes",
"[",
"x",
",",
"y",
"]",
".",
"set_ylim",
"(",
"[",
"pi",
".",
"min",
"(",
")",
",",
"pi",
".",
"max",
"(",
")",
"]",
")",
"axes",
"[",
"x",
",",
"y",
"]",
".",
"set_xlim",
"(",
"[",
"pj",
".",
"min",
"(",
")",
",",
"pj",
".",
"max",
"(",
")",
"]",
")",
"# diagonal labels",
"for",
"a",
",",
"(",
"i",
",",
"u",
")",
"in",
"udict",
".",
"items",
"(",
")",
":",
"axes",
"[",
"i",
",",
"i",
"]",
".",
"annotate",
"(",
"a",
"+",
"'\\n'",
"+",
"u",
",",
"(",
"0.5",
",",
"0.5",
")",
",",
"xycoords",
"=",
"'axes fraction'",
",",
"ha",
"=",
"'center'",
",",
"va",
"=",
"'center'",
")",
"# switch on alternating axes",
"for",
"i",
",",
"j",
"in",
"zip",
"(",
"range",
"(",
"numvars",
")",
",",
"itertools",
".",
"cycle",
"(",
"(",
"-",
"1",
",",
"0",
")",
")",
")",
":",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"xaxis",
".",
"set_visible",
"(",
"True",
")",
"for",
"label",
"in",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"get_xticklabels",
"(",
")",
":",
"label",
".",
"set_rotation",
"(",
"90",
")",
"axes",
"[",
"i",
",",
"j",
"]",
".",
"yaxis",
".",
"set_visible",
"(",
"True",
")",
"axes",
"[",
"0",
",",
"0",
"]",
".",
"set_title",
"(",
"self",
".",
"sample",
",",
"weight",
"=",
"'bold'",
",",
"x",
"=",
"0.05",
",",
"ha",
"=",
"'left'",
")",
"return",
"fig",
",",
"axes"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
D.crossplot_filters
|
Plot the results of a group of filters in a crossplot.
Parameters
----------
filter_string : str
A string that identifies a group of filters.
e.g. 'test' would plot all filters with 'test' in the
name.
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
Returns
-------
fig, axes objects
|
latools/D_obj.py
|
def crossplot_filters(self, filter_string, analytes=None):
"""
Plot the results of a group of filters in a crossplot.
Parameters
----------
filter_string : str
A string that identifies a group of filters.
e.g. 'test' would plot all filters with 'test' in the
name.
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
Returns
-------
fig, axes objects
"""
if analytes is None:
analytes = [a for a in self.analytes if 'Ca' not in a]
# isolate relevant filters
filts = self.filt.components.keys()
cfilts = [f for f in filts if filter_string in f]
flab = re.compile('.*_(.*)$') # regex to get cluster number
# set up axes
numvars = len(analytes)
fig, axes = plt.subplots(nrows=numvars, ncols=numvars,
figsize=(12, 12))
fig.subplots_adjust(hspace=0.05, wspace=0.05)
for ax in axes.flat:
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
if ax.is_first_col():
ax.yaxis.set_ticks_position('left')
if ax.is_last_col():
ax.yaxis.set_ticks_position('right')
if ax.is_first_row():
ax.xaxis.set_ticks_position('top')
if ax.is_last_row():
ax.xaxis.set_ticks_position('bottom')
# isolate nominal_values for all analytes
focus = {k: nominal_values(v) for k, v in self.focus.items()}
# determine units for all analytes
udict = {a: unitpicker(np.nanmean(focus[a]),
denominator=self.internal_standard,
focus_stage=self.focus_stage) for a in analytes}
# determine ranges for all analytes
rdict = {a: (np.nanmin(focus[a] * udict[a][0]),
np.nanmax(focus[a] * udict[a][0])) for a in analytes}
for f in cfilts:
ind = self.filt.grab_filt(f)
focus = {k: nominal_values(v[ind]) for k, v in self.focus.items()}
lab = flab.match(f).groups()[0]
axes[0, 0].scatter([], [], s=10, label=lab)
for i, j in zip(*np.triu_indices_from(axes, k=1)):
# get analytes
ai = analytes[i]
aj = analytes[j]
# remove nan, apply multipliers
pi = focus[ai][~np.isnan(focus[ai])] * udict[ai][0]
pj = focus[aj][~np.isnan(focus[aj])] * udict[aj][0]
# make plot
axes[i, j].scatter(pj, pi, alpha=0.4, s=10, lw=0)
axes[j, i].scatter(pi, pj, alpha=0.4, s=10, lw=0)
axes[i, j].set_ylim(*rdict[ai])
axes[i, j].set_xlim(*rdict[aj])
axes[j, i].set_ylim(*rdict[aj])
axes[j, i].set_xlim(*rdict[ai])
# diagonal labels
for a, n in zip(analytes, np.arange(len(analytes))):
axes[n, n].annotate(a + '\n' + udict[a][1], (0.5, 0.5),
xycoords='axes fraction',
ha='center', va='center')
axes[n, n].set_xlim(*rdict[a])
axes[n, n].set_ylim(*rdict[a])
axes[0, 0].legend(loc='upper left', title=filter_string, fontsize=8)
# switch on alternating axes
for i, j in zip(range(numvars), itertools.cycle((-1, 0))):
axes[j, i].xaxis.set_visible(True)
for label in axes[j, i].get_xticklabels():
label.set_rotation(90)
axes[i, j].yaxis.set_visible(True)
return fig, axes
|
def crossplot_filters(self, filter_string, analytes=None):
"""
Plot the results of a group of filters in a crossplot.
Parameters
----------
filter_string : str
A string that identifies a group of filters.
e.g. 'test' would plot all filters with 'test' in the
name.
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
Returns
-------
fig, axes objects
"""
if analytes is None:
analytes = [a for a in self.analytes if 'Ca' not in a]
# isolate relevant filters
filts = self.filt.components.keys()
cfilts = [f for f in filts if filter_string in f]
flab = re.compile('.*_(.*)$') # regex to get cluster number
# set up axes
numvars = len(analytes)
fig, axes = plt.subplots(nrows=numvars, ncols=numvars,
figsize=(12, 12))
fig.subplots_adjust(hspace=0.05, wspace=0.05)
for ax in axes.flat:
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
if ax.is_first_col():
ax.yaxis.set_ticks_position('left')
if ax.is_last_col():
ax.yaxis.set_ticks_position('right')
if ax.is_first_row():
ax.xaxis.set_ticks_position('top')
if ax.is_last_row():
ax.xaxis.set_ticks_position('bottom')
# isolate nominal_values for all analytes
focus = {k: nominal_values(v) for k, v in self.focus.items()}
# determine units for all analytes
udict = {a: unitpicker(np.nanmean(focus[a]),
denominator=self.internal_standard,
focus_stage=self.focus_stage) for a in analytes}
# determine ranges for all analytes
rdict = {a: (np.nanmin(focus[a] * udict[a][0]),
np.nanmax(focus[a] * udict[a][0])) for a in analytes}
for f in cfilts:
ind = self.filt.grab_filt(f)
focus = {k: nominal_values(v[ind]) for k, v in self.focus.items()}
lab = flab.match(f).groups()[0]
axes[0, 0].scatter([], [], s=10, label=lab)
for i, j in zip(*np.triu_indices_from(axes, k=1)):
# get analytes
ai = analytes[i]
aj = analytes[j]
# remove nan, apply multipliers
pi = focus[ai][~np.isnan(focus[ai])] * udict[ai][0]
pj = focus[aj][~np.isnan(focus[aj])] * udict[aj][0]
# make plot
axes[i, j].scatter(pj, pi, alpha=0.4, s=10, lw=0)
axes[j, i].scatter(pi, pj, alpha=0.4, s=10, lw=0)
axes[i, j].set_ylim(*rdict[ai])
axes[i, j].set_xlim(*rdict[aj])
axes[j, i].set_ylim(*rdict[aj])
axes[j, i].set_xlim(*rdict[ai])
# diagonal labels
for a, n in zip(analytes, np.arange(len(analytes))):
axes[n, n].annotate(a + '\n' + udict[a][1], (0.5, 0.5),
xycoords='axes fraction',
ha='center', va='center')
axes[n, n].set_xlim(*rdict[a])
axes[n, n].set_ylim(*rdict[a])
axes[0, 0].legend(loc='upper left', title=filter_string, fontsize=8)
# switch on alternating axes
for i, j in zip(range(numvars), itertools.cycle((-1, 0))):
axes[j, i].xaxis.set_visible(True)
for label in axes[j, i].get_xticklabels():
label.set_rotation(90)
axes[i, j].yaxis.set_visible(True)
return fig, axes
|
[
"Plot",
"the",
"results",
"of",
"a",
"group",
"of",
"filters",
"in",
"a",
"crossplot",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L1509-L1606
|
[
"def",
"crossplot_filters",
"(",
"self",
",",
"filter_string",
",",
"analytes",
"=",
"None",
")",
":",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"[",
"a",
"for",
"a",
"in",
"self",
".",
"analytes",
"if",
"'Ca'",
"not",
"in",
"a",
"]",
"# isolate relevant filters",
"filts",
"=",
"self",
".",
"filt",
".",
"components",
".",
"keys",
"(",
")",
"cfilts",
"=",
"[",
"f",
"for",
"f",
"in",
"filts",
"if",
"filter_string",
"in",
"f",
"]",
"flab",
"=",
"re",
".",
"compile",
"(",
"'.*_(.*)$'",
")",
"# regex to get cluster number",
"# set up axes",
"numvars",
"=",
"len",
"(",
"analytes",
")",
"fig",
",",
"axes",
"=",
"plt",
".",
"subplots",
"(",
"nrows",
"=",
"numvars",
",",
"ncols",
"=",
"numvars",
",",
"figsize",
"=",
"(",
"12",
",",
"12",
")",
")",
"fig",
".",
"subplots_adjust",
"(",
"hspace",
"=",
"0.05",
",",
"wspace",
"=",
"0.05",
")",
"for",
"ax",
"in",
"axes",
".",
"flat",
":",
"ax",
".",
"xaxis",
".",
"set_visible",
"(",
"False",
")",
"ax",
".",
"yaxis",
".",
"set_visible",
"(",
"False",
")",
"if",
"ax",
".",
"is_first_col",
"(",
")",
":",
"ax",
".",
"yaxis",
".",
"set_ticks_position",
"(",
"'left'",
")",
"if",
"ax",
".",
"is_last_col",
"(",
")",
":",
"ax",
".",
"yaxis",
".",
"set_ticks_position",
"(",
"'right'",
")",
"if",
"ax",
".",
"is_first_row",
"(",
")",
":",
"ax",
".",
"xaxis",
".",
"set_ticks_position",
"(",
"'top'",
")",
"if",
"ax",
".",
"is_last_row",
"(",
")",
":",
"ax",
".",
"xaxis",
".",
"set_ticks_position",
"(",
"'bottom'",
")",
"# isolate nominal_values for all analytes",
"focus",
"=",
"{",
"k",
":",
"nominal_values",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"focus",
".",
"items",
"(",
")",
"}",
"# determine units for all analytes",
"udict",
"=",
"{",
"a",
":",
"unitpicker",
"(",
"np",
".",
"nanmean",
"(",
"focus",
"[",
"a",
"]",
")",
",",
"denominator",
"=",
"self",
".",
"internal_standard",
",",
"focus_stage",
"=",
"self",
".",
"focus_stage",
")",
"for",
"a",
"in",
"analytes",
"}",
"# determine ranges for all analytes",
"rdict",
"=",
"{",
"a",
":",
"(",
"np",
".",
"nanmin",
"(",
"focus",
"[",
"a",
"]",
"*",
"udict",
"[",
"a",
"]",
"[",
"0",
"]",
")",
",",
"np",
".",
"nanmax",
"(",
"focus",
"[",
"a",
"]",
"*",
"udict",
"[",
"a",
"]",
"[",
"0",
"]",
")",
")",
"for",
"a",
"in",
"analytes",
"}",
"for",
"f",
"in",
"cfilts",
":",
"ind",
"=",
"self",
".",
"filt",
".",
"grab_filt",
"(",
"f",
")",
"focus",
"=",
"{",
"k",
":",
"nominal_values",
"(",
"v",
"[",
"ind",
"]",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"focus",
".",
"items",
"(",
")",
"}",
"lab",
"=",
"flab",
".",
"match",
"(",
"f",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"axes",
"[",
"0",
",",
"0",
"]",
".",
"scatter",
"(",
"[",
"]",
",",
"[",
"]",
",",
"s",
"=",
"10",
",",
"label",
"=",
"lab",
")",
"for",
"i",
",",
"j",
"in",
"zip",
"(",
"*",
"np",
".",
"triu_indices_from",
"(",
"axes",
",",
"k",
"=",
"1",
")",
")",
":",
"# get analytes",
"ai",
"=",
"analytes",
"[",
"i",
"]",
"aj",
"=",
"analytes",
"[",
"j",
"]",
"# remove nan, apply multipliers",
"pi",
"=",
"focus",
"[",
"ai",
"]",
"[",
"~",
"np",
".",
"isnan",
"(",
"focus",
"[",
"ai",
"]",
")",
"]",
"*",
"udict",
"[",
"ai",
"]",
"[",
"0",
"]",
"pj",
"=",
"focus",
"[",
"aj",
"]",
"[",
"~",
"np",
".",
"isnan",
"(",
"focus",
"[",
"aj",
"]",
")",
"]",
"*",
"udict",
"[",
"aj",
"]",
"[",
"0",
"]",
"# make plot",
"axes",
"[",
"i",
",",
"j",
"]",
".",
"scatter",
"(",
"pj",
",",
"pi",
",",
"alpha",
"=",
"0.4",
",",
"s",
"=",
"10",
",",
"lw",
"=",
"0",
")",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"scatter",
"(",
"pi",
",",
"pj",
",",
"alpha",
"=",
"0.4",
",",
"s",
"=",
"10",
",",
"lw",
"=",
"0",
")",
"axes",
"[",
"i",
",",
"j",
"]",
".",
"set_ylim",
"(",
"*",
"rdict",
"[",
"ai",
"]",
")",
"axes",
"[",
"i",
",",
"j",
"]",
".",
"set_xlim",
"(",
"*",
"rdict",
"[",
"aj",
"]",
")",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"set_ylim",
"(",
"*",
"rdict",
"[",
"aj",
"]",
")",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"set_xlim",
"(",
"*",
"rdict",
"[",
"ai",
"]",
")",
"# diagonal labels",
"for",
"a",
",",
"n",
"in",
"zip",
"(",
"analytes",
",",
"np",
".",
"arange",
"(",
"len",
"(",
"analytes",
")",
")",
")",
":",
"axes",
"[",
"n",
",",
"n",
"]",
".",
"annotate",
"(",
"a",
"+",
"'\\n'",
"+",
"udict",
"[",
"a",
"]",
"[",
"1",
"]",
",",
"(",
"0.5",
",",
"0.5",
")",
",",
"xycoords",
"=",
"'axes fraction'",
",",
"ha",
"=",
"'center'",
",",
"va",
"=",
"'center'",
")",
"axes",
"[",
"n",
",",
"n",
"]",
".",
"set_xlim",
"(",
"*",
"rdict",
"[",
"a",
"]",
")",
"axes",
"[",
"n",
",",
"n",
"]",
".",
"set_ylim",
"(",
"*",
"rdict",
"[",
"a",
"]",
")",
"axes",
"[",
"0",
",",
"0",
"]",
".",
"legend",
"(",
"loc",
"=",
"'upper left'",
",",
"title",
"=",
"filter_string",
",",
"fontsize",
"=",
"8",
")",
"# switch on alternating axes",
"for",
"i",
",",
"j",
"in",
"zip",
"(",
"range",
"(",
"numvars",
")",
",",
"itertools",
".",
"cycle",
"(",
"(",
"-",
"1",
",",
"0",
")",
")",
")",
":",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"xaxis",
".",
"set_visible",
"(",
"True",
")",
"for",
"label",
"in",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"get_xticklabels",
"(",
")",
":",
"label",
".",
"set_rotation",
"(",
"90",
")",
"axes",
"[",
"i",
",",
"j",
"]",
".",
"yaxis",
".",
"set_visible",
"(",
"True",
")",
"return",
"fig",
",",
"axes"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
D.filter_report
|
Visualise effect of data filters.
Parameters
----------
filt : str
Exact or partial name of filter to plot. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name will be plotted.
Defaults to all filters.
analyte : str
Name of analyte to plot.
save : str
file path to save the plot
Returns
-------
(fig, axes)
|
latools/D_obj.py
|
def filter_report(self, filt=None, analytes=None, savedir=None, nbin=5):
"""
Visualise effect of data filters.
Parameters
----------
filt : str
Exact or partial name of filter to plot. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name will be plotted.
Defaults to all filters.
analyte : str
Name of analyte to plot.
save : str
file path to save the plot
Returns
-------
(fig, axes)
"""
return plot.filter_report(self, filt, analytes, savedir, nbin)
|
def filter_report(self, filt=None, analytes=None, savedir=None, nbin=5):
"""
Visualise effect of data filters.
Parameters
----------
filt : str
Exact or partial name of filter to plot. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name will be plotted.
Defaults to all filters.
analyte : str
Name of analyte to plot.
save : str
file path to save the plot
Returns
-------
(fig, axes)
"""
return plot.filter_report(self, filt, analytes, savedir, nbin)
|
[
"Visualise",
"effect",
"of",
"data",
"filters",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L1615-L1635
|
[
"def",
"filter_report",
"(",
"self",
",",
"filt",
"=",
"None",
",",
"analytes",
"=",
"None",
",",
"savedir",
"=",
"None",
",",
"nbin",
"=",
"5",
")",
":",
"return",
"plot",
".",
"filter_report",
"(",
"self",
",",
"filt",
",",
"analytes",
",",
"savedir",
",",
"nbin",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
D.get_params
|
Returns paramters used to process data.
Returns
-------
dict
dict of analysis parameters
|
latools/D_obj.py
|
def get_params(self):
"""
Returns paramters used to process data.
Returns
-------
dict
dict of analysis parameters
"""
outputs = ['sample',
'ratio_params',
'despike_params',
'autorange_params',
'bkgcorrect_params']
out = {}
for o in outputs:
out[o] = getattr(self, o)
out['filter_params'] = self.filt.params
out['filter_sequence'] = self.filt.sequence
out['filter_used'] = self.filt.make_keydict()
return out
|
def get_params(self):
"""
Returns paramters used to process data.
Returns
-------
dict
dict of analysis parameters
"""
outputs = ['sample',
'ratio_params',
'despike_params',
'autorange_params',
'bkgcorrect_params']
out = {}
for o in outputs:
out[o] = getattr(self, o)
out['filter_params'] = self.filt.params
out['filter_sequence'] = self.filt.sequence
out['filter_used'] = self.filt.make_keydict()
return out
|
[
"Returns",
"paramters",
"used",
"to",
"process",
"data",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L1638-L1661
|
[
"def",
"get_params",
"(",
"self",
")",
":",
"outputs",
"=",
"[",
"'sample'",
",",
"'ratio_params'",
",",
"'despike_params'",
",",
"'autorange_params'",
",",
"'bkgcorrect_params'",
"]",
"out",
"=",
"{",
"}",
"for",
"o",
"in",
"outputs",
":",
"out",
"[",
"o",
"]",
"=",
"getattr",
"(",
"self",
",",
"o",
")",
"out",
"[",
"'filter_params'",
"]",
"=",
"self",
".",
"filt",
".",
"params",
"out",
"[",
"'filter_sequence'",
"]",
"=",
"self",
".",
"filt",
".",
"sequence",
"out",
"[",
"'filter_used'",
"]",
"=",
"self",
".",
"filt",
".",
"make_keydict",
"(",
")",
"return",
"out"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
tplot
|
Plot analytes as a function of Time.
Parameters
----------
analytes : array_like
list of strings containing names of analytes to plot.
None = all analytes.
figsize : tuple
size of final figure.
scale : str or None
'log' = plot data on log scale
filt : bool, str or dict
False: plot unfiltered data.
True: plot filtered data over unfiltered data.
str: apply filter key to all analytes
dict: apply key to each analyte in dict. Must contain all
analytes plotted. Can use self.filt.keydict.
ranges : bool
show signal/background regions.
stats : bool
plot average and error of each trace, as specified by `stat` and
`err`.
stat : str
average statistic to plot.
err : str
error statistic to plot.
Returns
-------
figure, axis
|
latools/helpers/plot.py
|
def tplot(self, analytes=None, figsize=[10, 4], scale='log', filt=None,
ranges=False, stats=False, stat='nanmean', err='nanstd',
focus_stage=None, err_envelope=False, ax=None):
"""
Plot analytes as a function of Time.
Parameters
----------
analytes : array_like
list of strings containing names of analytes to plot.
None = all analytes.
figsize : tuple
size of final figure.
scale : str or None
'log' = plot data on log scale
filt : bool, str or dict
False: plot unfiltered data.
True: plot filtered data over unfiltered data.
str: apply filter key to all analytes
dict: apply key to each analyte in dict. Must contain all
analytes plotted. Can use self.filt.keydict.
ranges : bool
show signal/background regions.
stats : bool
plot average and error of each trace, as specified by `stat` and
`err`.
stat : str
average statistic to plot.
err : str
error statistic to plot.
Returns
-------
figure, axis
"""
if type(analytes) is str:
analytes = [analytes]
if analytes is None:
analytes = self.analytes
if focus_stage is None:
focus_stage = self.focus_stage
# exclude internal standard from analytes
if focus_stage in ['ratios', 'calibrated']:
analytes = [a for a in analytes if a != self.internal_standard]
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([.1, .12, .77, .8])
ret = True
else:
fig = ax.figure
ret = False
for a in analytes:
x = self.Time
y, yerr = unpack_uncertainties(self.data[focus_stage][a])
if scale is 'log':
ax.set_yscale('log')
y[y == 0] = np.nan
if filt:
ind = self.filt.grab_filt(filt, a)
xf = x.copy()
yf = y.copy()
yerrf = yerr.copy()
if any(~ind):
xf[~ind] = np.nan
yf[~ind] = np.nan
yerrf[~ind] = np.nan
if any(~ind):
ax.plot(x, y, color=self.cmap[a], alpha=.2, lw=0.6)
ax.plot(xf, yf, color=self.cmap[a], label=a)
if err_envelope:
ax.fill_between(xf, yf - yerrf, yf + yerrf, color=self.cmap[a],
alpha=0.2, zorder=-1)
else:
ax.plot(x, y, color=self.cmap[a], label=a)
if err_envelope:
ax.fill_between(x, y - yerr, y + yerr, color=self.cmap[a],
alpha=0.2, zorder=-1)
# Plot averages and error envelopes
if stats and hasattr(self, 'stats'):
warnings.warn('\nStatistic plotting is broken.\nCheck progress here: https://github.com/oscarbranson/latools/issues/18')
pass
# sts = self.stats[sig][0].size
# if sts > 1:
# for n in np.arange(self.n):
# n_ind = ind & (self.ns == n + 1)
# if sum(n_ind) > 2:
# x = [self.Time[n_ind][0], self.Time[n_ind][-1]]
# y = [self.stats[sig][self.stats['analytes'] == a][0][n]] * 2
# yp = ([self.stats[sig][self.stats['analytes'] == a][0][n] +
# self.stats[err][self.stats['analytes'] == a][0][n]] * 2)
# yn = ([self.stats[sig][self.stats['analytes'] == a][0][n] -
# self.stats[err][self.stats['analytes'] == a][0][n]] * 2)
# ax.plot(x, y, color=self.cmap[a], lw=2)
# ax.fill_between(x + x[::-1], yp + yn,
# color=self.cmap[a], alpha=0.4,
# linewidth=0)
# else:
# x = [self.Time[0], self.Time[-1]]
# y = [self.stats[sig][self.stats['analytes'] == a][0]] * 2
# yp = ([self.stats[sig][self.stats['analytes'] == a][0] +
# self.stats[err][self.stats['analytes'] == a][0]] * 2)
# yn = ([self.stats[sig][self.stats['analytes'] == a][0] -
# self.stats[err][self.stats['analytes'] == a][0]] * 2)
# ax.plot(x, y, color=self.cmap[a], lw=2)
# ax.fill_between(x + x[::-1], yp + yn, color=self.cmap[a],
# alpha=0.4, linewidth=0)
if ranges:
for lims in self.bkgrng:
ax.axvspan(*lims, color='k', alpha=0.1, zorder=-1)
for lims in self.sigrng:
ax.axvspan(*lims, color='r', alpha=0.1, zorder=-1)
ax.text(0.01, 0.99, self.sample + ' : ' + focus_stage,
transform=ax.transAxes,
ha='left', va='top')
ax.set_xlabel('Time (s)')
ax.set_xlim(np.nanmin(x), np.nanmax(x))
# y label
ud = {'rawdata': 'counts',
'despiked': 'counts',
'bkgsub': 'background corrected counts',
'ratios': 'counts/{:s} count',
'calibrated': 'mol/mol {:s}'}
if focus_stage in ['ratios', 'calibrated']:
ud[focus_stage] = ud[focus_stage].format(self.internal_standard)
ax.set_ylabel(ud[focus_stage])
# if interactive:
# ax.legend()
# plugins.connect(fig, plugins.MousePosition(fontsize=14))
# display.clear_output(wait=True)
# display.display(fig)
# input('Press [Return] when finished.')
# else:
ax.legend(bbox_to_anchor=(1.15, 1))
if ret:
return fig, ax
|
def tplot(self, analytes=None, figsize=[10, 4], scale='log', filt=None,
ranges=False, stats=False, stat='nanmean', err='nanstd',
focus_stage=None, err_envelope=False, ax=None):
"""
Plot analytes as a function of Time.
Parameters
----------
analytes : array_like
list of strings containing names of analytes to plot.
None = all analytes.
figsize : tuple
size of final figure.
scale : str or None
'log' = plot data on log scale
filt : bool, str or dict
False: plot unfiltered data.
True: plot filtered data over unfiltered data.
str: apply filter key to all analytes
dict: apply key to each analyte in dict. Must contain all
analytes plotted. Can use self.filt.keydict.
ranges : bool
show signal/background regions.
stats : bool
plot average and error of each trace, as specified by `stat` and
`err`.
stat : str
average statistic to plot.
err : str
error statistic to plot.
Returns
-------
figure, axis
"""
if type(analytes) is str:
analytes = [analytes]
if analytes is None:
analytes = self.analytes
if focus_stage is None:
focus_stage = self.focus_stage
# exclude internal standard from analytes
if focus_stage in ['ratios', 'calibrated']:
analytes = [a for a in analytes if a != self.internal_standard]
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([.1, .12, .77, .8])
ret = True
else:
fig = ax.figure
ret = False
for a in analytes:
x = self.Time
y, yerr = unpack_uncertainties(self.data[focus_stage][a])
if scale is 'log':
ax.set_yscale('log')
y[y == 0] = np.nan
if filt:
ind = self.filt.grab_filt(filt, a)
xf = x.copy()
yf = y.copy()
yerrf = yerr.copy()
if any(~ind):
xf[~ind] = np.nan
yf[~ind] = np.nan
yerrf[~ind] = np.nan
if any(~ind):
ax.plot(x, y, color=self.cmap[a], alpha=.2, lw=0.6)
ax.plot(xf, yf, color=self.cmap[a], label=a)
if err_envelope:
ax.fill_between(xf, yf - yerrf, yf + yerrf, color=self.cmap[a],
alpha=0.2, zorder=-1)
else:
ax.plot(x, y, color=self.cmap[a], label=a)
if err_envelope:
ax.fill_between(x, y - yerr, y + yerr, color=self.cmap[a],
alpha=0.2, zorder=-1)
# Plot averages and error envelopes
if stats and hasattr(self, 'stats'):
warnings.warn('\nStatistic plotting is broken.\nCheck progress here: https://github.com/oscarbranson/latools/issues/18')
pass
# sts = self.stats[sig][0].size
# if sts > 1:
# for n in np.arange(self.n):
# n_ind = ind & (self.ns == n + 1)
# if sum(n_ind) > 2:
# x = [self.Time[n_ind][0], self.Time[n_ind][-1]]
# y = [self.stats[sig][self.stats['analytes'] == a][0][n]] * 2
# yp = ([self.stats[sig][self.stats['analytes'] == a][0][n] +
# self.stats[err][self.stats['analytes'] == a][0][n]] * 2)
# yn = ([self.stats[sig][self.stats['analytes'] == a][0][n] -
# self.stats[err][self.stats['analytes'] == a][0][n]] * 2)
# ax.plot(x, y, color=self.cmap[a], lw=2)
# ax.fill_between(x + x[::-1], yp + yn,
# color=self.cmap[a], alpha=0.4,
# linewidth=0)
# else:
# x = [self.Time[0], self.Time[-1]]
# y = [self.stats[sig][self.stats['analytes'] == a][0]] * 2
# yp = ([self.stats[sig][self.stats['analytes'] == a][0] +
# self.stats[err][self.stats['analytes'] == a][0]] * 2)
# yn = ([self.stats[sig][self.stats['analytes'] == a][0] -
# self.stats[err][self.stats['analytes'] == a][0]] * 2)
# ax.plot(x, y, color=self.cmap[a], lw=2)
# ax.fill_between(x + x[::-1], yp + yn, color=self.cmap[a],
# alpha=0.4, linewidth=0)
if ranges:
for lims in self.bkgrng:
ax.axvspan(*lims, color='k', alpha=0.1, zorder=-1)
for lims in self.sigrng:
ax.axvspan(*lims, color='r', alpha=0.1, zorder=-1)
ax.text(0.01, 0.99, self.sample + ' : ' + focus_stage,
transform=ax.transAxes,
ha='left', va='top')
ax.set_xlabel('Time (s)')
ax.set_xlim(np.nanmin(x), np.nanmax(x))
# y label
ud = {'rawdata': 'counts',
'despiked': 'counts',
'bkgsub': 'background corrected counts',
'ratios': 'counts/{:s} count',
'calibrated': 'mol/mol {:s}'}
if focus_stage in ['ratios', 'calibrated']:
ud[focus_stage] = ud[focus_stage].format(self.internal_standard)
ax.set_ylabel(ud[focus_stage])
# if interactive:
# ax.legend()
# plugins.connect(fig, plugins.MousePosition(fontsize=14))
# display.clear_output(wait=True)
# display.display(fig)
# input('Press [Return] when finished.')
# else:
ax.legend(bbox_to_anchor=(1.15, 1))
if ret:
return fig, ax
|
[
"Plot",
"analytes",
"as",
"a",
"function",
"of",
"Time",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/plot.py#L23-L173
|
[
"def",
"tplot",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"figsize",
"=",
"[",
"10",
",",
"4",
"]",
",",
"scale",
"=",
"'log'",
",",
"filt",
"=",
"None",
",",
"ranges",
"=",
"False",
",",
"stats",
"=",
"False",
",",
"stat",
"=",
"'nanmean'",
",",
"err",
"=",
"'nanstd'",
",",
"focus_stage",
"=",
"None",
",",
"err_envelope",
"=",
"False",
",",
"ax",
"=",
"None",
")",
":",
"if",
"type",
"(",
"analytes",
")",
"is",
"str",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"if",
"focus_stage",
"is",
"None",
":",
"focus_stage",
"=",
"self",
".",
"focus_stage",
"# exclude internal standard from analytes",
"if",
"focus_stage",
"in",
"[",
"'ratios'",
",",
"'calibrated'",
"]",
":",
"analytes",
"=",
"[",
"a",
"for",
"a",
"in",
"analytes",
"if",
"a",
"!=",
"self",
".",
"internal_standard",
"]",
"if",
"ax",
"is",
"None",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"figsize",
")",
"ax",
"=",
"fig",
".",
"add_axes",
"(",
"[",
".1",
",",
".12",
",",
".77",
",",
".8",
"]",
")",
"ret",
"=",
"True",
"else",
":",
"fig",
"=",
"ax",
".",
"figure",
"ret",
"=",
"False",
"for",
"a",
"in",
"analytes",
":",
"x",
"=",
"self",
".",
"Time",
"y",
",",
"yerr",
"=",
"unpack_uncertainties",
"(",
"self",
".",
"data",
"[",
"focus_stage",
"]",
"[",
"a",
"]",
")",
"if",
"scale",
"is",
"'log'",
":",
"ax",
".",
"set_yscale",
"(",
"'log'",
")",
"y",
"[",
"y",
"==",
"0",
"]",
"=",
"np",
".",
"nan",
"if",
"filt",
":",
"ind",
"=",
"self",
".",
"filt",
".",
"grab_filt",
"(",
"filt",
",",
"a",
")",
"xf",
"=",
"x",
".",
"copy",
"(",
")",
"yf",
"=",
"y",
".",
"copy",
"(",
")",
"yerrf",
"=",
"yerr",
".",
"copy",
"(",
")",
"if",
"any",
"(",
"~",
"ind",
")",
":",
"xf",
"[",
"~",
"ind",
"]",
"=",
"np",
".",
"nan",
"yf",
"[",
"~",
"ind",
"]",
"=",
"np",
".",
"nan",
"yerrf",
"[",
"~",
"ind",
"]",
"=",
"np",
".",
"nan",
"if",
"any",
"(",
"~",
"ind",
")",
":",
"ax",
".",
"plot",
"(",
"x",
",",
"y",
",",
"color",
"=",
"self",
".",
"cmap",
"[",
"a",
"]",
",",
"alpha",
"=",
".2",
",",
"lw",
"=",
"0.6",
")",
"ax",
".",
"plot",
"(",
"xf",
",",
"yf",
",",
"color",
"=",
"self",
".",
"cmap",
"[",
"a",
"]",
",",
"label",
"=",
"a",
")",
"if",
"err_envelope",
":",
"ax",
".",
"fill_between",
"(",
"xf",
",",
"yf",
"-",
"yerrf",
",",
"yf",
"+",
"yerrf",
",",
"color",
"=",
"self",
".",
"cmap",
"[",
"a",
"]",
",",
"alpha",
"=",
"0.2",
",",
"zorder",
"=",
"-",
"1",
")",
"else",
":",
"ax",
".",
"plot",
"(",
"x",
",",
"y",
",",
"color",
"=",
"self",
".",
"cmap",
"[",
"a",
"]",
",",
"label",
"=",
"a",
")",
"if",
"err_envelope",
":",
"ax",
".",
"fill_between",
"(",
"x",
",",
"y",
"-",
"yerr",
",",
"y",
"+",
"yerr",
",",
"color",
"=",
"self",
".",
"cmap",
"[",
"a",
"]",
",",
"alpha",
"=",
"0.2",
",",
"zorder",
"=",
"-",
"1",
")",
"# Plot averages and error envelopes",
"if",
"stats",
"and",
"hasattr",
"(",
"self",
",",
"'stats'",
")",
":",
"warnings",
".",
"warn",
"(",
"'\\nStatistic plotting is broken.\\nCheck progress here: https://github.com/oscarbranson/latools/issues/18'",
")",
"pass",
"# sts = self.stats[sig][0].size",
"# if sts > 1:",
"# for n in np.arange(self.n):",
"# n_ind = ind & (self.ns == n + 1)",
"# if sum(n_ind) > 2:",
"# x = [self.Time[n_ind][0], self.Time[n_ind][-1]]",
"# y = [self.stats[sig][self.stats['analytes'] == a][0][n]] * 2",
"# yp = ([self.stats[sig][self.stats['analytes'] == a][0][n] +",
"# self.stats[err][self.stats['analytes'] == a][0][n]] * 2)",
"# yn = ([self.stats[sig][self.stats['analytes'] == a][0][n] -",
"# self.stats[err][self.stats['analytes'] == a][0][n]] * 2)",
"# ax.plot(x, y, color=self.cmap[a], lw=2)",
"# ax.fill_between(x + x[::-1], yp + yn,",
"# color=self.cmap[a], alpha=0.4,",
"# linewidth=0)",
"# else:",
"# x = [self.Time[0], self.Time[-1]]",
"# y = [self.stats[sig][self.stats['analytes'] == a][0]] * 2",
"# yp = ([self.stats[sig][self.stats['analytes'] == a][0] +",
"# self.stats[err][self.stats['analytes'] == a][0]] * 2)",
"# yn = ([self.stats[sig][self.stats['analytes'] == a][0] -",
"# self.stats[err][self.stats['analytes'] == a][0]] * 2)",
"# ax.plot(x, y, color=self.cmap[a], lw=2)",
"# ax.fill_between(x + x[::-1], yp + yn, color=self.cmap[a],",
"# alpha=0.4, linewidth=0)",
"if",
"ranges",
":",
"for",
"lims",
"in",
"self",
".",
"bkgrng",
":",
"ax",
".",
"axvspan",
"(",
"*",
"lims",
",",
"color",
"=",
"'k'",
",",
"alpha",
"=",
"0.1",
",",
"zorder",
"=",
"-",
"1",
")",
"for",
"lims",
"in",
"self",
".",
"sigrng",
":",
"ax",
".",
"axvspan",
"(",
"*",
"lims",
",",
"color",
"=",
"'r'",
",",
"alpha",
"=",
"0.1",
",",
"zorder",
"=",
"-",
"1",
")",
"ax",
".",
"text",
"(",
"0.01",
",",
"0.99",
",",
"self",
".",
"sample",
"+",
"' : '",
"+",
"focus_stage",
",",
"transform",
"=",
"ax",
".",
"transAxes",
",",
"ha",
"=",
"'left'",
",",
"va",
"=",
"'top'",
")",
"ax",
".",
"set_xlabel",
"(",
"'Time (s)'",
")",
"ax",
".",
"set_xlim",
"(",
"np",
".",
"nanmin",
"(",
"x",
")",
",",
"np",
".",
"nanmax",
"(",
"x",
")",
")",
"# y label",
"ud",
"=",
"{",
"'rawdata'",
":",
"'counts'",
",",
"'despiked'",
":",
"'counts'",
",",
"'bkgsub'",
":",
"'background corrected counts'",
",",
"'ratios'",
":",
"'counts/{:s} count'",
",",
"'calibrated'",
":",
"'mol/mol {:s}'",
"}",
"if",
"focus_stage",
"in",
"[",
"'ratios'",
",",
"'calibrated'",
"]",
":",
"ud",
"[",
"focus_stage",
"]",
"=",
"ud",
"[",
"focus_stage",
"]",
".",
"format",
"(",
"self",
".",
"internal_standard",
")",
"ax",
".",
"set_ylabel",
"(",
"ud",
"[",
"focus_stage",
"]",
")",
"# if interactive:",
"# ax.legend()",
"# plugins.connect(fig, plugins.MousePosition(fontsize=14))",
"# display.clear_output(wait=True)",
"# display.display(fig)",
"# input('Press [Return] when finished.')",
"# else:",
"ax",
".",
"legend",
"(",
"bbox_to_anchor",
"=",
"(",
"1.15",
",",
"1",
")",
")",
"if",
"ret",
":",
"return",
"fig",
",",
"ax"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
gplot
|
Plot analytes gradients as a function of Time.
Parameters
----------
analytes : array_like
list of strings containing names of analytes to plot.
None = all analytes.
win : int
The window over which to calculate the rolling gradient.
figsize : tuple
size of final figure.
ranges : bool
show signal/background regions.
Returns
-------
figure, axis
|
latools/helpers/plot.py
|
def gplot(self, analytes=None, win=25, figsize=[10, 4],
ranges=False, focus_stage=None, ax=None, recalc=True):
"""
Plot analytes gradients as a function of Time.
Parameters
----------
analytes : array_like
list of strings containing names of analytes to plot.
None = all analytes.
win : int
The window over which to calculate the rolling gradient.
figsize : tuple
size of final figure.
ranges : bool
show signal/background regions.
Returns
-------
figure, axis
"""
if type(analytes) is str:
analytes = [analytes]
if analytes is None:
analytes = self.analytes
if focus_stage is None:
focus_stage = self.focus_stage
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([.1, .12, .77, .8])
ret = True
else:
fig = ax.figure
ret = False
x = self.Time
if recalc or not self.grads_calced:
self.grads = calc_grads(x, self.data[focus_stage], analytes, win)
self.grads_calce = True
for a in analytes:
ax.plot(x, self.grads[a], color=self.cmap[a], label=a)
if ranges:
for lims in self.bkgrng:
ax.axvspan(*lims, color='k', alpha=0.1, zorder=-1)
for lims in self.sigrng:
ax.axvspan(*lims, color='r', alpha=0.1, zorder=-1)
ax.text(0.01, 0.99, self.sample + ' : ' + self.focus_stage + ' : gradient',
transform=ax.transAxes,
ha='left', va='top')
ax.set_xlabel('Time (s)')
ax.set_xlim(np.nanmin(x), np.nanmax(x))
# y label
ud = {'rawdata': 'counts/s',
'despiked': 'counts/s',
'bkgsub': 'background corrected counts/s',
'ratios': 'counts/{:s} count/s',
'calibrated': 'mol/mol {:s}/s'}
if focus_stage in ['ratios', 'calibrated']:
ud[focus_stage] = ud[focus_stage].format(self.internal_standard)
ax.set_ylabel(ud[focus_stage])
# y tick format
def yfmt(x, p):
return '{:.0e}'.format(x)
ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(yfmt))
ax.legend(bbox_to_anchor=(1.15, 1))
ax.axhline(0, color='k', lw=1, ls='dashed', alpha=0.5)
if ret:
return fig, ax
|
def gplot(self, analytes=None, win=25, figsize=[10, 4],
ranges=False, focus_stage=None, ax=None, recalc=True):
"""
Plot analytes gradients as a function of Time.
Parameters
----------
analytes : array_like
list of strings containing names of analytes to plot.
None = all analytes.
win : int
The window over which to calculate the rolling gradient.
figsize : tuple
size of final figure.
ranges : bool
show signal/background regions.
Returns
-------
figure, axis
"""
if type(analytes) is str:
analytes = [analytes]
if analytes is None:
analytes = self.analytes
if focus_stage is None:
focus_stage = self.focus_stage
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([.1, .12, .77, .8])
ret = True
else:
fig = ax.figure
ret = False
x = self.Time
if recalc or not self.grads_calced:
self.grads = calc_grads(x, self.data[focus_stage], analytes, win)
self.grads_calce = True
for a in analytes:
ax.plot(x, self.grads[a], color=self.cmap[a], label=a)
if ranges:
for lims in self.bkgrng:
ax.axvspan(*lims, color='k', alpha=0.1, zorder=-1)
for lims in self.sigrng:
ax.axvspan(*lims, color='r', alpha=0.1, zorder=-1)
ax.text(0.01, 0.99, self.sample + ' : ' + self.focus_stage + ' : gradient',
transform=ax.transAxes,
ha='left', va='top')
ax.set_xlabel('Time (s)')
ax.set_xlim(np.nanmin(x), np.nanmax(x))
# y label
ud = {'rawdata': 'counts/s',
'despiked': 'counts/s',
'bkgsub': 'background corrected counts/s',
'ratios': 'counts/{:s} count/s',
'calibrated': 'mol/mol {:s}/s'}
if focus_stage in ['ratios', 'calibrated']:
ud[focus_stage] = ud[focus_stage].format(self.internal_standard)
ax.set_ylabel(ud[focus_stage])
# y tick format
def yfmt(x, p):
return '{:.0e}'.format(x)
ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(yfmt))
ax.legend(bbox_to_anchor=(1.15, 1))
ax.axhline(0, color='k', lw=1, ls='dashed', alpha=0.5)
if ret:
return fig, ax
|
[
"Plot",
"analytes",
"gradients",
"as",
"a",
"function",
"of",
"Time",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/plot.py#L175-L254
|
[
"def",
"gplot",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"win",
"=",
"25",
",",
"figsize",
"=",
"[",
"10",
",",
"4",
"]",
",",
"ranges",
"=",
"False",
",",
"focus_stage",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"recalc",
"=",
"True",
")",
":",
"if",
"type",
"(",
"analytes",
")",
"is",
"str",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"if",
"focus_stage",
"is",
"None",
":",
"focus_stage",
"=",
"self",
".",
"focus_stage",
"if",
"ax",
"is",
"None",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"figsize",
")",
"ax",
"=",
"fig",
".",
"add_axes",
"(",
"[",
".1",
",",
".12",
",",
".77",
",",
".8",
"]",
")",
"ret",
"=",
"True",
"else",
":",
"fig",
"=",
"ax",
".",
"figure",
"ret",
"=",
"False",
"x",
"=",
"self",
".",
"Time",
"if",
"recalc",
"or",
"not",
"self",
".",
"grads_calced",
":",
"self",
".",
"grads",
"=",
"calc_grads",
"(",
"x",
",",
"self",
".",
"data",
"[",
"focus_stage",
"]",
",",
"analytes",
",",
"win",
")",
"self",
".",
"grads_calce",
"=",
"True",
"for",
"a",
"in",
"analytes",
":",
"ax",
".",
"plot",
"(",
"x",
",",
"self",
".",
"grads",
"[",
"a",
"]",
",",
"color",
"=",
"self",
".",
"cmap",
"[",
"a",
"]",
",",
"label",
"=",
"a",
")",
"if",
"ranges",
":",
"for",
"lims",
"in",
"self",
".",
"bkgrng",
":",
"ax",
".",
"axvspan",
"(",
"*",
"lims",
",",
"color",
"=",
"'k'",
",",
"alpha",
"=",
"0.1",
",",
"zorder",
"=",
"-",
"1",
")",
"for",
"lims",
"in",
"self",
".",
"sigrng",
":",
"ax",
".",
"axvspan",
"(",
"*",
"lims",
",",
"color",
"=",
"'r'",
",",
"alpha",
"=",
"0.1",
",",
"zorder",
"=",
"-",
"1",
")",
"ax",
".",
"text",
"(",
"0.01",
",",
"0.99",
",",
"self",
".",
"sample",
"+",
"' : '",
"+",
"self",
".",
"focus_stage",
"+",
"' : gradient'",
",",
"transform",
"=",
"ax",
".",
"transAxes",
",",
"ha",
"=",
"'left'",
",",
"va",
"=",
"'top'",
")",
"ax",
".",
"set_xlabel",
"(",
"'Time (s)'",
")",
"ax",
".",
"set_xlim",
"(",
"np",
".",
"nanmin",
"(",
"x",
")",
",",
"np",
".",
"nanmax",
"(",
"x",
")",
")",
"# y label",
"ud",
"=",
"{",
"'rawdata'",
":",
"'counts/s'",
",",
"'despiked'",
":",
"'counts/s'",
",",
"'bkgsub'",
":",
"'background corrected counts/s'",
",",
"'ratios'",
":",
"'counts/{:s} count/s'",
",",
"'calibrated'",
":",
"'mol/mol {:s}/s'",
"}",
"if",
"focus_stage",
"in",
"[",
"'ratios'",
",",
"'calibrated'",
"]",
":",
"ud",
"[",
"focus_stage",
"]",
"=",
"ud",
"[",
"focus_stage",
"]",
".",
"format",
"(",
"self",
".",
"internal_standard",
")",
"ax",
".",
"set_ylabel",
"(",
"ud",
"[",
"focus_stage",
"]",
")",
"# y tick format",
"def",
"yfmt",
"(",
"x",
",",
"p",
")",
":",
"return",
"'{:.0e}'",
".",
"format",
"(",
"x",
")",
"ax",
".",
"yaxis",
".",
"set_major_formatter",
"(",
"mpl",
".",
"ticker",
".",
"FuncFormatter",
"(",
"yfmt",
")",
")",
"ax",
".",
"legend",
"(",
"bbox_to_anchor",
"=",
"(",
"1.15",
",",
"1",
")",
")",
"ax",
".",
"axhline",
"(",
"0",
",",
"color",
"=",
"'k'",
",",
"lw",
"=",
"1",
",",
"ls",
"=",
"'dashed'",
",",
"alpha",
"=",
"0.5",
")",
"if",
"ret",
":",
"return",
"fig",
",",
"ax"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
crossplot
|
Plot analytes against each other.
The number of plots is n**2 - n, where n = len(keys).
Parameters
----------
dat : dict
A dictionary of key: data pairs, where data is the same
length in each entry.
keys : optional, array_like or str
The keys of dat to plot. Defaults to all keys.
lognorm : bool
Whether or not to log normalise the colour scale
of the 2D histogram.
bins : int
The number of bins in the 2D histogram.
figsize : tuple
colourful : bool
Returns
-------
(fig, axes)
|
latools/helpers/plot.py
|
def crossplot(dat, keys=None, lognorm=True, bins=25, figsize=(12, 12),
colourful=True, focus_stage=None, denominator=None,
mode='hist2d', cmap=None, **kwargs):
"""
Plot analytes against each other.
The number of plots is n**2 - n, where n = len(keys).
Parameters
----------
dat : dict
A dictionary of key: data pairs, where data is the same
length in each entry.
keys : optional, array_like or str
The keys of dat to plot. Defaults to all keys.
lognorm : bool
Whether or not to log normalise the colour scale
of the 2D histogram.
bins : int
The number of bins in the 2D histogram.
figsize : tuple
colourful : bool
Returns
-------
(fig, axes)
"""
if keys is None:
keys = list(dat.keys())
numvar = len(keys)
if figsize[0] < 1.5 * numvar:
figsize = [1.5 * numvar] * 2
fig, axes = plt.subplots(nrows=numvar, ncols=numvar,
figsize=(12, 12))
fig.subplots_adjust(hspace=0.05, wspace=0.05)
for ax in axes.flat:
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
if ax.is_first_col():
ax.yaxis.set_ticks_position('left')
if ax.is_last_col():
ax.yaxis.set_ticks_position('right')
if ax.is_first_row():
ax.xaxis.set_ticks_position('top')
if ax.is_last_row():
ax.xaxis.set_ticks_position('bottom')
# set up colour scales
if colourful:
cmlist = ['Blues', 'BuGn', 'BuPu', 'GnBu',
'Greens', 'Greys', 'Oranges', 'OrRd',
'PuBu', 'PuBuGn', 'PuRd', 'Purples',
'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']
else:
cmlist = ['Greys']
if cmap is None and mode == 'scatter':
cmap = {k: 'k' for k in dat.keys()}
while len(cmlist) < len(keys):
cmlist *= 2
# isolate nominal_values for all keys
focus = {k: nominal_values(dat[k]) for k in keys}
# determine units for all keys
udict = {a: unitpicker(np.nanmean(focus[a]),
focus_stage=focus_stage,
denominator=denominator) for a in keys}
# determine ranges for all analytes
rdict = {a: (np.nanmin(focus[a] * udict[a][0]),
np.nanmax(focus[a] * udict[a][0])) for a in keys}
for i, j in tqdm(zip(*np.triu_indices_from(axes, k=1)), desc='Drawing Plots',
total=sum(range(len(keys)))):
# get analytes
ai = keys[i]
aj = keys[j]
# remove nan, apply multipliers
pi = focus[ai] * udict[ai][0]
pj = focus[aj] * udict[aj][0]
# determine normalisation shceme
if lognorm:
norm = mpl.colors.LogNorm()
else:
norm = None
# draw plots
if mode == 'hist2d':
# remove nan
pi = pi[~np.isnan(pi)]
pj = pj[~np.isnan(pj)]
axes[i, j].hist2d(pj, pi, bins,
norm=norm,
cmap=plt.get_cmap(cmlist[i]))
axes[j, i].hist2d(pi, pj, bins,
norm=norm,
cmap=plt.get_cmap(cmlist[j]))
elif mode == 'scatter':
axes[i, j].scatter(pj, pi, s=10,
color=cmap[ai], lw=0.5, edgecolor='k',
alpha=0.4)
axes[j, i].scatter(pi, pj, s=10,
color=cmap[aj], lw=0.5, edgecolor='k',
alpha=0.4)
else:
raise ValueError("invalid mode. Must be 'hist2d' or 'scatter'.")
axes[i, j].set_ylim(*rdict[ai])
axes[i, j].set_xlim(*rdict[aj])
axes[j, i].set_ylim(*rdict[aj])
axes[j, i].set_xlim(*rdict[ai])
# diagonal labels
for a, n in zip(keys, np.arange(len(keys))):
axes[n, n].annotate(a + '\n' + udict[a][1], (0.5, 0.5),
xycoords='axes fraction',
ha='center', va='center', fontsize=8)
axes[n, n].set_xlim(*rdict[a])
axes[n, n].set_ylim(*rdict[a])
# switch on alternating axes
for i, j in zip(range(numvar), itertools.cycle((-1, 0))):
axes[j, i].xaxis.set_visible(True)
for label in axes[j, i].get_xticklabels():
label.set_rotation(90)
axes[i, j].yaxis.set_visible(True)
return fig, axes
|
def crossplot(dat, keys=None, lognorm=True, bins=25, figsize=(12, 12),
colourful=True, focus_stage=None, denominator=None,
mode='hist2d', cmap=None, **kwargs):
"""
Plot analytes against each other.
The number of plots is n**2 - n, where n = len(keys).
Parameters
----------
dat : dict
A dictionary of key: data pairs, where data is the same
length in each entry.
keys : optional, array_like or str
The keys of dat to plot. Defaults to all keys.
lognorm : bool
Whether or not to log normalise the colour scale
of the 2D histogram.
bins : int
The number of bins in the 2D histogram.
figsize : tuple
colourful : bool
Returns
-------
(fig, axes)
"""
if keys is None:
keys = list(dat.keys())
numvar = len(keys)
if figsize[0] < 1.5 * numvar:
figsize = [1.5 * numvar] * 2
fig, axes = plt.subplots(nrows=numvar, ncols=numvar,
figsize=(12, 12))
fig.subplots_adjust(hspace=0.05, wspace=0.05)
for ax in axes.flat:
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
if ax.is_first_col():
ax.yaxis.set_ticks_position('left')
if ax.is_last_col():
ax.yaxis.set_ticks_position('right')
if ax.is_first_row():
ax.xaxis.set_ticks_position('top')
if ax.is_last_row():
ax.xaxis.set_ticks_position('bottom')
# set up colour scales
if colourful:
cmlist = ['Blues', 'BuGn', 'BuPu', 'GnBu',
'Greens', 'Greys', 'Oranges', 'OrRd',
'PuBu', 'PuBuGn', 'PuRd', 'Purples',
'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']
else:
cmlist = ['Greys']
if cmap is None and mode == 'scatter':
cmap = {k: 'k' for k in dat.keys()}
while len(cmlist) < len(keys):
cmlist *= 2
# isolate nominal_values for all keys
focus = {k: nominal_values(dat[k]) for k in keys}
# determine units for all keys
udict = {a: unitpicker(np.nanmean(focus[a]),
focus_stage=focus_stage,
denominator=denominator) for a in keys}
# determine ranges for all analytes
rdict = {a: (np.nanmin(focus[a] * udict[a][0]),
np.nanmax(focus[a] * udict[a][0])) for a in keys}
for i, j in tqdm(zip(*np.triu_indices_from(axes, k=1)), desc='Drawing Plots',
total=sum(range(len(keys)))):
# get analytes
ai = keys[i]
aj = keys[j]
# remove nan, apply multipliers
pi = focus[ai] * udict[ai][0]
pj = focus[aj] * udict[aj][0]
# determine normalisation shceme
if lognorm:
norm = mpl.colors.LogNorm()
else:
norm = None
# draw plots
if mode == 'hist2d':
# remove nan
pi = pi[~np.isnan(pi)]
pj = pj[~np.isnan(pj)]
axes[i, j].hist2d(pj, pi, bins,
norm=norm,
cmap=plt.get_cmap(cmlist[i]))
axes[j, i].hist2d(pi, pj, bins,
norm=norm,
cmap=plt.get_cmap(cmlist[j]))
elif mode == 'scatter':
axes[i, j].scatter(pj, pi, s=10,
color=cmap[ai], lw=0.5, edgecolor='k',
alpha=0.4)
axes[j, i].scatter(pi, pj, s=10,
color=cmap[aj], lw=0.5, edgecolor='k',
alpha=0.4)
else:
raise ValueError("invalid mode. Must be 'hist2d' or 'scatter'.")
axes[i, j].set_ylim(*rdict[ai])
axes[i, j].set_xlim(*rdict[aj])
axes[j, i].set_ylim(*rdict[aj])
axes[j, i].set_xlim(*rdict[ai])
# diagonal labels
for a, n in zip(keys, np.arange(len(keys))):
axes[n, n].annotate(a + '\n' + udict[a][1], (0.5, 0.5),
xycoords='axes fraction',
ha='center', va='center', fontsize=8)
axes[n, n].set_xlim(*rdict[a])
axes[n, n].set_ylim(*rdict[a])
# switch on alternating axes
for i, j in zip(range(numvar), itertools.cycle((-1, 0))):
axes[j, i].xaxis.set_visible(True)
for label in axes[j, i].get_xticklabels():
label.set_rotation(90)
axes[i, j].yaxis.set_visible(True)
return fig, axes
|
[
"Plot",
"analytes",
"against",
"each",
"other",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/plot.py#L256-L390
|
[
"def",
"crossplot",
"(",
"dat",
",",
"keys",
"=",
"None",
",",
"lognorm",
"=",
"True",
",",
"bins",
"=",
"25",
",",
"figsize",
"=",
"(",
"12",
",",
"12",
")",
",",
"colourful",
"=",
"True",
",",
"focus_stage",
"=",
"None",
",",
"denominator",
"=",
"None",
",",
"mode",
"=",
"'hist2d'",
",",
"cmap",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"keys",
"is",
"None",
":",
"keys",
"=",
"list",
"(",
"dat",
".",
"keys",
"(",
")",
")",
"numvar",
"=",
"len",
"(",
"keys",
")",
"if",
"figsize",
"[",
"0",
"]",
"<",
"1.5",
"*",
"numvar",
":",
"figsize",
"=",
"[",
"1.5",
"*",
"numvar",
"]",
"*",
"2",
"fig",
",",
"axes",
"=",
"plt",
".",
"subplots",
"(",
"nrows",
"=",
"numvar",
",",
"ncols",
"=",
"numvar",
",",
"figsize",
"=",
"(",
"12",
",",
"12",
")",
")",
"fig",
".",
"subplots_adjust",
"(",
"hspace",
"=",
"0.05",
",",
"wspace",
"=",
"0.05",
")",
"for",
"ax",
"in",
"axes",
".",
"flat",
":",
"ax",
".",
"xaxis",
".",
"set_visible",
"(",
"False",
")",
"ax",
".",
"yaxis",
".",
"set_visible",
"(",
"False",
")",
"if",
"ax",
".",
"is_first_col",
"(",
")",
":",
"ax",
".",
"yaxis",
".",
"set_ticks_position",
"(",
"'left'",
")",
"if",
"ax",
".",
"is_last_col",
"(",
")",
":",
"ax",
".",
"yaxis",
".",
"set_ticks_position",
"(",
"'right'",
")",
"if",
"ax",
".",
"is_first_row",
"(",
")",
":",
"ax",
".",
"xaxis",
".",
"set_ticks_position",
"(",
"'top'",
")",
"if",
"ax",
".",
"is_last_row",
"(",
")",
":",
"ax",
".",
"xaxis",
".",
"set_ticks_position",
"(",
"'bottom'",
")",
"# set up colour scales",
"if",
"colourful",
":",
"cmlist",
"=",
"[",
"'Blues'",
",",
"'BuGn'",
",",
"'BuPu'",
",",
"'GnBu'",
",",
"'Greens'",
",",
"'Greys'",
",",
"'Oranges'",
",",
"'OrRd'",
",",
"'PuBu'",
",",
"'PuBuGn'",
",",
"'PuRd'",
",",
"'Purples'",
",",
"'RdPu'",
",",
"'Reds'",
",",
"'YlGn'",
",",
"'YlGnBu'",
",",
"'YlOrBr'",
",",
"'YlOrRd'",
"]",
"else",
":",
"cmlist",
"=",
"[",
"'Greys'",
"]",
"if",
"cmap",
"is",
"None",
"and",
"mode",
"==",
"'scatter'",
":",
"cmap",
"=",
"{",
"k",
":",
"'k'",
"for",
"k",
"in",
"dat",
".",
"keys",
"(",
")",
"}",
"while",
"len",
"(",
"cmlist",
")",
"<",
"len",
"(",
"keys",
")",
":",
"cmlist",
"*=",
"2",
"# isolate nominal_values for all keys",
"focus",
"=",
"{",
"k",
":",
"nominal_values",
"(",
"dat",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"keys",
"}",
"# determine units for all keys",
"udict",
"=",
"{",
"a",
":",
"unitpicker",
"(",
"np",
".",
"nanmean",
"(",
"focus",
"[",
"a",
"]",
")",
",",
"focus_stage",
"=",
"focus_stage",
",",
"denominator",
"=",
"denominator",
")",
"for",
"a",
"in",
"keys",
"}",
"# determine ranges for all analytes",
"rdict",
"=",
"{",
"a",
":",
"(",
"np",
".",
"nanmin",
"(",
"focus",
"[",
"a",
"]",
"*",
"udict",
"[",
"a",
"]",
"[",
"0",
"]",
")",
",",
"np",
".",
"nanmax",
"(",
"focus",
"[",
"a",
"]",
"*",
"udict",
"[",
"a",
"]",
"[",
"0",
"]",
")",
")",
"for",
"a",
"in",
"keys",
"}",
"for",
"i",
",",
"j",
"in",
"tqdm",
"(",
"zip",
"(",
"*",
"np",
".",
"triu_indices_from",
"(",
"axes",
",",
"k",
"=",
"1",
")",
")",
",",
"desc",
"=",
"'Drawing Plots'",
",",
"total",
"=",
"sum",
"(",
"range",
"(",
"len",
"(",
"keys",
")",
")",
")",
")",
":",
"# get analytes",
"ai",
"=",
"keys",
"[",
"i",
"]",
"aj",
"=",
"keys",
"[",
"j",
"]",
"# remove nan, apply multipliers",
"pi",
"=",
"focus",
"[",
"ai",
"]",
"*",
"udict",
"[",
"ai",
"]",
"[",
"0",
"]",
"pj",
"=",
"focus",
"[",
"aj",
"]",
"*",
"udict",
"[",
"aj",
"]",
"[",
"0",
"]",
"# determine normalisation shceme",
"if",
"lognorm",
":",
"norm",
"=",
"mpl",
".",
"colors",
".",
"LogNorm",
"(",
")",
"else",
":",
"norm",
"=",
"None",
"# draw plots",
"if",
"mode",
"==",
"'hist2d'",
":",
"# remove nan",
"pi",
"=",
"pi",
"[",
"~",
"np",
".",
"isnan",
"(",
"pi",
")",
"]",
"pj",
"=",
"pj",
"[",
"~",
"np",
".",
"isnan",
"(",
"pj",
")",
"]",
"axes",
"[",
"i",
",",
"j",
"]",
".",
"hist2d",
"(",
"pj",
",",
"pi",
",",
"bins",
",",
"norm",
"=",
"norm",
",",
"cmap",
"=",
"plt",
".",
"get_cmap",
"(",
"cmlist",
"[",
"i",
"]",
")",
")",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"hist2d",
"(",
"pi",
",",
"pj",
",",
"bins",
",",
"norm",
"=",
"norm",
",",
"cmap",
"=",
"plt",
".",
"get_cmap",
"(",
"cmlist",
"[",
"j",
"]",
")",
")",
"elif",
"mode",
"==",
"'scatter'",
":",
"axes",
"[",
"i",
",",
"j",
"]",
".",
"scatter",
"(",
"pj",
",",
"pi",
",",
"s",
"=",
"10",
",",
"color",
"=",
"cmap",
"[",
"ai",
"]",
",",
"lw",
"=",
"0.5",
",",
"edgecolor",
"=",
"'k'",
",",
"alpha",
"=",
"0.4",
")",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"scatter",
"(",
"pi",
",",
"pj",
",",
"s",
"=",
"10",
",",
"color",
"=",
"cmap",
"[",
"aj",
"]",
",",
"lw",
"=",
"0.5",
",",
"edgecolor",
"=",
"'k'",
",",
"alpha",
"=",
"0.4",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"invalid mode. Must be 'hist2d' or 'scatter'.\"",
")",
"axes",
"[",
"i",
",",
"j",
"]",
".",
"set_ylim",
"(",
"*",
"rdict",
"[",
"ai",
"]",
")",
"axes",
"[",
"i",
",",
"j",
"]",
".",
"set_xlim",
"(",
"*",
"rdict",
"[",
"aj",
"]",
")",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"set_ylim",
"(",
"*",
"rdict",
"[",
"aj",
"]",
")",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"set_xlim",
"(",
"*",
"rdict",
"[",
"ai",
"]",
")",
"# diagonal labels",
"for",
"a",
",",
"n",
"in",
"zip",
"(",
"keys",
",",
"np",
".",
"arange",
"(",
"len",
"(",
"keys",
")",
")",
")",
":",
"axes",
"[",
"n",
",",
"n",
"]",
".",
"annotate",
"(",
"a",
"+",
"'\\n'",
"+",
"udict",
"[",
"a",
"]",
"[",
"1",
"]",
",",
"(",
"0.5",
",",
"0.5",
")",
",",
"xycoords",
"=",
"'axes fraction'",
",",
"ha",
"=",
"'center'",
",",
"va",
"=",
"'center'",
",",
"fontsize",
"=",
"8",
")",
"axes",
"[",
"n",
",",
"n",
"]",
".",
"set_xlim",
"(",
"*",
"rdict",
"[",
"a",
"]",
")",
"axes",
"[",
"n",
",",
"n",
"]",
".",
"set_ylim",
"(",
"*",
"rdict",
"[",
"a",
"]",
")",
"# switch on alternating axes",
"for",
"i",
",",
"j",
"in",
"zip",
"(",
"range",
"(",
"numvar",
")",
",",
"itertools",
".",
"cycle",
"(",
"(",
"-",
"1",
",",
"0",
")",
")",
")",
":",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"xaxis",
".",
"set_visible",
"(",
"True",
")",
"for",
"label",
"in",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"get_xticklabels",
"(",
")",
":",
"label",
".",
"set_rotation",
"(",
"90",
")",
"axes",
"[",
"i",
",",
"j",
"]",
".",
"yaxis",
".",
"set_visible",
"(",
"True",
")",
"return",
"fig",
",",
"axes"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
histograms
|
Plot histograms of all items in dat.
Parameters
----------
dat : dict
Data in {key: array} pairs.
keys : arra-like
The keys in dat that you want to plot. If None,
all are plotted.
bins : int
The number of bins in each histogram (default = 25)
logy : bool
If true, y axis is a log scale.
cmap : dict
The colours that the different items should be. If None,
all are grey.
Returns
-------
fig, axes
|
latools/helpers/plot.py
|
def histograms(dat, keys=None, bins=25, logy=False, cmap=None, ncol=4):
"""
Plot histograms of all items in dat.
Parameters
----------
dat : dict
Data in {key: array} pairs.
keys : arra-like
The keys in dat that you want to plot. If None,
all are plotted.
bins : int
The number of bins in each histogram (default = 25)
logy : bool
If true, y axis is a log scale.
cmap : dict
The colours that the different items should be. If None,
all are grey.
Returns
-------
fig, axes
"""
if keys is None:
keys = dat.keys()
ncol = int(ncol)
nrow = calc_nrow(len(keys), ncol)
fig, axs = plt.subplots(nrow, 4, figsize=[ncol * 2, nrow * 2])
pn = 0
for k, ax in zip(keys, axs.flat):
tmp = nominal_values(dat[k])
x = tmp[~np.isnan(tmp)]
if cmap is not None:
c = cmap[k]
else:
c = (0, 0, 0, 0.5)
ax.hist(x, bins=bins, color=c)
if logy:
ax.set_yscale('log')
ylab = '$log_{10}(n)$'
else:
ylab = 'n'
ax.set_ylim(1, ax.get_ylim()[1])
if ax.is_first_col():
ax.set_ylabel(ylab)
ax.set_yticklabels([])
ax.text(.95, .95, k, ha='right', va='top', transform=ax.transAxes)
pn += 1
for ax in axs.flat[pn:]:
ax.set_visible(False)
fig.tight_layout()
return fig, axs
|
def histograms(dat, keys=None, bins=25, logy=False, cmap=None, ncol=4):
"""
Plot histograms of all items in dat.
Parameters
----------
dat : dict
Data in {key: array} pairs.
keys : arra-like
The keys in dat that you want to plot. If None,
all are plotted.
bins : int
The number of bins in each histogram (default = 25)
logy : bool
If true, y axis is a log scale.
cmap : dict
The colours that the different items should be. If None,
all are grey.
Returns
-------
fig, axes
"""
if keys is None:
keys = dat.keys()
ncol = int(ncol)
nrow = calc_nrow(len(keys), ncol)
fig, axs = plt.subplots(nrow, 4, figsize=[ncol * 2, nrow * 2])
pn = 0
for k, ax in zip(keys, axs.flat):
tmp = nominal_values(dat[k])
x = tmp[~np.isnan(tmp)]
if cmap is not None:
c = cmap[k]
else:
c = (0, 0, 0, 0.5)
ax.hist(x, bins=bins, color=c)
if logy:
ax.set_yscale('log')
ylab = '$log_{10}(n)$'
else:
ylab = 'n'
ax.set_ylim(1, ax.get_ylim()[1])
if ax.is_first_col():
ax.set_ylabel(ylab)
ax.set_yticklabels([])
ax.text(.95, .95, k, ha='right', va='top', transform=ax.transAxes)
pn += 1
for ax in axs.flat[pn:]:
ax.set_visible(False)
fig.tight_layout()
return fig, axs
|
[
"Plot",
"histograms",
"of",
"all",
"items",
"in",
"dat",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/plot.py#L393-L456
|
[
"def",
"histograms",
"(",
"dat",
",",
"keys",
"=",
"None",
",",
"bins",
"=",
"25",
",",
"logy",
"=",
"False",
",",
"cmap",
"=",
"None",
",",
"ncol",
"=",
"4",
")",
":",
"if",
"keys",
"is",
"None",
":",
"keys",
"=",
"dat",
".",
"keys",
"(",
")",
"ncol",
"=",
"int",
"(",
"ncol",
")",
"nrow",
"=",
"calc_nrow",
"(",
"len",
"(",
"keys",
")",
",",
"ncol",
")",
"fig",
",",
"axs",
"=",
"plt",
".",
"subplots",
"(",
"nrow",
",",
"4",
",",
"figsize",
"=",
"[",
"ncol",
"*",
"2",
",",
"nrow",
"*",
"2",
"]",
")",
"pn",
"=",
"0",
"for",
"k",
",",
"ax",
"in",
"zip",
"(",
"keys",
",",
"axs",
".",
"flat",
")",
":",
"tmp",
"=",
"nominal_values",
"(",
"dat",
"[",
"k",
"]",
")",
"x",
"=",
"tmp",
"[",
"~",
"np",
".",
"isnan",
"(",
"tmp",
")",
"]",
"if",
"cmap",
"is",
"not",
"None",
":",
"c",
"=",
"cmap",
"[",
"k",
"]",
"else",
":",
"c",
"=",
"(",
"0",
",",
"0",
",",
"0",
",",
"0.5",
")",
"ax",
".",
"hist",
"(",
"x",
",",
"bins",
"=",
"bins",
",",
"color",
"=",
"c",
")",
"if",
"logy",
":",
"ax",
".",
"set_yscale",
"(",
"'log'",
")",
"ylab",
"=",
"'$log_{10}(n)$'",
"else",
":",
"ylab",
"=",
"'n'",
"ax",
".",
"set_ylim",
"(",
"1",
",",
"ax",
".",
"get_ylim",
"(",
")",
"[",
"1",
"]",
")",
"if",
"ax",
".",
"is_first_col",
"(",
")",
":",
"ax",
".",
"set_ylabel",
"(",
"ylab",
")",
"ax",
".",
"set_yticklabels",
"(",
"[",
"]",
")",
"ax",
".",
"text",
"(",
".95",
",",
".95",
",",
"k",
",",
"ha",
"=",
"'right'",
",",
"va",
"=",
"'top'",
",",
"transform",
"=",
"ax",
".",
"transAxes",
")",
"pn",
"+=",
"1",
"for",
"ax",
"in",
"axs",
".",
"flat",
"[",
"pn",
":",
"]",
":",
"ax",
".",
"set_visible",
"(",
"False",
")",
"fig",
".",
"tight_layout",
"(",
")",
"return",
"fig",
",",
"axs"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
autorange_plot
|
Function for visualising the autorange mechanism.
Parameters
----------
t : array-like
Independent variable (usually time).
sig : array-like
Dependent signal, with distinctive 'on' and 'off' regions.
gwin : int
The window used for calculating first derivative.
Defaults to 7.
swin : int
The window ised for signal smoothing. If None, gwin // 2.
win : int
The width (c +/- win) of the transition data subsets.
Defaults to 20.
on_mult and off_mult : tuple, len=2
Control the width of the excluded transition regions, which is defined
relative to the peak full-width-half-maximum (FWHM) of the transition
gradient. The region n * FHWM below the transition, and m * FWHM above
the tranision will be excluded, where (n, m) are specified in `on_mult`
and `off_mult`.
`on_mult` and `off_mult` apply to the off-on and on-off transitions,
respectively.
Defaults to (1.5, 1) and (1, 1.5).
nbin : ind
Used to calculate the number of bins in the data histogram.
bins = len(sig) // nbin
Returns
-------
fig, axes
|
latools/helpers/plot.py
|
def autorange_plot(t, sig, gwin=7, swin=None, win=30,
on_mult=(1.5, 1.), off_mult=(1., 1.5),
nbin=10, thresh=None):
"""
Function for visualising the autorange mechanism.
Parameters
----------
t : array-like
Independent variable (usually time).
sig : array-like
Dependent signal, with distinctive 'on' and 'off' regions.
gwin : int
The window used for calculating first derivative.
Defaults to 7.
swin : int
The window ised for signal smoothing. If None, gwin // 2.
win : int
The width (c +/- win) of the transition data subsets.
Defaults to 20.
on_mult and off_mult : tuple, len=2
Control the width of the excluded transition regions, which is defined
relative to the peak full-width-half-maximum (FWHM) of the transition
gradient. The region n * FHWM below the transition, and m * FWHM above
the tranision will be excluded, where (n, m) are specified in `on_mult`
and `off_mult`.
`on_mult` and `off_mult` apply to the off-on and on-off transitions,
respectively.
Defaults to (1.5, 1) and (1, 1.5).
nbin : ind
Used to calculate the number of bins in the data histogram.
bins = len(sig) // nbin
Returns
-------
fig, axes
"""
if swin is None:
swin = gwin // 2
sigs = fastsmooth(sig, swin)
# perform autorange calculations
# bins = 50
bins = sig.size // nbin
kde_x = np.linspace(sig.min(), sig.max(), bins)
kde = gaussian_kde(sigs)
yd = kde.pdf(kde_x)
mins = findmins(kde_x, yd) # find minima in kde
if thresh is not None:
mins = [thresh]
if len(mins) > 0:
bkg = sigs < (mins[0]) # set background as lowest distribution
else:
bkg = np.ones(sig.size, dtype=bool)
# bkg[0] = True # the first value must always be background
# assign rough background and signal regions based on kde minima
fbkg = bkg
fsig = ~bkg
g = abs(fastgrad(sigs, gwin)) # calculate gradient of signal
# 2. determine the approximate index of each transition
zeros = bool_2_indices(fsig)
if zeros is not None:
zeros = zeros.flatten()
lohi = []
pgs = []
excl = []
tps = []
failed = []
for z in zeros: # for each approximate transition
# isolate the data around the transition
if z - win < 0:
lo = gwin // 2
hi = int(z + win)
elif z + win > (len(sig) - gwin // 2):
lo = int(z - win)
hi = len(sig) - gwin // 2
else:
lo = int(z - win)
hi = int(z + win)
xs = t[lo:hi]
ys = g[lo:hi]
lohi.append([lo, hi])
# determine type of transition (on/off)
mid = (hi + lo) // 2
tp = sigs[mid + 3] > sigs[mid - 3] # True if 'on' transition.
tps.append(tp)
c = t[z] # center of transition
width = (t[1] - t[0]) * 2 # initial width guess
try:
pg, _ = curve_fit(gauss, xs, ys,
p0=(np.nanmax(ys),
c,
width),
sigma=(xs - c)**2 + .01)
pgs.append(pg)
fwhm = abs(2 * pg[-1] * np.sqrt(2 * np.log(2)))
# apply on_mult or off_mult, as appropriate.
if tp:
lim = np.array([-fwhm, fwhm]) * on_mult + pg[1]
else:
lim = np.array([-fwhm, fwhm]) * off_mult + pg[1]
excl.append(lim)
fbkg[(t > lim[0]) & (t < lim[1])] = False
fsig[(t > lim[0]) & (t < lim[1])] = False
failed.append(False)
except RuntimeError:
failed.append(True)
lohi.append([np.nan, np.nan])
pgs.append([np.nan, np.nan, np.nan])
excl.append([np.nan, np.nan])
tps.append(tp)
pass
else:
zeros = []
# make plot
nrows = 2 + len(zeros) // 2 + len(zeros) % 2
fig, axs = plt.subplots(nrows, 2, figsize=(6, 4 + 1.5 * nrows))
# Trace
ax1, ax2, ax3, ax4 = axs.flat[:4]
ax4.set_visible(False)
# widen ax1 & 3
for ax in [ax1, ax3]:
p = ax.axes.get_position()
p2 = [p.x0, p.y0, p.width * 1.75, p.height]
ax.axes.set_position(p2)
# move ax3 up
p = ax3.axes.get_position()
p2 = [p.x0, p.y0 + 0.15 * p.height, p.width, p.height]
ax3.axes.set_position(p2)
# truncate ax2
p = ax2.axes.get_position()
p2 = [p.x0 + p.width * 0.6, p.y0, p.width * 0.4, p.height]
ax2.axes.set_position(p2)
# plot traces and gradient
ax1.plot(t, sig, color='k', lw=1)
ax1.set_xticklabels([])
ax1.set_ylabel('Signal')
ax3.plot(t, g, color='k', lw=1)
ax3.set_xlabel('Time (s)')
ax3.set_ylabel('Gradient')
# plot kde
ax2.fill_betweenx(kde_x, yd, color=(0, 0, 0, 0.2))
ax2.plot(yd, kde_x, color='k')
ax2.set_ylim(ax1.get_ylim())
ax2.set_yticklabels([])
ax2.set_xlabel('Data\nDensity')
# limit
for ax in [ax1, ax2]:
ax.axhline(mins[0], color='k', ls='dashed', alpha=0.4)
if len(zeros) > 0:
# zeros
for z in zeros:
ax1.axvline(t[z], color='r', alpha=0.5)
ax3.axvline(t[z], color='r', alpha=0.5)
# plot individual transitions
n = 1
for (lo, hi), lim, tp, pg, fail, ax in zip(lohi, excl, tps, pgs, failed, axs.flat[4:]):
# plot region on gradient axis
ax3.axvspan(t[lo], t[hi], color='r', alpha=0.1, zorder=-2)
# plot individual transitions
x = t[lo:hi]
y = g[lo:hi]
ys = sig[lo:hi]
ax.scatter(x, y, color='k', marker='x', zorder=-1, s=10)
ax.set_yticklabels([])
ax.set_ylim(rangecalc(y))
tax = ax.twinx()
tax.plot(x, ys, color='k', alpha=0.3, zorder=-5)
tax.set_yticklabels([])
tax.set_ylim(rangecalc(ys))
# plot fitted gaussian
xn = np.linspace(x.min(), x.max(), 100)
ax.plot(xn, gauss(xn, *pg), color='r', alpha=0.5)
# plot center and excluded region
ax.axvline(pg[1], color='b', alpha=0.5)
ax.axvspan(*lim, color='b', alpha=0.1, zorder=-2)
ax1.axvspan(*lim, color='b', alpha=0.1, zorder=-2)
if tp:
ax.text(.05, .95, '{} (on)'.format(n), ha='left',
va='top', transform=ax.transAxes)
else:
ax.text(.95, .95, '{} (off)'.format(n), ha='right',
va='top', transform=ax.transAxes)
if ax.is_last_row():
ax.set_xlabel('Time (s)')
if ax.is_first_col():
ax.set_ylabel('Gradient (x)')
if ax.is_last_col():
tax.set_ylabel('Signal (line)')
if fail:
ax.axes.set_facecolor((1, 0, 0, 0.2))
ax.text(.5, .5, 'FAIL', ha='center', va='center',
fontsize=16, color=(1, 0, 0, 0.5), transform=ax.transAxes)
n += 1
# should never be, but just in case...
if len(zeros) % 2 == 1:
axs.flat[-1].set_visible = False
return fig, axs
|
def autorange_plot(t, sig, gwin=7, swin=None, win=30,
on_mult=(1.5, 1.), off_mult=(1., 1.5),
nbin=10, thresh=None):
"""
Function for visualising the autorange mechanism.
Parameters
----------
t : array-like
Independent variable (usually time).
sig : array-like
Dependent signal, with distinctive 'on' and 'off' regions.
gwin : int
The window used for calculating first derivative.
Defaults to 7.
swin : int
The window ised for signal smoothing. If None, gwin // 2.
win : int
The width (c +/- win) of the transition data subsets.
Defaults to 20.
on_mult and off_mult : tuple, len=2
Control the width of the excluded transition regions, which is defined
relative to the peak full-width-half-maximum (FWHM) of the transition
gradient. The region n * FHWM below the transition, and m * FWHM above
the tranision will be excluded, where (n, m) are specified in `on_mult`
and `off_mult`.
`on_mult` and `off_mult` apply to the off-on and on-off transitions,
respectively.
Defaults to (1.5, 1) and (1, 1.5).
nbin : ind
Used to calculate the number of bins in the data histogram.
bins = len(sig) // nbin
Returns
-------
fig, axes
"""
if swin is None:
swin = gwin // 2
sigs = fastsmooth(sig, swin)
# perform autorange calculations
# bins = 50
bins = sig.size // nbin
kde_x = np.linspace(sig.min(), sig.max(), bins)
kde = gaussian_kde(sigs)
yd = kde.pdf(kde_x)
mins = findmins(kde_x, yd) # find minima in kde
if thresh is not None:
mins = [thresh]
if len(mins) > 0:
bkg = sigs < (mins[0]) # set background as lowest distribution
else:
bkg = np.ones(sig.size, dtype=bool)
# bkg[0] = True # the first value must always be background
# assign rough background and signal regions based on kde minima
fbkg = bkg
fsig = ~bkg
g = abs(fastgrad(sigs, gwin)) # calculate gradient of signal
# 2. determine the approximate index of each transition
zeros = bool_2_indices(fsig)
if zeros is not None:
zeros = zeros.flatten()
lohi = []
pgs = []
excl = []
tps = []
failed = []
for z in zeros: # for each approximate transition
# isolate the data around the transition
if z - win < 0:
lo = gwin // 2
hi = int(z + win)
elif z + win > (len(sig) - gwin // 2):
lo = int(z - win)
hi = len(sig) - gwin // 2
else:
lo = int(z - win)
hi = int(z + win)
xs = t[lo:hi]
ys = g[lo:hi]
lohi.append([lo, hi])
# determine type of transition (on/off)
mid = (hi + lo) // 2
tp = sigs[mid + 3] > sigs[mid - 3] # True if 'on' transition.
tps.append(tp)
c = t[z] # center of transition
width = (t[1] - t[0]) * 2 # initial width guess
try:
pg, _ = curve_fit(gauss, xs, ys,
p0=(np.nanmax(ys),
c,
width),
sigma=(xs - c)**2 + .01)
pgs.append(pg)
fwhm = abs(2 * pg[-1] * np.sqrt(2 * np.log(2)))
# apply on_mult or off_mult, as appropriate.
if tp:
lim = np.array([-fwhm, fwhm]) * on_mult + pg[1]
else:
lim = np.array([-fwhm, fwhm]) * off_mult + pg[1]
excl.append(lim)
fbkg[(t > lim[0]) & (t < lim[1])] = False
fsig[(t > lim[0]) & (t < lim[1])] = False
failed.append(False)
except RuntimeError:
failed.append(True)
lohi.append([np.nan, np.nan])
pgs.append([np.nan, np.nan, np.nan])
excl.append([np.nan, np.nan])
tps.append(tp)
pass
else:
zeros = []
# make plot
nrows = 2 + len(zeros) // 2 + len(zeros) % 2
fig, axs = plt.subplots(nrows, 2, figsize=(6, 4 + 1.5 * nrows))
# Trace
ax1, ax2, ax3, ax4 = axs.flat[:4]
ax4.set_visible(False)
# widen ax1 & 3
for ax in [ax1, ax3]:
p = ax.axes.get_position()
p2 = [p.x0, p.y0, p.width * 1.75, p.height]
ax.axes.set_position(p2)
# move ax3 up
p = ax3.axes.get_position()
p2 = [p.x0, p.y0 + 0.15 * p.height, p.width, p.height]
ax3.axes.set_position(p2)
# truncate ax2
p = ax2.axes.get_position()
p2 = [p.x0 + p.width * 0.6, p.y0, p.width * 0.4, p.height]
ax2.axes.set_position(p2)
# plot traces and gradient
ax1.plot(t, sig, color='k', lw=1)
ax1.set_xticklabels([])
ax1.set_ylabel('Signal')
ax3.plot(t, g, color='k', lw=1)
ax3.set_xlabel('Time (s)')
ax3.set_ylabel('Gradient')
# plot kde
ax2.fill_betweenx(kde_x, yd, color=(0, 0, 0, 0.2))
ax2.plot(yd, kde_x, color='k')
ax2.set_ylim(ax1.get_ylim())
ax2.set_yticklabels([])
ax2.set_xlabel('Data\nDensity')
# limit
for ax in [ax1, ax2]:
ax.axhline(mins[0], color='k', ls='dashed', alpha=0.4)
if len(zeros) > 0:
# zeros
for z in zeros:
ax1.axvline(t[z], color='r', alpha=0.5)
ax3.axvline(t[z], color='r', alpha=0.5)
# plot individual transitions
n = 1
for (lo, hi), lim, tp, pg, fail, ax in zip(lohi, excl, tps, pgs, failed, axs.flat[4:]):
# plot region on gradient axis
ax3.axvspan(t[lo], t[hi], color='r', alpha=0.1, zorder=-2)
# plot individual transitions
x = t[lo:hi]
y = g[lo:hi]
ys = sig[lo:hi]
ax.scatter(x, y, color='k', marker='x', zorder=-1, s=10)
ax.set_yticklabels([])
ax.set_ylim(rangecalc(y))
tax = ax.twinx()
tax.plot(x, ys, color='k', alpha=0.3, zorder=-5)
tax.set_yticklabels([])
tax.set_ylim(rangecalc(ys))
# plot fitted gaussian
xn = np.linspace(x.min(), x.max(), 100)
ax.plot(xn, gauss(xn, *pg), color='r', alpha=0.5)
# plot center and excluded region
ax.axvline(pg[1], color='b', alpha=0.5)
ax.axvspan(*lim, color='b', alpha=0.1, zorder=-2)
ax1.axvspan(*lim, color='b', alpha=0.1, zorder=-2)
if tp:
ax.text(.05, .95, '{} (on)'.format(n), ha='left',
va='top', transform=ax.transAxes)
else:
ax.text(.95, .95, '{} (off)'.format(n), ha='right',
va='top', transform=ax.transAxes)
if ax.is_last_row():
ax.set_xlabel('Time (s)')
if ax.is_first_col():
ax.set_ylabel('Gradient (x)')
if ax.is_last_col():
tax.set_ylabel('Signal (line)')
if fail:
ax.axes.set_facecolor((1, 0, 0, 0.2))
ax.text(.5, .5, 'FAIL', ha='center', va='center',
fontsize=16, color=(1, 0, 0, 0.5), transform=ax.transAxes)
n += 1
# should never be, but just in case...
if len(zeros) % 2 == 1:
axs.flat[-1].set_visible = False
return fig, axs
|
[
"Function",
"for",
"visualising",
"the",
"autorange",
"mechanism",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/plot.py#L459-L690
|
[
"def",
"autorange_plot",
"(",
"t",
",",
"sig",
",",
"gwin",
"=",
"7",
",",
"swin",
"=",
"None",
",",
"win",
"=",
"30",
",",
"on_mult",
"=",
"(",
"1.5",
",",
"1.",
")",
",",
"off_mult",
"=",
"(",
"1.",
",",
"1.5",
")",
",",
"nbin",
"=",
"10",
",",
"thresh",
"=",
"None",
")",
":",
"if",
"swin",
"is",
"None",
":",
"swin",
"=",
"gwin",
"//",
"2",
"sigs",
"=",
"fastsmooth",
"(",
"sig",
",",
"swin",
")",
"# perform autorange calculations",
"# bins = 50",
"bins",
"=",
"sig",
".",
"size",
"//",
"nbin",
"kde_x",
"=",
"np",
".",
"linspace",
"(",
"sig",
".",
"min",
"(",
")",
",",
"sig",
".",
"max",
"(",
")",
",",
"bins",
")",
"kde",
"=",
"gaussian_kde",
"(",
"sigs",
")",
"yd",
"=",
"kde",
".",
"pdf",
"(",
"kde_x",
")",
"mins",
"=",
"findmins",
"(",
"kde_x",
",",
"yd",
")",
"# find minima in kde",
"if",
"thresh",
"is",
"not",
"None",
":",
"mins",
"=",
"[",
"thresh",
"]",
"if",
"len",
"(",
"mins",
")",
">",
"0",
":",
"bkg",
"=",
"sigs",
"<",
"(",
"mins",
"[",
"0",
"]",
")",
"# set background as lowest distribution",
"else",
":",
"bkg",
"=",
"np",
".",
"ones",
"(",
"sig",
".",
"size",
",",
"dtype",
"=",
"bool",
")",
"# bkg[0] = True # the first value must always be background",
"# assign rough background and signal regions based on kde minima",
"fbkg",
"=",
"bkg",
"fsig",
"=",
"~",
"bkg",
"g",
"=",
"abs",
"(",
"fastgrad",
"(",
"sigs",
",",
"gwin",
")",
")",
"# calculate gradient of signal",
"# 2. determine the approximate index of each transition",
"zeros",
"=",
"bool_2_indices",
"(",
"fsig",
")",
"if",
"zeros",
"is",
"not",
"None",
":",
"zeros",
"=",
"zeros",
".",
"flatten",
"(",
")",
"lohi",
"=",
"[",
"]",
"pgs",
"=",
"[",
"]",
"excl",
"=",
"[",
"]",
"tps",
"=",
"[",
"]",
"failed",
"=",
"[",
"]",
"for",
"z",
"in",
"zeros",
":",
"# for each approximate transition",
"# isolate the data around the transition",
"if",
"z",
"-",
"win",
"<",
"0",
":",
"lo",
"=",
"gwin",
"//",
"2",
"hi",
"=",
"int",
"(",
"z",
"+",
"win",
")",
"elif",
"z",
"+",
"win",
">",
"(",
"len",
"(",
"sig",
")",
"-",
"gwin",
"//",
"2",
")",
":",
"lo",
"=",
"int",
"(",
"z",
"-",
"win",
")",
"hi",
"=",
"len",
"(",
"sig",
")",
"-",
"gwin",
"//",
"2",
"else",
":",
"lo",
"=",
"int",
"(",
"z",
"-",
"win",
")",
"hi",
"=",
"int",
"(",
"z",
"+",
"win",
")",
"xs",
"=",
"t",
"[",
"lo",
":",
"hi",
"]",
"ys",
"=",
"g",
"[",
"lo",
":",
"hi",
"]",
"lohi",
".",
"append",
"(",
"[",
"lo",
",",
"hi",
"]",
")",
"# determine type of transition (on/off)",
"mid",
"=",
"(",
"hi",
"+",
"lo",
")",
"//",
"2",
"tp",
"=",
"sigs",
"[",
"mid",
"+",
"3",
"]",
">",
"sigs",
"[",
"mid",
"-",
"3",
"]",
"# True if 'on' transition.",
"tps",
".",
"append",
"(",
"tp",
")",
"c",
"=",
"t",
"[",
"z",
"]",
"# center of transition",
"width",
"=",
"(",
"t",
"[",
"1",
"]",
"-",
"t",
"[",
"0",
"]",
")",
"*",
"2",
"# initial width guess",
"try",
":",
"pg",
",",
"_",
"=",
"curve_fit",
"(",
"gauss",
",",
"xs",
",",
"ys",
",",
"p0",
"=",
"(",
"np",
".",
"nanmax",
"(",
"ys",
")",
",",
"c",
",",
"width",
")",
",",
"sigma",
"=",
"(",
"xs",
"-",
"c",
")",
"**",
"2",
"+",
".01",
")",
"pgs",
".",
"append",
"(",
"pg",
")",
"fwhm",
"=",
"abs",
"(",
"2",
"*",
"pg",
"[",
"-",
"1",
"]",
"*",
"np",
".",
"sqrt",
"(",
"2",
"*",
"np",
".",
"log",
"(",
"2",
")",
")",
")",
"# apply on_mult or off_mult, as appropriate.",
"if",
"tp",
":",
"lim",
"=",
"np",
".",
"array",
"(",
"[",
"-",
"fwhm",
",",
"fwhm",
"]",
")",
"*",
"on_mult",
"+",
"pg",
"[",
"1",
"]",
"else",
":",
"lim",
"=",
"np",
".",
"array",
"(",
"[",
"-",
"fwhm",
",",
"fwhm",
"]",
")",
"*",
"off_mult",
"+",
"pg",
"[",
"1",
"]",
"excl",
".",
"append",
"(",
"lim",
")",
"fbkg",
"[",
"(",
"t",
">",
"lim",
"[",
"0",
"]",
")",
"&",
"(",
"t",
"<",
"lim",
"[",
"1",
"]",
")",
"]",
"=",
"False",
"fsig",
"[",
"(",
"t",
">",
"lim",
"[",
"0",
"]",
")",
"&",
"(",
"t",
"<",
"lim",
"[",
"1",
"]",
")",
"]",
"=",
"False",
"failed",
".",
"append",
"(",
"False",
")",
"except",
"RuntimeError",
":",
"failed",
".",
"append",
"(",
"True",
")",
"lohi",
".",
"append",
"(",
"[",
"np",
".",
"nan",
",",
"np",
".",
"nan",
"]",
")",
"pgs",
".",
"append",
"(",
"[",
"np",
".",
"nan",
",",
"np",
".",
"nan",
",",
"np",
".",
"nan",
"]",
")",
"excl",
".",
"append",
"(",
"[",
"np",
".",
"nan",
",",
"np",
".",
"nan",
"]",
")",
"tps",
".",
"append",
"(",
"tp",
")",
"pass",
"else",
":",
"zeros",
"=",
"[",
"]",
"# make plot",
"nrows",
"=",
"2",
"+",
"len",
"(",
"zeros",
")",
"//",
"2",
"+",
"len",
"(",
"zeros",
")",
"%",
"2",
"fig",
",",
"axs",
"=",
"plt",
".",
"subplots",
"(",
"nrows",
",",
"2",
",",
"figsize",
"=",
"(",
"6",
",",
"4",
"+",
"1.5",
"*",
"nrows",
")",
")",
"# Trace",
"ax1",
",",
"ax2",
",",
"ax3",
",",
"ax4",
"=",
"axs",
".",
"flat",
"[",
":",
"4",
"]",
"ax4",
".",
"set_visible",
"(",
"False",
")",
"# widen ax1 & 3",
"for",
"ax",
"in",
"[",
"ax1",
",",
"ax3",
"]",
":",
"p",
"=",
"ax",
".",
"axes",
".",
"get_position",
"(",
")",
"p2",
"=",
"[",
"p",
".",
"x0",
",",
"p",
".",
"y0",
",",
"p",
".",
"width",
"*",
"1.75",
",",
"p",
".",
"height",
"]",
"ax",
".",
"axes",
".",
"set_position",
"(",
"p2",
")",
"# move ax3 up",
"p",
"=",
"ax3",
".",
"axes",
".",
"get_position",
"(",
")",
"p2",
"=",
"[",
"p",
".",
"x0",
",",
"p",
".",
"y0",
"+",
"0.15",
"*",
"p",
".",
"height",
",",
"p",
".",
"width",
",",
"p",
".",
"height",
"]",
"ax3",
".",
"axes",
".",
"set_position",
"(",
"p2",
")",
"# truncate ax2",
"p",
"=",
"ax2",
".",
"axes",
".",
"get_position",
"(",
")",
"p2",
"=",
"[",
"p",
".",
"x0",
"+",
"p",
".",
"width",
"*",
"0.6",
",",
"p",
".",
"y0",
",",
"p",
".",
"width",
"*",
"0.4",
",",
"p",
".",
"height",
"]",
"ax2",
".",
"axes",
".",
"set_position",
"(",
"p2",
")",
"# plot traces and gradient",
"ax1",
".",
"plot",
"(",
"t",
",",
"sig",
",",
"color",
"=",
"'k'",
",",
"lw",
"=",
"1",
")",
"ax1",
".",
"set_xticklabels",
"(",
"[",
"]",
")",
"ax1",
".",
"set_ylabel",
"(",
"'Signal'",
")",
"ax3",
".",
"plot",
"(",
"t",
",",
"g",
",",
"color",
"=",
"'k'",
",",
"lw",
"=",
"1",
")",
"ax3",
".",
"set_xlabel",
"(",
"'Time (s)'",
")",
"ax3",
".",
"set_ylabel",
"(",
"'Gradient'",
")",
"# plot kde",
"ax2",
".",
"fill_betweenx",
"(",
"kde_x",
",",
"yd",
",",
"color",
"=",
"(",
"0",
",",
"0",
",",
"0",
",",
"0.2",
")",
")",
"ax2",
".",
"plot",
"(",
"yd",
",",
"kde_x",
",",
"color",
"=",
"'k'",
")",
"ax2",
".",
"set_ylim",
"(",
"ax1",
".",
"get_ylim",
"(",
")",
")",
"ax2",
".",
"set_yticklabels",
"(",
"[",
"]",
")",
"ax2",
".",
"set_xlabel",
"(",
"'Data\\nDensity'",
")",
"# limit",
"for",
"ax",
"in",
"[",
"ax1",
",",
"ax2",
"]",
":",
"ax",
".",
"axhline",
"(",
"mins",
"[",
"0",
"]",
",",
"color",
"=",
"'k'",
",",
"ls",
"=",
"'dashed'",
",",
"alpha",
"=",
"0.4",
")",
"if",
"len",
"(",
"zeros",
")",
">",
"0",
":",
"# zeros",
"for",
"z",
"in",
"zeros",
":",
"ax1",
".",
"axvline",
"(",
"t",
"[",
"z",
"]",
",",
"color",
"=",
"'r'",
",",
"alpha",
"=",
"0.5",
")",
"ax3",
".",
"axvline",
"(",
"t",
"[",
"z",
"]",
",",
"color",
"=",
"'r'",
",",
"alpha",
"=",
"0.5",
")",
"# plot individual transitions",
"n",
"=",
"1",
"for",
"(",
"lo",
",",
"hi",
")",
",",
"lim",
",",
"tp",
",",
"pg",
",",
"fail",
",",
"ax",
"in",
"zip",
"(",
"lohi",
",",
"excl",
",",
"tps",
",",
"pgs",
",",
"failed",
",",
"axs",
".",
"flat",
"[",
"4",
":",
"]",
")",
":",
"# plot region on gradient axis",
"ax3",
".",
"axvspan",
"(",
"t",
"[",
"lo",
"]",
",",
"t",
"[",
"hi",
"]",
",",
"color",
"=",
"'r'",
",",
"alpha",
"=",
"0.1",
",",
"zorder",
"=",
"-",
"2",
")",
"# plot individual transitions",
"x",
"=",
"t",
"[",
"lo",
":",
"hi",
"]",
"y",
"=",
"g",
"[",
"lo",
":",
"hi",
"]",
"ys",
"=",
"sig",
"[",
"lo",
":",
"hi",
"]",
"ax",
".",
"scatter",
"(",
"x",
",",
"y",
",",
"color",
"=",
"'k'",
",",
"marker",
"=",
"'x'",
",",
"zorder",
"=",
"-",
"1",
",",
"s",
"=",
"10",
")",
"ax",
".",
"set_yticklabels",
"(",
"[",
"]",
")",
"ax",
".",
"set_ylim",
"(",
"rangecalc",
"(",
"y",
")",
")",
"tax",
"=",
"ax",
".",
"twinx",
"(",
")",
"tax",
".",
"plot",
"(",
"x",
",",
"ys",
",",
"color",
"=",
"'k'",
",",
"alpha",
"=",
"0.3",
",",
"zorder",
"=",
"-",
"5",
")",
"tax",
".",
"set_yticklabels",
"(",
"[",
"]",
")",
"tax",
".",
"set_ylim",
"(",
"rangecalc",
"(",
"ys",
")",
")",
"# plot fitted gaussian",
"xn",
"=",
"np",
".",
"linspace",
"(",
"x",
".",
"min",
"(",
")",
",",
"x",
".",
"max",
"(",
")",
",",
"100",
")",
"ax",
".",
"plot",
"(",
"xn",
",",
"gauss",
"(",
"xn",
",",
"*",
"pg",
")",
",",
"color",
"=",
"'r'",
",",
"alpha",
"=",
"0.5",
")",
"# plot center and excluded region",
"ax",
".",
"axvline",
"(",
"pg",
"[",
"1",
"]",
",",
"color",
"=",
"'b'",
",",
"alpha",
"=",
"0.5",
")",
"ax",
".",
"axvspan",
"(",
"*",
"lim",
",",
"color",
"=",
"'b'",
",",
"alpha",
"=",
"0.1",
",",
"zorder",
"=",
"-",
"2",
")",
"ax1",
".",
"axvspan",
"(",
"*",
"lim",
",",
"color",
"=",
"'b'",
",",
"alpha",
"=",
"0.1",
",",
"zorder",
"=",
"-",
"2",
")",
"if",
"tp",
":",
"ax",
".",
"text",
"(",
".05",
",",
".95",
",",
"'{} (on)'",
".",
"format",
"(",
"n",
")",
",",
"ha",
"=",
"'left'",
",",
"va",
"=",
"'top'",
",",
"transform",
"=",
"ax",
".",
"transAxes",
")",
"else",
":",
"ax",
".",
"text",
"(",
".95",
",",
".95",
",",
"'{} (off)'",
".",
"format",
"(",
"n",
")",
",",
"ha",
"=",
"'right'",
",",
"va",
"=",
"'top'",
",",
"transform",
"=",
"ax",
".",
"transAxes",
")",
"if",
"ax",
".",
"is_last_row",
"(",
")",
":",
"ax",
".",
"set_xlabel",
"(",
"'Time (s)'",
")",
"if",
"ax",
".",
"is_first_col",
"(",
")",
":",
"ax",
".",
"set_ylabel",
"(",
"'Gradient (x)'",
")",
"if",
"ax",
".",
"is_last_col",
"(",
")",
":",
"tax",
".",
"set_ylabel",
"(",
"'Signal (line)'",
")",
"if",
"fail",
":",
"ax",
".",
"axes",
".",
"set_facecolor",
"(",
"(",
"1",
",",
"0",
",",
"0",
",",
"0.2",
")",
")",
"ax",
".",
"text",
"(",
".5",
",",
".5",
",",
"'FAIL'",
",",
"ha",
"=",
"'center'",
",",
"va",
"=",
"'center'",
",",
"fontsize",
"=",
"16",
",",
"color",
"=",
"(",
"1",
",",
"0",
",",
"0",
",",
"0.5",
")",
",",
"transform",
"=",
"ax",
".",
"transAxes",
")",
"n",
"+=",
"1",
"# should never be, but just in case...",
"if",
"len",
"(",
"zeros",
")",
"%",
"2",
"==",
"1",
":",
"axs",
".",
"flat",
"[",
"-",
"1",
"]",
".",
"set_visible",
"=",
"False",
"return",
"fig",
",",
"axs"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
calibration_plot
|
Plot the calibration lines between measured and known SRM values.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
datarange : boolean
Whether or not to show the distribution of the measured data
alongside the calibration curve.
loglog : boolean
Whether or not to plot the data on a log - log scale. This is
useful if you have two low standards very close together,
and want to check whether your data are between them, or
below them.
Returns
-------
(fig, axes)
|
latools/helpers/plot.py
|
def calibration_plot(self, analytes=None, datarange=True, loglog=False, ncol=3, srm_group=None, save=True):
"""
Plot the calibration lines between measured and known SRM values.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
datarange : boolean
Whether or not to show the distribution of the measured data
alongside the calibration curve.
loglog : boolean
Whether or not to plot the data on a log - log scale. This is
useful if you have two low standards very close together,
and want to check whether your data are between them, or
below them.
Returns
-------
(fig, axes)
"""
if isinstance(analytes, str):
analytes = [analytes]
if analytes is None:
analytes = [a for a in self.analytes if self.internal_standard not in a]
if srm_group is not None:
srm_groups = {int(g): t for g, t in self.stdtab.loc[:, ['group', 'gTime']].values}
try:
gTime = srm_groups[srm_group]
except KeyError:
text = ('Invalid SRM group selection. Valid options are:\n' +
' Key: Time Centre\n' +
'\n'.join([' {:}: {:.1f}s'.format(k, v) for k, v in srm_groups.items()]))
print(text)
else:
gTime = None
ncol = int(ncol)
n = len(analytes)
nrow = calc_nrow(n + 1, ncol)
axes = []
if not datarange:
fig = plt.figure(figsize=[4.1 * ncol, 3 * nrow])
else:
fig = plt.figure(figsize=[4.7 * ncol, 3 * nrow])
self.get_focus()
gs = mpl.gridspec.GridSpec(nrows=int(nrow), ncols=int(ncol),
hspace=0.35, wspace=0.3)
mdict = self.srm_mdict
for g, a in zip(gs, analytes):
if not datarange:
ax = fig.add_axes(g.get_position(fig))
axes.append((ax,))
else:
f = 0.8
p0 = g.get_position(fig)
p1 = [p0.x0, p0.y0, p0.width * f, p0.height]
p2 = [p0.x0 + p0.width * f, p0.y0, p0.width * (1 - f), p0.height]
ax = fig.add_axes(p1)
axh = fig.add_axes(p2)
axes.append((ax, axh))
if gTime is None:
sub = idx[a]
else:
sub = idx[a, :, :, gTime]
x = self.srmtabs.loc[sub, 'meas_mean'].values
xe = self.srmtabs.loc[sub, 'meas_err'].values
y = self.srmtabs.loc[sub, 'srm_mean'].values
ye = self.srmtabs.loc[sub, 'srm_err'].values
srm = self.srmtabs.loc[sub].index.get_level_values('SRM')
# plot calibration data
for s, m in mdict.items():
ind = srm == s
ax.errorbar(x[ind], y[ind], xerr=xe[ind], yerr=ye[ind],
color=self.cmaps[a], alpha=0.6,
lw=0, elinewidth=1, marker=m, #'o',
capsize=0, markersize=5, label='_')
# work out axis scaling
if not loglog:
xmax = np.nanmax(x + xe)
ymax = np.nanmax(y + ye)
if any(x - xe < 0):
xmin = np.nanmin(x - xe)
xpad = (xmax - xmin) * 0.05
xlim = [xmin - xpad, xmax + xpad]
else:
xlim = [0, xmax * 1.05]
if any(y - ye < 0):
ymin = np.nanmin(y - ye)
ypad = (ymax - ymin) * 0.05
ylim = [ymin - ypad, ymax + ypad]
else:
ylim = [0, ymax * 1.05]
else:
xd = self.srmtabs.loc[a, 'meas_mean'][self.srmtabs.loc[a, 'meas_mean'] > 0].values
yd = self.srmtabs.loc[a, 'srm_mean'][self.srmtabs.loc[a, 'srm_mean'] > 0].values
xlim = [10**np.floor(np.log10(np.nanmin(xd))),
10**np.ceil(np.log10(np.nanmax(xd)))]
ylim = [10**np.floor(np.log10(np.nanmin(yd))),
10**np.ceil(np.log10(np.nanmax(yd)))]
# scale sanity checks
if xlim[0] == xlim[1]:
xlim[0] = ylim[0]
if ylim[0] == ylim[1]:
ylim[0] = xlim[0]
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
# visual warning if any values < 0
if xlim[0] < 0:
ax.axvspan(xlim[0], 0, color=(1,0.8,0.8), zorder=-1)
if ylim[0] < 0:
ax.axhspan(ylim[0], 0, color=(1,0.8,0.8), zorder=-1)
if any(x < 0) or any(y < 0):
ax.text(.5, .5, 'WARNING: Values below zero.', color='r', weight='bold',
ha='center', va='center', rotation=40, transform=ax.transAxes, alpha=0.6)
# calculate line and R2
if loglog:
x = np.logspace(*np.log10(xlim), 100)
else:
x = np.array(xlim)
if gTime is None:
coefs = self.calib_params.loc[:, a]
else:
coefs = self.calib_params.loc[gTime, a]
m = np.nanmean(coefs['m'])
m_nom = nominal_values(m)
# calculate case-specific paramers
if 'c' in coefs:
c = np.nanmean(coefs['c'])
c_nom = nominal_values(c)
# calculate R2
ym = self.srmtabs.loc[a, 'meas_mean'] * m_nom + c_nom
R2 = R2calc(self.srmtabs.loc[a, 'srm_mean'], ym, force_zero=False)
# generate line and label
line = x * m_nom + c_nom
label = 'y = {:.2e} x'.format(m)
if c > 0:
label += '\n+ {:.2e}'.format(c)
else:
label += '\n {:.2e}'.format(c)
else:
# calculate R2
ym = self.srmtabs.loc[a, 'meas_mean'] * m_nom
R2 = R2calc(self.srmtabs.loc[a, 'srm_mean'], ym, force_zero=True)
# generate line and label
line = x * m_nom
label = 'y = {:.2e} x'.format(m)
# plot line of best fit
ax.plot(x, line, color=(0, 0, 0, 0.5), ls='dashed')
# add R2 to label
if round(R2, 3) == 1:
label = '$R^2$: >0.999\n' + label
else:
label = '$R^2$: {:.3f}\n'.format(R2) + label
ax.text(.05, .95, pretty_element(a), transform=ax.transAxes,
weight='bold', va='top', ha='left', size=12)
ax.set_xlabel('counts/counts ' + self.internal_standard)
ax.set_ylabel('mol/mol ' + self.internal_standard)
# write calibration equation on graph happens after data distribution
# plot data distribution historgram alongside calibration plot
if datarange:
# isolate data
meas = nominal_values(self.focus[a])
meas = meas[~np.isnan(meas)]
# check and set y scale
if np.nanmin(meas) < ylim[0]:
if loglog:
mmeas = meas[meas > 0]
ylim[0] = 10**np.floor(np.log10(np.nanmin(mmeas)))
else:
ylim[0] = 0
ax.set_ylim(ylim)
m95 = np.percentile(meas[~np.isnan(meas)], 95) * 1.05
if m95 > ylim[1]:
if loglog:
ylim[1] = 10**np.ceil(np.log10(m95))
else:
ylim[1] = m95
# hist
if loglog:
bins = np.logspace(*np.log10(ylim), 30)
else:
bins = np.linspace(*ylim, 30)
axh.hist(meas, bins=bins, orientation='horizontal',
color=self.cmaps[a], lw=0.5, alpha=0.5)
if loglog:
axh.set_yscale('log')
axh.set_ylim(ylim) # ylim of histogram axis
ax.set_ylim(ylim) # ylim of calibration axis
axh.set_xticks([])
axh.set_yticklabels([])
# write calibration equation on graph
cmax = np.nanmax(y)
if cmax / ylim[1] > 0.5:
ax.text(0.98, 0.04, label, transform=ax.transAxes,
va='bottom', ha='right')
else:
ax.text(0.02, 0.75, label, transform=ax.transAxes,
va='top', ha='left')
if srm_group is None:
title = 'All SRMs'
else:
title = 'SRM Group {:} (centre at {:.1f}s)'.format(srm_group, gTime)
axes[0][0].set_title(title, loc='left', weight='bold', fontsize=12)
# SRM legend
ax = fig.add_axes(gs[-1].get_position(fig))
for lab, m in mdict.items():
ax.scatter([],[],marker=m, label=lab, color=(0,0,0,0.6))
ax.legend()
ax.axis('off')
if save:
fig.savefig(self.report_dir + '/calibration.pdf')
return fig, axes
|
def calibration_plot(self, analytes=None, datarange=True, loglog=False, ncol=3, srm_group=None, save=True):
"""
Plot the calibration lines between measured and known SRM values.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
datarange : boolean
Whether or not to show the distribution of the measured data
alongside the calibration curve.
loglog : boolean
Whether or not to plot the data on a log - log scale. This is
useful if you have two low standards very close together,
and want to check whether your data are between them, or
below them.
Returns
-------
(fig, axes)
"""
if isinstance(analytes, str):
analytes = [analytes]
if analytes is None:
analytes = [a for a in self.analytes if self.internal_standard not in a]
if srm_group is not None:
srm_groups = {int(g): t for g, t in self.stdtab.loc[:, ['group', 'gTime']].values}
try:
gTime = srm_groups[srm_group]
except KeyError:
text = ('Invalid SRM group selection. Valid options are:\n' +
' Key: Time Centre\n' +
'\n'.join([' {:}: {:.1f}s'.format(k, v) for k, v in srm_groups.items()]))
print(text)
else:
gTime = None
ncol = int(ncol)
n = len(analytes)
nrow = calc_nrow(n + 1, ncol)
axes = []
if not datarange:
fig = plt.figure(figsize=[4.1 * ncol, 3 * nrow])
else:
fig = plt.figure(figsize=[4.7 * ncol, 3 * nrow])
self.get_focus()
gs = mpl.gridspec.GridSpec(nrows=int(nrow), ncols=int(ncol),
hspace=0.35, wspace=0.3)
mdict = self.srm_mdict
for g, a in zip(gs, analytes):
if not datarange:
ax = fig.add_axes(g.get_position(fig))
axes.append((ax,))
else:
f = 0.8
p0 = g.get_position(fig)
p1 = [p0.x0, p0.y0, p0.width * f, p0.height]
p2 = [p0.x0 + p0.width * f, p0.y0, p0.width * (1 - f), p0.height]
ax = fig.add_axes(p1)
axh = fig.add_axes(p2)
axes.append((ax, axh))
if gTime is None:
sub = idx[a]
else:
sub = idx[a, :, :, gTime]
x = self.srmtabs.loc[sub, 'meas_mean'].values
xe = self.srmtabs.loc[sub, 'meas_err'].values
y = self.srmtabs.loc[sub, 'srm_mean'].values
ye = self.srmtabs.loc[sub, 'srm_err'].values
srm = self.srmtabs.loc[sub].index.get_level_values('SRM')
# plot calibration data
for s, m in mdict.items():
ind = srm == s
ax.errorbar(x[ind], y[ind], xerr=xe[ind], yerr=ye[ind],
color=self.cmaps[a], alpha=0.6,
lw=0, elinewidth=1, marker=m, #'o',
capsize=0, markersize=5, label='_')
# work out axis scaling
if not loglog:
xmax = np.nanmax(x + xe)
ymax = np.nanmax(y + ye)
if any(x - xe < 0):
xmin = np.nanmin(x - xe)
xpad = (xmax - xmin) * 0.05
xlim = [xmin - xpad, xmax + xpad]
else:
xlim = [0, xmax * 1.05]
if any(y - ye < 0):
ymin = np.nanmin(y - ye)
ypad = (ymax - ymin) * 0.05
ylim = [ymin - ypad, ymax + ypad]
else:
ylim = [0, ymax * 1.05]
else:
xd = self.srmtabs.loc[a, 'meas_mean'][self.srmtabs.loc[a, 'meas_mean'] > 0].values
yd = self.srmtabs.loc[a, 'srm_mean'][self.srmtabs.loc[a, 'srm_mean'] > 0].values
xlim = [10**np.floor(np.log10(np.nanmin(xd))),
10**np.ceil(np.log10(np.nanmax(xd)))]
ylim = [10**np.floor(np.log10(np.nanmin(yd))),
10**np.ceil(np.log10(np.nanmax(yd)))]
# scale sanity checks
if xlim[0] == xlim[1]:
xlim[0] = ylim[0]
if ylim[0] == ylim[1]:
ylim[0] = xlim[0]
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
# visual warning if any values < 0
if xlim[0] < 0:
ax.axvspan(xlim[0], 0, color=(1,0.8,0.8), zorder=-1)
if ylim[0] < 0:
ax.axhspan(ylim[0], 0, color=(1,0.8,0.8), zorder=-1)
if any(x < 0) or any(y < 0):
ax.text(.5, .5, 'WARNING: Values below zero.', color='r', weight='bold',
ha='center', va='center', rotation=40, transform=ax.transAxes, alpha=0.6)
# calculate line and R2
if loglog:
x = np.logspace(*np.log10(xlim), 100)
else:
x = np.array(xlim)
if gTime is None:
coefs = self.calib_params.loc[:, a]
else:
coefs = self.calib_params.loc[gTime, a]
m = np.nanmean(coefs['m'])
m_nom = nominal_values(m)
# calculate case-specific paramers
if 'c' in coefs:
c = np.nanmean(coefs['c'])
c_nom = nominal_values(c)
# calculate R2
ym = self.srmtabs.loc[a, 'meas_mean'] * m_nom + c_nom
R2 = R2calc(self.srmtabs.loc[a, 'srm_mean'], ym, force_zero=False)
# generate line and label
line = x * m_nom + c_nom
label = 'y = {:.2e} x'.format(m)
if c > 0:
label += '\n+ {:.2e}'.format(c)
else:
label += '\n {:.2e}'.format(c)
else:
# calculate R2
ym = self.srmtabs.loc[a, 'meas_mean'] * m_nom
R2 = R2calc(self.srmtabs.loc[a, 'srm_mean'], ym, force_zero=True)
# generate line and label
line = x * m_nom
label = 'y = {:.2e} x'.format(m)
# plot line of best fit
ax.plot(x, line, color=(0, 0, 0, 0.5), ls='dashed')
# add R2 to label
if round(R2, 3) == 1:
label = '$R^2$: >0.999\n' + label
else:
label = '$R^2$: {:.3f}\n'.format(R2) + label
ax.text(.05, .95, pretty_element(a), transform=ax.transAxes,
weight='bold', va='top', ha='left', size=12)
ax.set_xlabel('counts/counts ' + self.internal_standard)
ax.set_ylabel('mol/mol ' + self.internal_standard)
# write calibration equation on graph happens after data distribution
# plot data distribution historgram alongside calibration plot
if datarange:
# isolate data
meas = nominal_values(self.focus[a])
meas = meas[~np.isnan(meas)]
# check and set y scale
if np.nanmin(meas) < ylim[0]:
if loglog:
mmeas = meas[meas > 0]
ylim[0] = 10**np.floor(np.log10(np.nanmin(mmeas)))
else:
ylim[0] = 0
ax.set_ylim(ylim)
m95 = np.percentile(meas[~np.isnan(meas)], 95) * 1.05
if m95 > ylim[1]:
if loglog:
ylim[1] = 10**np.ceil(np.log10(m95))
else:
ylim[1] = m95
# hist
if loglog:
bins = np.logspace(*np.log10(ylim), 30)
else:
bins = np.linspace(*ylim, 30)
axh.hist(meas, bins=bins, orientation='horizontal',
color=self.cmaps[a], lw=0.5, alpha=0.5)
if loglog:
axh.set_yscale('log')
axh.set_ylim(ylim) # ylim of histogram axis
ax.set_ylim(ylim) # ylim of calibration axis
axh.set_xticks([])
axh.set_yticklabels([])
# write calibration equation on graph
cmax = np.nanmax(y)
if cmax / ylim[1] > 0.5:
ax.text(0.98, 0.04, label, transform=ax.transAxes,
va='bottom', ha='right')
else:
ax.text(0.02, 0.75, label, transform=ax.transAxes,
va='top', ha='left')
if srm_group is None:
title = 'All SRMs'
else:
title = 'SRM Group {:} (centre at {:.1f}s)'.format(srm_group, gTime)
axes[0][0].set_title(title, loc='left', weight='bold', fontsize=12)
# SRM legend
ax = fig.add_axes(gs[-1].get_position(fig))
for lab, m in mdict.items():
ax.scatter([],[],marker=m, label=lab, color=(0,0,0,0.6))
ax.legend()
ax.axis('off')
if save:
fig.savefig(self.report_dir + '/calibration.pdf')
return fig, axes
|
[
"Plot",
"the",
"calibration",
"lines",
"between",
"measured",
"and",
"known",
"SRM",
"values",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/plot.py#L692-L941
|
[
"def",
"calibration_plot",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"datarange",
"=",
"True",
",",
"loglog",
"=",
"False",
",",
"ncol",
"=",
"3",
",",
"srm_group",
"=",
"None",
",",
"save",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"analytes",
",",
"str",
")",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"[",
"a",
"for",
"a",
"in",
"self",
".",
"analytes",
"if",
"self",
".",
"internal_standard",
"not",
"in",
"a",
"]",
"if",
"srm_group",
"is",
"not",
"None",
":",
"srm_groups",
"=",
"{",
"int",
"(",
"g",
")",
":",
"t",
"for",
"g",
",",
"t",
"in",
"self",
".",
"stdtab",
".",
"loc",
"[",
":",
",",
"[",
"'group'",
",",
"'gTime'",
"]",
"]",
".",
"values",
"}",
"try",
":",
"gTime",
"=",
"srm_groups",
"[",
"srm_group",
"]",
"except",
"KeyError",
":",
"text",
"=",
"(",
"'Invalid SRM group selection. Valid options are:\\n'",
"+",
"' Key: Time Centre\\n'",
"+",
"'\\n'",
".",
"join",
"(",
"[",
"' {:}: {:.1f}s'",
".",
"format",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"srm_groups",
".",
"items",
"(",
")",
"]",
")",
")",
"print",
"(",
"text",
")",
"else",
":",
"gTime",
"=",
"None",
"ncol",
"=",
"int",
"(",
"ncol",
")",
"n",
"=",
"len",
"(",
"analytes",
")",
"nrow",
"=",
"calc_nrow",
"(",
"n",
"+",
"1",
",",
"ncol",
")",
"axes",
"=",
"[",
"]",
"if",
"not",
"datarange",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"[",
"4.1",
"*",
"ncol",
",",
"3",
"*",
"nrow",
"]",
")",
"else",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"[",
"4.7",
"*",
"ncol",
",",
"3",
"*",
"nrow",
"]",
")",
"self",
".",
"get_focus",
"(",
")",
"gs",
"=",
"mpl",
".",
"gridspec",
".",
"GridSpec",
"(",
"nrows",
"=",
"int",
"(",
"nrow",
")",
",",
"ncols",
"=",
"int",
"(",
"ncol",
")",
",",
"hspace",
"=",
"0.35",
",",
"wspace",
"=",
"0.3",
")",
"mdict",
"=",
"self",
".",
"srm_mdict",
"for",
"g",
",",
"a",
"in",
"zip",
"(",
"gs",
",",
"analytes",
")",
":",
"if",
"not",
"datarange",
":",
"ax",
"=",
"fig",
".",
"add_axes",
"(",
"g",
".",
"get_position",
"(",
"fig",
")",
")",
"axes",
".",
"append",
"(",
"(",
"ax",
",",
")",
")",
"else",
":",
"f",
"=",
"0.8",
"p0",
"=",
"g",
".",
"get_position",
"(",
"fig",
")",
"p1",
"=",
"[",
"p0",
".",
"x0",
",",
"p0",
".",
"y0",
",",
"p0",
".",
"width",
"*",
"f",
",",
"p0",
".",
"height",
"]",
"p2",
"=",
"[",
"p0",
".",
"x0",
"+",
"p0",
".",
"width",
"*",
"f",
",",
"p0",
".",
"y0",
",",
"p0",
".",
"width",
"*",
"(",
"1",
"-",
"f",
")",
",",
"p0",
".",
"height",
"]",
"ax",
"=",
"fig",
".",
"add_axes",
"(",
"p1",
")",
"axh",
"=",
"fig",
".",
"add_axes",
"(",
"p2",
")",
"axes",
".",
"append",
"(",
"(",
"ax",
",",
"axh",
")",
")",
"if",
"gTime",
"is",
"None",
":",
"sub",
"=",
"idx",
"[",
"a",
"]",
"else",
":",
"sub",
"=",
"idx",
"[",
"a",
",",
":",
",",
":",
",",
"gTime",
"]",
"x",
"=",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"sub",
",",
"'meas_mean'",
"]",
".",
"values",
"xe",
"=",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"sub",
",",
"'meas_err'",
"]",
".",
"values",
"y",
"=",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"sub",
",",
"'srm_mean'",
"]",
".",
"values",
"ye",
"=",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"sub",
",",
"'srm_err'",
"]",
".",
"values",
"srm",
"=",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"sub",
"]",
".",
"index",
".",
"get_level_values",
"(",
"'SRM'",
")",
"# plot calibration data",
"for",
"s",
",",
"m",
"in",
"mdict",
".",
"items",
"(",
")",
":",
"ind",
"=",
"srm",
"==",
"s",
"ax",
".",
"errorbar",
"(",
"x",
"[",
"ind",
"]",
",",
"y",
"[",
"ind",
"]",
",",
"xerr",
"=",
"xe",
"[",
"ind",
"]",
",",
"yerr",
"=",
"ye",
"[",
"ind",
"]",
",",
"color",
"=",
"self",
".",
"cmaps",
"[",
"a",
"]",
",",
"alpha",
"=",
"0.6",
",",
"lw",
"=",
"0",
",",
"elinewidth",
"=",
"1",
",",
"marker",
"=",
"m",
",",
"#'o',",
"capsize",
"=",
"0",
",",
"markersize",
"=",
"5",
",",
"label",
"=",
"'_'",
")",
"# work out axis scaling",
"if",
"not",
"loglog",
":",
"xmax",
"=",
"np",
".",
"nanmax",
"(",
"x",
"+",
"xe",
")",
"ymax",
"=",
"np",
".",
"nanmax",
"(",
"y",
"+",
"ye",
")",
"if",
"any",
"(",
"x",
"-",
"xe",
"<",
"0",
")",
":",
"xmin",
"=",
"np",
".",
"nanmin",
"(",
"x",
"-",
"xe",
")",
"xpad",
"=",
"(",
"xmax",
"-",
"xmin",
")",
"*",
"0.05",
"xlim",
"=",
"[",
"xmin",
"-",
"xpad",
",",
"xmax",
"+",
"xpad",
"]",
"else",
":",
"xlim",
"=",
"[",
"0",
",",
"xmax",
"*",
"1.05",
"]",
"if",
"any",
"(",
"y",
"-",
"ye",
"<",
"0",
")",
":",
"ymin",
"=",
"np",
".",
"nanmin",
"(",
"y",
"-",
"ye",
")",
"ypad",
"=",
"(",
"ymax",
"-",
"ymin",
")",
"*",
"0.05",
"ylim",
"=",
"[",
"ymin",
"-",
"ypad",
",",
"ymax",
"+",
"ypad",
"]",
"else",
":",
"ylim",
"=",
"[",
"0",
",",
"ymax",
"*",
"1.05",
"]",
"else",
":",
"xd",
"=",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"a",
",",
"'meas_mean'",
"]",
"[",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"a",
",",
"'meas_mean'",
"]",
">",
"0",
"]",
".",
"values",
"yd",
"=",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"a",
",",
"'srm_mean'",
"]",
"[",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"a",
",",
"'srm_mean'",
"]",
">",
"0",
"]",
".",
"values",
"xlim",
"=",
"[",
"10",
"**",
"np",
".",
"floor",
"(",
"np",
".",
"log10",
"(",
"np",
".",
"nanmin",
"(",
"xd",
")",
")",
")",
",",
"10",
"**",
"np",
".",
"ceil",
"(",
"np",
".",
"log10",
"(",
"np",
".",
"nanmax",
"(",
"xd",
")",
")",
")",
"]",
"ylim",
"=",
"[",
"10",
"**",
"np",
".",
"floor",
"(",
"np",
".",
"log10",
"(",
"np",
".",
"nanmin",
"(",
"yd",
")",
")",
")",
",",
"10",
"**",
"np",
".",
"ceil",
"(",
"np",
".",
"log10",
"(",
"np",
".",
"nanmax",
"(",
"yd",
")",
")",
")",
"]",
"# scale sanity checks",
"if",
"xlim",
"[",
"0",
"]",
"==",
"xlim",
"[",
"1",
"]",
":",
"xlim",
"[",
"0",
"]",
"=",
"ylim",
"[",
"0",
"]",
"if",
"ylim",
"[",
"0",
"]",
"==",
"ylim",
"[",
"1",
"]",
":",
"ylim",
"[",
"0",
"]",
"=",
"xlim",
"[",
"0",
"]",
"ax",
".",
"set_xscale",
"(",
"'log'",
")",
"ax",
".",
"set_yscale",
"(",
"'log'",
")",
"ax",
".",
"set_xlim",
"(",
"xlim",
")",
"ax",
".",
"set_ylim",
"(",
"ylim",
")",
"# visual warning if any values < 0",
"if",
"xlim",
"[",
"0",
"]",
"<",
"0",
":",
"ax",
".",
"axvspan",
"(",
"xlim",
"[",
"0",
"]",
",",
"0",
",",
"color",
"=",
"(",
"1",
",",
"0.8",
",",
"0.8",
")",
",",
"zorder",
"=",
"-",
"1",
")",
"if",
"ylim",
"[",
"0",
"]",
"<",
"0",
":",
"ax",
".",
"axhspan",
"(",
"ylim",
"[",
"0",
"]",
",",
"0",
",",
"color",
"=",
"(",
"1",
",",
"0.8",
",",
"0.8",
")",
",",
"zorder",
"=",
"-",
"1",
")",
"if",
"any",
"(",
"x",
"<",
"0",
")",
"or",
"any",
"(",
"y",
"<",
"0",
")",
":",
"ax",
".",
"text",
"(",
".5",
",",
".5",
",",
"'WARNING: Values below zero.'",
",",
"color",
"=",
"'r'",
",",
"weight",
"=",
"'bold'",
",",
"ha",
"=",
"'center'",
",",
"va",
"=",
"'center'",
",",
"rotation",
"=",
"40",
",",
"transform",
"=",
"ax",
".",
"transAxes",
",",
"alpha",
"=",
"0.6",
")",
"# calculate line and R2",
"if",
"loglog",
":",
"x",
"=",
"np",
".",
"logspace",
"(",
"*",
"np",
".",
"log10",
"(",
"xlim",
")",
",",
"100",
")",
"else",
":",
"x",
"=",
"np",
".",
"array",
"(",
"xlim",
")",
"if",
"gTime",
"is",
"None",
":",
"coefs",
"=",
"self",
".",
"calib_params",
".",
"loc",
"[",
":",
",",
"a",
"]",
"else",
":",
"coefs",
"=",
"self",
".",
"calib_params",
".",
"loc",
"[",
"gTime",
",",
"a",
"]",
"m",
"=",
"np",
".",
"nanmean",
"(",
"coefs",
"[",
"'m'",
"]",
")",
"m_nom",
"=",
"nominal_values",
"(",
"m",
")",
"# calculate case-specific paramers",
"if",
"'c'",
"in",
"coefs",
":",
"c",
"=",
"np",
".",
"nanmean",
"(",
"coefs",
"[",
"'c'",
"]",
")",
"c_nom",
"=",
"nominal_values",
"(",
"c",
")",
"# calculate R2",
"ym",
"=",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"a",
",",
"'meas_mean'",
"]",
"*",
"m_nom",
"+",
"c_nom",
"R2",
"=",
"R2calc",
"(",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"a",
",",
"'srm_mean'",
"]",
",",
"ym",
",",
"force_zero",
"=",
"False",
")",
"# generate line and label",
"line",
"=",
"x",
"*",
"m_nom",
"+",
"c_nom",
"label",
"=",
"'y = {:.2e} x'",
".",
"format",
"(",
"m",
")",
"if",
"c",
">",
"0",
":",
"label",
"+=",
"'\\n+ {:.2e}'",
".",
"format",
"(",
"c",
")",
"else",
":",
"label",
"+=",
"'\\n {:.2e}'",
".",
"format",
"(",
"c",
")",
"else",
":",
"# calculate R2",
"ym",
"=",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"a",
",",
"'meas_mean'",
"]",
"*",
"m_nom",
"R2",
"=",
"R2calc",
"(",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"a",
",",
"'srm_mean'",
"]",
",",
"ym",
",",
"force_zero",
"=",
"True",
")",
"# generate line and label",
"line",
"=",
"x",
"*",
"m_nom",
"label",
"=",
"'y = {:.2e} x'",
".",
"format",
"(",
"m",
")",
"# plot line of best fit",
"ax",
".",
"plot",
"(",
"x",
",",
"line",
",",
"color",
"=",
"(",
"0",
",",
"0",
",",
"0",
",",
"0.5",
")",
",",
"ls",
"=",
"'dashed'",
")",
"# add R2 to label",
"if",
"round",
"(",
"R2",
",",
"3",
")",
"==",
"1",
":",
"label",
"=",
"'$R^2$: >0.999\\n'",
"+",
"label",
"else",
":",
"label",
"=",
"'$R^2$: {:.3f}\\n'",
".",
"format",
"(",
"R2",
")",
"+",
"label",
"ax",
".",
"text",
"(",
".05",
",",
".95",
",",
"pretty_element",
"(",
"a",
")",
",",
"transform",
"=",
"ax",
".",
"transAxes",
",",
"weight",
"=",
"'bold'",
",",
"va",
"=",
"'top'",
",",
"ha",
"=",
"'left'",
",",
"size",
"=",
"12",
")",
"ax",
".",
"set_xlabel",
"(",
"'counts/counts '",
"+",
"self",
".",
"internal_standard",
")",
"ax",
".",
"set_ylabel",
"(",
"'mol/mol '",
"+",
"self",
".",
"internal_standard",
")",
"# write calibration equation on graph happens after data distribution",
"# plot data distribution historgram alongside calibration plot",
"if",
"datarange",
":",
"# isolate data",
"meas",
"=",
"nominal_values",
"(",
"self",
".",
"focus",
"[",
"a",
"]",
")",
"meas",
"=",
"meas",
"[",
"~",
"np",
".",
"isnan",
"(",
"meas",
")",
"]",
"# check and set y scale",
"if",
"np",
".",
"nanmin",
"(",
"meas",
")",
"<",
"ylim",
"[",
"0",
"]",
":",
"if",
"loglog",
":",
"mmeas",
"=",
"meas",
"[",
"meas",
">",
"0",
"]",
"ylim",
"[",
"0",
"]",
"=",
"10",
"**",
"np",
".",
"floor",
"(",
"np",
".",
"log10",
"(",
"np",
".",
"nanmin",
"(",
"mmeas",
")",
")",
")",
"else",
":",
"ylim",
"[",
"0",
"]",
"=",
"0",
"ax",
".",
"set_ylim",
"(",
"ylim",
")",
"m95",
"=",
"np",
".",
"percentile",
"(",
"meas",
"[",
"~",
"np",
".",
"isnan",
"(",
"meas",
")",
"]",
",",
"95",
")",
"*",
"1.05",
"if",
"m95",
">",
"ylim",
"[",
"1",
"]",
":",
"if",
"loglog",
":",
"ylim",
"[",
"1",
"]",
"=",
"10",
"**",
"np",
".",
"ceil",
"(",
"np",
".",
"log10",
"(",
"m95",
")",
")",
"else",
":",
"ylim",
"[",
"1",
"]",
"=",
"m95",
"# hist",
"if",
"loglog",
":",
"bins",
"=",
"np",
".",
"logspace",
"(",
"*",
"np",
".",
"log10",
"(",
"ylim",
")",
",",
"30",
")",
"else",
":",
"bins",
"=",
"np",
".",
"linspace",
"(",
"*",
"ylim",
",",
"30",
")",
"axh",
".",
"hist",
"(",
"meas",
",",
"bins",
"=",
"bins",
",",
"orientation",
"=",
"'horizontal'",
",",
"color",
"=",
"self",
".",
"cmaps",
"[",
"a",
"]",
",",
"lw",
"=",
"0.5",
",",
"alpha",
"=",
"0.5",
")",
"if",
"loglog",
":",
"axh",
".",
"set_yscale",
"(",
"'log'",
")",
"axh",
".",
"set_ylim",
"(",
"ylim",
")",
"# ylim of histogram axis",
"ax",
".",
"set_ylim",
"(",
"ylim",
")",
"# ylim of calibration axis",
"axh",
".",
"set_xticks",
"(",
"[",
"]",
")",
"axh",
".",
"set_yticklabels",
"(",
"[",
"]",
")",
"# write calibration equation on graph",
"cmax",
"=",
"np",
".",
"nanmax",
"(",
"y",
")",
"if",
"cmax",
"/",
"ylim",
"[",
"1",
"]",
">",
"0.5",
":",
"ax",
".",
"text",
"(",
"0.98",
",",
"0.04",
",",
"label",
",",
"transform",
"=",
"ax",
".",
"transAxes",
",",
"va",
"=",
"'bottom'",
",",
"ha",
"=",
"'right'",
")",
"else",
":",
"ax",
".",
"text",
"(",
"0.02",
",",
"0.75",
",",
"label",
",",
"transform",
"=",
"ax",
".",
"transAxes",
",",
"va",
"=",
"'top'",
",",
"ha",
"=",
"'left'",
")",
"if",
"srm_group",
"is",
"None",
":",
"title",
"=",
"'All SRMs'",
"else",
":",
"title",
"=",
"'SRM Group {:} (centre at {:.1f}s)'",
".",
"format",
"(",
"srm_group",
",",
"gTime",
")",
"axes",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"set_title",
"(",
"title",
",",
"loc",
"=",
"'left'",
",",
"weight",
"=",
"'bold'",
",",
"fontsize",
"=",
"12",
")",
"# SRM legend",
"ax",
"=",
"fig",
".",
"add_axes",
"(",
"gs",
"[",
"-",
"1",
"]",
".",
"get_position",
"(",
"fig",
")",
")",
"for",
"lab",
",",
"m",
"in",
"mdict",
".",
"items",
"(",
")",
":",
"ax",
".",
"scatter",
"(",
"[",
"]",
",",
"[",
"]",
",",
"marker",
"=",
"m",
",",
"label",
"=",
"lab",
",",
"color",
"=",
"(",
"0",
",",
"0",
",",
"0",
",",
"0.6",
")",
")",
"ax",
".",
"legend",
"(",
")",
"ax",
".",
"axis",
"(",
"'off'",
")",
"if",
"save",
":",
"fig",
".",
"savefig",
"(",
"self",
".",
"report_dir",
"+",
"'/calibration.pdf'",
")",
"return",
"fig",
",",
"axes"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
filter_report
|
Visualise effect of data filters.
Parameters
----------
filt : str
Exact or partial name of filter to plot. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name will be plotted.
Defaults to all filters.
analyte : str
Name of analyte to plot.
save : str
file path to save the plot
Returns
-------
(fig, axes)
|
latools/helpers/plot.py
|
def filter_report(Data, filt=None, analytes=None, savedir=None, nbin=5):
"""
Visualise effect of data filters.
Parameters
----------
filt : str
Exact or partial name of filter to plot. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name will be plotted.
Defaults to all filters.
analyte : str
Name of analyte to plot.
save : str
file path to save the plot
Returns
-------
(fig, axes)
"""
if filt is None or filt == 'all':
sets = Data.filt.sets
else:
sets = {k: v for k, v in Data.filt.sets.items() if any(filt in f for f in v)}
regex = re.compile('^([0-9]+)_([A-Za-z0-9-]+)_'
'([A-Za-z0-9-]+)[_$]?'
'([a-z0-9]+)?')
cm = plt.cm.get_cmap('Spectral')
ngrps = len(sets)
if analytes is None:
analytes = Data.analytes
elif isinstance(analytes, str):
analytes = [analytes]
axes = []
for analyte in analytes:
if analyte != Data.internal_standard:
fig = plt.figure()
for i in sorted(sets.keys()):
filts = sets[i]
nfilts = np.array([re.match(regex, f).groups() for f in filts])
fgnames = np.array(['_'.join(a) for a in nfilts[:, 1:3]])
fgrp = np.unique(fgnames)[0]
fig.set_size_inches(10, 3.5 * ngrps)
h = .8 / ngrps
y = nominal_values(Data.focus[analyte])
yh = y[~np.isnan(y)]
m, u = unitpicker(np.nanmax(y),
denominator=Data.internal_standard,
focus_stage=Data.focus_stage)
axs = tax, hax = (fig.add_axes([.1, .9 - (i + 1) * h, .6, h * .98]),
fig.add_axes([.7, .9 - (i + 1) * h, .2, h * .98]))
axes.append(axs)
# get variables
fg = sets[i]
cs = cm(np.linspace(0, 1, len(fg)))
fn = ['_'.join(x) for x in nfilts[:, (0, 3)]]
an = nfilts[:, 0]
bins = np.linspace(np.nanmin(y), np.nanmax(y), len(yh) // nbin) * m
if 'DBSCAN' in fgrp:
# determine data filters
core_ind = Data.filt.components[[f for f in fg
if 'core' in f][0]]
other = np.array([('noise' not in f) & ('core' not in f)
for f in fg])
tfg = fg[other]
tfn = fn[other]
tcs = cm(np.linspace(0, 1, len(tfg)))
# plot all data
hax.hist(m * yh, bins, alpha=0.2, orientation='horizontal',
color='k', lw=0)
# legend markers for core/member
tax.scatter([], [], s=20, label='core', color='w', lw=0.5, edgecolor='k')
tax.scatter([], [], s=7.5, label='member', color='w', lw=0.5, edgecolor='k')
# plot noise
try:
noise_ind = Data.filt.components[[f for f in fg
if 'noise' in f][0]]
tax.scatter(Data.Time[noise_ind], m * y[noise_ind],
lw=1, color='k', s=10, marker='x',
label='noise', alpha=0.6)
except:
pass
# plot filtered data
for f, c, lab in zip(tfg, tcs, tfn):
ind = Data.filt.components[f]
tax.scatter(Data.Time[~core_ind & ind],
m * y[~core_ind & ind], lw=.5, color=c, s=5, edgecolor='k')
tax.scatter(Data.Time[core_ind & ind],
m * y[core_ind & ind], lw=.5, color=c, s=15, edgecolor='k',
label=lab)
hax.hist(m * y[ind][~np.isnan(y[ind])], bins, color=c, lw=0.1,
orientation='horizontal', alpha=0.6)
else:
# plot all data
tax.scatter(Data.Time, m * y, color='k', alpha=0.2, lw=0.1,
s=20, label='excl')
hax.hist(m * yh, bins, alpha=0.2, orientation='horizontal',
color='k', lw=0)
# plot filtered data
for f, c, lab in zip(fg, cs, fn):
ind = Data.filt.components[f]
tax.scatter(Data.Time[ind], m * y[ind],
edgecolor=(0,0,0,0), color=c, s=15, label=lab)
hax.hist(m * y[ind][~np.isnan(y[ind])], bins, color=c, lw=0.1,
orientation='horizontal', alpha=0.6)
if 'thresh' in fgrp and analyte in fgrp:
tax.axhline(Data.filt.params[fg[0]]['threshold'] * m,
ls='dashed', zorder=-2, alpha=0.5, color='k')
hax.axhline(Data.filt.params[fg[0]]['threshold'] * m,
ls='dashed', zorder=-2, alpha=0.5, color='k')
# formatting
for ax in axs:
mn = np.nanmin(y) * m
mx = np.nanmax(y) * m
rn = mx - mn
ax.set_ylim(mn - .05 * rn, mx + 0.05 * rn)
# legend
hn, la = tax.get_legend_handles_labels()
hax.legend(hn, la, loc='upper right', scatterpoints=1)
tax.text(.02, .98, Data.sample + ': ' + fgrp, size=12,
weight='bold', ha='left', va='top',
transform=tax.transAxes)
tax.set_ylabel(pretty_element(analyte) + ' (' + u + ')')
tax.set_xticks(tax.get_xticks()[:-1])
hax.set_yticklabels([])
if i < ngrps - 1:
tax.set_xticklabels([])
hax.set_xticklabels([])
else:
tax.set_xlabel('Time (s)')
hax.set_xlabel('n')
if isinstance(savedir, str):
fig.savefig(savedir + '/' + Data.sample + '_' +
analyte + '.pdf')
plt.close(fig)
return fig, axes
|
def filter_report(Data, filt=None, analytes=None, savedir=None, nbin=5):
"""
Visualise effect of data filters.
Parameters
----------
filt : str
Exact or partial name of filter to plot. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name will be plotted.
Defaults to all filters.
analyte : str
Name of analyte to plot.
save : str
file path to save the plot
Returns
-------
(fig, axes)
"""
if filt is None or filt == 'all':
sets = Data.filt.sets
else:
sets = {k: v for k, v in Data.filt.sets.items() if any(filt in f for f in v)}
regex = re.compile('^([0-9]+)_([A-Za-z0-9-]+)_'
'([A-Za-z0-9-]+)[_$]?'
'([a-z0-9]+)?')
cm = plt.cm.get_cmap('Spectral')
ngrps = len(sets)
if analytes is None:
analytes = Data.analytes
elif isinstance(analytes, str):
analytes = [analytes]
axes = []
for analyte in analytes:
if analyte != Data.internal_standard:
fig = plt.figure()
for i in sorted(sets.keys()):
filts = sets[i]
nfilts = np.array([re.match(regex, f).groups() for f in filts])
fgnames = np.array(['_'.join(a) for a in nfilts[:, 1:3]])
fgrp = np.unique(fgnames)[0]
fig.set_size_inches(10, 3.5 * ngrps)
h = .8 / ngrps
y = nominal_values(Data.focus[analyte])
yh = y[~np.isnan(y)]
m, u = unitpicker(np.nanmax(y),
denominator=Data.internal_standard,
focus_stage=Data.focus_stage)
axs = tax, hax = (fig.add_axes([.1, .9 - (i + 1) * h, .6, h * .98]),
fig.add_axes([.7, .9 - (i + 1) * h, .2, h * .98]))
axes.append(axs)
# get variables
fg = sets[i]
cs = cm(np.linspace(0, 1, len(fg)))
fn = ['_'.join(x) for x in nfilts[:, (0, 3)]]
an = nfilts[:, 0]
bins = np.linspace(np.nanmin(y), np.nanmax(y), len(yh) // nbin) * m
if 'DBSCAN' in fgrp:
# determine data filters
core_ind = Data.filt.components[[f for f in fg
if 'core' in f][0]]
other = np.array([('noise' not in f) & ('core' not in f)
for f in fg])
tfg = fg[other]
tfn = fn[other]
tcs = cm(np.linspace(0, 1, len(tfg)))
# plot all data
hax.hist(m * yh, bins, alpha=0.2, orientation='horizontal',
color='k', lw=0)
# legend markers for core/member
tax.scatter([], [], s=20, label='core', color='w', lw=0.5, edgecolor='k')
tax.scatter([], [], s=7.5, label='member', color='w', lw=0.5, edgecolor='k')
# plot noise
try:
noise_ind = Data.filt.components[[f for f in fg
if 'noise' in f][0]]
tax.scatter(Data.Time[noise_ind], m * y[noise_ind],
lw=1, color='k', s=10, marker='x',
label='noise', alpha=0.6)
except:
pass
# plot filtered data
for f, c, lab in zip(tfg, tcs, tfn):
ind = Data.filt.components[f]
tax.scatter(Data.Time[~core_ind & ind],
m * y[~core_ind & ind], lw=.5, color=c, s=5, edgecolor='k')
tax.scatter(Data.Time[core_ind & ind],
m * y[core_ind & ind], lw=.5, color=c, s=15, edgecolor='k',
label=lab)
hax.hist(m * y[ind][~np.isnan(y[ind])], bins, color=c, lw=0.1,
orientation='horizontal', alpha=0.6)
else:
# plot all data
tax.scatter(Data.Time, m * y, color='k', alpha=0.2, lw=0.1,
s=20, label='excl')
hax.hist(m * yh, bins, alpha=0.2, orientation='horizontal',
color='k', lw=0)
# plot filtered data
for f, c, lab in zip(fg, cs, fn):
ind = Data.filt.components[f]
tax.scatter(Data.Time[ind], m * y[ind],
edgecolor=(0,0,0,0), color=c, s=15, label=lab)
hax.hist(m * y[ind][~np.isnan(y[ind])], bins, color=c, lw=0.1,
orientation='horizontal', alpha=0.6)
if 'thresh' in fgrp and analyte in fgrp:
tax.axhline(Data.filt.params[fg[0]]['threshold'] * m,
ls='dashed', zorder=-2, alpha=0.5, color='k')
hax.axhline(Data.filt.params[fg[0]]['threshold'] * m,
ls='dashed', zorder=-2, alpha=0.5, color='k')
# formatting
for ax in axs:
mn = np.nanmin(y) * m
mx = np.nanmax(y) * m
rn = mx - mn
ax.set_ylim(mn - .05 * rn, mx + 0.05 * rn)
# legend
hn, la = tax.get_legend_handles_labels()
hax.legend(hn, la, loc='upper right', scatterpoints=1)
tax.text(.02, .98, Data.sample + ': ' + fgrp, size=12,
weight='bold', ha='left', va='top',
transform=tax.transAxes)
tax.set_ylabel(pretty_element(analyte) + ' (' + u + ')')
tax.set_xticks(tax.get_xticks()[:-1])
hax.set_yticklabels([])
if i < ngrps - 1:
tax.set_xticklabels([])
hax.set_xticklabels([])
else:
tax.set_xlabel('Time (s)')
hax.set_xlabel('n')
if isinstance(savedir, str):
fig.savefig(savedir + '/' + Data.sample + '_' +
analyte + '.pdf')
plt.close(fig)
return fig, axes
|
[
"Visualise",
"effect",
"of",
"data",
"filters",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/plot.py#L1002-L1159
|
[
"def",
"filter_report",
"(",
"Data",
",",
"filt",
"=",
"None",
",",
"analytes",
"=",
"None",
",",
"savedir",
"=",
"None",
",",
"nbin",
"=",
"5",
")",
":",
"if",
"filt",
"is",
"None",
"or",
"filt",
"==",
"'all'",
":",
"sets",
"=",
"Data",
".",
"filt",
".",
"sets",
"else",
":",
"sets",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"Data",
".",
"filt",
".",
"sets",
".",
"items",
"(",
")",
"if",
"any",
"(",
"filt",
"in",
"f",
"for",
"f",
"in",
"v",
")",
"}",
"regex",
"=",
"re",
".",
"compile",
"(",
"'^([0-9]+)_([A-Za-z0-9-]+)_'",
"'([A-Za-z0-9-]+)[_$]?'",
"'([a-z0-9]+)?'",
")",
"cm",
"=",
"plt",
".",
"cm",
".",
"get_cmap",
"(",
"'Spectral'",
")",
"ngrps",
"=",
"len",
"(",
"sets",
")",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"Data",
".",
"analytes",
"elif",
"isinstance",
"(",
"analytes",
",",
"str",
")",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"axes",
"=",
"[",
"]",
"for",
"analyte",
"in",
"analytes",
":",
"if",
"analyte",
"!=",
"Data",
".",
"internal_standard",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"for",
"i",
"in",
"sorted",
"(",
"sets",
".",
"keys",
"(",
")",
")",
":",
"filts",
"=",
"sets",
"[",
"i",
"]",
"nfilts",
"=",
"np",
".",
"array",
"(",
"[",
"re",
".",
"match",
"(",
"regex",
",",
"f",
")",
".",
"groups",
"(",
")",
"for",
"f",
"in",
"filts",
"]",
")",
"fgnames",
"=",
"np",
".",
"array",
"(",
"[",
"'_'",
".",
"join",
"(",
"a",
")",
"for",
"a",
"in",
"nfilts",
"[",
":",
",",
"1",
":",
"3",
"]",
"]",
")",
"fgrp",
"=",
"np",
".",
"unique",
"(",
"fgnames",
")",
"[",
"0",
"]",
"fig",
".",
"set_size_inches",
"(",
"10",
",",
"3.5",
"*",
"ngrps",
")",
"h",
"=",
".8",
"/",
"ngrps",
"y",
"=",
"nominal_values",
"(",
"Data",
".",
"focus",
"[",
"analyte",
"]",
")",
"yh",
"=",
"y",
"[",
"~",
"np",
".",
"isnan",
"(",
"y",
")",
"]",
"m",
",",
"u",
"=",
"unitpicker",
"(",
"np",
".",
"nanmax",
"(",
"y",
")",
",",
"denominator",
"=",
"Data",
".",
"internal_standard",
",",
"focus_stage",
"=",
"Data",
".",
"focus_stage",
")",
"axs",
"=",
"tax",
",",
"hax",
"=",
"(",
"fig",
".",
"add_axes",
"(",
"[",
".1",
",",
".9",
"-",
"(",
"i",
"+",
"1",
")",
"*",
"h",
",",
".6",
",",
"h",
"*",
".98",
"]",
")",
",",
"fig",
".",
"add_axes",
"(",
"[",
".7",
",",
".9",
"-",
"(",
"i",
"+",
"1",
")",
"*",
"h",
",",
".2",
",",
"h",
"*",
".98",
"]",
")",
")",
"axes",
".",
"append",
"(",
"axs",
")",
"# get variables",
"fg",
"=",
"sets",
"[",
"i",
"]",
"cs",
"=",
"cm",
"(",
"np",
".",
"linspace",
"(",
"0",
",",
"1",
",",
"len",
"(",
"fg",
")",
")",
")",
"fn",
"=",
"[",
"'_'",
".",
"join",
"(",
"x",
")",
"for",
"x",
"in",
"nfilts",
"[",
":",
",",
"(",
"0",
",",
"3",
")",
"]",
"]",
"an",
"=",
"nfilts",
"[",
":",
",",
"0",
"]",
"bins",
"=",
"np",
".",
"linspace",
"(",
"np",
".",
"nanmin",
"(",
"y",
")",
",",
"np",
".",
"nanmax",
"(",
"y",
")",
",",
"len",
"(",
"yh",
")",
"//",
"nbin",
")",
"*",
"m",
"if",
"'DBSCAN'",
"in",
"fgrp",
":",
"# determine data filters",
"core_ind",
"=",
"Data",
".",
"filt",
".",
"components",
"[",
"[",
"f",
"for",
"f",
"in",
"fg",
"if",
"'core'",
"in",
"f",
"]",
"[",
"0",
"]",
"]",
"other",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"'noise'",
"not",
"in",
"f",
")",
"&",
"(",
"'core'",
"not",
"in",
"f",
")",
"for",
"f",
"in",
"fg",
"]",
")",
"tfg",
"=",
"fg",
"[",
"other",
"]",
"tfn",
"=",
"fn",
"[",
"other",
"]",
"tcs",
"=",
"cm",
"(",
"np",
".",
"linspace",
"(",
"0",
",",
"1",
",",
"len",
"(",
"tfg",
")",
")",
")",
"# plot all data",
"hax",
".",
"hist",
"(",
"m",
"*",
"yh",
",",
"bins",
",",
"alpha",
"=",
"0.2",
",",
"orientation",
"=",
"'horizontal'",
",",
"color",
"=",
"'k'",
",",
"lw",
"=",
"0",
")",
"# legend markers for core/member",
"tax",
".",
"scatter",
"(",
"[",
"]",
",",
"[",
"]",
",",
"s",
"=",
"20",
",",
"label",
"=",
"'core'",
",",
"color",
"=",
"'w'",
",",
"lw",
"=",
"0.5",
",",
"edgecolor",
"=",
"'k'",
")",
"tax",
".",
"scatter",
"(",
"[",
"]",
",",
"[",
"]",
",",
"s",
"=",
"7.5",
",",
"label",
"=",
"'member'",
",",
"color",
"=",
"'w'",
",",
"lw",
"=",
"0.5",
",",
"edgecolor",
"=",
"'k'",
")",
"# plot noise",
"try",
":",
"noise_ind",
"=",
"Data",
".",
"filt",
".",
"components",
"[",
"[",
"f",
"for",
"f",
"in",
"fg",
"if",
"'noise'",
"in",
"f",
"]",
"[",
"0",
"]",
"]",
"tax",
".",
"scatter",
"(",
"Data",
".",
"Time",
"[",
"noise_ind",
"]",
",",
"m",
"*",
"y",
"[",
"noise_ind",
"]",
",",
"lw",
"=",
"1",
",",
"color",
"=",
"'k'",
",",
"s",
"=",
"10",
",",
"marker",
"=",
"'x'",
",",
"label",
"=",
"'noise'",
",",
"alpha",
"=",
"0.6",
")",
"except",
":",
"pass",
"# plot filtered data",
"for",
"f",
",",
"c",
",",
"lab",
"in",
"zip",
"(",
"tfg",
",",
"tcs",
",",
"tfn",
")",
":",
"ind",
"=",
"Data",
".",
"filt",
".",
"components",
"[",
"f",
"]",
"tax",
".",
"scatter",
"(",
"Data",
".",
"Time",
"[",
"~",
"core_ind",
"&",
"ind",
"]",
",",
"m",
"*",
"y",
"[",
"~",
"core_ind",
"&",
"ind",
"]",
",",
"lw",
"=",
".5",
",",
"color",
"=",
"c",
",",
"s",
"=",
"5",
",",
"edgecolor",
"=",
"'k'",
")",
"tax",
".",
"scatter",
"(",
"Data",
".",
"Time",
"[",
"core_ind",
"&",
"ind",
"]",
",",
"m",
"*",
"y",
"[",
"core_ind",
"&",
"ind",
"]",
",",
"lw",
"=",
".5",
",",
"color",
"=",
"c",
",",
"s",
"=",
"15",
",",
"edgecolor",
"=",
"'k'",
",",
"label",
"=",
"lab",
")",
"hax",
".",
"hist",
"(",
"m",
"*",
"y",
"[",
"ind",
"]",
"[",
"~",
"np",
".",
"isnan",
"(",
"y",
"[",
"ind",
"]",
")",
"]",
",",
"bins",
",",
"color",
"=",
"c",
",",
"lw",
"=",
"0.1",
",",
"orientation",
"=",
"'horizontal'",
",",
"alpha",
"=",
"0.6",
")",
"else",
":",
"# plot all data",
"tax",
".",
"scatter",
"(",
"Data",
".",
"Time",
",",
"m",
"*",
"y",
",",
"color",
"=",
"'k'",
",",
"alpha",
"=",
"0.2",
",",
"lw",
"=",
"0.1",
",",
"s",
"=",
"20",
",",
"label",
"=",
"'excl'",
")",
"hax",
".",
"hist",
"(",
"m",
"*",
"yh",
",",
"bins",
",",
"alpha",
"=",
"0.2",
",",
"orientation",
"=",
"'horizontal'",
",",
"color",
"=",
"'k'",
",",
"lw",
"=",
"0",
")",
"# plot filtered data",
"for",
"f",
",",
"c",
",",
"lab",
"in",
"zip",
"(",
"fg",
",",
"cs",
",",
"fn",
")",
":",
"ind",
"=",
"Data",
".",
"filt",
".",
"components",
"[",
"f",
"]",
"tax",
".",
"scatter",
"(",
"Data",
".",
"Time",
"[",
"ind",
"]",
",",
"m",
"*",
"y",
"[",
"ind",
"]",
",",
"edgecolor",
"=",
"(",
"0",
",",
"0",
",",
"0",
",",
"0",
")",
",",
"color",
"=",
"c",
",",
"s",
"=",
"15",
",",
"label",
"=",
"lab",
")",
"hax",
".",
"hist",
"(",
"m",
"*",
"y",
"[",
"ind",
"]",
"[",
"~",
"np",
".",
"isnan",
"(",
"y",
"[",
"ind",
"]",
")",
"]",
",",
"bins",
",",
"color",
"=",
"c",
",",
"lw",
"=",
"0.1",
",",
"orientation",
"=",
"'horizontal'",
",",
"alpha",
"=",
"0.6",
")",
"if",
"'thresh'",
"in",
"fgrp",
"and",
"analyte",
"in",
"fgrp",
":",
"tax",
".",
"axhline",
"(",
"Data",
".",
"filt",
".",
"params",
"[",
"fg",
"[",
"0",
"]",
"]",
"[",
"'threshold'",
"]",
"*",
"m",
",",
"ls",
"=",
"'dashed'",
",",
"zorder",
"=",
"-",
"2",
",",
"alpha",
"=",
"0.5",
",",
"color",
"=",
"'k'",
")",
"hax",
".",
"axhline",
"(",
"Data",
".",
"filt",
".",
"params",
"[",
"fg",
"[",
"0",
"]",
"]",
"[",
"'threshold'",
"]",
"*",
"m",
",",
"ls",
"=",
"'dashed'",
",",
"zorder",
"=",
"-",
"2",
",",
"alpha",
"=",
"0.5",
",",
"color",
"=",
"'k'",
")",
"# formatting",
"for",
"ax",
"in",
"axs",
":",
"mn",
"=",
"np",
".",
"nanmin",
"(",
"y",
")",
"*",
"m",
"mx",
"=",
"np",
".",
"nanmax",
"(",
"y",
")",
"*",
"m",
"rn",
"=",
"mx",
"-",
"mn",
"ax",
".",
"set_ylim",
"(",
"mn",
"-",
".05",
"*",
"rn",
",",
"mx",
"+",
"0.05",
"*",
"rn",
")",
"# legend",
"hn",
",",
"la",
"=",
"tax",
".",
"get_legend_handles_labels",
"(",
")",
"hax",
".",
"legend",
"(",
"hn",
",",
"la",
",",
"loc",
"=",
"'upper right'",
",",
"scatterpoints",
"=",
"1",
")",
"tax",
".",
"text",
"(",
".02",
",",
".98",
",",
"Data",
".",
"sample",
"+",
"': '",
"+",
"fgrp",
",",
"size",
"=",
"12",
",",
"weight",
"=",
"'bold'",
",",
"ha",
"=",
"'left'",
",",
"va",
"=",
"'top'",
",",
"transform",
"=",
"tax",
".",
"transAxes",
")",
"tax",
".",
"set_ylabel",
"(",
"pretty_element",
"(",
"analyte",
")",
"+",
"' ('",
"+",
"u",
"+",
"')'",
")",
"tax",
".",
"set_xticks",
"(",
"tax",
".",
"get_xticks",
"(",
")",
"[",
":",
"-",
"1",
"]",
")",
"hax",
".",
"set_yticklabels",
"(",
"[",
"]",
")",
"if",
"i",
"<",
"ngrps",
"-",
"1",
":",
"tax",
".",
"set_xticklabels",
"(",
"[",
"]",
")",
"hax",
".",
"set_xticklabels",
"(",
"[",
"]",
")",
"else",
":",
"tax",
".",
"set_xlabel",
"(",
"'Time (s)'",
")",
"hax",
".",
"set_xlabel",
"(",
"'n'",
")",
"if",
"isinstance",
"(",
"savedir",
",",
"str",
")",
":",
"fig",
".",
"savefig",
"(",
"savedir",
"+",
"'/'",
"+",
"Data",
".",
"sample",
"+",
"'_'",
"+",
"analyte",
"+",
"'.pdf'",
")",
"plt",
".",
"close",
"(",
"fig",
")",
"return",
"fig",
",",
"axes"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
pairwise_reproducibility
|
Calculate the reproducibility of LA-ICPMS based on unique pairs of repeat analyses.
Pairwise differences are fit with a half-Cauchy distribution, and the median and
95% confidence limits are returned for each analyte.
Parameters
----------
df : pandas.DataFrame
A dataset
plot : bool
Whether or not to plot the resulting error distributions.
Returns
-------
pdiffs : pandas.DataFrame
Unique pairwise differences for all analytes.
rep_dists : dict of scipy.stats.halfcauchy
Half-Cauchy distribution objects fitted to the
differences.
rep_stats : dict of tuples
The 50% and 95% quantiles of the half-cauchy
distribution.
(fig, axs) : matplotlib objects
The figure. If not made, returnes (None, None) placeholder
|
Supplement/comparison_tools/stats.py
|
def pairwise_reproducibility(df, plot=False):
"""
Calculate the reproducibility of LA-ICPMS based on unique pairs of repeat analyses.
Pairwise differences are fit with a half-Cauchy distribution, and the median and
95% confidence limits are returned for each analyte.
Parameters
----------
df : pandas.DataFrame
A dataset
plot : bool
Whether or not to plot the resulting error distributions.
Returns
-------
pdiffs : pandas.DataFrame
Unique pairwise differences for all analytes.
rep_dists : dict of scipy.stats.halfcauchy
Half-Cauchy distribution objects fitted to the
differences.
rep_stats : dict of tuples
The 50% and 95% quantiles of the half-cauchy
distribution.
(fig, axs) : matplotlib objects
The figure. If not made, returnes (None, None) placeholder
"""
ans = df.columns.values
pdifs = []
# calculate differences between unique pairs
for ind, d in df.groupby(level=0):
d.index = d.index.droplevel(0)
difs = []
for i, r in d.iterrows():
t = d.loc[i+1:, :]
difs.append(t[ans] - r[ans])
pdifs.append(pd.concat(difs))
pdifs = pd.concat(pdifs).abs()
# calculate stats
rep_stats = {}
rep_dists = {}
errfn = stats.halfcauchy
for a in ans:
d = pdifs.loc[:, a].dropna().values
hdist = errfn.fit(d, floc=0)
rep_dists[a] = errfn(*hdist)
rep_stats[a] = rep_dists[a].ppf((0.5, 0.95))
# make plot
if not plot:
return pdifs, rep_dists, rep_stats, (None, None)
fig, axs = plt.subplots(1, len(ans), figsize=[len(ans) * 2, 2])
for a, ax in zip(ans, axs):
d = pdifs.loc[:, a].dropna().values
hist, edges, _ = ax.hist(d, 30)
ax.plot(edges, rep_dists[a].pdf(edges) * (sum(hist) * np.mean(np.diff(edges))))
ax.set_title(a, loc='left')
return pdifs, rep_dists, rep_stats, (fig, axs)
|
def pairwise_reproducibility(df, plot=False):
"""
Calculate the reproducibility of LA-ICPMS based on unique pairs of repeat analyses.
Pairwise differences are fit with a half-Cauchy distribution, and the median and
95% confidence limits are returned for each analyte.
Parameters
----------
df : pandas.DataFrame
A dataset
plot : bool
Whether or not to plot the resulting error distributions.
Returns
-------
pdiffs : pandas.DataFrame
Unique pairwise differences for all analytes.
rep_dists : dict of scipy.stats.halfcauchy
Half-Cauchy distribution objects fitted to the
differences.
rep_stats : dict of tuples
The 50% and 95% quantiles of the half-cauchy
distribution.
(fig, axs) : matplotlib objects
The figure. If not made, returnes (None, None) placeholder
"""
ans = df.columns.values
pdifs = []
# calculate differences between unique pairs
for ind, d in df.groupby(level=0):
d.index = d.index.droplevel(0)
difs = []
for i, r in d.iterrows():
t = d.loc[i+1:, :]
difs.append(t[ans] - r[ans])
pdifs.append(pd.concat(difs))
pdifs = pd.concat(pdifs).abs()
# calculate stats
rep_stats = {}
rep_dists = {}
errfn = stats.halfcauchy
for a in ans:
d = pdifs.loc[:, a].dropna().values
hdist = errfn.fit(d, floc=0)
rep_dists[a] = errfn(*hdist)
rep_stats[a] = rep_dists[a].ppf((0.5, 0.95))
# make plot
if not plot:
return pdifs, rep_dists, rep_stats, (None, None)
fig, axs = plt.subplots(1, len(ans), figsize=[len(ans) * 2, 2])
for a, ax in zip(ans, axs):
d = pdifs.loc[:, a].dropna().values
hist, edges, _ = ax.hist(d, 30)
ax.plot(edges, rep_dists[a].pdf(edges) * (sum(hist) * np.mean(np.diff(edges))))
ax.set_title(a, loc='left')
return pdifs, rep_dists, rep_stats, (fig, axs)
|
[
"Calculate",
"the",
"reproducibility",
"of",
"LA",
"-",
"ICPMS",
"based",
"on",
"unique",
"pairs",
"of",
"repeat",
"analyses",
".",
"Pairwise",
"differences",
"are",
"fit",
"with",
"a",
"half",
"-",
"Cauchy",
"distribution",
"and",
"the",
"median",
"and",
"95%",
"confidence",
"limits",
"are",
"returned",
"for",
"each",
"analyte",
".",
"Parameters",
"----------",
"df",
":",
"pandas",
".",
"DataFrame",
"A",
"dataset",
"plot",
":",
"bool",
"Whether",
"or",
"not",
"to",
"plot",
"the",
"resulting",
"error",
"distributions",
".",
"Returns",
"-------",
"pdiffs",
":",
"pandas",
".",
"DataFrame",
"Unique",
"pairwise",
"differences",
"for",
"all",
"analytes",
".",
"rep_dists",
":",
"dict",
"of",
"scipy",
".",
"stats",
".",
"halfcauchy",
"Half",
"-",
"Cauchy",
"distribution",
"objects",
"fitted",
"to",
"the",
"differences",
".",
"rep_stats",
":",
"dict",
"of",
"tuples",
"The",
"50%",
"and",
"95%",
"quantiles",
"of",
"the",
"half",
"-",
"cauchy",
"distribution",
".",
"(",
"fig",
"axs",
")",
":",
"matplotlib",
"objects",
"The",
"figure",
".",
"If",
"not",
"made",
"returnes",
"(",
"None",
"None",
")",
"placeholder"
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/Supplement/comparison_tools/stats.py#L14-L81
|
[
"def",
"pairwise_reproducibility",
"(",
"df",
",",
"plot",
"=",
"False",
")",
":",
"ans",
"=",
"df",
".",
"columns",
".",
"values",
"pdifs",
"=",
"[",
"]",
"# calculate differences between unique pairs",
"for",
"ind",
",",
"d",
"in",
"df",
".",
"groupby",
"(",
"level",
"=",
"0",
")",
":",
"d",
".",
"index",
"=",
"d",
".",
"index",
".",
"droplevel",
"(",
"0",
")",
"difs",
"=",
"[",
"]",
"for",
"i",
",",
"r",
"in",
"d",
".",
"iterrows",
"(",
")",
":",
"t",
"=",
"d",
".",
"loc",
"[",
"i",
"+",
"1",
":",
",",
":",
"]",
"difs",
".",
"append",
"(",
"t",
"[",
"ans",
"]",
"-",
"r",
"[",
"ans",
"]",
")",
"pdifs",
".",
"append",
"(",
"pd",
".",
"concat",
"(",
"difs",
")",
")",
"pdifs",
"=",
"pd",
".",
"concat",
"(",
"pdifs",
")",
".",
"abs",
"(",
")",
"# calculate stats",
"rep_stats",
"=",
"{",
"}",
"rep_dists",
"=",
"{",
"}",
"errfn",
"=",
"stats",
".",
"halfcauchy",
"for",
"a",
"in",
"ans",
":",
"d",
"=",
"pdifs",
".",
"loc",
"[",
":",
",",
"a",
"]",
".",
"dropna",
"(",
")",
".",
"values",
"hdist",
"=",
"errfn",
".",
"fit",
"(",
"d",
",",
"floc",
"=",
"0",
")",
"rep_dists",
"[",
"a",
"]",
"=",
"errfn",
"(",
"*",
"hdist",
")",
"rep_stats",
"[",
"a",
"]",
"=",
"rep_dists",
"[",
"a",
"]",
".",
"ppf",
"(",
"(",
"0.5",
",",
"0.95",
")",
")",
"# make plot",
"if",
"not",
"plot",
":",
"return",
"pdifs",
",",
"rep_dists",
",",
"rep_stats",
",",
"(",
"None",
",",
"None",
")",
"fig",
",",
"axs",
"=",
"plt",
".",
"subplots",
"(",
"1",
",",
"len",
"(",
"ans",
")",
",",
"figsize",
"=",
"[",
"len",
"(",
"ans",
")",
"*",
"2",
",",
"2",
"]",
")",
"for",
"a",
",",
"ax",
"in",
"zip",
"(",
"ans",
",",
"axs",
")",
":",
"d",
"=",
"pdifs",
".",
"loc",
"[",
":",
",",
"a",
"]",
".",
"dropna",
"(",
")",
".",
"values",
"hist",
",",
"edges",
",",
"_",
"=",
"ax",
".",
"hist",
"(",
"d",
",",
"30",
")",
"ax",
".",
"plot",
"(",
"edges",
",",
"rep_dists",
"[",
"a",
"]",
".",
"pdf",
"(",
"edges",
")",
"*",
"(",
"sum",
"(",
"hist",
")",
"*",
"np",
".",
"mean",
"(",
"np",
".",
"diff",
"(",
"edges",
")",
")",
")",
")",
"ax",
".",
"set_title",
"(",
"a",
",",
"loc",
"=",
"'left'",
")",
"return",
"pdifs",
",",
"rep_dists",
",",
"rep_stats",
",",
"(",
"fig",
",",
"axs",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
comparison_stats
|
Compute comparison stats for test and LAtools data.
Population-level similarity assessed by a Kolmogorov-Smirnov test.
Individual similarity assessed by a pairwise Wilcoxon signed rank test.
Trends in residuals assessed by regression analysis, where significance of
the slope and intercept is determined by t-tests (both relative to zero).
Parameters
----------
df : pandas.DataFrame
A dataframe containing reference ('X/Ca_r'), test user
('X/Ca_t') and LAtools ('X123') data.
els : list
list of elements (names only) to plot.
Returns
-------
pandas.DataFrame
|
Supplement/comparison_tools/stats.py
|
def comparison_stats(df, els=['Mg', 'Sr', 'Ba', 'Al', 'Mn']):
"""
Compute comparison stats for test and LAtools data.
Population-level similarity assessed by a Kolmogorov-Smirnov test.
Individual similarity assessed by a pairwise Wilcoxon signed rank test.
Trends in residuals assessed by regression analysis, where significance of
the slope and intercept is determined by t-tests (both relative to zero).
Parameters
----------
df : pandas.DataFrame
A dataframe containing reference ('X/Ca_r'), test user
('X/Ca_t') and LAtools ('X123') data.
els : list
list of elements (names only) to plot.
Returns
-------
pandas.DataFrame
"""
# get corresponding analyte and ratio names
As = []
Rs = []
analytes = [c for c in df.columns if ('_r' not in c) and ('_t' not in c)]
ratios = [c for c in df.columns if ('_r' in c)]
for e in els:
if e == 'Sr':
As.append('Sr88')
elif e == 'Mg':
As.append('Mg24')
else:
As.append([a for a in analytes if e in a][0])
Rs.append([r for r in ratios if e in r][0][:-2])
yt_stats = []
yl_stats = []
for i, (e, a) in enumerate(zip(Rs, As)):
if a == 'Ba138':
m = 1e3
u = '$\mu$mol/mol'
else:
m = 1
u = 'mmol/mol'
x = df.loc[:, e + '_r'].values * m
yt = df.loc[:, e + '_t'].values * m
yl = df.loc[:, a].values * m
yt_stats.append(summary_stats(x, yt, e))
yl_stats.append(summary_stats(x, yl, e))
yt_stats = pd.concat(yt_stats).T
yl_stats = pd.concat(yl_stats).T
return pd.concat([yt_stats, yl_stats], keys=['Test User', 'LAtools']).T
|
def comparison_stats(df, els=['Mg', 'Sr', 'Ba', 'Al', 'Mn']):
"""
Compute comparison stats for test and LAtools data.
Population-level similarity assessed by a Kolmogorov-Smirnov test.
Individual similarity assessed by a pairwise Wilcoxon signed rank test.
Trends in residuals assessed by regression analysis, where significance of
the slope and intercept is determined by t-tests (both relative to zero).
Parameters
----------
df : pandas.DataFrame
A dataframe containing reference ('X/Ca_r'), test user
('X/Ca_t') and LAtools ('X123') data.
els : list
list of elements (names only) to plot.
Returns
-------
pandas.DataFrame
"""
# get corresponding analyte and ratio names
As = []
Rs = []
analytes = [c for c in df.columns if ('_r' not in c) and ('_t' not in c)]
ratios = [c for c in df.columns if ('_r' in c)]
for e in els:
if e == 'Sr':
As.append('Sr88')
elif e == 'Mg':
As.append('Mg24')
else:
As.append([a for a in analytes if e in a][0])
Rs.append([r for r in ratios if e in r][0][:-2])
yt_stats = []
yl_stats = []
for i, (e, a) in enumerate(zip(Rs, As)):
if a == 'Ba138':
m = 1e3
u = '$\mu$mol/mol'
else:
m = 1
u = 'mmol/mol'
x = df.loc[:, e + '_r'].values * m
yt = df.loc[:, e + '_t'].values * m
yl = df.loc[:, a].values * m
yt_stats.append(summary_stats(x, yt, e))
yl_stats.append(summary_stats(x, yl, e))
yt_stats = pd.concat(yt_stats).T
yl_stats = pd.concat(yl_stats).T
return pd.concat([yt_stats, yl_stats], keys=['Test User', 'LAtools']).T
|
[
"Compute",
"comparison",
"stats",
"for",
"test",
"and",
"LAtools",
"data",
".",
"Population",
"-",
"level",
"similarity",
"assessed",
"by",
"a",
"Kolmogorov",
"-",
"Smirnov",
"test",
".",
"Individual",
"similarity",
"assessed",
"by",
"a",
"pairwise",
"Wilcoxon",
"signed",
"rank",
"test",
".",
"Trends",
"in",
"residuals",
"assessed",
"by",
"regression",
"analysis",
"where",
"significance",
"of",
"the",
"slope",
"and",
"intercept",
"is",
"determined",
"by",
"t",
"-",
"tests",
"(",
"both",
"relative",
"to",
"zero",
")",
".",
"Parameters",
"----------",
"df",
":",
"pandas",
".",
"DataFrame",
"A",
"dataframe",
"containing",
"reference",
"(",
"X",
"/",
"Ca_r",
")",
"test",
"user",
"(",
"X",
"/",
"Ca_t",
")",
"and",
"LAtools",
"(",
"X123",
")",
"data",
".",
"els",
":",
"list",
"list",
"of",
"elements",
"(",
"names",
"only",
")",
"to",
"plot",
".",
"Returns",
"-------",
"pandas",
".",
"DataFrame"
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/Supplement/comparison_tools/stats.py#L83-L144
|
[
"def",
"comparison_stats",
"(",
"df",
",",
"els",
"=",
"[",
"'Mg'",
",",
"'Sr'",
",",
"'Ba'",
",",
"'Al'",
",",
"'Mn'",
"]",
")",
":",
"# get corresponding analyte and ratio names",
"As",
"=",
"[",
"]",
"Rs",
"=",
"[",
"]",
"analytes",
"=",
"[",
"c",
"for",
"c",
"in",
"df",
".",
"columns",
"if",
"(",
"'_r'",
"not",
"in",
"c",
")",
"and",
"(",
"'_t'",
"not",
"in",
"c",
")",
"]",
"ratios",
"=",
"[",
"c",
"for",
"c",
"in",
"df",
".",
"columns",
"if",
"(",
"'_r'",
"in",
"c",
")",
"]",
"for",
"e",
"in",
"els",
":",
"if",
"e",
"==",
"'Sr'",
":",
"As",
".",
"append",
"(",
"'Sr88'",
")",
"elif",
"e",
"==",
"'Mg'",
":",
"As",
".",
"append",
"(",
"'Mg24'",
")",
"else",
":",
"As",
".",
"append",
"(",
"[",
"a",
"for",
"a",
"in",
"analytes",
"if",
"e",
"in",
"a",
"]",
"[",
"0",
"]",
")",
"Rs",
".",
"append",
"(",
"[",
"r",
"for",
"r",
"in",
"ratios",
"if",
"e",
"in",
"r",
"]",
"[",
"0",
"]",
"[",
":",
"-",
"2",
"]",
")",
"yt_stats",
"=",
"[",
"]",
"yl_stats",
"=",
"[",
"]",
"for",
"i",
",",
"(",
"e",
",",
"a",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"Rs",
",",
"As",
")",
")",
":",
"if",
"a",
"==",
"'Ba138'",
":",
"m",
"=",
"1e3",
"u",
"=",
"'$\\mu$mol/mol'",
"else",
":",
"m",
"=",
"1",
"u",
"=",
"'mmol/mol'",
"x",
"=",
"df",
".",
"loc",
"[",
":",
",",
"e",
"+",
"'_r'",
"]",
".",
"values",
"*",
"m",
"yt",
"=",
"df",
".",
"loc",
"[",
":",
",",
"e",
"+",
"'_t'",
"]",
".",
"values",
"*",
"m",
"yl",
"=",
"df",
".",
"loc",
"[",
":",
",",
"a",
"]",
".",
"values",
"*",
"m",
"yt_stats",
".",
"append",
"(",
"summary_stats",
"(",
"x",
",",
"yt",
",",
"e",
")",
")",
"yl_stats",
".",
"append",
"(",
"summary_stats",
"(",
"x",
",",
"yl",
",",
"e",
")",
")",
"yt_stats",
"=",
"pd",
".",
"concat",
"(",
"yt_stats",
")",
".",
"T",
"yl_stats",
"=",
"pd",
".",
"concat",
"(",
"yl_stats",
")",
".",
"T",
"return",
"pd",
".",
"concat",
"(",
"[",
"yt_stats",
",",
"yl_stats",
"]",
",",
"keys",
"=",
"[",
"'Test User'",
",",
"'LAtools'",
"]",
")",
".",
"T"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
summary_stats
|
Compute summary statistics for paired x, y data.
Tests
-----
Parameters
----------
x, y : array-like
Data to compare
nm : str (optional)
Index value of created dataframe.
Returns
-------
pandas dataframe of statistics.
|
Supplement/comparison_tools/stats.py
|
def summary_stats(x, y, nm=None):
"""
Compute summary statistics for paired x, y data.
Tests
-----
Parameters
----------
x, y : array-like
Data to compare
nm : str (optional)
Index value of created dataframe.
Returns
-------
pandas dataframe of statistics.
"""
# create datafrane for results
if isinstance(nm, str):
nm = [nm]
# cols = pd.MultiIndex.from_arrays([['', 'Pairwise', 'Pairwise', cat, cat, cat, cat],
# ['N', 'W', 'p', 'Median', 'IQR', 'W', 'p']])
# cols = ['Median', 'IQR', 'CI95', 'L95', 'LQ', 'UQ', 'U95', 'N',
# 'Wilcoxon_stat', 'Wilcoxon_p',
# 'KS_stat', 'KS_p',
# 'LR_slope', 'LR_intercept', 'LR_slope_tvalue', 'LR_intercept_tvalue', 'LR_slope_p', 'LR_intercept_p', 'LR_R2adj']
# out = pd.DataFrame(index=nm, columns=cols)
cols = pd.MultiIndex.from_tuples([('Residual Summary', 'N'),
('Residual Summary', 'Median'),
('Residual Summary', 'LQ'),
('Residual Summary', 'IQR'),
('Residual Summary', 'UQ'),
('Residual Regression', 'Slope'),
('Residual Regression', 'Slope t'),
('Residual Regression', 'Slope p'),
('Residual Regression', 'Intercept'),
('Residual Regression', 'Intercept t'),
('Residual Regression', 'Intercept p'),
('Residual Regression', 'R2'),
('Kolmogorov-Smirnov', 'KS'),
('Kolmogorov-Smirnov', 'p')])
out = pd.DataFrame(index=nm, columns=cols)
# remove nan values
ind = ~(np.isnan(x) | np.isnan(y))
x = x[ind]
y = y[ind]
# calculate residuals
r = y - x
# summary statistics
cat = 'Residual Summary'
out.loc[:, (cat, 'N')] = len(x)
out.loc[:, (cat, 'Median')] = np.median(r)
out.loc[:, [(cat, 'LQ'), (cat, 'UQ')]] = np.percentile(r, [25, 75])
out.loc[:, (cat, 'IQR')] = out.loc[:, (cat, 'UQ')] - out.loc[:, (cat, 'LQ')]
# non-paired test for same distribution
cat = 'Kolmogorov-Smirnov'
ks = stats.ks_2samp(x, y)
out.loc[:, (cat, 'KS')] = ks.statistic
out.loc[:, (cat, 'p')] = ks.pvalue
# regression analysis of residuals - slope should be 0, intercept should be 0
cat = 'Residual Regression'
X = sm.add_constant(x)
reg = sm.OLS(r, X, missing='drop')
fit = reg.fit()
out.loc[:, [(cat, 'Intercept'), (cat, 'Slope')]] = fit.params
out.loc[:, [(cat, 'Intercept t'), (cat, 'Slope t')]] = fit.tvalues
out.loc[:, (cat, 'R2')] = fit.rsquared
out.loc[:, [(cat, 'Intercept p'), (cat, 'Slope p')]] = fit.pvalues
return out
|
def summary_stats(x, y, nm=None):
"""
Compute summary statistics for paired x, y data.
Tests
-----
Parameters
----------
x, y : array-like
Data to compare
nm : str (optional)
Index value of created dataframe.
Returns
-------
pandas dataframe of statistics.
"""
# create datafrane for results
if isinstance(nm, str):
nm = [nm]
# cols = pd.MultiIndex.from_arrays([['', 'Pairwise', 'Pairwise', cat, cat, cat, cat],
# ['N', 'W', 'p', 'Median', 'IQR', 'W', 'p']])
# cols = ['Median', 'IQR', 'CI95', 'L95', 'LQ', 'UQ', 'U95', 'N',
# 'Wilcoxon_stat', 'Wilcoxon_p',
# 'KS_stat', 'KS_p',
# 'LR_slope', 'LR_intercept', 'LR_slope_tvalue', 'LR_intercept_tvalue', 'LR_slope_p', 'LR_intercept_p', 'LR_R2adj']
# out = pd.DataFrame(index=nm, columns=cols)
cols = pd.MultiIndex.from_tuples([('Residual Summary', 'N'),
('Residual Summary', 'Median'),
('Residual Summary', 'LQ'),
('Residual Summary', 'IQR'),
('Residual Summary', 'UQ'),
('Residual Regression', 'Slope'),
('Residual Regression', 'Slope t'),
('Residual Regression', 'Slope p'),
('Residual Regression', 'Intercept'),
('Residual Regression', 'Intercept t'),
('Residual Regression', 'Intercept p'),
('Residual Regression', 'R2'),
('Kolmogorov-Smirnov', 'KS'),
('Kolmogorov-Smirnov', 'p')])
out = pd.DataFrame(index=nm, columns=cols)
# remove nan values
ind = ~(np.isnan(x) | np.isnan(y))
x = x[ind]
y = y[ind]
# calculate residuals
r = y - x
# summary statistics
cat = 'Residual Summary'
out.loc[:, (cat, 'N')] = len(x)
out.loc[:, (cat, 'Median')] = np.median(r)
out.loc[:, [(cat, 'LQ'), (cat, 'UQ')]] = np.percentile(r, [25, 75])
out.loc[:, (cat, 'IQR')] = out.loc[:, (cat, 'UQ')] - out.loc[:, (cat, 'LQ')]
# non-paired test for same distribution
cat = 'Kolmogorov-Smirnov'
ks = stats.ks_2samp(x, y)
out.loc[:, (cat, 'KS')] = ks.statistic
out.loc[:, (cat, 'p')] = ks.pvalue
# regression analysis of residuals - slope should be 0, intercept should be 0
cat = 'Residual Regression'
X = sm.add_constant(x)
reg = sm.OLS(r, X, missing='drop')
fit = reg.fit()
out.loc[:, [(cat, 'Intercept'), (cat, 'Slope')]] = fit.params
out.loc[:, [(cat, 'Intercept t'), (cat, 'Slope t')]] = fit.tvalues
out.loc[:, (cat, 'R2')] = fit.rsquared
out.loc[:, [(cat, 'Intercept p'), (cat, 'Slope p')]] = fit.pvalues
return out
|
[
"Compute",
"summary",
"statistics",
"for",
"paired",
"x",
"y",
"data",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/Supplement/comparison_tools/stats.py#L146-L225
|
[
"def",
"summary_stats",
"(",
"x",
",",
"y",
",",
"nm",
"=",
"None",
")",
":",
"# create datafrane for results",
"if",
"isinstance",
"(",
"nm",
",",
"str",
")",
":",
"nm",
"=",
"[",
"nm",
"]",
"# cols = pd.MultiIndex.from_arrays([['', 'Pairwise', 'Pairwise', cat, cat, cat, cat],",
"# ['N', 'W', 'p', 'Median', 'IQR', 'W', 'p']])",
"# cols = ['Median', 'IQR', 'CI95', 'L95', 'LQ', 'UQ', 'U95', 'N',",
"# 'Wilcoxon_stat', 'Wilcoxon_p',",
"# 'KS_stat', 'KS_p',",
"# 'LR_slope', 'LR_intercept', 'LR_slope_tvalue', 'LR_intercept_tvalue', 'LR_slope_p', 'LR_intercept_p', 'LR_R2adj']",
"# out = pd.DataFrame(index=nm, columns=cols)",
"cols",
"=",
"pd",
".",
"MultiIndex",
".",
"from_tuples",
"(",
"[",
"(",
"'Residual Summary'",
",",
"'N'",
")",
",",
"(",
"'Residual Summary'",
",",
"'Median'",
")",
",",
"(",
"'Residual Summary'",
",",
"'LQ'",
")",
",",
"(",
"'Residual Summary'",
",",
"'IQR'",
")",
",",
"(",
"'Residual Summary'",
",",
"'UQ'",
")",
",",
"(",
"'Residual Regression'",
",",
"'Slope'",
")",
",",
"(",
"'Residual Regression'",
",",
"'Slope t'",
")",
",",
"(",
"'Residual Regression'",
",",
"'Slope p'",
")",
",",
"(",
"'Residual Regression'",
",",
"'Intercept'",
")",
",",
"(",
"'Residual Regression'",
",",
"'Intercept t'",
")",
",",
"(",
"'Residual Regression'",
",",
"'Intercept p'",
")",
",",
"(",
"'Residual Regression'",
",",
"'R2'",
")",
",",
"(",
"'Kolmogorov-Smirnov'",
",",
"'KS'",
")",
",",
"(",
"'Kolmogorov-Smirnov'",
",",
"'p'",
")",
"]",
")",
"out",
"=",
"pd",
".",
"DataFrame",
"(",
"index",
"=",
"nm",
",",
"columns",
"=",
"cols",
")",
"# remove nan values",
"ind",
"=",
"~",
"(",
"np",
".",
"isnan",
"(",
"x",
")",
"|",
"np",
".",
"isnan",
"(",
"y",
")",
")",
"x",
"=",
"x",
"[",
"ind",
"]",
"y",
"=",
"y",
"[",
"ind",
"]",
"# calculate residuals",
"r",
"=",
"y",
"-",
"x",
"# summary statistics",
"cat",
"=",
"'Residual Summary'",
"out",
".",
"loc",
"[",
":",
",",
"(",
"cat",
",",
"'N'",
")",
"]",
"=",
"len",
"(",
"x",
")",
"out",
".",
"loc",
"[",
":",
",",
"(",
"cat",
",",
"'Median'",
")",
"]",
"=",
"np",
".",
"median",
"(",
"r",
")",
"out",
".",
"loc",
"[",
":",
",",
"[",
"(",
"cat",
",",
"'LQ'",
")",
",",
"(",
"cat",
",",
"'UQ'",
")",
"]",
"]",
"=",
"np",
".",
"percentile",
"(",
"r",
",",
"[",
"25",
",",
"75",
"]",
")",
"out",
".",
"loc",
"[",
":",
",",
"(",
"cat",
",",
"'IQR'",
")",
"]",
"=",
"out",
".",
"loc",
"[",
":",
",",
"(",
"cat",
",",
"'UQ'",
")",
"]",
"-",
"out",
".",
"loc",
"[",
":",
",",
"(",
"cat",
",",
"'LQ'",
")",
"]",
"# non-paired test for same distribution",
"cat",
"=",
"'Kolmogorov-Smirnov'",
"ks",
"=",
"stats",
".",
"ks_2samp",
"(",
"x",
",",
"y",
")",
"out",
".",
"loc",
"[",
":",
",",
"(",
"cat",
",",
"'KS'",
")",
"]",
"=",
"ks",
".",
"statistic",
"out",
".",
"loc",
"[",
":",
",",
"(",
"cat",
",",
"'p'",
")",
"]",
"=",
"ks",
".",
"pvalue",
"# regression analysis of residuals - slope should be 0, intercept should be 0",
"cat",
"=",
"'Residual Regression'",
"X",
"=",
"sm",
".",
"add_constant",
"(",
"x",
")",
"reg",
"=",
"sm",
".",
"OLS",
"(",
"r",
",",
"X",
",",
"missing",
"=",
"'drop'",
")",
"fit",
"=",
"reg",
".",
"fit",
"(",
")",
"out",
".",
"loc",
"[",
":",
",",
"[",
"(",
"cat",
",",
"'Intercept'",
")",
",",
"(",
"cat",
",",
"'Slope'",
")",
"]",
"]",
"=",
"fit",
".",
"params",
"out",
".",
"loc",
"[",
":",
",",
"[",
"(",
"cat",
",",
"'Intercept t'",
")",
",",
"(",
"cat",
",",
"'Slope t'",
")",
"]",
"]",
"=",
"fit",
".",
"tvalues",
"out",
".",
"loc",
"[",
":",
",",
"(",
"cat",
",",
"'R2'",
")",
"]",
"=",
"fit",
".",
"rsquared",
"out",
".",
"loc",
"[",
":",
",",
"[",
"(",
"cat",
",",
"'Intercept p'",
")",
",",
"(",
"cat",
",",
"'Slope p'",
")",
"]",
"]",
"=",
"fit",
".",
"pvalues",
"return",
"out"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
load_reference_data
|
Fetch LAtools reference data from online repository.
Parameters
----------
name : str<
Which data to download. Can be one of 'culture_reference',
'culture_test', 'downcore_reference', 'downcore_test', 'iolite_reference'
or 'zircon_reference'.
If None, all are downloaded and returned as a dict.
Returns
-------
pandas.DataFrame or dict.
|
Supplement/comparison_tools/helpers.py
|
def load_reference_data(name=None):
"""
Fetch LAtools reference data from online repository.
Parameters
----------
name : str<
Which data to download. Can be one of 'culture_reference',
'culture_test', 'downcore_reference', 'downcore_test', 'iolite_reference'
or 'zircon_reference'.
If None, all are downloaded and returned as a dict.
Returns
-------
pandas.DataFrame or dict.
"""
base_url = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vQJfCeuqrtFFMAeSpA9rguzLAo9OVuw50AHhAULuqjMJzbd3h46PK1KjF69YiJAeNAAjjMDkJK7wMpG/pub?gid={:}&single=true&output=csv'
gids = {'culture_reference': '0',
'culture_test': '1170065442',
'downcore_reference': '190752797',
'downcore_test': '721359794',
'iolite_reference': '483581945',
'zircon_reference': '1355554964'}
if name is None:
out = {}
for nm, gid in gids.items():
url = base_url.format(gid)
tmp = pd.read_csv(url, header=[0], index_col=[0, 1])
tmp.index.names = ['sample', 'rep']
tmp.columns.names = ['analyte']
tmp.sort_index(1, inplace=True)
out[nm] = tmp
else:
gid = gids[name]
url = base_url.format(gid)
out = pd.read_csv(url, index_col=[0, 1])
out.columns.names = ['analyte']
out.sort_index(1, inplace=True)
return out
|
def load_reference_data(name=None):
"""
Fetch LAtools reference data from online repository.
Parameters
----------
name : str<
Which data to download. Can be one of 'culture_reference',
'culture_test', 'downcore_reference', 'downcore_test', 'iolite_reference'
or 'zircon_reference'.
If None, all are downloaded and returned as a dict.
Returns
-------
pandas.DataFrame or dict.
"""
base_url = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vQJfCeuqrtFFMAeSpA9rguzLAo9OVuw50AHhAULuqjMJzbd3h46PK1KjF69YiJAeNAAjjMDkJK7wMpG/pub?gid={:}&single=true&output=csv'
gids = {'culture_reference': '0',
'culture_test': '1170065442',
'downcore_reference': '190752797',
'downcore_test': '721359794',
'iolite_reference': '483581945',
'zircon_reference': '1355554964'}
if name is None:
out = {}
for nm, gid in gids.items():
url = base_url.format(gid)
tmp = pd.read_csv(url, header=[0], index_col=[0, 1])
tmp.index.names = ['sample', 'rep']
tmp.columns.names = ['analyte']
tmp.sort_index(1, inplace=True)
out[nm] = tmp
else:
gid = gids[name]
url = base_url.format(gid)
out = pd.read_csv(url, index_col=[0, 1])
out.columns.names = ['analyte']
out.sort_index(1, inplace=True)
return out
|
[
"Fetch",
"LAtools",
"reference",
"data",
"from",
"online",
"repository",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/Supplement/comparison_tools/helpers.py#L18-L57
|
[
"def",
"load_reference_data",
"(",
"name",
"=",
"None",
")",
":",
"base_url",
"=",
"'https://docs.google.com/spreadsheets/d/e/2PACX-1vQJfCeuqrtFFMAeSpA9rguzLAo9OVuw50AHhAULuqjMJzbd3h46PK1KjF69YiJAeNAAjjMDkJK7wMpG/pub?gid={:}&single=true&output=csv'",
"gids",
"=",
"{",
"'culture_reference'",
":",
"'0'",
",",
"'culture_test'",
":",
"'1170065442'",
",",
"'downcore_reference'",
":",
"'190752797'",
",",
"'downcore_test'",
":",
"'721359794'",
",",
"'iolite_reference'",
":",
"'483581945'",
",",
"'zircon_reference'",
":",
"'1355554964'",
"}",
"if",
"name",
"is",
"None",
":",
"out",
"=",
"{",
"}",
"for",
"nm",
",",
"gid",
"in",
"gids",
".",
"items",
"(",
")",
":",
"url",
"=",
"base_url",
".",
"format",
"(",
"gid",
")",
"tmp",
"=",
"pd",
".",
"read_csv",
"(",
"url",
",",
"header",
"=",
"[",
"0",
"]",
",",
"index_col",
"=",
"[",
"0",
",",
"1",
"]",
")",
"tmp",
".",
"index",
".",
"names",
"=",
"[",
"'sample'",
",",
"'rep'",
"]",
"tmp",
".",
"columns",
".",
"names",
"=",
"[",
"'analyte'",
"]",
"tmp",
".",
"sort_index",
"(",
"1",
",",
"inplace",
"=",
"True",
")",
"out",
"[",
"nm",
"]",
"=",
"tmp",
"else",
":",
"gid",
"=",
"gids",
"[",
"name",
"]",
"url",
"=",
"base_url",
".",
"format",
"(",
"gid",
")",
"out",
"=",
"pd",
".",
"read_csv",
"(",
"url",
",",
"index_col",
"=",
"[",
"0",
",",
"1",
"]",
")",
"out",
".",
"columns",
".",
"names",
"=",
"[",
"'analyte'",
"]",
"out",
".",
"sort_index",
"(",
"1",
",",
"inplace",
"=",
"True",
")",
"return",
"out"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
AllInstances.lookup
|
Find an instance of the type class `TC` for type `G`.
Iterates `G`'s parent classes, looking up instances for each,
checking whether the instance is a subclass of the target type
class `TC`.
|
amino/tc/base.py
|
def lookup(self, TC: type, G: type) -> Optional[TypeClass]:
''' Find an instance of the type class `TC` for type `G`.
Iterates `G`'s parent classes, looking up instances for each,
checking whether the instance is a subclass of the target type
class `TC`.
'''
if isinstance(G, str):
raise ImplicitNotFound(TC, G, f'{G} is a string annotation')
if not isinstance(G, (type, TypeVar, _GenericAlias)):
raise ImplicitNotFound(TC, G, f'{G} is neither type, _GenericAlias nor TypeVar: {type(G)}')
match = lambda a: self._lookup_type(TC, a)
def attach_type(tc: TypeClass) -> TypeClass:
setattr(tc, 'tpe', G)
return tc
scrutinee = (
(
unbounded_typevar(TC, G)
if G.__bound__ is None else
G.__bound__
)
if isinstance(G, TypeVar) else
G
)
safe_mro = lambda t: getattr(t, '__mro__', (t,))
target = scrutinee.__origin__ if isinstance(scrutinee, _GenericAlias) else scrutinee
mro = safe_mro(target)
return next((attach_type(a) for a in map(match, mro) if a is not None), None)
|
def lookup(self, TC: type, G: type) -> Optional[TypeClass]:
''' Find an instance of the type class `TC` for type `G`.
Iterates `G`'s parent classes, looking up instances for each,
checking whether the instance is a subclass of the target type
class `TC`.
'''
if isinstance(G, str):
raise ImplicitNotFound(TC, G, f'{G} is a string annotation')
if not isinstance(G, (type, TypeVar, _GenericAlias)):
raise ImplicitNotFound(TC, G, f'{G} is neither type, _GenericAlias nor TypeVar: {type(G)}')
match = lambda a: self._lookup_type(TC, a)
def attach_type(tc: TypeClass) -> TypeClass:
setattr(tc, 'tpe', G)
return tc
scrutinee = (
(
unbounded_typevar(TC, G)
if G.__bound__ is None else
G.__bound__
)
if isinstance(G, TypeVar) else
G
)
safe_mro = lambda t: getattr(t, '__mro__', (t,))
target = scrutinee.__origin__ if isinstance(scrutinee, _GenericAlias) else scrutinee
mro = safe_mro(target)
return next((attach_type(a) for a in map(match, mro) if a is not None), None)
|
[
"Find",
"an",
"instance",
"of",
"the",
"type",
"class",
"TC",
"for",
"type",
"G",
".",
"Iterates",
"G",
"s",
"parent",
"classes",
"looking",
"up",
"instances",
"for",
"each",
"checking",
"whether",
"the",
"instance",
"is",
"a",
"subclass",
"of",
"the",
"target",
"type",
"class",
"TC",
"."
] |
tek/amino
|
python
|
https://github.com/tek/amino/blob/51b314933e047a45587a24ecff02c836706d27ff/amino/tc/base.py#L375-L401
|
[
"def",
"lookup",
"(",
"self",
",",
"TC",
":",
"type",
",",
"G",
":",
"type",
")",
"->",
"Optional",
"[",
"TypeClass",
"]",
":",
"if",
"isinstance",
"(",
"G",
",",
"str",
")",
":",
"raise",
"ImplicitNotFound",
"(",
"TC",
",",
"G",
",",
"f'{G} is a string annotation'",
")",
"if",
"not",
"isinstance",
"(",
"G",
",",
"(",
"type",
",",
"TypeVar",
",",
"_GenericAlias",
")",
")",
":",
"raise",
"ImplicitNotFound",
"(",
"TC",
",",
"G",
",",
"f'{G} is neither type, _GenericAlias nor TypeVar: {type(G)}'",
")",
"match",
"=",
"lambda",
"a",
":",
"self",
".",
"_lookup_type",
"(",
"TC",
",",
"a",
")",
"def",
"attach_type",
"(",
"tc",
":",
"TypeClass",
")",
"->",
"TypeClass",
":",
"setattr",
"(",
"tc",
",",
"'tpe'",
",",
"G",
")",
"return",
"tc",
"scrutinee",
"=",
"(",
"(",
"unbounded_typevar",
"(",
"TC",
",",
"G",
")",
"if",
"G",
".",
"__bound__",
"is",
"None",
"else",
"G",
".",
"__bound__",
")",
"if",
"isinstance",
"(",
"G",
",",
"TypeVar",
")",
"else",
"G",
")",
"safe_mro",
"=",
"lambda",
"t",
":",
"getattr",
"(",
"t",
",",
"'__mro__'",
",",
"(",
"t",
",",
")",
")",
"target",
"=",
"scrutinee",
".",
"__origin__",
"if",
"isinstance",
"(",
"scrutinee",
",",
"_GenericAlias",
")",
"else",
"scrutinee",
"mro",
"=",
"safe_mro",
"(",
"target",
")",
"return",
"next",
"(",
"(",
"attach_type",
"(",
"a",
")",
"for",
"a",
"in",
"map",
"(",
"match",
",",
"mro",
")",
"if",
"a",
"is",
"not",
"None",
")",
",",
"None",
")"
] |
51b314933e047a45587a24ecff02c836706d27ff
|
test
|
rangecalc
|
Calculate padded range limits for axes.
|
Supplement/comparison_tools/plots.py
|
def rangecalc(x, y=None, pad=0.05):
"""
Calculate padded range limits for axes.
"""
mn = np.nanmin([np.nanmin(x), np.nanmin(y)])
mx = np.nanmax([np.nanmax(x), np.nanmax(y)])
rn = mx - mn
return (mn - pad * rn, mx + pad * rn)
|
def rangecalc(x, y=None, pad=0.05):
"""
Calculate padded range limits for axes.
"""
mn = np.nanmin([np.nanmin(x), np.nanmin(y)])
mx = np.nanmax([np.nanmax(x), np.nanmax(y)])
rn = mx - mn
return (mn - pad * rn, mx + pad * rn)
|
[
"Calculate",
"padded",
"range",
"limits",
"for",
"axes",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/Supplement/comparison_tools/plots.py#L19-L27
|
[
"def",
"rangecalc",
"(",
"x",
",",
"y",
"=",
"None",
",",
"pad",
"=",
"0.05",
")",
":",
"mn",
"=",
"np",
".",
"nanmin",
"(",
"[",
"np",
".",
"nanmin",
"(",
"x",
")",
",",
"np",
".",
"nanmin",
"(",
"y",
")",
"]",
")",
"mx",
"=",
"np",
".",
"nanmax",
"(",
"[",
"np",
".",
"nanmax",
"(",
"x",
")",
",",
"np",
".",
"nanmax",
"(",
"y",
")",
"]",
")",
"rn",
"=",
"mx",
"-",
"mn",
"return",
"(",
"mn",
"-",
"pad",
"*",
"rn",
",",
"mx",
"+",
"pad",
"*",
"rn",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
rangecalcx
|
Calculate padded range limits for axes.
|
Supplement/comparison_tools/plots.py
|
def rangecalcx(x, pad=0.05):
"""
Calculate padded range limits for axes.
"""
mn = np.nanmin(x)
mx = np.nanmax(x)
rn = mx - mn
return (mn - pad * rn, mx + pad * rn)
|
def rangecalcx(x, pad=0.05):
"""
Calculate padded range limits for axes.
"""
mn = np.nanmin(x)
mx = np.nanmax(x)
rn = mx - mn
return (mn - pad * rn, mx + pad * rn)
|
[
"Calculate",
"padded",
"range",
"limits",
"for",
"axes",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/Supplement/comparison_tools/plots.py#L29-L37
|
[
"def",
"rangecalcx",
"(",
"x",
",",
"pad",
"=",
"0.05",
")",
":",
"mn",
"=",
"np",
".",
"nanmin",
"(",
"x",
")",
"mx",
"=",
"np",
".",
"nanmax",
"(",
"x",
")",
"rn",
"=",
"mx",
"-",
"mn",
"return",
"(",
"mn",
"-",
"pad",
"*",
"rn",
",",
"mx",
"+",
"pad",
"*",
"rn",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
bland_altman
|
Draw a Bland-Altman plot of x and y data.
https://en.wikipedia.org/wiki/Bland%E2%80%93Altman_plot
Parameters
----------
x, y : array-like
x and y data to compare.
interval : float
Percentile band to draw on the residuals.
indep_conf : float
Independently determined confidence interval
to draw on the plot
ax : matplotlib.axesobject
The axis on which to draw the plot
**kwargs
Passed to ax.scatter
|
Supplement/comparison_tools/plots.py
|
def bland_altman(x, y, interval=None, indep_conf=None, ax=None, c=None, **kwargs):
"""
Draw a Bland-Altman plot of x and y data.
https://en.wikipedia.org/wiki/Bland%E2%80%93Altman_plot
Parameters
----------
x, y : array-like
x and y data to compare.
interval : float
Percentile band to draw on the residuals.
indep_conf : float
Independently determined confidence interval
to draw on the plot
ax : matplotlib.axesobject
The axis on which to draw the plot
**kwargs
Passed to ax.scatter
"""
ret = False
if ax is None:
fig, ax = plt.subplots(1, 1)
ret = True
# NaN screening
ind = ~(np.isnan(x) | np.isnan(y))
x = x[ind]
y = y[ind]
xy_mean = (x + y) / 2
xy_resid = (y - x)
ax.scatter(xy_mean, xy_resid, lw=0.5, edgecolor='k', alpha=0.6, c=c, s=15, **kwargs)
# markup
ax.axhline(0, ls='dashed', c='k', alpha=0.6, zorder=-1)
ax.axhline(np.median(xy_resid), ls='dashed', c=c, alpha=0.8)
if interval is not None:
perc = 100 - interval * 100
ints = [perc / 2, 100 - perc / 2]
lims = np.percentile(xy_resid, ints)
ax.axhspan(*lims, color=c, alpha=0.1, zorder=-3)
if indep_conf is not None:
ax.axhspan(-indep_conf, indep_conf, color=(0,0,0,0.1), zorder=-2)
# labels
ax.set_ylabel('y - x')
ax.set_xlabel('mean (x, y)')
if ret:
return fig, ax
|
def bland_altman(x, y, interval=None, indep_conf=None, ax=None, c=None, **kwargs):
"""
Draw a Bland-Altman plot of x and y data.
https://en.wikipedia.org/wiki/Bland%E2%80%93Altman_plot
Parameters
----------
x, y : array-like
x and y data to compare.
interval : float
Percentile band to draw on the residuals.
indep_conf : float
Independently determined confidence interval
to draw on the plot
ax : matplotlib.axesobject
The axis on which to draw the plot
**kwargs
Passed to ax.scatter
"""
ret = False
if ax is None:
fig, ax = plt.subplots(1, 1)
ret = True
# NaN screening
ind = ~(np.isnan(x) | np.isnan(y))
x = x[ind]
y = y[ind]
xy_mean = (x + y) / 2
xy_resid = (y - x)
ax.scatter(xy_mean, xy_resid, lw=0.5, edgecolor='k', alpha=0.6, c=c, s=15, **kwargs)
# markup
ax.axhline(0, ls='dashed', c='k', alpha=0.6, zorder=-1)
ax.axhline(np.median(xy_resid), ls='dashed', c=c, alpha=0.8)
if interval is not None:
perc = 100 - interval * 100
ints = [perc / 2, 100 - perc / 2]
lims = np.percentile(xy_resid, ints)
ax.axhspan(*lims, color=c, alpha=0.1, zorder=-3)
if indep_conf is not None:
ax.axhspan(-indep_conf, indep_conf, color=(0,0,0,0.1), zorder=-2)
# labels
ax.set_ylabel('y - x')
ax.set_xlabel('mean (x, y)')
if ret:
return fig, ax
|
[
"Draw",
"a",
"Bland",
"-",
"Altman",
"plot",
"of",
"x",
"and",
"y",
"data",
".",
"https",
":",
"//",
"en",
".",
"wikipedia",
".",
"org",
"/",
"wiki",
"/",
"Bland%E2%80%93Altman_plot",
"Parameters",
"----------",
"x",
"y",
":",
"array",
"-",
"like",
"x",
"and",
"y",
"data",
"to",
"compare",
".",
"interval",
":",
"float",
"Percentile",
"band",
"to",
"draw",
"on",
"the",
"residuals",
".",
"indep_conf",
":",
"float",
"Independently",
"determined",
"confidence",
"interval",
"to",
"draw",
"on",
"the",
"plot",
"ax",
":",
"matplotlib",
".",
"axesobject",
"The",
"axis",
"on",
"which",
"to",
"draw",
"the",
"plot",
"**",
"kwargs",
"Passed",
"to",
"ax",
".",
"scatter"
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/Supplement/comparison_tools/plots.py#L250-L305
|
[
"def",
"bland_altman",
"(",
"x",
",",
"y",
",",
"interval",
"=",
"None",
",",
"indep_conf",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"c",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"False",
"if",
"ax",
"is",
"None",
":",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"1",
",",
"1",
")",
"ret",
"=",
"True",
"# NaN screening",
"ind",
"=",
"~",
"(",
"np",
".",
"isnan",
"(",
"x",
")",
"|",
"np",
".",
"isnan",
"(",
"y",
")",
")",
"x",
"=",
"x",
"[",
"ind",
"]",
"y",
"=",
"y",
"[",
"ind",
"]",
"xy_mean",
"=",
"(",
"x",
"+",
"y",
")",
"/",
"2",
"xy_resid",
"=",
"(",
"y",
"-",
"x",
")",
"ax",
".",
"scatter",
"(",
"xy_mean",
",",
"xy_resid",
",",
"lw",
"=",
"0.5",
",",
"edgecolor",
"=",
"'k'",
",",
"alpha",
"=",
"0.6",
",",
"c",
"=",
"c",
",",
"s",
"=",
"15",
",",
"*",
"*",
"kwargs",
")",
"# markup",
"ax",
".",
"axhline",
"(",
"0",
",",
"ls",
"=",
"'dashed'",
",",
"c",
"=",
"'k'",
",",
"alpha",
"=",
"0.6",
",",
"zorder",
"=",
"-",
"1",
")",
"ax",
".",
"axhline",
"(",
"np",
".",
"median",
"(",
"xy_resid",
")",
",",
"ls",
"=",
"'dashed'",
",",
"c",
"=",
"c",
",",
"alpha",
"=",
"0.8",
")",
"if",
"interval",
"is",
"not",
"None",
":",
"perc",
"=",
"100",
"-",
"interval",
"*",
"100",
"ints",
"=",
"[",
"perc",
"/",
"2",
",",
"100",
"-",
"perc",
"/",
"2",
"]",
"lims",
"=",
"np",
".",
"percentile",
"(",
"xy_resid",
",",
"ints",
")",
"ax",
".",
"axhspan",
"(",
"*",
"lims",
",",
"color",
"=",
"c",
",",
"alpha",
"=",
"0.1",
",",
"zorder",
"=",
"-",
"3",
")",
"if",
"indep_conf",
"is",
"not",
"None",
":",
"ax",
".",
"axhspan",
"(",
"-",
"indep_conf",
",",
"indep_conf",
",",
"color",
"=",
"(",
"0",
",",
"0",
",",
"0",
",",
"0.1",
")",
",",
"zorder",
"=",
"-",
"2",
")",
"# labels",
"ax",
".",
"set_ylabel",
"(",
"'y - x'",
")",
"ax",
".",
"set_xlabel",
"(",
"'mean (x, y)'",
")",
"if",
"ret",
":",
"return",
"fig",
",",
"ax"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
autorange
|
Automatically separates signal and background in an on/off data stream.
**Step 1: Thresholding.**
The background signal is determined using a gaussian kernel density
estimator (kde) of all the data. Under normal circumstances, this
kde should find two distinct data distributions, corresponding to
'signal' and 'background'. The minima between these two distributions
is taken as a rough threshold to identify signal and background
regions. Any point where the trace crosses this thrshold is identified
as a 'transition'.
**Step 2: Transition Removal.**
The width of the transition regions between signal and background are
then determined, and the transitions are excluded from analysis. The
width of the transitions is determined by fitting a gaussian to the
smoothed first derivative of the analyte trace, and determining its
width at a point where the gaussian intensity is at at `conf` time the
gaussian maximum. These gaussians are fit to subsets of the data
centered around the transitions regions determined in Step 1, +/- `win`
data points. The peak is further isolated by finding the minima and
maxima of a second derivative within this window, and the gaussian is
fit to the isolated peak.
Parameters
----------
t : array-like
Independent variable (usually time).
sig : array-like
Dependent signal, with distinctive 'on' and 'off' regions.
gwin : int
The window used for calculating first derivative.
Defaults to 7.
swin : int
The window used for signal smoothing. If None, ``gwin // 2``.
win : int
The width (c +/- win) of the transition data subsets.
Defaults to 20.
on_mult and off_mult : tuple, len=2
Control the width of the excluded transition regions, which is defined
relative to the peak full-width-half-maximum (FWHM) of the transition
gradient. The region n * FHWM below the transition, and m * FWHM above
the tranision will be excluded, where (n, m) are specified in `on_mult`
and `off_mult`.
`on_mult` and `off_mult` apply to the off-on and on-off transitions,
respectively.
Defaults to (1.5, 1) and (1, 1.5).
transform : str
How to transform the data. Default is 'log'.
Returns
-------
fbkg, fsig, ftrn, failed : tuple
where fbkg, fsig and ftrn are boolean arrays the same length as sig,
that are True where sig is background, signal and transition, respecively.
failed contains a list of transition positions where gaussian fitting
has failed.
|
latools/processes/signal_id.py
|
def autorange(t, sig, gwin=7, swin=None, win=30,
on_mult=(1.5, 1.), off_mult=(1., 1.5),
nbin=10, transform='log', thresh=None):
"""
Automatically separates signal and background in an on/off data stream.
**Step 1: Thresholding.**
The background signal is determined using a gaussian kernel density
estimator (kde) of all the data. Under normal circumstances, this
kde should find two distinct data distributions, corresponding to
'signal' and 'background'. The minima between these two distributions
is taken as a rough threshold to identify signal and background
regions. Any point where the trace crosses this thrshold is identified
as a 'transition'.
**Step 2: Transition Removal.**
The width of the transition regions between signal and background are
then determined, and the transitions are excluded from analysis. The
width of the transitions is determined by fitting a gaussian to the
smoothed first derivative of the analyte trace, and determining its
width at a point where the gaussian intensity is at at `conf` time the
gaussian maximum. These gaussians are fit to subsets of the data
centered around the transitions regions determined in Step 1, +/- `win`
data points. The peak is further isolated by finding the minima and
maxima of a second derivative within this window, and the gaussian is
fit to the isolated peak.
Parameters
----------
t : array-like
Independent variable (usually time).
sig : array-like
Dependent signal, with distinctive 'on' and 'off' regions.
gwin : int
The window used for calculating first derivative.
Defaults to 7.
swin : int
The window used for signal smoothing. If None, ``gwin // 2``.
win : int
The width (c +/- win) of the transition data subsets.
Defaults to 20.
on_mult and off_mult : tuple, len=2
Control the width of the excluded transition regions, which is defined
relative to the peak full-width-half-maximum (FWHM) of the transition
gradient. The region n * FHWM below the transition, and m * FWHM above
the tranision will be excluded, where (n, m) are specified in `on_mult`
and `off_mult`.
`on_mult` and `off_mult` apply to the off-on and on-off transitions,
respectively.
Defaults to (1.5, 1) and (1, 1.5).
transform : str
How to transform the data. Default is 'log'.
Returns
-------
fbkg, fsig, ftrn, failed : tuple
where fbkg, fsig and ftrn are boolean arrays the same length as sig,
that are True where sig is background, signal and transition, respecively.
failed contains a list of transition positions where gaussian fitting
has failed.
"""
failed = []
# smooth signal
if swin is not None:
sigs = fastsmooth(sig, swin)
else:
sigs = sig
# transform signal
if transform == 'log':
tsigs = np.log10(sigs)
else:
tsigs = sigs
if thresh is None:
bins = 50
kde_x = np.linspace(tsigs.min(), tsigs.max(), bins)
kde = gaussian_kde(tsigs)
yd = kde.pdf(kde_x)
mins = findmins(kde_x, yd) # find minima in kde
if len(mins) > 0:
bkg = tsigs < (mins[0]) # set background as lowest distribution
else:
bkg = np.ones(tsigs.size, dtype=bool)
# bkg[0] = True # the first value must always be background
else:
bkg = tsigs < thresh
# assign rough background and signal regions based on kde minima
fbkg = bkg
fsig = ~bkg
# remove transitions by fitting a gaussian to the gradients of
# each transition
# 1. determine the approximate index of each transition
zeros = bool_2_indices(fsig)
# 2. calculate the absolute gradient of the target trace.
g = abs(fastgrad(sigs, gwin)) # gradient of untransformed data.
if zeros is not None:
zeros = zeros.flatten()
for z in zeros: # for each approximate transition
# isolate the data around the transition
if z - win < 0:
lo = gwin // 2
hi = int(z + win)
elif z + win > (len(sig) - gwin // 2):
lo = int(z - win)
hi = len(sig) - gwin // 2
else:
lo = int(z - win)
hi = int(z + win)
xs = t[lo:hi]
ys = g[lo:hi]
# determine type of transition (on/off)
mid = (hi + lo) // 2
tp = sigs[mid + 3] > sigs[mid - 3] # True if 'on' transition.
# fit a gaussian to the first derivative of each
# transition. Initial guess parameters:
# - A: maximum gradient in data
# - mu: c
# - width: 2 * time step
# The 'sigma' parameter of curve_fit:
# This weights the fit by distance from c - i.e. data closer
# to c are more important in the fit than data further away
# from c. This allows the function to fit the correct curve,
# even if the data window has captured two independent
# transitions (i.e. end of one ablation and start of next)
# ablation are < win time steps apart).
c = t[z] # center of transition
width = (t[1] - t[0]) * 2
try:
pg, _ = curve_fit(gauss, xs, ys,
p0=(np.nanmax(ys),
c,
width),
sigma=(xs - c)**2 + .01)
# get the x positions when the fitted gaussian is at 'conf' of
# maximum
# determine transition FWHM
fwhm = abs(2 * pg[-1] * np.sqrt(2 * np.log(2)))
# apply on_mult or off_mult, as appropriate.
if tp:
lim = np.array([-fwhm, fwhm]) * on_mult + pg[1]
else:
lim = np.array([-fwhm, fwhm]) * off_mult + pg[1]
fbkg[(t > lim[0]) & (t < lim[1])] = False
fsig[(t > lim[0]) & (t < lim[1])] = False
except RuntimeError:
failed.append([c, tp])
pass
ftrn = ~fbkg & ~fsig
# if there are any failed transitions, exclude the mean transition width
# either side of the failures
if len(failed) > 0:
trns = t[bool_2_indices(ftrn)]
tr_mean = (trns[:, 1] - trns[:, 0]).mean() / 2
for f, tp in failed:
if tp:
ind = (t >= f - tr_mean *
on_mult[0]) & (t <= f + tr_mean * on_mult[0])
else:
ind = (t >= f - tr_mean *
off_mult[0]) & (t <= f + tr_mean * off_mult[0])
fsig[ind] = False
fbkg[ind] = False
ftrn[ind] = False
return fbkg, fsig, ftrn, [f[0] for f in failed]
|
def autorange(t, sig, gwin=7, swin=None, win=30,
on_mult=(1.5, 1.), off_mult=(1., 1.5),
nbin=10, transform='log', thresh=None):
"""
Automatically separates signal and background in an on/off data stream.
**Step 1: Thresholding.**
The background signal is determined using a gaussian kernel density
estimator (kde) of all the data. Under normal circumstances, this
kde should find two distinct data distributions, corresponding to
'signal' and 'background'. The minima between these two distributions
is taken as a rough threshold to identify signal and background
regions. Any point where the trace crosses this thrshold is identified
as a 'transition'.
**Step 2: Transition Removal.**
The width of the transition regions between signal and background are
then determined, and the transitions are excluded from analysis. The
width of the transitions is determined by fitting a gaussian to the
smoothed first derivative of the analyte trace, and determining its
width at a point where the gaussian intensity is at at `conf` time the
gaussian maximum. These gaussians are fit to subsets of the data
centered around the transitions regions determined in Step 1, +/- `win`
data points. The peak is further isolated by finding the minima and
maxima of a second derivative within this window, and the gaussian is
fit to the isolated peak.
Parameters
----------
t : array-like
Independent variable (usually time).
sig : array-like
Dependent signal, with distinctive 'on' and 'off' regions.
gwin : int
The window used for calculating first derivative.
Defaults to 7.
swin : int
The window used for signal smoothing. If None, ``gwin // 2``.
win : int
The width (c +/- win) of the transition data subsets.
Defaults to 20.
on_mult and off_mult : tuple, len=2
Control the width of the excluded transition regions, which is defined
relative to the peak full-width-half-maximum (FWHM) of the transition
gradient. The region n * FHWM below the transition, and m * FWHM above
the tranision will be excluded, where (n, m) are specified in `on_mult`
and `off_mult`.
`on_mult` and `off_mult` apply to the off-on and on-off transitions,
respectively.
Defaults to (1.5, 1) and (1, 1.5).
transform : str
How to transform the data. Default is 'log'.
Returns
-------
fbkg, fsig, ftrn, failed : tuple
where fbkg, fsig and ftrn are boolean arrays the same length as sig,
that are True where sig is background, signal and transition, respecively.
failed contains a list of transition positions where gaussian fitting
has failed.
"""
failed = []
# smooth signal
if swin is not None:
sigs = fastsmooth(sig, swin)
else:
sigs = sig
# transform signal
if transform == 'log':
tsigs = np.log10(sigs)
else:
tsigs = sigs
if thresh is None:
bins = 50
kde_x = np.linspace(tsigs.min(), tsigs.max(), bins)
kde = gaussian_kde(tsigs)
yd = kde.pdf(kde_x)
mins = findmins(kde_x, yd) # find minima in kde
if len(mins) > 0:
bkg = tsigs < (mins[0]) # set background as lowest distribution
else:
bkg = np.ones(tsigs.size, dtype=bool)
# bkg[0] = True # the first value must always be background
else:
bkg = tsigs < thresh
# assign rough background and signal regions based on kde minima
fbkg = bkg
fsig = ~bkg
# remove transitions by fitting a gaussian to the gradients of
# each transition
# 1. determine the approximate index of each transition
zeros = bool_2_indices(fsig)
# 2. calculate the absolute gradient of the target trace.
g = abs(fastgrad(sigs, gwin)) # gradient of untransformed data.
if zeros is not None:
zeros = zeros.flatten()
for z in zeros: # for each approximate transition
# isolate the data around the transition
if z - win < 0:
lo = gwin // 2
hi = int(z + win)
elif z + win > (len(sig) - gwin // 2):
lo = int(z - win)
hi = len(sig) - gwin // 2
else:
lo = int(z - win)
hi = int(z + win)
xs = t[lo:hi]
ys = g[lo:hi]
# determine type of transition (on/off)
mid = (hi + lo) // 2
tp = sigs[mid + 3] > sigs[mid - 3] # True if 'on' transition.
# fit a gaussian to the first derivative of each
# transition. Initial guess parameters:
# - A: maximum gradient in data
# - mu: c
# - width: 2 * time step
# The 'sigma' parameter of curve_fit:
# This weights the fit by distance from c - i.e. data closer
# to c are more important in the fit than data further away
# from c. This allows the function to fit the correct curve,
# even if the data window has captured two independent
# transitions (i.e. end of one ablation and start of next)
# ablation are < win time steps apart).
c = t[z] # center of transition
width = (t[1] - t[0]) * 2
try:
pg, _ = curve_fit(gauss, xs, ys,
p0=(np.nanmax(ys),
c,
width),
sigma=(xs - c)**2 + .01)
# get the x positions when the fitted gaussian is at 'conf' of
# maximum
# determine transition FWHM
fwhm = abs(2 * pg[-1] * np.sqrt(2 * np.log(2)))
# apply on_mult or off_mult, as appropriate.
if tp:
lim = np.array([-fwhm, fwhm]) * on_mult + pg[1]
else:
lim = np.array([-fwhm, fwhm]) * off_mult + pg[1]
fbkg[(t > lim[0]) & (t < lim[1])] = False
fsig[(t > lim[0]) & (t < lim[1])] = False
except RuntimeError:
failed.append([c, tp])
pass
ftrn = ~fbkg & ~fsig
# if there are any failed transitions, exclude the mean transition width
# either side of the failures
if len(failed) > 0:
trns = t[bool_2_indices(ftrn)]
tr_mean = (trns[:, 1] - trns[:, 0]).mean() / 2
for f, tp in failed:
if tp:
ind = (t >= f - tr_mean *
on_mult[0]) & (t <= f + tr_mean * on_mult[0])
else:
ind = (t >= f - tr_mean *
off_mult[0]) & (t <= f + tr_mean * off_mult[0])
fsig[ind] = False
fbkg[ind] = False
ftrn[ind] = False
return fbkg, fsig, ftrn, [f[0] for f in failed]
|
[
"Automatically",
"separates",
"signal",
"and",
"background",
"in",
"an",
"on",
"/",
"off",
"data",
"stream",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/processes/signal_id.py#L8-L189
|
[
"def",
"autorange",
"(",
"t",
",",
"sig",
",",
"gwin",
"=",
"7",
",",
"swin",
"=",
"None",
",",
"win",
"=",
"30",
",",
"on_mult",
"=",
"(",
"1.5",
",",
"1.",
")",
",",
"off_mult",
"=",
"(",
"1.",
",",
"1.5",
")",
",",
"nbin",
"=",
"10",
",",
"transform",
"=",
"'log'",
",",
"thresh",
"=",
"None",
")",
":",
"failed",
"=",
"[",
"]",
"# smooth signal",
"if",
"swin",
"is",
"not",
"None",
":",
"sigs",
"=",
"fastsmooth",
"(",
"sig",
",",
"swin",
")",
"else",
":",
"sigs",
"=",
"sig",
"# transform signal",
"if",
"transform",
"==",
"'log'",
":",
"tsigs",
"=",
"np",
".",
"log10",
"(",
"sigs",
")",
"else",
":",
"tsigs",
"=",
"sigs",
"if",
"thresh",
"is",
"None",
":",
"bins",
"=",
"50",
"kde_x",
"=",
"np",
".",
"linspace",
"(",
"tsigs",
".",
"min",
"(",
")",
",",
"tsigs",
".",
"max",
"(",
")",
",",
"bins",
")",
"kde",
"=",
"gaussian_kde",
"(",
"tsigs",
")",
"yd",
"=",
"kde",
".",
"pdf",
"(",
"kde_x",
")",
"mins",
"=",
"findmins",
"(",
"kde_x",
",",
"yd",
")",
"# find minima in kde",
"if",
"len",
"(",
"mins",
")",
">",
"0",
":",
"bkg",
"=",
"tsigs",
"<",
"(",
"mins",
"[",
"0",
"]",
")",
"# set background as lowest distribution",
"else",
":",
"bkg",
"=",
"np",
".",
"ones",
"(",
"tsigs",
".",
"size",
",",
"dtype",
"=",
"bool",
")",
"# bkg[0] = True # the first value must always be background",
"else",
":",
"bkg",
"=",
"tsigs",
"<",
"thresh",
"# assign rough background and signal regions based on kde minima",
"fbkg",
"=",
"bkg",
"fsig",
"=",
"~",
"bkg",
"# remove transitions by fitting a gaussian to the gradients of",
"# each transition",
"# 1. determine the approximate index of each transition",
"zeros",
"=",
"bool_2_indices",
"(",
"fsig",
")",
"# 2. calculate the absolute gradient of the target trace.",
"g",
"=",
"abs",
"(",
"fastgrad",
"(",
"sigs",
",",
"gwin",
")",
")",
"# gradient of untransformed data.",
"if",
"zeros",
"is",
"not",
"None",
":",
"zeros",
"=",
"zeros",
".",
"flatten",
"(",
")",
"for",
"z",
"in",
"zeros",
":",
"# for each approximate transition",
"# isolate the data around the transition",
"if",
"z",
"-",
"win",
"<",
"0",
":",
"lo",
"=",
"gwin",
"//",
"2",
"hi",
"=",
"int",
"(",
"z",
"+",
"win",
")",
"elif",
"z",
"+",
"win",
">",
"(",
"len",
"(",
"sig",
")",
"-",
"gwin",
"//",
"2",
")",
":",
"lo",
"=",
"int",
"(",
"z",
"-",
"win",
")",
"hi",
"=",
"len",
"(",
"sig",
")",
"-",
"gwin",
"//",
"2",
"else",
":",
"lo",
"=",
"int",
"(",
"z",
"-",
"win",
")",
"hi",
"=",
"int",
"(",
"z",
"+",
"win",
")",
"xs",
"=",
"t",
"[",
"lo",
":",
"hi",
"]",
"ys",
"=",
"g",
"[",
"lo",
":",
"hi",
"]",
"# determine type of transition (on/off)",
"mid",
"=",
"(",
"hi",
"+",
"lo",
")",
"//",
"2",
"tp",
"=",
"sigs",
"[",
"mid",
"+",
"3",
"]",
">",
"sigs",
"[",
"mid",
"-",
"3",
"]",
"# True if 'on' transition.",
"# fit a gaussian to the first derivative of each",
"# transition. Initial guess parameters:",
"# - A: maximum gradient in data",
"# - mu: c",
"# - width: 2 * time step",
"# The 'sigma' parameter of curve_fit:",
"# This weights the fit by distance from c - i.e. data closer",
"# to c are more important in the fit than data further away",
"# from c. This allows the function to fit the correct curve,",
"# even if the data window has captured two independent",
"# transitions (i.e. end of one ablation and start of next)",
"# ablation are < win time steps apart).",
"c",
"=",
"t",
"[",
"z",
"]",
"# center of transition",
"width",
"=",
"(",
"t",
"[",
"1",
"]",
"-",
"t",
"[",
"0",
"]",
")",
"*",
"2",
"try",
":",
"pg",
",",
"_",
"=",
"curve_fit",
"(",
"gauss",
",",
"xs",
",",
"ys",
",",
"p0",
"=",
"(",
"np",
".",
"nanmax",
"(",
"ys",
")",
",",
"c",
",",
"width",
")",
",",
"sigma",
"=",
"(",
"xs",
"-",
"c",
")",
"**",
"2",
"+",
".01",
")",
"# get the x positions when the fitted gaussian is at 'conf' of",
"# maximum",
"# determine transition FWHM",
"fwhm",
"=",
"abs",
"(",
"2",
"*",
"pg",
"[",
"-",
"1",
"]",
"*",
"np",
".",
"sqrt",
"(",
"2",
"*",
"np",
".",
"log",
"(",
"2",
")",
")",
")",
"# apply on_mult or off_mult, as appropriate.",
"if",
"tp",
":",
"lim",
"=",
"np",
".",
"array",
"(",
"[",
"-",
"fwhm",
",",
"fwhm",
"]",
")",
"*",
"on_mult",
"+",
"pg",
"[",
"1",
"]",
"else",
":",
"lim",
"=",
"np",
".",
"array",
"(",
"[",
"-",
"fwhm",
",",
"fwhm",
"]",
")",
"*",
"off_mult",
"+",
"pg",
"[",
"1",
"]",
"fbkg",
"[",
"(",
"t",
">",
"lim",
"[",
"0",
"]",
")",
"&",
"(",
"t",
"<",
"lim",
"[",
"1",
"]",
")",
"]",
"=",
"False",
"fsig",
"[",
"(",
"t",
">",
"lim",
"[",
"0",
"]",
")",
"&",
"(",
"t",
"<",
"lim",
"[",
"1",
"]",
")",
"]",
"=",
"False",
"except",
"RuntimeError",
":",
"failed",
".",
"append",
"(",
"[",
"c",
",",
"tp",
"]",
")",
"pass",
"ftrn",
"=",
"~",
"fbkg",
"&",
"~",
"fsig",
"# if there are any failed transitions, exclude the mean transition width",
"# either side of the failures",
"if",
"len",
"(",
"failed",
")",
">",
"0",
":",
"trns",
"=",
"t",
"[",
"bool_2_indices",
"(",
"ftrn",
")",
"]",
"tr_mean",
"=",
"(",
"trns",
"[",
":",
",",
"1",
"]",
"-",
"trns",
"[",
":",
",",
"0",
"]",
")",
".",
"mean",
"(",
")",
"/",
"2",
"for",
"f",
",",
"tp",
"in",
"failed",
":",
"if",
"tp",
":",
"ind",
"=",
"(",
"t",
">=",
"f",
"-",
"tr_mean",
"*",
"on_mult",
"[",
"0",
"]",
")",
"&",
"(",
"t",
"<=",
"f",
"+",
"tr_mean",
"*",
"on_mult",
"[",
"0",
"]",
")",
"else",
":",
"ind",
"=",
"(",
"t",
">=",
"f",
"-",
"tr_mean",
"*",
"off_mult",
"[",
"0",
"]",
")",
"&",
"(",
"t",
"<=",
"f",
"+",
"tr_mean",
"*",
"off_mult",
"[",
"0",
"]",
")",
"fsig",
"[",
"ind",
"]",
"=",
"False",
"fbkg",
"[",
"ind",
"]",
"=",
"False",
"ftrn",
"[",
"ind",
"]",
"=",
"False",
"return",
"fbkg",
",",
"fsig",
",",
"ftrn",
",",
"[",
"f",
"[",
"0",
"]",
"for",
"f",
"in",
"failed",
"]"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
autorange_components
|
Returns the components underlying the autorange algorithm.
Returns
-------
t : array-like
Time axis (independent variable)
sig : array-like
Raw signal (dependent variable)
sigs : array-like
Smoothed signal (swin)
tsig : array-like
Transformed raw signal (transform)
tsigs : array-like
Transformed smoothed signal (transform, swin)
kde_x : array-like
kernel density estimate of smoothed signal.
yd : array-like
bins of kernel density estimator.
g : array-like
gradient of smoothed signal (swin, gwin)
trans : dict
per-transition data.
thresh : float
threshold identified from kernel density plot
|
latools/processes/signal_id.py
|
def autorange_components(t, sig, transform='log', gwin=7, swin=None,
win=30, on_mult=(1.5, 1.), off_mult=(1., 1.5),
thresh=None):
"""
Returns the components underlying the autorange algorithm.
Returns
-------
t : array-like
Time axis (independent variable)
sig : array-like
Raw signal (dependent variable)
sigs : array-like
Smoothed signal (swin)
tsig : array-like
Transformed raw signal (transform)
tsigs : array-like
Transformed smoothed signal (transform, swin)
kde_x : array-like
kernel density estimate of smoothed signal.
yd : array-like
bins of kernel density estimator.
g : array-like
gradient of smoothed signal (swin, gwin)
trans : dict
per-transition data.
thresh : float
threshold identified from kernel density plot
"""
failed = []
# smooth signal
if swin is not None:
sigs = fastsmooth(sig, swin)
else:
sigs = sig
# transform signal
if transform == 'log':
tsigs = np.log10(sigs)
tsig = np.log10(sig)
else:
tsigs = sigs
tsig = sig
if thresh is None:
bins = 50
kde_x = np.linspace(tsigs.min(), tsigs.max(), bins)
kde = gaussian_kde(tsigs)
yd = kde.pdf(kde_x)
mins = findmins(kde_x, yd) # find minima in kde
if len(mins) > 0:
bkg = tsigs < (mins[0]) # set background as lowest distribution
thresh = mins[0]
else:
bkg = np.ones(tsigs.size, dtype=bool)
else:
bkg = tsigs < thresh
# assign rough background and signal regions based on kde minima
fbkg = bkg
fsig = ~bkg
# remove transitions by fitting a gaussian to the gradients of
# each transition
# 1. determine the approximate index of each transition
zeros = bool_2_indices(fsig)
# 2. calculate the absolute gradient of the target trace.
g = abs(fastgrad(sigs, gwin)) # gradient of untransformed data.
if zeros is not None:
zeros = zeros.flatten()
trans = dict(zeros=zeros.flatten(),
lohi=[],
pgs=[],
excl=[],
tps=[],
failed=[],
xs=[],
ys=[])
for z in zeros: # for each approximate transition
# isolate the data around the transition
if z - win < 0:
lo = gwin // 2
hi = int(z + win)
elif z + win > (len(sig) - gwin // 2):
lo = int(z - win)
hi = len(sig) - gwin // 2
else:
lo = int(z - win)
hi = int(z + win)
xs = t[lo:hi]
ys = g[lo:hi]
trans['xs'].append(xs)
trans['ys'].append(ys)
trans['lohi'].append([lo, hi])
# determine type of transition (on/off)
mid = (hi + lo) // 2
tp = sigs[mid + 3] > sigs[mid - 3] # True if 'on' transition.
trans['tps'].append(tp)
c = t[z] # center of transition
width = (t[1] - t[0]) * 2 # initial width guess
try:
pg, _ = curve_fit(gauss, xs, ys,
p0=(np.nanmax(ys),
c,
width),
sigma=(xs - c)**2 + .01)
trans['pgs'].append(pg)
fwhm = abs(2 * pg[-1] * np.sqrt(2 * np.log(2)))
# apply on_mult or off_mult, as appropriate.
if tp:
lim = np.array([-fwhm, fwhm]) * on_mult + pg[1]
else:
lim = np.array([-fwhm, fwhm]) * off_mult + pg[1]
trans['excl'].append(lim)
fbkg[(t > lim[0]) & (t < lim[1])] = False
fsig[(t > lim[0]) & (t < lim[1])] = False
failed.append(False)
except RuntimeError:
failed.append(True)
trans['lohi'].append([np.nan, np.nan])
trans['pgs'].append([np.nan, np.nan, np.nan])
trans['excl'].append([np.nan, np.nan])
trans['tps'].append(tp)
pass
else:
zeros = []
return t, sig, sigs, tsig, tsigs, kde_x, yd, g, trans, thresh
|
def autorange_components(t, sig, transform='log', gwin=7, swin=None,
win=30, on_mult=(1.5, 1.), off_mult=(1., 1.5),
thresh=None):
"""
Returns the components underlying the autorange algorithm.
Returns
-------
t : array-like
Time axis (independent variable)
sig : array-like
Raw signal (dependent variable)
sigs : array-like
Smoothed signal (swin)
tsig : array-like
Transformed raw signal (transform)
tsigs : array-like
Transformed smoothed signal (transform, swin)
kde_x : array-like
kernel density estimate of smoothed signal.
yd : array-like
bins of kernel density estimator.
g : array-like
gradient of smoothed signal (swin, gwin)
trans : dict
per-transition data.
thresh : float
threshold identified from kernel density plot
"""
failed = []
# smooth signal
if swin is not None:
sigs = fastsmooth(sig, swin)
else:
sigs = sig
# transform signal
if transform == 'log':
tsigs = np.log10(sigs)
tsig = np.log10(sig)
else:
tsigs = sigs
tsig = sig
if thresh is None:
bins = 50
kde_x = np.linspace(tsigs.min(), tsigs.max(), bins)
kde = gaussian_kde(tsigs)
yd = kde.pdf(kde_x)
mins = findmins(kde_x, yd) # find minima in kde
if len(mins) > 0:
bkg = tsigs < (mins[0]) # set background as lowest distribution
thresh = mins[0]
else:
bkg = np.ones(tsigs.size, dtype=bool)
else:
bkg = tsigs < thresh
# assign rough background and signal regions based on kde minima
fbkg = bkg
fsig = ~bkg
# remove transitions by fitting a gaussian to the gradients of
# each transition
# 1. determine the approximate index of each transition
zeros = bool_2_indices(fsig)
# 2. calculate the absolute gradient of the target trace.
g = abs(fastgrad(sigs, gwin)) # gradient of untransformed data.
if zeros is not None:
zeros = zeros.flatten()
trans = dict(zeros=zeros.flatten(),
lohi=[],
pgs=[],
excl=[],
tps=[],
failed=[],
xs=[],
ys=[])
for z in zeros: # for each approximate transition
# isolate the data around the transition
if z - win < 0:
lo = gwin // 2
hi = int(z + win)
elif z + win > (len(sig) - gwin // 2):
lo = int(z - win)
hi = len(sig) - gwin // 2
else:
lo = int(z - win)
hi = int(z + win)
xs = t[lo:hi]
ys = g[lo:hi]
trans['xs'].append(xs)
trans['ys'].append(ys)
trans['lohi'].append([lo, hi])
# determine type of transition (on/off)
mid = (hi + lo) // 2
tp = sigs[mid + 3] > sigs[mid - 3] # True if 'on' transition.
trans['tps'].append(tp)
c = t[z] # center of transition
width = (t[1] - t[0]) * 2 # initial width guess
try:
pg, _ = curve_fit(gauss, xs, ys,
p0=(np.nanmax(ys),
c,
width),
sigma=(xs - c)**2 + .01)
trans['pgs'].append(pg)
fwhm = abs(2 * pg[-1] * np.sqrt(2 * np.log(2)))
# apply on_mult or off_mult, as appropriate.
if tp:
lim = np.array([-fwhm, fwhm]) * on_mult + pg[1]
else:
lim = np.array([-fwhm, fwhm]) * off_mult + pg[1]
trans['excl'].append(lim)
fbkg[(t > lim[0]) & (t < lim[1])] = False
fsig[(t > lim[0]) & (t < lim[1])] = False
failed.append(False)
except RuntimeError:
failed.append(True)
trans['lohi'].append([np.nan, np.nan])
trans['pgs'].append([np.nan, np.nan, np.nan])
trans['excl'].append([np.nan, np.nan])
trans['tps'].append(tp)
pass
else:
zeros = []
return t, sig, sigs, tsig, tsigs, kde_x, yd, g, trans, thresh
|
[
"Returns",
"the",
"components",
"underlying",
"the",
"autorange",
"algorithm",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/processes/signal_id.py#L191-L330
|
[
"def",
"autorange_components",
"(",
"t",
",",
"sig",
",",
"transform",
"=",
"'log'",
",",
"gwin",
"=",
"7",
",",
"swin",
"=",
"None",
",",
"win",
"=",
"30",
",",
"on_mult",
"=",
"(",
"1.5",
",",
"1.",
")",
",",
"off_mult",
"=",
"(",
"1.",
",",
"1.5",
")",
",",
"thresh",
"=",
"None",
")",
":",
"failed",
"=",
"[",
"]",
"# smooth signal",
"if",
"swin",
"is",
"not",
"None",
":",
"sigs",
"=",
"fastsmooth",
"(",
"sig",
",",
"swin",
")",
"else",
":",
"sigs",
"=",
"sig",
"# transform signal",
"if",
"transform",
"==",
"'log'",
":",
"tsigs",
"=",
"np",
".",
"log10",
"(",
"sigs",
")",
"tsig",
"=",
"np",
".",
"log10",
"(",
"sig",
")",
"else",
":",
"tsigs",
"=",
"sigs",
"tsig",
"=",
"sig",
"if",
"thresh",
"is",
"None",
":",
"bins",
"=",
"50",
"kde_x",
"=",
"np",
".",
"linspace",
"(",
"tsigs",
".",
"min",
"(",
")",
",",
"tsigs",
".",
"max",
"(",
")",
",",
"bins",
")",
"kde",
"=",
"gaussian_kde",
"(",
"tsigs",
")",
"yd",
"=",
"kde",
".",
"pdf",
"(",
"kde_x",
")",
"mins",
"=",
"findmins",
"(",
"kde_x",
",",
"yd",
")",
"# find minima in kde",
"if",
"len",
"(",
"mins",
")",
">",
"0",
":",
"bkg",
"=",
"tsigs",
"<",
"(",
"mins",
"[",
"0",
"]",
")",
"# set background as lowest distribution",
"thresh",
"=",
"mins",
"[",
"0",
"]",
"else",
":",
"bkg",
"=",
"np",
".",
"ones",
"(",
"tsigs",
".",
"size",
",",
"dtype",
"=",
"bool",
")",
"else",
":",
"bkg",
"=",
"tsigs",
"<",
"thresh",
"# assign rough background and signal regions based on kde minima",
"fbkg",
"=",
"bkg",
"fsig",
"=",
"~",
"bkg",
"# remove transitions by fitting a gaussian to the gradients of",
"# each transition",
"# 1. determine the approximate index of each transition",
"zeros",
"=",
"bool_2_indices",
"(",
"fsig",
")",
"# 2. calculate the absolute gradient of the target trace.",
"g",
"=",
"abs",
"(",
"fastgrad",
"(",
"sigs",
",",
"gwin",
")",
")",
"# gradient of untransformed data.",
"if",
"zeros",
"is",
"not",
"None",
":",
"zeros",
"=",
"zeros",
".",
"flatten",
"(",
")",
"trans",
"=",
"dict",
"(",
"zeros",
"=",
"zeros",
".",
"flatten",
"(",
")",
",",
"lohi",
"=",
"[",
"]",
",",
"pgs",
"=",
"[",
"]",
",",
"excl",
"=",
"[",
"]",
",",
"tps",
"=",
"[",
"]",
",",
"failed",
"=",
"[",
"]",
",",
"xs",
"=",
"[",
"]",
",",
"ys",
"=",
"[",
"]",
")",
"for",
"z",
"in",
"zeros",
":",
"# for each approximate transition",
"# isolate the data around the transition",
"if",
"z",
"-",
"win",
"<",
"0",
":",
"lo",
"=",
"gwin",
"//",
"2",
"hi",
"=",
"int",
"(",
"z",
"+",
"win",
")",
"elif",
"z",
"+",
"win",
">",
"(",
"len",
"(",
"sig",
")",
"-",
"gwin",
"//",
"2",
")",
":",
"lo",
"=",
"int",
"(",
"z",
"-",
"win",
")",
"hi",
"=",
"len",
"(",
"sig",
")",
"-",
"gwin",
"//",
"2",
"else",
":",
"lo",
"=",
"int",
"(",
"z",
"-",
"win",
")",
"hi",
"=",
"int",
"(",
"z",
"+",
"win",
")",
"xs",
"=",
"t",
"[",
"lo",
":",
"hi",
"]",
"ys",
"=",
"g",
"[",
"lo",
":",
"hi",
"]",
"trans",
"[",
"'xs'",
"]",
".",
"append",
"(",
"xs",
")",
"trans",
"[",
"'ys'",
"]",
".",
"append",
"(",
"ys",
")",
"trans",
"[",
"'lohi'",
"]",
".",
"append",
"(",
"[",
"lo",
",",
"hi",
"]",
")",
"# determine type of transition (on/off)",
"mid",
"=",
"(",
"hi",
"+",
"lo",
")",
"//",
"2",
"tp",
"=",
"sigs",
"[",
"mid",
"+",
"3",
"]",
">",
"sigs",
"[",
"mid",
"-",
"3",
"]",
"# True if 'on' transition.",
"trans",
"[",
"'tps'",
"]",
".",
"append",
"(",
"tp",
")",
"c",
"=",
"t",
"[",
"z",
"]",
"# center of transition",
"width",
"=",
"(",
"t",
"[",
"1",
"]",
"-",
"t",
"[",
"0",
"]",
")",
"*",
"2",
"# initial width guess",
"try",
":",
"pg",
",",
"_",
"=",
"curve_fit",
"(",
"gauss",
",",
"xs",
",",
"ys",
",",
"p0",
"=",
"(",
"np",
".",
"nanmax",
"(",
"ys",
")",
",",
"c",
",",
"width",
")",
",",
"sigma",
"=",
"(",
"xs",
"-",
"c",
")",
"**",
"2",
"+",
".01",
")",
"trans",
"[",
"'pgs'",
"]",
".",
"append",
"(",
"pg",
")",
"fwhm",
"=",
"abs",
"(",
"2",
"*",
"pg",
"[",
"-",
"1",
"]",
"*",
"np",
".",
"sqrt",
"(",
"2",
"*",
"np",
".",
"log",
"(",
"2",
")",
")",
")",
"# apply on_mult or off_mult, as appropriate.",
"if",
"tp",
":",
"lim",
"=",
"np",
".",
"array",
"(",
"[",
"-",
"fwhm",
",",
"fwhm",
"]",
")",
"*",
"on_mult",
"+",
"pg",
"[",
"1",
"]",
"else",
":",
"lim",
"=",
"np",
".",
"array",
"(",
"[",
"-",
"fwhm",
",",
"fwhm",
"]",
")",
"*",
"off_mult",
"+",
"pg",
"[",
"1",
"]",
"trans",
"[",
"'excl'",
"]",
".",
"append",
"(",
"lim",
")",
"fbkg",
"[",
"(",
"t",
">",
"lim",
"[",
"0",
"]",
")",
"&",
"(",
"t",
"<",
"lim",
"[",
"1",
"]",
")",
"]",
"=",
"False",
"fsig",
"[",
"(",
"t",
">",
"lim",
"[",
"0",
"]",
")",
"&",
"(",
"t",
"<",
"lim",
"[",
"1",
"]",
")",
"]",
"=",
"False",
"failed",
".",
"append",
"(",
"False",
")",
"except",
"RuntimeError",
":",
"failed",
".",
"append",
"(",
"True",
")",
"trans",
"[",
"'lohi'",
"]",
".",
"append",
"(",
"[",
"np",
".",
"nan",
",",
"np",
".",
"nan",
"]",
")",
"trans",
"[",
"'pgs'",
"]",
".",
"append",
"(",
"[",
"np",
".",
"nan",
",",
"np",
".",
"nan",
",",
"np",
".",
"nan",
"]",
")",
"trans",
"[",
"'excl'",
"]",
".",
"append",
"(",
"[",
"np",
".",
"nan",
",",
"np",
".",
"nan",
"]",
")",
"trans",
"[",
"'tps'",
"]",
".",
"append",
"(",
"tp",
")",
"pass",
"else",
":",
"zeros",
"=",
"[",
"]",
"return",
"t",
",",
"sig",
",",
"sigs",
",",
"tsig",
",",
"tsigs",
",",
"kde_x",
",",
"yd",
",",
"g",
",",
"trans",
",",
"thresh"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
elements
|
Loads a DataFrame of all elements and isotopes.
Scraped from https://www.webelements.com/
Returns
-------
pandas DataFrame with columns (element, atomic_number, isotope, atomic_weight, percent)
|
latools/helpers/chemistry.py
|
def elements(all_isotopes=True):
"""
Loads a DataFrame of all elements and isotopes.
Scraped from https://www.webelements.com/
Returns
-------
pandas DataFrame with columns (element, atomic_number, isotope, atomic_weight, percent)
"""
el = pd.read_pickle(pkgrs.resource_filename('latools', 'resources/elements.pkl'))
if all_isotopes:
return el.set_index('element')
else:
def wmean(g):
return (g.atomic_weight * g.percent).sum() / 100
iel = el.groupby('element').apply(wmean)
iel.name = 'atomic_weight'
return iel
|
def elements(all_isotopes=True):
"""
Loads a DataFrame of all elements and isotopes.
Scraped from https://www.webelements.com/
Returns
-------
pandas DataFrame with columns (element, atomic_number, isotope, atomic_weight, percent)
"""
el = pd.read_pickle(pkgrs.resource_filename('latools', 'resources/elements.pkl'))
if all_isotopes:
return el.set_index('element')
else:
def wmean(g):
return (g.atomic_weight * g.percent).sum() / 100
iel = el.groupby('element').apply(wmean)
iel.name = 'atomic_weight'
return iel
|
[
"Loads",
"a",
"DataFrame",
"of",
"all",
"elements",
"and",
"isotopes",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/chemistry.py#L6-L24
|
[
"def",
"elements",
"(",
"all_isotopes",
"=",
"True",
")",
":",
"el",
"=",
"pd",
".",
"read_pickle",
"(",
"pkgrs",
".",
"resource_filename",
"(",
"'latools'",
",",
"'resources/elements.pkl'",
")",
")",
"if",
"all_isotopes",
":",
"return",
"el",
".",
"set_index",
"(",
"'element'",
")",
"else",
":",
"def",
"wmean",
"(",
"g",
")",
":",
"return",
"(",
"g",
".",
"atomic_weight",
"*",
"g",
".",
"percent",
")",
".",
"sum",
"(",
")",
"/",
"100",
"iel",
"=",
"el",
".",
"groupby",
"(",
"'element'",
")",
".",
"apply",
"(",
"wmean",
")",
"iel",
".",
"name",
"=",
"'atomic_weight'",
"return",
"iel"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
calc_M
|
Returns molecular weight of molecule.
Where molecule is in standard chemical notation,
e.g. 'CO2', 'HCO3' or B(OH)4
Returns
-------
molecular_weight : float
|
latools/helpers/chemistry.py
|
def calc_M(molecule):
"""
Returns molecular weight of molecule.
Where molecule is in standard chemical notation,
e.g. 'CO2', 'HCO3' or B(OH)4
Returns
-------
molecular_weight : float
"""
# load periodic table
els = elements()
# define regexs
parens = re.compile('\(([A-z0-9]+)\)([0-9]+)?')
stoich = re.compile('([A-Z][a-z]?)([0-9]+)?')
ps = parens.findall(molecule) # find subgroups in parentheses
rem = parens.sub('', molecule) # get remainder
m = 0
# deal with sub-groups
if len(ps) > 0:
for sub, ns in ps:
ms = 0
for e, n in stoich.findall(sub):
me = (els.loc[e, 'atomic_weight'] *
els.loc[e, 'percent'] / 100).sum()
if n == '':
n = 1
else:
n = int(n)
ms += me * n
if ns == '':
ns = 1
else:
ns = int(ns)
m += ms * ns
# deal with remainder
for e, n in stoich.findall(rem):
me = (els.loc[e, 'atomic_weight'] *
els.loc[e, 'percent'] / 100).sum()
if n == '':
n = 1
else:
n = int(n)
m += me * n
return m
|
def calc_M(molecule):
"""
Returns molecular weight of molecule.
Where molecule is in standard chemical notation,
e.g. 'CO2', 'HCO3' or B(OH)4
Returns
-------
molecular_weight : float
"""
# load periodic table
els = elements()
# define regexs
parens = re.compile('\(([A-z0-9]+)\)([0-9]+)?')
stoich = re.compile('([A-Z][a-z]?)([0-9]+)?')
ps = parens.findall(molecule) # find subgroups in parentheses
rem = parens.sub('', molecule) # get remainder
m = 0
# deal with sub-groups
if len(ps) > 0:
for sub, ns in ps:
ms = 0
for e, n in stoich.findall(sub):
me = (els.loc[e, 'atomic_weight'] *
els.loc[e, 'percent'] / 100).sum()
if n == '':
n = 1
else:
n = int(n)
ms += me * n
if ns == '':
ns = 1
else:
ns = int(ns)
m += ms * ns
# deal with remainder
for e, n in stoich.findall(rem):
me = (els.loc[e, 'atomic_weight'] *
els.loc[e, 'percent'] / 100).sum()
if n == '':
n = 1
else:
n = int(n)
m += me * n
return m
|
[
"Returns",
"molecular",
"weight",
"of",
"molecule",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/chemistry.py#L26-L75
|
[
"def",
"calc_M",
"(",
"molecule",
")",
":",
"# load periodic table",
"els",
"=",
"elements",
"(",
")",
"# define regexs",
"parens",
"=",
"re",
".",
"compile",
"(",
"'\\(([A-z0-9]+)\\)([0-9]+)?'",
")",
"stoich",
"=",
"re",
".",
"compile",
"(",
"'([A-Z][a-z]?)([0-9]+)?'",
")",
"ps",
"=",
"parens",
".",
"findall",
"(",
"molecule",
")",
"# find subgroups in parentheses",
"rem",
"=",
"parens",
".",
"sub",
"(",
"''",
",",
"molecule",
")",
"# get remainder",
"m",
"=",
"0",
"# deal with sub-groups",
"if",
"len",
"(",
"ps",
")",
">",
"0",
":",
"for",
"sub",
",",
"ns",
"in",
"ps",
":",
"ms",
"=",
"0",
"for",
"e",
",",
"n",
"in",
"stoich",
".",
"findall",
"(",
"sub",
")",
":",
"me",
"=",
"(",
"els",
".",
"loc",
"[",
"e",
",",
"'atomic_weight'",
"]",
"*",
"els",
".",
"loc",
"[",
"e",
",",
"'percent'",
"]",
"/",
"100",
")",
".",
"sum",
"(",
")",
"if",
"n",
"==",
"''",
":",
"n",
"=",
"1",
"else",
":",
"n",
"=",
"int",
"(",
"n",
")",
"ms",
"+=",
"me",
"*",
"n",
"if",
"ns",
"==",
"''",
":",
"ns",
"=",
"1",
"else",
":",
"ns",
"=",
"int",
"(",
"ns",
")",
"m",
"+=",
"ms",
"*",
"ns",
"# deal with remainder",
"for",
"e",
",",
"n",
"in",
"stoich",
".",
"findall",
"(",
"rem",
")",
":",
"me",
"=",
"(",
"els",
".",
"loc",
"[",
"e",
",",
"'atomic_weight'",
"]",
"*",
"els",
".",
"loc",
"[",
"e",
",",
"'percent'",
"]",
"/",
"100",
")",
".",
"sum",
"(",
")",
"if",
"n",
"==",
"''",
":",
"n",
"=",
"1",
"else",
":",
"n",
"=",
"int",
"(",
"n",
")",
"m",
"+=",
"me",
"*",
"n",
"return",
"m"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
gen_keywords
|
generate single escape sequence mapping.
|
amino/string/hues.py
|
def gen_keywords(*args: Union[ANSIColors, ANSIStyles], **kwargs: Union[ANSIColors, ANSIStyles]) -> tuple:
'''generate single escape sequence mapping.'''
fields: tuple = tuple()
values: tuple = tuple()
for tpl in args:
fields += tpl._fields
values += tpl
for prefix, tpl in kwargs.items():
fields += tuple(map(lambda x: '_'.join([prefix, x]), tpl._fields))
values += tpl
return namedtuple('ANSISequences', fields)(*values)
|
def gen_keywords(*args: Union[ANSIColors, ANSIStyles], **kwargs: Union[ANSIColors, ANSIStyles]) -> tuple:
'''generate single escape sequence mapping.'''
fields: tuple = tuple()
values: tuple = tuple()
for tpl in args:
fields += tpl._fields
values += tpl
for prefix, tpl in kwargs.items():
fields += tuple(map(lambda x: '_'.join([prefix, x]), tpl._fields))
values += tpl
return namedtuple('ANSISequences', fields)(*values)
|
[
"generate",
"single",
"escape",
"sequence",
"mapping",
"."
] |
tek/amino
|
python
|
https://github.com/tek/amino/blob/51b314933e047a45587a24ecff02c836706d27ff/amino/string/hues.py#L50-L60
|
[
"def",
"gen_keywords",
"(",
"*",
"args",
":",
"Union",
"[",
"ANSIColors",
",",
"ANSIStyles",
"]",
",",
"*",
"*",
"kwargs",
":",
"Union",
"[",
"ANSIColors",
",",
"ANSIStyles",
"]",
")",
"->",
"tuple",
":",
"fields",
":",
"tuple",
"=",
"tuple",
"(",
")",
"values",
":",
"tuple",
"=",
"tuple",
"(",
")",
"for",
"tpl",
"in",
"args",
":",
"fields",
"+=",
"tpl",
".",
"_fields",
"values",
"+=",
"tpl",
"for",
"prefix",
",",
"tpl",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"fields",
"+=",
"tuple",
"(",
"map",
"(",
"lambda",
"x",
":",
"'_'",
".",
"join",
"(",
"[",
"prefix",
",",
"x",
"]",
")",
",",
"tpl",
".",
"_fields",
")",
")",
"values",
"+=",
"tpl",
"return",
"namedtuple",
"(",
"'ANSISequences'",
",",
"fields",
")",
"(",
"*",
"values",
")"
] |
51b314933e047a45587a24ecff02c836706d27ff
|
test
|
zero_break
|
Handle Resets in input stack.
Breaks the input stack if a Reset operator (zero) is encountered.
|
amino/string/hues.py
|
def zero_break(stack: tuple) -> tuple:
'''Handle Resets in input stack.
Breaks the input stack if a Reset operator (zero) is encountered.
'''
reducer = lambda x, y: tuple() if y == 0 else x + (y,)
return reduce(reducer, stack, tuple())
|
def zero_break(stack: tuple) -> tuple:
'''Handle Resets in input stack.
Breaks the input stack if a Reset operator (zero) is encountered.
'''
reducer = lambda x, y: tuple() if y == 0 else x + (y,)
return reduce(reducer, stack, tuple())
|
[
"Handle",
"Resets",
"in",
"input",
"stack",
".",
"Breaks",
"the",
"input",
"stack",
"if",
"a",
"Reset",
"operator",
"(",
"zero",
")",
"is",
"encountered",
"."
] |
tek/amino
|
python
|
https://github.com/tek/amino/blob/51b314933e047a45587a24ecff02c836706d27ff/amino/string/hues.py#L65-L70
|
[
"def",
"zero_break",
"(",
"stack",
":",
"tuple",
")",
"->",
"tuple",
":",
"reducer",
"=",
"lambda",
"x",
",",
"y",
":",
"tuple",
"(",
")",
"if",
"y",
"==",
"0",
"else",
"x",
"+",
"(",
"y",
",",
")",
"return",
"reduce",
"(",
"reducer",
",",
"stack",
",",
"tuple",
"(",
")",
")"
] |
51b314933e047a45587a24ecff02c836706d27ff
|
test
|
annihilate
|
Squash and reduce the input stack.
Removes the elements of input that match predicate and only keeps the last
match at the end of the stack.
|
amino/string/hues.py
|
def annihilate(predicate: tuple, stack: tuple) -> tuple:
'''Squash and reduce the input stack.
Removes the elements of input that match predicate and only keeps the last
match at the end of the stack.
'''
extra = tuple(filter(lambda x: x not in predicate, stack))
head = reduce(lambda x, y: y if y in predicate else x, stack, None)
return extra + (head,) if head else extra
|
def annihilate(predicate: tuple, stack: tuple) -> tuple:
'''Squash and reduce the input stack.
Removes the elements of input that match predicate and only keeps the last
match at the end of the stack.
'''
extra = tuple(filter(lambda x: x not in predicate, stack))
head = reduce(lambda x, y: y if y in predicate else x, stack, None)
return extra + (head,) if head else extra
|
[
"Squash",
"and",
"reduce",
"the",
"input",
"stack",
".",
"Removes",
"the",
"elements",
"of",
"input",
"that",
"match",
"predicate",
"and",
"only",
"keeps",
"the",
"last",
"match",
"at",
"the",
"end",
"of",
"the",
"stack",
"."
] |
tek/amino
|
python
|
https://github.com/tek/amino/blob/51b314933e047a45587a24ecff02c836706d27ff/amino/string/hues.py#L73-L80
|
[
"def",
"annihilate",
"(",
"predicate",
":",
"tuple",
",",
"stack",
":",
"tuple",
")",
"->",
"tuple",
":",
"extra",
"=",
"tuple",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
"not",
"in",
"predicate",
",",
"stack",
")",
")",
"head",
"=",
"reduce",
"(",
"lambda",
"x",
",",
"y",
":",
"y",
"if",
"y",
"in",
"predicate",
"else",
"x",
",",
"stack",
",",
"None",
")",
"return",
"extra",
"+",
"(",
"head",
",",
")",
"if",
"head",
"else",
"extra"
] |
51b314933e047a45587a24ecff02c836706d27ff
|
test
|
dedup
|
Remove duplicates from the stack in first-seen order.
|
amino/string/hues.py
|
def dedup(stack: tuple) -> tuple:
'''Remove duplicates from the stack in first-seen order.'''
# Initializes with an accumulator and then reduces the stack with first match
# deduplication.
reducer = lambda x, y: x if y in x else x + (y,)
return reduce(reducer, stack, tuple())
|
def dedup(stack: tuple) -> tuple:
'''Remove duplicates from the stack in first-seen order.'''
# Initializes with an accumulator and then reduces the stack with first match
# deduplication.
reducer = lambda x, y: x if y in x else x + (y,)
return reduce(reducer, stack, tuple())
|
[
"Remove",
"duplicates",
"from",
"the",
"stack",
"in",
"first",
"-",
"seen",
"order",
"."
] |
tek/amino
|
python
|
https://github.com/tek/amino/blob/51b314933e047a45587a24ecff02c836706d27ff/amino/string/hues.py#L88-L93
|
[
"def",
"dedup",
"(",
"stack",
":",
"tuple",
")",
"->",
"tuple",
":",
"# Initializes with an accumulator and then reduces the stack with first match",
"# deduplication.",
"reducer",
"=",
"lambda",
"x",
",",
"y",
":",
"x",
"if",
"y",
"in",
"x",
"else",
"x",
"+",
"(",
"y",
",",
")",
"return",
"reduce",
"(",
"reducer",
",",
"stack",
",",
"tuple",
"(",
")",
")"
] |
51b314933e047a45587a24ecff02c836706d27ff
|
test
|
gauss_weighted_stats
|
Calculate gaussian weigted moving mean, SD and SE.
Parameters
----------
x : array-like
The independent variable
yarray : (n,m) array
Where n = x.size, and m is the number of
dependent variables to smooth.
x_new : array-like
The new x-scale to interpolate the data
fwhm : int
FWHM of the gaussian kernel.
Returns
-------
(mean, std, se) : tuple
|
latools/helpers/stat_fns.py
|
def gauss_weighted_stats(x, yarray, x_new, fwhm):
"""
Calculate gaussian weigted moving mean, SD and SE.
Parameters
----------
x : array-like
The independent variable
yarray : (n,m) array
Where n = x.size, and m is the number of
dependent variables to smooth.
x_new : array-like
The new x-scale to interpolate the data
fwhm : int
FWHM of the gaussian kernel.
Returns
-------
(mean, std, se) : tuple
"""
sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))
# create empty mask array
mask = np.zeros((x.size, yarray.shape[1], x_new.size))
# fill mask
for i, xni in enumerate(x_new):
mask[:, :, i] = gauss(x[:, np.newaxis], 1, xni, sigma)
# normalise mask
nmask = mask / mask.sum(0) # sum of each gaussian = 1
# calculate moving average
av = (nmask * yarray[:, :, np.newaxis]).sum(0) # apply mask to data
# sum along xn axis to get means
# calculate moving sd
diff = np.power(av - yarray[:, :, np.newaxis], 2)
std = np.sqrt((diff * nmask).sum(0))
# sqrt of weighted average of data-mean
# calculate moving se
se = std / np.sqrt(mask.sum(0))
# max amplitude of weights is 1, so sum of weights scales
# a fn of how many points are nearby. Use this as 'n' in
# SE calculation.
return av, std, se
|
def gauss_weighted_stats(x, yarray, x_new, fwhm):
"""
Calculate gaussian weigted moving mean, SD and SE.
Parameters
----------
x : array-like
The independent variable
yarray : (n,m) array
Where n = x.size, and m is the number of
dependent variables to smooth.
x_new : array-like
The new x-scale to interpolate the data
fwhm : int
FWHM of the gaussian kernel.
Returns
-------
(mean, std, se) : tuple
"""
sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))
# create empty mask array
mask = np.zeros((x.size, yarray.shape[1], x_new.size))
# fill mask
for i, xni in enumerate(x_new):
mask[:, :, i] = gauss(x[:, np.newaxis], 1, xni, sigma)
# normalise mask
nmask = mask / mask.sum(0) # sum of each gaussian = 1
# calculate moving average
av = (nmask * yarray[:, :, np.newaxis]).sum(0) # apply mask to data
# sum along xn axis to get means
# calculate moving sd
diff = np.power(av - yarray[:, :, np.newaxis], 2)
std = np.sqrt((diff * nmask).sum(0))
# sqrt of weighted average of data-mean
# calculate moving se
se = std / np.sqrt(mask.sum(0))
# max amplitude of weights is 1, so sum of weights scales
# a fn of how many points are nearby. Use this as 'n' in
# SE calculation.
return av, std, se
|
[
"Calculate",
"gaussian",
"weigted",
"moving",
"mean",
"SD",
"and",
"SE",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/stat_fns.py#L51-L96
|
[
"def",
"gauss_weighted_stats",
"(",
"x",
",",
"yarray",
",",
"x_new",
",",
"fwhm",
")",
":",
"sigma",
"=",
"fwhm",
"/",
"(",
"2",
"*",
"np",
".",
"sqrt",
"(",
"2",
"*",
"np",
".",
"log",
"(",
"2",
")",
")",
")",
"# create empty mask array",
"mask",
"=",
"np",
".",
"zeros",
"(",
"(",
"x",
".",
"size",
",",
"yarray",
".",
"shape",
"[",
"1",
"]",
",",
"x_new",
".",
"size",
")",
")",
"# fill mask",
"for",
"i",
",",
"xni",
"in",
"enumerate",
"(",
"x_new",
")",
":",
"mask",
"[",
":",
",",
":",
",",
"i",
"]",
"=",
"gauss",
"(",
"x",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"1",
",",
"xni",
",",
"sigma",
")",
"# normalise mask",
"nmask",
"=",
"mask",
"/",
"mask",
".",
"sum",
"(",
"0",
")",
"# sum of each gaussian = 1",
"# calculate moving average",
"av",
"=",
"(",
"nmask",
"*",
"yarray",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
")",
".",
"sum",
"(",
"0",
")",
"# apply mask to data",
"# sum along xn axis to get means",
"# calculate moving sd",
"diff",
"=",
"np",
".",
"power",
"(",
"av",
"-",
"yarray",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"2",
")",
"std",
"=",
"np",
".",
"sqrt",
"(",
"(",
"diff",
"*",
"nmask",
")",
".",
"sum",
"(",
"0",
")",
")",
"# sqrt of weighted average of data-mean",
"# calculate moving se",
"se",
"=",
"std",
"/",
"np",
".",
"sqrt",
"(",
"mask",
".",
"sum",
"(",
"0",
")",
")",
"# max amplitude of weights is 1, so sum of weights scales",
"# a fn of how many points are nearby. Use this as 'n' in",
"# SE calculation.",
"return",
"av",
",",
"std",
",",
"se"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
gauss
|
Gaussian function.
Parameters
----------
x : array_like
Independent variable.
*p : parameters unpacked to A, mu, sigma
A = amplitude, mu = centre, sigma = width
Return
------
array_like
gaussian descriped by *p.
|
latools/helpers/stat_fns.py
|
def gauss(x, *p):
""" Gaussian function.
Parameters
----------
x : array_like
Independent variable.
*p : parameters unpacked to A, mu, sigma
A = amplitude, mu = centre, sigma = width
Return
------
array_like
gaussian descriped by *p.
"""
A, mu, sigma = p
return A * np.exp(-0.5 * (-mu + x)**2 / sigma**2)
|
def gauss(x, *p):
""" Gaussian function.
Parameters
----------
x : array_like
Independent variable.
*p : parameters unpacked to A, mu, sigma
A = amplitude, mu = centre, sigma = width
Return
------
array_like
gaussian descriped by *p.
"""
A, mu, sigma = p
return A * np.exp(-0.5 * (-mu + x)**2 / sigma**2)
|
[
"Gaussian",
"function",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/stat_fns.py#L99-L115
|
[
"def",
"gauss",
"(",
"x",
",",
"*",
"p",
")",
":",
"A",
",",
"mu",
",",
"sigma",
"=",
"p",
"return",
"A",
"*",
"np",
".",
"exp",
"(",
"-",
"0.5",
"*",
"(",
"-",
"mu",
"+",
"x",
")",
"**",
"2",
"/",
"sigma",
"**",
"2",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
stderr
|
Calculate the standard error of a.
|
latools/helpers/stat_fns.py
|
def stderr(a):
"""
Calculate the standard error of a.
"""
return np.nanstd(a) / np.sqrt(sum(np.isfinite(a)))
|
def stderr(a):
"""
Calculate the standard error of a.
"""
return np.nanstd(a) / np.sqrt(sum(np.isfinite(a)))
|
[
"Calculate",
"the",
"standard",
"error",
"of",
"a",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/stat_fns.py#L119-L123
|
[
"def",
"stderr",
"(",
"a",
")",
":",
"return",
"np",
".",
"nanstd",
"(",
"a",
")",
"/",
"np",
".",
"sqrt",
"(",
"sum",
"(",
"np",
".",
"isfinite",
"(",
"a",
")",
")",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
H15_mean
|
Calculate the Huber (H15) Robust mean of x.
For details, see:
http://www.cscjp.co.jp/fera/document/ANALYSTVol114Decpgs1693-97_1989.pdf
http://www.rsc.org/images/robust-statistics-technical-brief-6_tcm18-214850.pdf
|
latools/helpers/stat_fns.py
|
def H15_mean(x):
"""
Calculate the Huber (H15) Robust mean of x.
For details, see:
http://www.cscjp.co.jp/fera/document/ANALYSTVol114Decpgs1693-97_1989.pdf
http://www.rsc.org/images/robust-statistics-technical-brief-6_tcm18-214850.pdf
"""
mu = np.nanmean(x)
sd = np.nanstd(x) * 1.134
sig = 1.5
hi = x > mu + sig * sd
lo = x < mu - sig * sd
if any(hi | lo):
x[hi] = mu + sig * sd
x[lo] = mu - sig * sd
return H15_mean(x)
else:
return mu
|
def H15_mean(x):
"""
Calculate the Huber (H15) Robust mean of x.
For details, see:
http://www.cscjp.co.jp/fera/document/ANALYSTVol114Decpgs1693-97_1989.pdf
http://www.rsc.org/images/robust-statistics-technical-brief-6_tcm18-214850.pdf
"""
mu = np.nanmean(x)
sd = np.nanstd(x) * 1.134
sig = 1.5
hi = x > mu + sig * sd
lo = x < mu - sig * sd
if any(hi | lo):
x[hi] = mu + sig * sd
x[lo] = mu - sig * sd
return H15_mean(x)
else:
return mu
|
[
"Calculate",
"the",
"Huber",
"(",
"H15",
")",
"Robust",
"mean",
"of",
"x",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/stat_fns.py#L132-L152
|
[
"def",
"H15_mean",
"(",
"x",
")",
":",
"mu",
"=",
"np",
".",
"nanmean",
"(",
"x",
")",
"sd",
"=",
"np",
".",
"nanstd",
"(",
"x",
")",
"*",
"1.134",
"sig",
"=",
"1.5",
"hi",
"=",
"x",
">",
"mu",
"+",
"sig",
"*",
"sd",
"lo",
"=",
"x",
"<",
"mu",
"-",
"sig",
"*",
"sd",
"if",
"any",
"(",
"hi",
"|",
"lo",
")",
":",
"x",
"[",
"hi",
"]",
"=",
"mu",
"+",
"sig",
"*",
"sd",
"x",
"[",
"lo",
"]",
"=",
"mu",
"-",
"sig",
"*",
"sd",
"return",
"H15_mean",
"(",
"x",
")",
"else",
":",
"return",
"mu"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
H15_se
|
Calculate the Huber (H15) Robust standard deviation of x.
For details, see:
http://www.cscjp.co.jp/fera/document/ANALYSTVol114Decpgs1693-97_1989.pdf
http://www.rsc.org/images/robust-statistics-technical-brief-6_tcm18-214850.pdf
|
latools/helpers/stat_fns.py
|
def H15_se(x):
"""
Calculate the Huber (H15) Robust standard deviation of x.
For details, see:
http://www.cscjp.co.jp/fera/document/ANALYSTVol114Decpgs1693-97_1989.pdf
http://www.rsc.org/images/robust-statistics-technical-brief-6_tcm18-214850.pdf
"""
sd = H15_std(x)
return sd / np.sqrt(sum(np.isfinite(x)))
|
def H15_se(x):
"""
Calculate the Huber (H15) Robust standard deviation of x.
For details, see:
http://www.cscjp.co.jp/fera/document/ANALYSTVol114Decpgs1693-97_1989.pdf
http://www.rsc.org/images/robust-statistics-technical-brief-6_tcm18-214850.pdf
"""
sd = H15_std(x)
return sd / np.sqrt(sum(np.isfinite(x)))
|
[
"Calculate",
"the",
"Huber",
"(",
"H15",
")",
"Robust",
"standard",
"deviation",
"of",
"x",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/stat_fns.py#L178-L187
|
[
"def",
"H15_se",
"(",
"x",
")",
":",
"sd",
"=",
"H15_std",
"(",
"x",
")",
"return",
"sd",
"/",
"np",
".",
"sqrt",
"(",
"sum",
"(",
"np",
".",
"isfinite",
"(",
"x",
")",
")",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
reproduce
|
Reproduce a previous analysis exported with :func:`latools.analyse.minimal_export`
For normal use, supplying `log_file` and specifying a plotting option should be
enough to reproduce an analysis. All requisites (raw data, SRM table and any
custom stat functions) will then be imported from the minimal_export folder.
You may also specify your own raw_data, srm_table and custom_stat_functions,
if you wish.
Parameters
----------
log_file : str
The path to the log file produced by :func:`~latools.analyse.minimal_export`.
plotting : bool
Whether or not to output plots.
data_folder : str
Optional. Specify a different data folder. Data folder
should normally be in the same folder as the log file.
srm_table : str
Optional. Specify a different SRM table. SRM table
should normally be in the same folder as the log file.
custom_stat_functions : str
Optional. Specify a python file containing custom
stat functions for use by reproduce. Any custom
stat functions should normally be included in the
same folder as the log file.
|
latools/latools.py
|
def reproduce(past_analysis, plotting=False, data_folder=None,
srm_table=None, custom_stat_functions=None):
"""
Reproduce a previous analysis exported with :func:`latools.analyse.minimal_export`
For normal use, supplying `log_file` and specifying a plotting option should be
enough to reproduce an analysis. All requisites (raw data, SRM table and any
custom stat functions) will then be imported from the minimal_export folder.
You may also specify your own raw_data, srm_table and custom_stat_functions,
if you wish.
Parameters
----------
log_file : str
The path to the log file produced by :func:`~latools.analyse.minimal_export`.
plotting : bool
Whether or not to output plots.
data_folder : str
Optional. Specify a different data folder. Data folder
should normally be in the same folder as the log file.
srm_table : str
Optional. Specify a different SRM table. SRM table
should normally be in the same folder as the log file.
custom_stat_functions : str
Optional. Specify a python file containing custom
stat functions for use by reproduce. Any custom
stat functions should normally be included in the
same folder as the log file.
"""
if '.zip' in past_analysis:
dirpath = utils.extract_zipdir(past_analysis)
logpath = os.path.join(dirpath, 'analysis.lalog')
elif os.path.isdir(past_analysis):
if os.path.exists(os.path.join(past_analysis, 'analysis.lalog')):
logpath = os.path.join(past_analysis, 'analysis.lalog')
elif 'analysis.lalog' in past_analysis:
logpath = past_analysis
else:
raise ValueError(('\n\n{} is not a valid input.\n\n' +
'Must be one of:\n' +
' - A .zip file exported by latools\n' +
' - An analysis.lalog file\n' +
' - A directory containing an analysis.lalog files\n'))
runargs, paths = logging.read_logfile(logpath)
# parse custom stat functions
csfs = Bunch()
if custom_stat_functions is None and 'custom_stat_functions' in paths.keys():
# load custom functions as a dict
with open(paths['custom_stat_functions'], 'r') as f:
csf = f.read()
fname = re.compile('def (.*)\(.*')
for c in csf.split('\n\n\n\n'):
if fname.match(c):
csfs[fname.match(c).groups()[0]] = c
# create analysis object
rep = analyse(*runargs[0][-1]['args'], **runargs[0][-1]['kwargs'])
# rest of commands
for fname, arg in runargs:
if fname != '__init__':
if 'plot' in fname.lower() and plotting:
getattr(rep, fname)(*arg['args'], **arg['kwargs'])
elif 'sample_stats' in fname.lower():
rep.sample_stats(*arg['args'], csf_dict=csfs, **arg['kwargs'])
else:
getattr(rep, fname)(*arg['args'], **arg['kwargs'])
return rep
|
def reproduce(past_analysis, plotting=False, data_folder=None,
srm_table=None, custom_stat_functions=None):
"""
Reproduce a previous analysis exported with :func:`latools.analyse.minimal_export`
For normal use, supplying `log_file` and specifying a plotting option should be
enough to reproduce an analysis. All requisites (raw data, SRM table and any
custom stat functions) will then be imported from the minimal_export folder.
You may also specify your own raw_data, srm_table and custom_stat_functions,
if you wish.
Parameters
----------
log_file : str
The path to the log file produced by :func:`~latools.analyse.minimal_export`.
plotting : bool
Whether or not to output plots.
data_folder : str
Optional. Specify a different data folder. Data folder
should normally be in the same folder as the log file.
srm_table : str
Optional. Specify a different SRM table. SRM table
should normally be in the same folder as the log file.
custom_stat_functions : str
Optional. Specify a python file containing custom
stat functions for use by reproduce. Any custom
stat functions should normally be included in the
same folder as the log file.
"""
if '.zip' in past_analysis:
dirpath = utils.extract_zipdir(past_analysis)
logpath = os.path.join(dirpath, 'analysis.lalog')
elif os.path.isdir(past_analysis):
if os.path.exists(os.path.join(past_analysis, 'analysis.lalog')):
logpath = os.path.join(past_analysis, 'analysis.lalog')
elif 'analysis.lalog' in past_analysis:
logpath = past_analysis
else:
raise ValueError(('\n\n{} is not a valid input.\n\n' +
'Must be one of:\n' +
' - A .zip file exported by latools\n' +
' - An analysis.lalog file\n' +
' - A directory containing an analysis.lalog files\n'))
runargs, paths = logging.read_logfile(logpath)
# parse custom stat functions
csfs = Bunch()
if custom_stat_functions is None and 'custom_stat_functions' in paths.keys():
# load custom functions as a dict
with open(paths['custom_stat_functions'], 'r') as f:
csf = f.read()
fname = re.compile('def (.*)\(.*')
for c in csf.split('\n\n\n\n'):
if fname.match(c):
csfs[fname.match(c).groups()[0]] = c
# create analysis object
rep = analyse(*runargs[0][-1]['args'], **runargs[0][-1]['kwargs'])
# rest of commands
for fname, arg in runargs:
if fname != '__init__':
if 'plot' in fname.lower() and plotting:
getattr(rep, fname)(*arg['args'], **arg['kwargs'])
elif 'sample_stats' in fname.lower():
rep.sample_stats(*arg['args'], csf_dict=csfs, **arg['kwargs'])
else:
getattr(rep, fname)(*arg['args'], **arg['kwargs'])
return rep
|
[
"Reproduce",
"a",
"previous",
"analysis",
"exported",
"with",
":",
"func",
":",
"latools",
".",
"analyse",
".",
"minimal_export"
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L4018-L4091
|
[
"def",
"reproduce",
"(",
"past_analysis",
",",
"plotting",
"=",
"False",
",",
"data_folder",
"=",
"None",
",",
"srm_table",
"=",
"None",
",",
"custom_stat_functions",
"=",
"None",
")",
":",
"if",
"'.zip'",
"in",
"past_analysis",
":",
"dirpath",
"=",
"utils",
".",
"extract_zipdir",
"(",
"past_analysis",
")",
"logpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"'analysis.lalog'",
")",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"past_analysis",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"past_analysis",
",",
"'analysis.lalog'",
")",
")",
":",
"logpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"past_analysis",
",",
"'analysis.lalog'",
")",
"elif",
"'analysis.lalog'",
"in",
"past_analysis",
":",
"logpath",
"=",
"past_analysis",
"else",
":",
"raise",
"ValueError",
"(",
"(",
"'\\n\\n{} is not a valid input.\\n\\n'",
"+",
"'Must be one of:\\n'",
"+",
"' - A .zip file exported by latools\\n'",
"+",
"' - An analysis.lalog file\\n'",
"+",
"' - A directory containing an analysis.lalog files\\n'",
")",
")",
"runargs",
",",
"paths",
"=",
"logging",
".",
"read_logfile",
"(",
"logpath",
")",
"# parse custom stat functions",
"csfs",
"=",
"Bunch",
"(",
")",
"if",
"custom_stat_functions",
"is",
"None",
"and",
"'custom_stat_functions'",
"in",
"paths",
".",
"keys",
"(",
")",
":",
"# load custom functions as a dict",
"with",
"open",
"(",
"paths",
"[",
"'custom_stat_functions'",
"]",
",",
"'r'",
")",
"as",
"f",
":",
"csf",
"=",
"f",
".",
"read",
"(",
")",
"fname",
"=",
"re",
".",
"compile",
"(",
"'def (.*)\\(.*'",
")",
"for",
"c",
"in",
"csf",
".",
"split",
"(",
"'\\n\\n\\n\\n'",
")",
":",
"if",
"fname",
".",
"match",
"(",
"c",
")",
":",
"csfs",
"[",
"fname",
".",
"match",
"(",
"c",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"]",
"=",
"c",
"# create analysis object",
"rep",
"=",
"analyse",
"(",
"*",
"runargs",
"[",
"0",
"]",
"[",
"-",
"1",
"]",
"[",
"'args'",
"]",
",",
"*",
"*",
"runargs",
"[",
"0",
"]",
"[",
"-",
"1",
"]",
"[",
"'kwargs'",
"]",
")",
"# rest of commands",
"for",
"fname",
",",
"arg",
"in",
"runargs",
":",
"if",
"fname",
"!=",
"'__init__'",
":",
"if",
"'plot'",
"in",
"fname",
".",
"lower",
"(",
")",
"and",
"plotting",
":",
"getattr",
"(",
"rep",
",",
"fname",
")",
"(",
"*",
"arg",
"[",
"'args'",
"]",
",",
"*",
"*",
"arg",
"[",
"'kwargs'",
"]",
")",
"elif",
"'sample_stats'",
"in",
"fname",
".",
"lower",
"(",
")",
":",
"rep",
".",
"sample_stats",
"(",
"*",
"arg",
"[",
"'args'",
"]",
",",
"csf_dict",
"=",
"csfs",
",",
"*",
"*",
"arg",
"[",
"'kwargs'",
"]",
")",
"else",
":",
"getattr",
"(",
"rep",
",",
"fname",
")",
"(",
"*",
"arg",
"[",
"'args'",
"]",
",",
"*",
"*",
"arg",
"[",
"'kwargs'",
"]",
")",
"return",
"rep"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse._get_samples
|
Helper function to get sample names from subset.
Parameters
----------
subset : str
Subset name. If None, returns all samples.
Returns
-------
List of sample names
|
latools/latools.py
|
def _get_samples(self, subset=None):
"""
Helper function to get sample names from subset.
Parameters
----------
subset : str
Subset name. If None, returns all samples.
Returns
-------
List of sample names
"""
if subset is None:
samples = self.subsets['All_Samples']
else:
try:
samples = self.subsets[subset]
except KeyError:
raise KeyError(("Subset '{:s}' does not ".format(subset) +
"exist.\nUse 'make_subset' to create a" +
"subset."))
return samples
|
def _get_samples(self, subset=None):
"""
Helper function to get sample names from subset.
Parameters
----------
subset : str
Subset name. If None, returns all samples.
Returns
-------
List of sample names
"""
if subset is None:
samples = self.subsets['All_Samples']
else:
try:
samples = self.subsets[subset]
except KeyError:
raise KeyError(("Subset '{:s}' does not ".format(subset) +
"exist.\nUse 'make_subset' to create a" +
"subset."))
return samples
|
[
"Helper",
"function",
"to",
"get",
"sample",
"names",
"from",
"subset",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L337-L359
|
[
"def",
"_get_samples",
"(",
"self",
",",
"subset",
"=",
"None",
")",
":",
"if",
"subset",
"is",
"None",
":",
"samples",
"=",
"self",
".",
"subsets",
"[",
"'All_Samples'",
"]",
"else",
":",
"try",
":",
"samples",
"=",
"self",
".",
"subsets",
"[",
"subset",
"]",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"(",
"\"Subset '{:s}' does not \"",
".",
"format",
"(",
"subset",
")",
"+",
"\"exist.\\nUse 'make_subset' to create a\"",
"+",
"\"subset.\"",
")",
")",
"return",
"samples"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.autorange
|
Automatically separates signal and background data regions.
Automatically detect signal and background regions in the laser
data, based on the behaviour of a single analyte. The analyte used
should be abundant and homogenous in the sample.
**Step 1: Thresholding.**
The background signal is determined using a gaussian kernel density
estimator (kde) of all the data. Under normal circumstances, this
kde should find two distinct data distributions, corresponding to
'signal' and 'background'. The minima between these two distributions
is taken as a rough threshold to identify signal and background
regions. Any point where the trace crosses this thrshold is identified
as a 'transition'.
**Step 2: Transition Removal.**
The width of the transition regions between signal and background are
then determined, and the transitions are excluded from analysis. The
width of the transitions is determined by fitting a gaussian to the
smoothed first derivative of the analyte trace, and determining its
width at a point where the gaussian intensity is at at `conf` time the
gaussian maximum. These gaussians are fit to subsets of the data
centered around the transitions regions determined in Step 1, +/- `win`
data points. The peak is further isolated by finding the minima and
maxima of a second derivative within this window, and the gaussian is
fit to the isolated peak.
Parameters
----------
analyte : str
The analyte that autorange should consider. For best results,
choose an analyte that is present homogeneously in high
concentrations.
This can also be 'total_counts' to use the sum of all analytes.
gwin : int
The smoothing window used for calculating the first derivative.
Must be odd.
win : int
Determines the width (c +/- win) of the transition data subsets.
smwin : int
The smoothing window used for calculating the second derivative.
Must be odd.
conf : float
The proportional intensity of the fitted gaussian tails that
determines the transition width cutoff (lower = wider transition
regions excluded).
trans_mult : array_like, len=2
Multiples of the peak FWHM to add to the transition cutoffs, e.g.
if the transitions consistently leave some bad data proceeding the
transition, set trans_mult to [0, 0.5] to ad 0.5 * the FWHM to the
right hand side of the limit.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked', or rawdata' if not despiked. Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Returns
-------
Outputs added as instance attributes. Returns None.
bkg, sig, trn : iterable, bool
Boolean arrays identifying background, signal and transision
regions
bkgrng, sigrng and trnrng : iterable
(min, max) pairs identifying the boundaries of contiguous
True regions in the boolean arrays.
|
latools/latools.py
|
def autorange(self, analyte='total_counts', gwin=5, swin=3, win=20,
on_mult=[1., 1.5], off_mult=[1.5, 1],
transform='log', ploterrs=True, focus_stage='despiked'):
"""
Automatically separates signal and background data regions.
Automatically detect signal and background regions in the laser
data, based on the behaviour of a single analyte. The analyte used
should be abundant and homogenous in the sample.
**Step 1: Thresholding.**
The background signal is determined using a gaussian kernel density
estimator (kde) of all the data. Under normal circumstances, this
kde should find two distinct data distributions, corresponding to
'signal' and 'background'. The minima between these two distributions
is taken as a rough threshold to identify signal and background
regions. Any point where the trace crosses this thrshold is identified
as a 'transition'.
**Step 2: Transition Removal.**
The width of the transition regions between signal and background are
then determined, and the transitions are excluded from analysis. The
width of the transitions is determined by fitting a gaussian to the
smoothed first derivative of the analyte trace, and determining its
width at a point where the gaussian intensity is at at `conf` time the
gaussian maximum. These gaussians are fit to subsets of the data
centered around the transitions regions determined in Step 1, +/- `win`
data points. The peak is further isolated by finding the minima and
maxima of a second derivative within this window, and the gaussian is
fit to the isolated peak.
Parameters
----------
analyte : str
The analyte that autorange should consider. For best results,
choose an analyte that is present homogeneously in high
concentrations.
This can also be 'total_counts' to use the sum of all analytes.
gwin : int
The smoothing window used for calculating the first derivative.
Must be odd.
win : int
Determines the width (c +/- win) of the transition data subsets.
smwin : int
The smoothing window used for calculating the second derivative.
Must be odd.
conf : float
The proportional intensity of the fitted gaussian tails that
determines the transition width cutoff (lower = wider transition
regions excluded).
trans_mult : array_like, len=2
Multiples of the peak FWHM to add to the transition cutoffs, e.g.
if the transitions consistently leave some bad data proceeding the
transition, set trans_mult to [0, 0.5] to ad 0.5 * the FWHM to the
right hand side of the limit.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked', or rawdata' if not despiked. Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Returns
-------
Outputs added as instance attributes. Returns None.
bkg, sig, trn : iterable, bool
Boolean arrays identifying background, signal and transision
regions
bkgrng, sigrng and trnrng : iterable
(min, max) pairs identifying the boundaries of contiguous
True regions in the boolean arrays.
"""
if focus_stage == 'despiked':
if 'despiked' not in self.stages_complete:
focus_stage = 'rawdata'
if analyte is None:
analyte = self.internal_standard
elif analyte in self.analytes:
self.minimal_analytes.update([analyte])
fails = {} # list for catching failures.
with self.pbar.set(total=len(self.data), desc='AutoRange') as prog:
for s, d in self.data.items():
f = d.autorange(analyte=analyte, gwin=gwin, swin=swin, win=win,
on_mult=on_mult, off_mult=off_mult,
ploterrs=ploterrs, transform=transform)
if f is not None:
fails[s] = f
prog.update() # advance progress bar
# handle failures
if len(fails) > 0:
wstr = ('\n\n' + '*' * 41 + '\n' +
' WARNING\n' + '*' * 41 + '\n' +
'Autorange failed for some samples:\n')
kwidth = max([len(k) for k in fails.keys()]) + 1
fstr = ' {:' + '{}'.format(kwidth) + 's}: '
for k in sorted(fails.keys()):
wstr += fstr.format(k) + ', '.join(['{:.1f}'.format(f) for f in fails[k][-1]]) + '\n'
wstr += ('\n*** THIS IS NOT NECESSARILY A PROBLEM ***\n' +
'But please check the plots below to make\n' +
'sure they look OK. Failures are marked by\n' +
'dashed vertical red lines.\n\n' +
'To examine an autorange failure in more\n' +
'detail, use the `autorange_plot` method\n' +
'of the failing data object, e.g.:\n' +
"dat.data['Sample'].autorange_plot(params)\n" +
'*' * 41 + '\n')
warnings.warn(wstr)
self.stages_complete.update(['autorange'])
return
|
def autorange(self, analyte='total_counts', gwin=5, swin=3, win=20,
on_mult=[1., 1.5], off_mult=[1.5, 1],
transform='log', ploterrs=True, focus_stage='despiked'):
"""
Automatically separates signal and background data regions.
Automatically detect signal and background regions in the laser
data, based on the behaviour of a single analyte. The analyte used
should be abundant and homogenous in the sample.
**Step 1: Thresholding.**
The background signal is determined using a gaussian kernel density
estimator (kde) of all the data. Under normal circumstances, this
kde should find two distinct data distributions, corresponding to
'signal' and 'background'. The minima between these two distributions
is taken as a rough threshold to identify signal and background
regions. Any point where the trace crosses this thrshold is identified
as a 'transition'.
**Step 2: Transition Removal.**
The width of the transition regions between signal and background are
then determined, and the transitions are excluded from analysis. The
width of the transitions is determined by fitting a gaussian to the
smoothed first derivative of the analyte trace, and determining its
width at a point where the gaussian intensity is at at `conf` time the
gaussian maximum. These gaussians are fit to subsets of the data
centered around the transitions regions determined in Step 1, +/- `win`
data points. The peak is further isolated by finding the minima and
maxima of a second derivative within this window, and the gaussian is
fit to the isolated peak.
Parameters
----------
analyte : str
The analyte that autorange should consider. For best results,
choose an analyte that is present homogeneously in high
concentrations.
This can also be 'total_counts' to use the sum of all analytes.
gwin : int
The smoothing window used for calculating the first derivative.
Must be odd.
win : int
Determines the width (c +/- win) of the transition data subsets.
smwin : int
The smoothing window used for calculating the second derivative.
Must be odd.
conf : float
The proportional intensity of the fitted gaussian tails that
determines the transition width cutoff (lower = wider transition
regions excluded).
trans_mult : array_like, len=2
Multiples of the peak FWHM to add to the transition cutoffs, e.g.
if the transitions consistently leave some bad data proceeding the
transition, set trans_mult to [0, 0.5] to ad 0.5 * the FWHM to the
right hand side of the limit.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked', or rawdata' if not despiked. Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Returns
-------
Outputs added as instance attributes. Returns None.
bkg, sig, trn : iterable, bool
Boolean arrays identifying background, signal and transision
regions
bkgrng, sigrng and trnrng : iterable
(min, max) pairs identifying the boundaries of contiguous
True regions in the boolean arrays.
"""
if focus_stage == 'despiked':
if 'despiked' not in self.stages_complete:
focus_stage = 'rawdata'
if analyte is None:
analyte = self.internal_standard
elif analyte in self.analytes:
self.minimal_analytes.update([analyte])
fails = {} # list for catching failures.
with self.pbar.set(total=len(self.data), desc='AutoRange') as prog:
for s, d in self.data.items():
f = d.autorange(analyte=analyte, gwin=gwin, swin=swin, win=win,
on_mult=on_mult, off_mult=off_mult,
ploterrs=ploterrs, transform=transform)
if f is not None:
fails[s] = f
prog.update() # advance progress bar
# handle failures
if len(fails) > 0:
wstr = ('\n\n' + '*' * 41 + '\n' +
' WARNING\n' + '*' * 41 + '\n' +
'Autorange failed for some samples:\n')
kwidth = max([len(k) for k in fails.keys()]) + 1
fstr = ' {:' + '{}'.format(kwidth) + 's}: '
for k in sorted(fails.keys()):
wstr += fstr.format(k) + ', '.join(['{:.1f}'.format(f) for f in fails[k][-1]]) + '\n'
wstr += ('\n*** THIS IS NOT NECESSARILY A PROBLEM ***\n' +
'But please check the plots below to make\n' +
'sure they look OK. Failures are marked by\n' +
'dashed vertical red lines.\n\n' +
'To examine an autorange failure in more\n' +
'detail, use the `autorange_plot` method\n' +
'of the failing data object, e.g.:\n' +
"dat.data['Sample'].autorange_plot(params)\n" +
'*' * 41 + '\n')
warnings.warn(wstr)
self.stages_complete.update(['autorange'])
return
|
[
"Automatically",
"separates",
"signal",
"and",
"background",
"data",
"regions",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L406-L525
|
[
"def",
"autorange",
"(",
"self",
",",
"analyte",
"=",
"'total_counts'",
",",
"gwin",
"=",
"5",
",",
"swin",
"=",
"3",
",",
"win",
"=",
"20",
",",
"on_mult",
"=",
"[",
"1.",
",",
"1.5",
"]",
",",
"off_mult",
"=",
"[",
"1.5",
",",
"1",
"]",
",",
"transform",
"=",
"'log'",
",",
"ploterrs",
"=",
"True",
",",
"focus_stage",
"=",
"'despiked'",
")",
":",
"if",
"focus_stage",
"==",
"'despiked'",
":",
"if",
"'despiked'",
"not",
"in",
"self",
".",
"stages_complete",
":",
"focus_stage",
"=",
"'rawdata'",
"if",
"analyte",
"is",
"None",
":",
"analyte",
"=",
"self",
".",
"internal_standard",
"elif",
"analyte",
"in",
"self",
".",
"analytes",
":",
"self",
".",
"minimal_analytes",
".",
"update",
"(",
"[",
"analyte",
"]",
")",
"fails",
"=",
"{",
"}",
"# list for catching failures.",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"self",
".",
"data",
")",
",",
"desc",
"=",
"'AutoRange'",
")",
"as",
"prog",
":",
"for",
"s",
",",
"d",
"in",
"self",
".",
"data",
".",
"items",
"(",
")",
":",
"f",
"=",
"d",
".",
"autorange",
"(",
"analyte",
"=",
"analyte",
",",
"gwin",
"=",
"gwin",
",",
"swin",
"=",
"swin",
",",
"win",
"=",
"win",
",",
"on_mult",
"=",
"on_mult",
",",
"off_mult",
"=",
"off_mult",
",",
"ploterrs",
"=",
"ploterrs",
",",
"transform",
"=",
"transform",
")",
"if",
"f",
"is",
"not",
"None",
":",
"fails",
"[",
"s",
"]",
"=",
"f",
"prog",
".",
"update",
"(",
")",
"# advance progress bar",
"# handle failures",
"if",
"len",
"(",
"fails",
")",
">",
"0",
":",
"wstr",
"=",
"(",
"'\\n\\n'",
"+",
"'*'",
"*",
"41",
"+",
"'\\n'",
"+",
"' WARNING\\n'",
"+",
"'*'",
"*",
"41",
"+",
"'\\n'",
"+",
"'Autorange failed for some samples:\\n'",
")",
"kwidth",
"=",
"max",
"(",
"[",
"len",
"(",
"k",
")",
"for",
"k",
"in",
"fails",
".",
"keys",
"(",
")",
"]",
")",
"+",
"1",
"fstr",
"=",
"' {:'",
"+",
"'{}'",
".",
"format",
"(",
"kwidth",
")",
"+",
"'s}: '",
"for",
"k",
"in",
"sorted",
"(",
"fails",
".",
"keys",
"(",
")",
")",
":",
"wstr",
"+=",
"fstr",
".",
"format",
"(",
"k",
")",
"+",
"', '",
".",
"join",
"(",
"[",
"'{:.1f}'",
".",
"format",
"(",
"f",
")",
"for",
"f",
"in",
"fails",
"[",
"k",
"]",
"[",
"-",
"1",
"]",
"]",
")",
"+",
"'\\n'",
"wstr",
"+=",
"(",
"'\\n*** THIS IS NOT NECESSARILY A PROBLEM ***\\n'",
"+",
"'But please check the plots below to make\\n'",
"+",
"'sure they look OK. Failures are marked by\\n'",
"+",
"'dashed vertical red lines.\\n\\n'",
"+",
"'To examine an autorange failure in more\\n'",
"+",
"'detail, use the `autorange_plot` method\\n'",
"+",
"'of the failing data object, e.g.:\\n'",
"+",
"\"dat.data['Sample'].autorange_plot(params)\\n\"",
"+",
"'*'",
"*",
"41",
"+",
"'\\n'",
")",
"warnings",
".",
"warn",
"(",
"wstr",
")",
"self",
".",
"stages_complete",
".",
"update",
"(",
"[",
"'autorange'",
"]",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.find_expcoef
|
Determines exponential decay coefficient for despike filter.
Fits an exponential decay function to the washout phase of standards
to determine the washout time of your laser cell. The exponential
coefficient reported is `nsd_below` standard deviations below the
fitted exponent, to ensure that no real data is removed.
Total counts are used in fitting, rather than a specific analyte.
Parameters
----------
nsd_below : float
The number of standard deviations to subtract from the fitted
coefficient when calculating the filter exponent.
plot : bool or str
If True, creates a plot of the fit, if str the plot is to the
location specified in str.
trimlim : float
A threshold limit used in determining the start of the
exponential decay region of the washout. Defaults to half
the increase in signal over background. If the data in
the plot don't fall on an exponential decay line, change
this number. Normally you'll need to increase it.
Returns
-------
None
|
latools/latools.py
|
def find_expcoef(self, nsd_below=0., plot=False,
trimlim=None, autorange_kwargs={}):
"""
Determines exponential decay coefficient for despike filter.
Fits an exponential decay function to the washout phase of standards
to determine the washout time of your laser cell. The exponential
coefficient reported is `nsd_below` standard deviations below the
fitted exponent, to ensure that no real data is removed.
Total counts are used in fitting, rather than a specific analyte.
Parameters
----------
nsd_below : float
The number of standard deviations to subtract from the fitted
coefficient when calculating the filter exponent.
plot : bool or str
If True, creates a plot of the fit, if str the plot is to the
location specified in str.
trimlim : float
A threshold limit used in determining the start of the
exponential decay region of the washout. Defaults to half
the increase in signal over background. If the data in
the plot don't fall on an exponential decay line, change
this number. Normally you'll need to increase it.
Returns
-------
None
"""
print('Calculating exponential decay coefficient\nfrom SRM washouts...')
def findtrim(tr, lim=None):
trr = np.roll(tr, -1)
trr[-1] = 0
if lim is None:
lim = 0.5 * np.nanmax(tr - trr)
ind = (tr - trr) >= lim
return np.arange(len(ind))[ind ^ np.roll(ind, -1)][0]
if not hasattr(self.stds[0], 'trnrng'):
for s in self.stds:
s.autorange(**autorange_kwargs, ploterrs=False)
trans = []
times = []
for v in self.stds:
for trnrng in v.trnrng[-1::-2]:
tr = minmax_scale(v.data['total_counts'][(v.Time > trnrng[0]) & (v.Time < trnrng[1])])
sm = np.apply_along_axis(np.nanmean, 1,
rolling_window(tr, 3, pad=0))
sm[0] = sm[1]
trim = findtrim(sm, trimlim) + 2
trans.append(minmax_scale(tr[trim:]))
times.append(np.arange(tr[trim:].size) *
np.diff(v.Time[1:3]))
times = np.concatenate(times)
times = np.round(times, 2)
trans = np.concatenate(trans)
ti = []
tr = []
for t in np.unique(times):
ti.append(t)
tr.append(np.nanmin(trans[times == t]))
def expfit(x, e):
"""
Exponential decay function.
"""
return np.exp(e * x)
ep, ecov = curve_fit(expfit, ti, tr, p0=(-1.))
eeR2 = R2calc(trans, expfit(times, ep))
if plot:
fig, ax = plt.subplots(1, 1, figsize=[6, 4])
ax.scatter(times, trans, alpha=0.2, color='k', marker='x', zorder=-2)
ax.scatter(ti, tr, alpha=1, color='k', marker='o')
fitx = np.linspace(0, max(ti))
ax.plot(fitx, expfit(fitx, ep), color='r', label='Fit')
ax.plot(fitx, expfit(fitx, ep - nsd_below * np.diag(ecov)**.5, ),
color='b', label='Used')
ax.text(0.95, 0.75,
('y = $e^{%.2f \pm %.2f * x}$\n$R^2$= %.2f \nCoefficient: '
'%.2f') % (ep,
np.diag(ecov)**.5,
eeR2,
ep - nsd_below * np.diag(ecov)**.5),
transform=ax.transAxes, ha='right', va='top', size=12)
ax.set_xlim(0, ax.get_xlim()[-1])
ax.set_xlabel('Time (s)')
ax.set_ylim(-0.05, 1.05)
ax.set_ylabel('Proportion of Signal')
plt.legend()
if isinstance(plot, str):
fig.savefig(plot)
self.expdecay_coef = ep - nsd_below * np.diag(ecov)**.5
print(' {:0.2f}'.format(self.expdecay_coef[0]))
return
|
def find_expcoef(self, nsd_below=0., plot=False,
trimlim=None, autorange_kwargs={}):
"""
Determines exponential decay coefficient for despike filter.
Fits an exponential decay function to the washout phase of standards
to determine the washout time of your laser cell. The exponential
coefficient reported is `nsd_below` standard deviations below the
fitted exponent, to ensure that no real data is removed.
Total counts are used in fitting, rather than a specific analyte.
Parameters
----------
nsd_below : float
The number of standard deviations to subtract from the fitted
coefficient when calculating the filter exponent.
plot : bool or str
If True, creates a plot of the fit, if str the plot is to the
location specified in str.
trimlim : float
A threshold limit used in determining the start of the
exponential decay region of the washout. Defaults to half
the increase in signal over background. If the data in
the plot don't fall on an exponential decay line, change
this number. Normally you'll need to increase it.
Returns
-------
None
"""
print('Calculating exponential decay coefficient\nfrom SRM washouts...')
def findtrim(tr, lim=None):
trr = np.roll(tr, -1)
trr[-1] = 0
if lim is None:
lim = 0.5 * np.nanmax(tr - trr)
ind = (tr - trr) >= lim
return np.arange(len(ind))[ind ^ np.roll(ind, -1)][0]
if not hasattr(self.stds[0], 'trnrng'):
for s in self.stds:
s.autorange(**autorange_kwargs, ploterrs=False)
trans = []
times = []
for v in self.stds:
for trnrng in v.trnrng[-1::-2]:
tr = minmax_scale(v.data['total_counts'][(v.Time > trnrng[0]) & (v.Time < trnrng[1])])
sm = np.apply_along_axis(np.nanmean, 1,
rolling_window(tr, 3, pad=0))
sm[0] = sm[1]
trim = findtrim(sm, trimlim) + 2
trans.append(minmax_scale(tr[trim:]))
times.append(np.arange(tr[trim:].size) *
np.diff(v.Time[1:3]))
times = np.concatenate(times)
times = np.round(times, 2)
trans = np.concatenate(trans)
ti = []
tr = []
for t in np.unique(times):
ti.append(t)
tr.append(np.nanmin(trans[times == t]))
def expfit(x, e):
"""
Exponential decay function.
"""
return np.exp(e * x)
ep, ecov = curve_fit(expfit, ti, tr, p0=(-1.))
eeR2 = R2calc(trans, expfit(times, ep))
if plot:
fig, ax = plt.subplots(1, 1, figsize=[6, 4])
ax.scatter(times, trans, alpha=0.2, color='k', marker='x', zorder=-2)
ax.scatter(ti, tr, alpha=1, color='k', marker='o')
fitx = np.linspace(0, max(ti))
ax.plot(fitx, expfit(fitx, ep), color='r', label='Fit')
ax.plot(fitx, expfit(fitx, ep - nsd_below * np.diag(ecov)**.5, ),
color='b', label='Used')
ax.text(0.95, 0.75,
('y = $e^{%.2f \pm %.2f * x}$\n$R^2$= %.2f \nCoefficient: '
'%.2f') % (ep,
np.diag(ecov)**.5,
eeR2,
ep - nsd_below * np.diag(ecov)**.5),
transform=ax.transAxes, ha='right', va='top', size=12)
ax.set_xlim(0, ax.get_xlim()[-1])
ax.set_xlabel('Time (s)')
ax.set_ylim(-0.05, 1.05)
ax.set_ylabel('Proportion of Signal')
plt.legend()
if isinstance(plot, str):
fig.savefig(plot)
self.expdecay_coef = ep - nsd_below * np.diag(ecov)**.5
print(' {:0.2f}'.format(self.expdecay_coef[0]))
return
|
[
"Determines",
"exponential",
"decay",
"coefficient",
"for",
"despike",
"filter",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L527-L633
|
[
"def",
"find_expcoef",
"(",
"self",
",",
"nsd_below",
"=",
"0.",
",",
"plot",
"=",
"False",
",",
"trimlim",
"=",
"None",
",",
"autorange_kwargs",
"=",
"{",
"}",
")",
":",
"print",
"(",
"'Calculating exponential decay coefficient\\nfrom SRM washouts...'",
")",
"def",
"findtrim",
"(",
"tr",
",",
"lim",
"=",
"None",
")",
":",
"trr",
"=",
"np",
".",
"roll",
"(",
"tr",
",",
"-",
"1",
")",
"trr",
"[",
"-",
"1",
"]",
"=",
"0",
"if",
"lim",
"is",
"None",
":",
"lim",
"=",
"0.5",
"*",
"np",
".",
"nanmax",
"(",
"tr",
"-",
"trr",
")",
"ind",
"=",
"(",
"tr",
"-",
"trr",
")",
">=",
"lim",
"return",
"np",
".",
"arange",
"(",
"len",
"(",
"ind",
")",
")",
"[",
"ind",
"^",
"np",
".",
"roll",
"(",
"ind",
",",
"-",
"1",
")",
"]",
"[",
"0",
"]",
"if",
"not",
"hasattr",
"(",
"self",
".",
"stds",
"[",
"0",
"]",
",",
"'trnrng'",
")",
":",
"for",
"s",
"in",
"self",
".",
"stds",
":",
"s",
".",
"autorange",
"(",
"*",
"*",
"autorange_kwargs",
",",
"ploterrs",
"=",
"False",
")",
"trans",
"=",
"[",
"]",
"times",
"=",
"[",
"]",
"for",
"v",
"in",
"self",
".",
"stds",
":",
"for",
"trnrng",
"in",
"v",
".",
"trnrng",
"[",
"-",
"1",
":",
":",
"-",
"2",
"]",
":",
"tr",
"=",
"minmax_scale",
"(",
"v",
".",
"data",
"[",
"'total_counts'",
"]",
"[",
"(",
"v",
".",
"Time",
">",
"trnrng",
"[",
"0",
"]",
")",
"&",
"(",
"v",
".",
"Time",
"<",
"trnrng",
"[",
"1",
"]",
")",
"]",
")",
"sm",
"=",
"np",
".",
"apply_along_axis",
"(",
"np",
".",
"nanmean",
",",
"1",
",",
"rolling_window",
"(",
"tr",
",",
"3",
",",
"pad",
"=",
"0",
")",
")",
"sm",
"[",
"0",
"]",
"=",
"sm",
"[",
"1",
"]",
"trim",
"=",
"findtrim",
"(",
"sm",
",",
"trimlim",
")",
"+",
"2",
"trans",
".",
"append",
"(",
"minmax_scale",
"(",
"tr",
"[",
"trim",
":",
"]",
")",
")",
"times",
".",
"append",
"(",
"np",
".",
"arange",
"(",
"tr",
"[",
"trim",
":",
"]",
".",
"size",
")",
"*",
"np",
".",
"diff",
"(",
"v",
".",
"Time",
"[",
"1",
":",
"3",
"]",
")",
")",
"times",
"=",
"np",
".",
"concatenate",
"(",
"times",
")",
"times",
"=",
"np",
".",
"round",
"(",
"times",
",",
"2",
")",
"trans",
"=",
"np",
".",
"concatenate",
"(",
"trans",
")",
"ti",
"=",
"[",
"]",
"tr",
"=",
"[",
"]",
"for",
"t",
"in",
"np",
".",
"unique",
"(",
"times",
")",
":",
"ti",
".",
"append",
"(",
"t",
")",
"tr",
".",
"append",
"(",
"np",
".",
"nanmin",
"(",
"trans",
"[",
"times",
"==",
"t",
"]",
")",
")",
"def",
"expfit",
"(",
"x",
",",
"e",
")",
":",
"\"\"\"\n Exponential decay function.\n \"\"\"",
"return",
"np",
".",
"exp",
"(",
"e",
"*",
"x",
")",
"ep",
",",
"ecov",
"=",
"curve_fit",
"(",
"expfit",
",",
"ti",
",",
"tr",
",",
"p0",
"=",
"(",
"-",
"1.",
")",
")",
"eeR2",
"=",
"R2calc",
"(",
"trans",
",",
"expfit",
"(",
"times",
",",
"ep",
")",
")",
"if",
"plot",
":",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"1",
",",
"1",
",",
"figsize",
"=",
"[",
"6",
",",
"4",
"]",
")",
"ax",
".",
"scatter",
"(",
"times",
",",
"trans",
",",
"alpha",
"=",
"0.2",
",",
"color",
"=",
"'k'",
",",
"marker",
"=",
"'x'",
",",
"zorder",
"=",
"-",
"2",
")",
"ax",
".",
"scatter",
"(",
"ti",
",",
"tr",
",",
"alpha",
"=",
"1",
",",
"color",
"=",
"'k'",
",",
"marker",
"=",
"'o'",
")",
"fitx",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"max",
"(",
"ti",
")",
")",
"ax",
".",
"plot",
"(",
"fitx",
",",
"expfit",
"(",
"fitx",
",",
"ep",
")",
",",
"color",
"=",
"'r'",
",",
"label",
"=",
"'Fit'",
")",
"ax",
".",
"plot",
"(",
"fitx",
",",
"expfit",
"(",
"fitx",
",",
"ep",
"-",
"nsd_below",
"*",
"np",
".",
"diag",
"(",
"ecov",
")",
"**",
".5",
",",
")",
",",
"color",
"=",
"'b'",
",",
"label",
"=",
"'Used'",
")",
"ax",
".",
"text",
"(",
"0.95",
",",
"0.75",
",",
"(",
"'y = $e^{%.2f \\pm %.2f * x}$\\n$R^2$= %.2f \\nCoefficient: '",
"'%.2f'",
")",
"%",
"(",
"ep",
",",
"np",
".",
"diag",
"(",
"ecov",
")",
"**",
".5",
",",
"eeR2",
",",
"ep",
"-",
"nsd_below",
"*",
"np",
".",
"diag",
"(",
"ecov",
")",
"**",
".5",
")",
",",
"transform",
"=",
"ax",
".",
"transAxes",
",",
"ha",
"=",
"'right'",
",",
"va",
"=",
"'top'",
",",
"size",
"=",
"12",
")",
"ax",
".",
"set_xlim",
"(",
"0",
",",
"ax",
".",
"get_xlim",
"(",
")",
"[",
"-",
"1",
"]",
")",
"ax",
".",
"set_xlabel",
"(",
"'Time (s)'",
")",
"ax",
".",
"set_ylim",
"(",
"-",
"0.05",
",",
"1.05",
")",
"ax",
".",
"set_ylabel",
"(",
"'Proportion of Signal'",
")",
"plt",
".",
"legend",
"(",
")",
"if",
"isinstance",
"(",
"plot",
",",
"str",
")",
":",
"fig",
".",
"savefig",
"(",
"plot",
")",
"self",
".",
"expdecay_coef",
"=",
"ep",
"-",
"nsd_below",
"*",
"np",
".",
"diag",
"(",
"ecov",
")",
"**",
".5",
"print",
"(",
"' {:0.2f}'",
".",
"format",
"(",
"self",
".",
"expdecay_coef",
"[",
"0",
"]",
")",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.despike
|
Despikes data with exponential decay and noise filters.
Parameters
----------
expdecay_despiker : bool
Whether or not to apply the exponential decay filter.
exponent : None or float
The exponent for the exponential decay filter. If None,
it is determined automatically using `find_expocoef`.
tstep : None or float
The timeinterval between measurements. If None, it is
determined automatically from the Time variable.
noise_despiker : bool
Whether or not to apply the standard deviation spike filter.
win : int
The rolling window over which the spike filter calculates
the trace statistics.
nlim : float
The number of standard deviations above the rolling mean
that data are excluded.
exponentplot : bool
Whether or not to show a plot of the automatically determined
exponential decay exponent.
maxiter : int
The max number of times that the fitler is applied.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'rawdata'. Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Returns
-------
None
|
latools/latools.py
|
def despike(self, expdecay_despiker=False, exponent=None,
noise_despiker=True, win=3, nlim=12., exponentplot=False,
maxiter=4, autorange_kwargs={}, focus_stage='rawdata'):
"""
Despikes data with exponential decay and noise filters.
Parameters
----------
expdecay_despiker : bool
Whether or not to apply the exponential decay filter.
exponent : None or float
The exponent for the exponential decay filter. If None,
it is determined automatically using `find_expocoef`.
tstep : None or float
The timeinterval between measurements. If None, it is
determined automatically from the Time variable.
noise_despiker : bool
Whether or not to apply the standard deviation spike filter.
win : int
The rolling window over which the spike filter calculates
the trace statistics.
nlim : float
The number of standard deviations above the rolling mean
that data are excluded.
exponentplot : bool
Whether or not to show a plot of the automatically determined
exponential decay exponent.
maxiter : int
The max number of times that the fitler is applied.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'rawdata'. Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Returns
-------
None
"""
if focus_stage != self.focus_stage:
self.set_focus(focus_stage)
if expdecay_despiker and exponent is None:
if not hasattr(self, 'expdecay_coef'):
self.find_expcoef(plot=exponentplot,
autorange_kwargs=autorange_kwargs)
exponent = self.expdecay_coef
time.sleep(0.1)
with self.pbar.set(total=len(self.data), desc='Despiking') as prog:
for d in self.data.values():
d.despike(expdecay_despiker, exponent,
noise_despiker, win, nlim, maxiter)
prog.update()
self.stages_complete.update(['despiked'])
self.focus_stage = 'despiked'
return
|
def despike(self, expdecay_despiker=False, exponent=None,
noise_despiker=True, win=3, nlim=12., exponentplot=False,
maxiter=4, autorange_kwargs={}, focus_stage='rawdata'):
"""
Despikes data with exponential decay and noise filters.
Parameters
----------
expdecay_despiker : bool
Whether or not to apply the exponential decay filter.
exponent : None or float
The exponent for the exponential decay filter. If None,
it is determined automatically using `find_expocoef`.
tstep : None or float
The timeinterval between measurements. If None, it is
determined automatically from the Time variable.
noise_despiker : bool
Whether or not to apply the standard deviation spike filter.
win : int
The rolling window over which the spike filter calculates
the trace statistics.
nlim : float
The number of standard deviations above the rolling mean
that data are excluded.
exponentplot : bool
Whether or not to show a plot of the automatically determined
exponential decay exponent.
maxiter : int
The max number of times that the fitler is applied.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'rawdata'. Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Returns
-------
None
"""
if focus_stage != self.focus_stage:
self.set_focus(focus_stage)
if expdecay_despiker and exponent is None:
if not hasattr(self, 'expdecay_coef'):
self.find_expcoef(plot=exponentplot,
autorange_kwargs=autorange_kwargs)
exponent = self.expdecay_coef
time.sleep(0.1)
with self.pbar.set(total=len(self.data), desc='Despiking') as prog:
for d in self.data.values():
d.despike(expdecay_despiker, exponent,
noise_despiker, win, nlim, maxiter)
prog.update()
self.stages_complete.update(['despiked'])
self.focus_stage = 'despiked'
return
|
[
"Despikes",
"data",
"with",
"exponential",
"decay",
"and",
"noise",
"filters",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L636-L700
|
[
"def",
"despike",
"(",
"self",
",",
"expdecay_despiker",
"=",
"False",
",",
"exponent",
"=",
"None",
",",
"noise_despiker",
"=",
"True",
",",
"win",
"=",
"3",
",",
"nlim",
"=",
"12.",
",",
"exponentplot",
"=",
"False",
",",
"maxiter",
"=",
"4",
",",
"autorange_kwargs",
"=",
"{",
"}",
",",
"focus_stage",
"=",
"'rawdata'",
")",
":",
"if",
"focus_stage",
"!=",
"self",
".",
"focus_stage",
":",
"self",
".",
"set_focus",
"(",
"focus_stage",
")",
"if",
"expdecay_despiker",
"and",
"exponent",
"is",
"None",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'expdecay_coef'",
")",
":",
"self",
".",
"find_expcoef",
"(",
"plot",
"=",
"exponentplot",
",",
"autorange_kwargs",
"=",
"autorange_kwargs",
")",
"exponent",
"=",
"self",
".",
"expdecay_coef",
"time",
".",
"sleep",
"(",
"0.1",
")",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"self",
".",
"data",
")",
",",
"desc",
"=",
"'Despiking'",
")",
"as",
"prog",
":",
"for",
"d",
"in",
"self",
".",
"data",
".",
"values",
"(",
")",
":",
"d",
".",
"despike",
"(",
"expdecay_despiker",
",",
"exponent",
",",
"noise_despiker",
",",
"win",
",",
"nlim",
",",
"maxiter",
")",
"prog",
".",
"update",
"(",
")",
"self",
".",
"stages_complete",
".",
"update",
"(",
"[",
"'despiked'",
"]",
")",
"self",
".",
"focus_stage",
"=",
"'despiked'",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.get_background
|
Extract all background data from all samples on universal time scale.
Used by both 'polynomial' and 'weightedmean' methods.
Parameters
----------
n_min : int
The minimum number of points a background region must
have to be included in calculation.
n_max : int
The maximum number of points a background region must
have to be included in calculation.
filter : bool
If true, apply a rolling filter to the isolated background regions
to exclude regions with anomalously high values. If True, two parameters
alter the filter's behaviour:
f_win : int
The size of the rolling window
f_n_lim : float
The number of standard deviations above the rolling mean
to set the threshold.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Returns
-------
pandas.DataFrame object containing background data.
|
latools/latools.py
|
def get_background(self, n_min=10, n_max=None, focus_stage='despiked', bkg_filter=False, f_win=5, f_n_lim=3):
"""
Extract all background data from all samples on universal time scale.
Used by both 'polynomial' and 'weightedmean' methods.
Parameters
----------
n_min : int
The minimum number of points a background region must
have to be included in calculation.
n_max : int
The maximum number of points a background region must
have to be included in calculation.
filter : bool
If true, apply a rolling filter to the isolated background regions
to exclude regions with anomalously high values. If True, two parameters
alter the filter's behaviour:
f_win : int
The size of the rolling window
f_n_lim : float
The number of standard deviations above the rolling mean
to set the threshold.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Returns
-------
pandas.DataFrame object containing background data.
"""
allbkgs = {'uTime': [],
'ns': []}
if focus_stage == 'despiked':
if 'despiked' not in self.stages_complete:
focus_stage = 'rawdata'
for a in self.analytes:
allbkgs[a] = []
n0 = 0
for s in self.data.values():
if sum(s.bkg) > 0:
allbkgs['uTime'].append(s.uTime[s.bkg])
allbkgs['ns'].append(enumerate_bool(s.bkg, n0)[s.bkg])
n0 = allbkgs['ns'][-1][-1]
for a in self.analytes:
allbkgs[a].append(s.data[focus_stage][a][s.bkg])
allbkgs.update((k, np.concatenate(v)) for k, v in allbkgs.items())
bkgs = pd.DataFrame(allbkgs) # using pandas here because it's much more efficient than loops.
self.bkg = Bunch()
# extract background data from whole dataset
if n_max is None:
self.bkg['raw'] = bkgs.groupby('ns').filter(lambda x: len(x) > n_min)
else:
self.bkg['raw'] = bkgs.groupby('ns').filter(lambda x: (len(x) > n_min) & (len(x) < n_max))
# calculate per - background region stats
self.bkg['summary'] = self.bkg['raw'].groupby('ns').aggregate([np.mean, np.std, stderr])
# sort summary by uTime
self.bkg['summary'].sort_values(('uTime', 'mean'), inplace=True)
# self.bkg['summary'].index = np.arange(self.bkg['summary'].shape[0])
# self.bkg['summary'].index.name = 'ns'
if bkg_filter:
# calculate rolling mean and std from summary
t = self.bkg['summary'].loc[:, idx[:, 'mean']]
r = t.rolling(f_win).aggregate([np.nanmean, np.nanstd])
# calculate upper threshold
upper = r.loc[:, idx[:, :, 'nanmean']] + f_n_lim * r.loc[:, idx[:, :, 'nanstd']].values
# calculate which are over upper threshold
over = r.loc[:, idx[:, :, 'nanmean']] > np.roll(upper.values, 1, 0)
# identify them
ns_drop = over.loc[over.apply(any, 1), :].index.values
# drop them from summary
self.bkg['summary'].drop(ns_drop, inplace=True)
# remove them from raw
ind = np.ones(self.bkg['raw'].shape[0], dtype=bool)
for ns in ns_drop:
ind = ind & (self.bkg['raw'].loc[:, 'ns'] != ns)
self.bkg['raw'] = self.bkg['raw'].loc[ind, :]
return
|
def get_background(self, n_min=10, n_max=None, focus_stage='despiked', bkg_filter=False, f_win=5, f_n_lim=3):
"""
Extract all background data from all samples on universal time scale.
Used by both 'polynomial' and 'weightedmean' methods.
Parameters
----------
n_min : int
The minimum number of points a background region must
have to be included in calculation.
n_max : int
The maximum number of points a background region must
have to be included in calculation.
filter : bool
If true, apply a rolling filter to the isolated background regions
to exclude regions with anomalously high values. If True, two parameters
alter the filter's behaviour:
f_win : int
The size of the rolling window
f_n_lim : float
The number of standard deviations above the rolling mean
to set the threshold.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Returns
-------
pandas.DataFrame object containing background data.
"""
allbkgs = {'uTime': [],
'ns': []}
if focus_stage == 'despiked':
if 'despiked' not in self.stages_complete:
focus_stage = 'rawdata'
for a in self.analytes:
allbkgs[a] = []
n0 = 0
for s in self.data.values():
if sum(s.bkg) > 0:
allbkgs['uTime'].append(s.uTime[s.bkg])
allbkgs['ns'].append(enumerate_bool(s.bkg, n0)[s.bkg])
n0 = allbkgs['ns'][-1][-1]
for a in self.analytes:
allbkgs[a].append(s.data[focus_stage][a][s.bkg])
allbkgs.update((k, np.concatenate(v)) for k, v in allbkgs.items())
bkgs = pd.DataFrame(allbkgs) # using pandas here because it's much more efficient than loops.
self.bkg = Bunch()
# extract background data from whole dataset
if n_max is None:
self.bkg['raw'] = bkgs.groupby('ns').filter(lambda x: len(x) > n_min)
else:
self.bkg['raw'] = bkgs.groupby('ns').filter(lambda x: (len(x) > n_min) & (len(x) < n_max))
# calculate per - background region stats
self.bkg['summary'] = self.bkg['raw'].groupby('ns').aggregate([np.mean, np.std, stderr])
# sort summary by uTime
self.bkg['summary'].sort_values(('uTime', 'mean'), inplace=True)
# self.bkg['summary'].index = np.arange(self.bkg['summary'].shape[0])
# self.bkg['summary'].index.name = 'ns'
if bkg_filter:
# calculate rolling mean and std from summary
t = self.bkg['summary'].loc[:, idx[:, 'mean']]
r = t.rolling(f_win).aggregate([np.nanmean, np.nanstd])
# calculate upper threshold
upper = r.loc[:, idx[:, :, 'nanmean']] + f_n_lim * r.loc[:, idx[:, :, 'nanstd']].values
# calculate which are over upper threshold
over = r.loc[:, idx[:, :, 'nanmean']] > np.roll(upper.values, 1, 0)
# identify them
ns_drop = over.loc[over.apply(any, 1), :].index.values
# drop them from summary
self.bkg['summary'].drop(ns_drop, inplace=True)
# remove them from raw
ind = np.ones(self.bkg['raw'].shape[0], dtype=bool)
for ns in ns_drop:
ind = ind & (self.bkg['raw'].loc[:, 'ns'] != ns)
self.bkg['raw'] = self.bkg['raw'].loc[ind, :]
return
|
[
"Extract",
"all",
"background",
"data",
"from",
"all",
"samples",
"on",
"universal",
"time",
"scale",
".",
"Used",
"by",
"both",
"polynomial",
"and",
"weightedmean",
"methods",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L703-L795
|
[
"def",
"get_background",
"(",
"self",
",",
"n_min",
"=",
"10",
",",
"n_max",
"=",
"None",
",",
"focus_stage",
"=",
"'despiked'",
",",
"bkg_filter",
"=",
"False",
",",
"f_win",
"=",
"5",
",",
"f_n_lim",
"=",
"3",
")",
":",
"allbkgs",
"=",
"{",
"'uTime'",
":",
"[",
"]",
",",
"'ns'",
":",
"[",
"]",
"}",
"if",
"focus_stage",
"==",
"'despiked'",
":",
"if",
"'despiked'",
"not",
"in",
"self",
".",
"stages_complete",
":",
"focus_stage",
"=",
"'rawdata'",
"for",
"a",
"in",
"self",
".",
"analytes",
":",
"allbkgs",
"[",
"a",
"]",
"=",
"[",
"]",
"n0",
"=",
"0",
"for",
"s",
"in",
"self",
".",
"data",
".",
"values",
"(",
")",
":",
"if",
"sum",
"(",
"s",
".",
"bkg",
")",
">",
"0",
":",
"allbkgs",
"[",
"'uTime'",
"]",
".",
"append",
"(",
"s",
".",
"uTime",
"[",
"s",
".",
"bkg",
"]",
")",
"allbkgs",
"[",
"'ns'",
"]",
".",
"append",
"(",
"enumerate_bool",
"(",
"s",
".",
"bkg",
",",
"n0",
")",
"[",
"s",
".",
"bkg",
"]",
")",
"n0",
"=",
"allbkgs",
"[",
"'ns'",
"]",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
"for",
"a",
"in",
"self",
".",
"analytes",
":",
"allbkgs",
"[",
"a",
"]",
".",
"append",
"(",
"s",
".",
"data",
"[",
"focus_stage",
"]",
"[",
"a",
"]",
"[",
"s",
".",
"bkg",
"]",
")",
"allbkgs",
".",
"update",
"(",
"(",
"k",
",",
"np",
".",
"concatenate",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"allbkgs",
".",
"items",
"(",
")",
")",
"bkgs",
"=",
"pd",
".",
"DataFrame",
"(",
"allbkgs",
")",
"# using pandas here because it's much more efficient than loops.",
"self",
".",
"bkg",
"=",
"Bunch",
"(",
")",
"# extract background data from whole dataset",
"if",
"n_max",
"is",
"None",
":",
"self",
".",
"bkg",
"[",
"'raw'",
"]",
"=",
"bkgs",
".",
"groupby",
"(",
"'ns'",
")",
".",
"filter",
"(",
"lambda",
"x",
":",
"len",
"(",
"x",
")",
">",
"n_min",
")",
"else",
":",
"self",
".",
"bkg",
"[",
"'raw'",
"]",
"=",
"bkgs",
".",
"groupby",
"(",
"'ns'",
")",
".",
"filter",
"(",
"lambda",
"x",
":",
"(",
"len",
"(",
"x",
")",
">",
"n_min",
")",
"&",
"(",
"len",
"(",
"x",
")",
"<",
"n_max",
")",
")",
"# calculate per - background region stats",
"self",
".",
"bkg",
"[",
"'summary'",
"]",
"=",
"self",
".",
"bkg",
"[",
"'raw'",
"]",
".",
"groupby",
"(",
"'ns'",
")",
".",
"aggregate",
"(",
"[",
"np",
".",
"mean",
",",
"np",
".",
"std",
",",
"stderr",
"]",
")",
"# sort summary by uTime",
"self",
".",
"bkg",
"[",
"'summary'",
"]",
".",
"sort_values",
"(",
"(",
"'uTime'",
",",
"'mean'",
")",
",",
"inplace",
"=",
"True",
")",
"# self.bkg['summary'].index = np.arange(self.bkg['summary'].shape[0])",
"# self.bkg['summary'].index.name = 'ns'",
"if",
"bkg_filter",
":",
"# calculate rolling mean and std from summary",
"t",
"=",
"self",
".",
"bkg",
"[",
"'summary'",
"]",
".",
"loc",
"[",
":",
",",
"idx",
"[",
":",
",",
"'mean'",
"]",
"]",
"r",
"=",
"t",
".",
"rolling",
"(",
"f_win",
")",
".",
"aggregate",
"(",
"[",
"np",
".",
"nanmean",
",",
"np",
".",
"nanstd",
"]",
")",
"# calculate upper threshold",
"upper",
"=",
"r",
".",
"loc",
"[",
":",
",",
"idx",
"[",
":",
",",
":",
",",
"'nanmean'",
"]",
"]",
"+",
"f_n_lim",
"*",
"r",
".",
"loc",
"[",
":",
",",
"idx",
"[",
":",
",",
":",
",",
"'nanstd'",
"]",
"]",
".",
"values",
"# calculate which are over upper threshold",
"over",
"=",
"r",
".",
"loc",
"[",
":",
",",
"idx",
"[",
":",
",",
":",
",",
"'nanmean'",
"]",
"]",
">",
"np",
".",
"roll",
"(",
"upper",
".",
"values",
",",
"1",
",",
"0",
")",
"# identify them",
"ns_drop",
"=",
"over",
".",
"loc",
"[",
"over",
".",
"apply",
"(",
"any",
",",
"1",
")",
",",
":",
"]",
".",
"index",
".",
"values",
"# drop them from summary",
"self",
".",
"bkg",
"[",
"'summary'",
"]",
".",
"drop",
"(",
"ns_drop",
",",
"inplace",
"=",
"True",
")",
"# remove them from raw",
"ind",
"=",
"np",
".",
"ones",
"(",
"self",
".",
"bkg",
"[",
"'raw'",
"]",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",
"bool",
")",
"for",
"ns",
"in",
"ns_drop",
":",
"ind",
"=",
"ind",
"&",
"(",
"self",
".",
"bkg",
"[",
"'raw'",
"]",
".",
"loc",
"[",
":",
",",
"'ns'",
"]",
"!=",
"ns",
")",
"self",
".",
"bkg",
"[",
"'raw'",
"]",
"=",
"self",
".",
"bkg",
"[",
"'raw'",
"]",
".",
"loc",
"[",
"ind",
",",
":",
"]",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.bkg_calc_weightedmean
|
Background calculation using a gaussian weighted mean.
Parameters
----------
analytes : str or iterable
Which analyte or analytes to calculate.
weight_fwhm : float
The full-width-at-half-maximum of the gaussian used
to calculate the weighted average.
n_min : int
Background regions with fewer than n_min points
will not be included in the fit.
cstep : float or None
The interval between calculated background points.
filter : bool
If true, apply a rolling filter to the isolated background regions
to exclude regions with anomalously high values. If True, two parameters
alter the filter's behaviour:
f_win : int
The size of the rolling window
f_n_lim : float
The number of standard deviations above the rolling mean
to set the threshold.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
|
latools/latools.py
|
def bkg_calc_weightedmean(self, analytes=None, weight_fwhm=None,
n_min=20, n_max=None, cstep=None,
bkg_filter=False, f_win=7, f_n_lim=3, focus_stage='despiked'):
"""
Background calculation using a gaussian weighted mean.
Parameters
----------
analytes : str or iterable
Which analyte or analytes to calculate.
weight_fwhm : float
The full-width-at-half-maximum of the gaussian used
to calculate the weighted average.
n_min : int
Background regions with fewer than n_min points
will not be included in the fit.
cstep : float or None
The interval between calculated background points.
filter : bool
If true, apply a rolling filter to the isolated background regions
to exclude regions with anomalously high values. If True, two parameters
alter the filter's behaviour:
f_win : int
The size of the rolling window
f_n_lim : float
The number of standard deviations above the rolling mean
to set the threshold.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
"""
if analytes is None:
analytes = self.analytes
self.bkg = Bunch()
elif isinstance(analytes, str):
analytes = [analytes]
if weight_fwhm is None:
weight_fwhm = 600 # 10 minute default window
self.get_background(n_min=n_min, n_max=n_max,
bkg_filter=bkg_filter,
f_win=f_win, f_n_lim=f_n_lim, focus_stage=focus_stage)
# Gaussian - weighted average
if 'calc' not in self.bkg.keys():
# create time points to calculate background
if cstep is None:
cstep = weight_fwhm / 20
elif cstep > weight_fwhm:
warnings.warn("\ncstep should be less than weight_fwhm. Your backgrounds\n" +
"might not behave as expected.\n")
bkg_t = np.linspace(0,
self.max_time,
self.max_time // cstep)
self.bkg['calc'] = Bunch()
self.bkg['calc']['uTime'] = bkg_t
# TODO : calculation then dict assignment is clumsy...
mean, std, stderr = gauss_weighted_stats(self.bkg['raw'].uTime,
self.bkg['raw'].loc[:, analytes].values,
self.bkg['calc']['uTime'],
fwhm=weight_fwhm)
for i, a in enumerate(analytes):
self.bkg['calc'][a] = {'mean': mean[i],
'std': std[i],
'stderr': stderr[i]}
|
def bkg_calc_weightedmean(self, analytes=None, weight_fwhm=None,
n_min=20, n_max=None, cstep=None,
bkg_filter=False, f_win=7, f_n_lim=3, focus_stage='despiked'):
"""
Background calculation using a gaussian weighted mean.
Parameters
----------
analytes : str or iterable
Which analyte or analytes to calculate.
weight_fwhm : float
The full-width-at-half-maximum of the gaussian used
to calculate the weighted average.
n_min : int
Background regions with fewer than n_min points
will not be included in the fit.
cstep : float or None
The interval between calculated background points.
filter : bool
If true, apply a rolling filter to the isolated background regions
to exclude regions with anomalously high values. If True, two parameters
alter the filter's behaviour:
f_win : int
The size of the rolling window
f_n_lim : float
The number of standard deviations above the rolling mean
to set the threshold.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
"""
if analytes is None:
analytes = self.analytes
self.bkg = Bunch()
elif isinstance(analytes, str):
analytes = [analytes]
if weight_fwhm is None:
weight_fwhm = 600 # 10 minute default window
self.get_background(n_min=n_min, n_max=n_max,
bkg_filter=bkg_filter,
f_win=f_win, f_n_lim=f_n_lim, focus_stage=focus_stage)
# Gaussian - weighted average
if 'calc' not in self.bkg.keys():
# create time points to calculate background
if cstep is None:
cstep = weight_fwhm / 20
elif cstep > weight_fwhm:
warnings.warn("\ncstep should be less than weight_fwhm. Your backgrounds\n" +
"might not behave as expected.\n")
bkg_t = np.linspace(0,
self.max_time,
self.max_time // cstep)
self.bkg['calc'] = Bunch()
self.bkg['calc']['uTime'] = bkg_t
# TODO : calculation then dict assignment is clumsy...
mean, std, stderr = gauss_weighted_stats(self.bkg['raw'].uTime,
self.bkg['raw'].loc[:, analytes].values,
self.bkg['calc']['uTime'],
fwhm=weight_fwhm)
for i, a in enumerate(analytes):
self.bkg['calc'][a] = {'mean': mean[i],
'std': std[i],
'stderr': stderr[i]}
|
[
"Background",
"calculation",
"using",
"a",
"gaussian",
"weighted",
"mean",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L798-L875
|
[
"def",
"bkg_calc_weightedmean",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"weight_fwhm",
"=",
"None",
",",
"n_min",
"=",
"20",
",",
"n_max",
"=",
"None",
",",
"cstep",
"=",
"None",
",",
"bkg_filter",
"=",
"False",
",",
"f_win",
"=",
"7",
",",
"f_n_lim",
"=",
"3",
",",
"focus_stage",
"=",
"'despiked'",
")",
":",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"self",
".",
"bkg",
"=",
"Bunch",
"(",
")",
"elif",
"isinstance",
"(",
"analytes",
",",
"str",
")",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"if",
"weight_fwhm",
"is",
"None",
":",
"weight_fwhm",
"=",
"600",
"# 10 minute default window",
"self",
".",
"get_background",
"(",
"n_min",
"=",
"n_min",
",",
"n_max",
"=",
"n_max",
",",
"bkg_filter",
"=",
"bkg_filter",
",",
"f_win",
"=",
"f_win",
",",
"f_n_lim",
"=",
"f_n_lim",
",",
"focus_stage",
"=",
"focus_stage",
")",
"# Gaussian - weighted average",
"if",
"'calc'",
"not",
"in",
"self",
".",
"bkg",
".",
"keys",
"(",
")",
":",
"# create time points to calculate background",
"if",
"cstep",
"is",
"None",
":",
"cstep",
"=",
"weight_fwhm",
"/",
"20",
"elif",
"cstep",
">",
"weight_fwhm",
":",
"warnings",
".",
"warn",
"(",
"\"\\ncstep should be less than weight_fwhm. Your backgrounds\\n\"",
"+",
"\"might not behave as expected.\\n\"",
")",
"bkg_t",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"self",
".",
"max_time",
",",
"self",
".",
"max_time",
"//",
"cstep",
")",
"self",
".",
"bkg",
"[",
"'calc'",
"]",
"=",
"Bunch",
"(",
")",
"self",
".",
"bkg",
"[",
"'calc'",
"]",
"[",
"'uTime'",
"]",
"=",
"bkg_t",
"# TODO : calculation then dict assignment is clumsy...",
"mean",
",",
"std",
",",
"stderr",
"=",
"gauss_weighted_stats",
"(",
"self",
".",
"bkg",
"[",
"'raw'",
"]",
".",
"uTime",
",",
"self",
".",
"bkg",
"[",
"'raw'",
"]",
".",
"loc",
"[",
":",
",",
"analytes",
"]",
".",
"values",
",",
"self",
".",
"bkg",
"[",
"'calc'",
"]",
"[",
"'uTime'",
"]",
",",
"fwhm",
"=",
"weight_fwhm",
")",
"for",
"i",
",",
"a",
"in",
"enumerate",
"(",
"analytes",
")",
":",
"self",
".",
"bkg",
"[",
"'calc'",
"]",
"[",
"a",
"]",
"=",
"{",
"'mean'",
":",
"mean",
"[",
"i",
"]",
",",
"'std'",
":",
"std",
"[",
"i",
"]",
",",
"'stderr'",
":",
"stderr",
"[",
"i",
"]",
"}"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.bkg_calc_interp1d
|
Background calculation using a 1D interpolation.
scipy.interpolate.interp1D is used for interpolation.
Parameters
----------
analytes : str or iterable
Which analyte or analytes to calculate.
kind : str or int
Integer specifying the order of the spline interpolation
used, or string specifying a type of interpolation.
Passed to `scipy.interpolate.interp1D`
n_min : int
Background regions with fewer than n_min points
will not be included in the fit.
cstep : float or None
The interval between calculated background points.
filter : bool
If true, apply a rolling filter to the isolated background regions
to exclude regions with anomalously high values. If True, two parameters
alter the filter's behaviour:
f_win : int
The size of the rolling window
f_n_lim : float
The number of standard deviations above the rolling mean
to set the threshold.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
|
latools/latools.py
|
def bkg_calc_interp1d(self, analytes=None, kind=1, n_min=10, n_max=None, cstep=None,
bkg_filter=False, f_win=7, f_n_lim=3, focus_stage='despiked'):
"""
Background calculation using a 1D interpolation.
scipy.interpolate.interp1D is used for interpolation.
Parameters
----------
analytes : str or iterable
Which analyte or analytes to calculate.
kind : str or int
Integer specifying the order of the spline interpolation
used, or string specifying a type of interpolation.
Passed to `scipy.interpolate.interp1D`
n_min : int
Background regions with fewer than n_min points
will not be included in the fit.
cstep : float or None
The interval between calculated background points.
filter : bool
If true, apply a rolling filter to the isolated background regions
to exclude regions with anomalously high values. If True, two parameters
alter the filter's behaviour:
f_win : int
The size of the rolling window
f_n_lim : float
The number of standard deviations above the rolling mean
to set the threshold.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
"""
if analytes is None:
analytes = self.analytes
self.bkg = Bunch()
elif isinstance(analytes, str):
analytes = [analytes]
self.get_background(n_min=n_min, n_max=n_max,
bkg_filter=bkg_filter,
f_win=f_win, f_n_lim=f_n_lim, focus_stage=focus_stage)
def pad(a, lo=None, hi=None):
if lo is None:
lo = [a[0]]
if hi is None:
hi = [a[-1]]
return np.concatenate((lo, a, hi))
if 'calc' not in self.bkg.keys():
# create time points to calculate background
# if cstep is None:
# cstep = self.bkg['raw']['uTime'].ptp() / 100
# bkg_t = np.arange(self.bkg['summary']['uTime']['mean'].min(),
# self.bkg['summary']['uTime']['mean'].max(),
# cstep)
bkg_t = pad(self.bkg['summary'].loc[:, ('uTime', 'mean')], [0], [self.max_time])
self.bkg['calc'] = Bunch()
self.bkg['calc']['uTime'] = bkg_t
d = self.bkg['summary']
with self.pbar.set(total=len(analytes), desc='Calculating Analyte Backgrounds') as prog:
for a in analytes:
self.bkg['calc'][a] = {'mean': pad(d.loc[:, (a, 'mean')].values),
'std': pad(d.loc[:, (a, 'std')].values),
'stderr': pad(d.loc[:, (a, 'stderr')].values)}
prog.update()
self.bkg['calc']
return
|
def bkg_calc_interp1d(self, analytes=None, kind=1, n_min=10, n_max=None, cstep=None,
bkg_filter=False, f_win=7, f_n_lim=3, focus_stage='despiked'):
"""
Background calculation using a 1D interpolation.
scipy.interpolate.interp1D is used for interpolation.
Parameters
----------
analytes : str or iterable
Which analyte or analytes to calculate.
kind : str or int
Integer specifying the order of the spline interpolation
used, or string specifying a type of interpolation.
Passed to `scipy.interpolate.interp1D`
n_min : int
Background regions with fewer than n_min points
will not be included in the fit.
cstep : float or None
The interval between calculated background points.
filter : bool
If true, apply a rolling filter to the isolated background regions
to exclude regions with anomalously high values. If True, two parameters
alter the filter's behaviour:
f_win : int
The size of the rolling window
f_n_lim : float
The number of standard deviations above the rolling mean
to set the threshold.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
"""
if analytes is None:
analytes = self.analytes
self.bkg = Bunch()
elif isinstance(analytes, str):
analytes = [analytes]
self.get_background(n_min=n_min, n_max=n_max,
bkg_filter=bkg_filter,
f_win=f_win, f_n_lim=f_n_lim, focus_stage=focus_stage)
def pad(a, lo=None, hi=None):
if lo is None:
lo = [a[0]]
if hi is None:
hi = [a[-1]]
return np.concatenate((lo, a, hi))
if 'calc' not in self.bkg.keys():
# create time points to calculate background
# if cstep is None:
# cstep = self.bkg['raw']['uTime'].ptp() / 100
# bkg_t = np.arange(self.bkg['summary']['uTime']['mean'].min(),
# self.bkg['summary']['uTime']['mean'].max(),
# cstep)
bkg_t = pad(self.bkg['summary'].loc[:, ('uTime', 'mean')], [0], [self.max_time])
self.bkg['calc'] = Bunch()
self.bkg['calc']['uTime'] = bkg_t
d = self.bkg['summary']
with self.pbar.set(total=len(analytes), desc='Calculating Analyte Backgrounds') as prog:
for a in analytes:
self.bkg['calc'][a] = {'mean': pad(d.loc[:, (a, 'mean')].values),
'std': pad(d.loc[:, (a, 'std')].values),
'stderr': pad(d.loc[:, (a, 'stderr')].values)}
prog.update()
self.bkg['calc']
return
|
[
"Background",
"calculation",
"using",
"a",
"1D",
"interpolation",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L878-L961
|
[
"def",
"bkg_calc_interp1d",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"kind",
"=",
"1",
",",
"n_min",
"=",
"10",
",",
"n_max",
"=",
"None",
",",
"cstep",
"=",
"None",
",",
"bkg_filter",
"=",
"False",
",",
"f_win",
"=",
"7",
",",
"f_n_lim",
"=",
"3",
",",
"focus_stage",
"=",
"'despiked'",
")",
":",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"self",
".",
"bkg",
"=",
"Bunch",
"(",
")",
"elif",
"isinstance",
"(",
"analytes",
",",
"str",
")",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"self",
".",
"get_background",
"(",
"n_min",
"=",
"n_min",
",",
"n_max",
"=",
"n_max",
",",
"bkg_filter",
"=",
"bkg_filter",
",",
"f_win",
"=",
"f_win",
",",
"f_n_lim",
"=",
"f_n_lim",
",",
"focus_stage",
"=",
"focus_stage",
")",
"def",
"pad",
"(",
"a",
",",
"lo",
"=",
"None",
",",
"hi",
"=",
"None",
")",
":",
"if",
"lo",
"is",
"None",
":",
"lo",
"=",
"[",
"a",
"[",
"0",
"]",
"]",
"if",
"hi",
"is",
"None",
":",
"hi",
"=",
"[",
"a",
"[",
"-",
"1",
"]",
"]",
"return",
"np",
".",
"concatenate",
"(",
"(",
"lo",
",",
"a",
",",
"hi",
")",
")",
"if",
"'calc'",
"not",
"in",
"self",
".",
"bkg",
".",
"keys",
"(",
")",
":",
"# create time points to calculate background",
"# if cstep is None:",
"# cstep = self.bkg['raw']['uTime'].ptp() / 100",
"# bkg_t = np.arange(self.bkg['summary']['uTime']['mean'].min(),",
"# self.bkg['summary']['uTime']['mean'].max(),",
"# cstep)",
"bkg_t",
"=",
"pad",
"(",
"self",
".",
"bkg",
"[",
"'summary'",
"]",
".",
"loc",
"[",
":",
",",
"(",
"'uTime'",
",",
"'mean'",
")",
"]",
",",
"[",
"0",
"]",
",",
"[",
"self",
".",
"max_time",
"]",
")",
"self",
".",
"bkg",
"[",
"'calc'",
"]",
"=",
"Bunch",
"(",
")",
"self",
".",
"bkg",
"[",
"'calc'",
"]",
"[",
"'uTime'",
"]",
"=",
"bkg_t",
"d",
"=",
"self",
".",
"bkg",
"[",
"'summary'",
"]",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"analytes",
")",
",",
"desc",
"=",
"'Calculating Analyte Backgrounds'",
")",
"as",
"prog",
":",
"for",
"a",
"in",
"analytes",
":",
"self",
".",
"bkg",
"[",
"'calc'",
"]",
"[",
"a",
"]",
"=",
"{",
"'mean'",
":",
"pad",
"(",
"d",
".",
"loc",
"[",
":",
",",
"(",
"a",
",",
"'mean'",
")",
"]",
".",
"values",
")",
",",
"'std'",
":",
"pad",
"(",
"d",
".",
"loc",
"[",
":",
",",
"(",
"a",
",",
"'std'",
")",
"]",
".",
"values",
")",
",",
"'stderr'",
":",
"pad",
"(",
"d",
".",
"loc",
"[",
":",
",",
"(",
"a",
",",
"'stderr'",
")",
"]",
".",
"values",
")",
"}",
"prog",
".",
"update",
"(",
")",
"self",
".",
"bkg",
"[",
"'calc'",
"]",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.bkg_subtract
|
Subtract calculated background from data.
Must run bkg_calc first!
Parameters
----------
analytes : str or iterable
Which analyte(s) to subtract.
errtype : str
Which type of error to propagate. default is 'stderr'.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
|
latools/latools.py
|
def bkg_subtract(self, analytes=None, errtype='stderr', focus_stage='despiked'):
"""
Subtract calculated background from data.
Must run bkg_calc first!
Parameters
----------
analytes : str or iterable
Which analyte(s) to subtract.
errtype : str
Which type of error to propagate. default is 'stderr'.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
"""
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
if focus_stage == 'despiked':
if 'despiked' not in self.stages_complete:
focus_stage = 'rawdata'
# make uncertainty-aware background interpolators
bkg_interps = {}
for a in analytes:
bkg_interps[a] = un_interp1d(x=self.bkg['calc']['uTime'],
y=un.uarray(self.bkg['calc'][a]['mean'],
self.bkg['calc'][a][errtype]))
self.bkg_interps = bkg_interps
# apply background corrections
with self.pbar.set(total=len(self.data), desc='Background Subtraction') as prog:
for d in self.data.values():
# [d.bkg_subtract(a, bkg_interps[a].new(d.uTime), None, focus_stage=focus_stage) for a in analytes]
[d.bkg_subtract(a, bkg_interps[a].new(d.uTime), ~d.sig, focus_stage=focus_stage) for a in analytes]
d.setfocus('bkgsub')
prog.update()
self.stages_complete.update(['bkgsub'])
self.focus_stage = 'bkgsub'
return
|
def bkg_subtract(self, analytes=None, errtype='stderr', focus_stage='despiked'):
"""
Subtract calculated background from data.
Must run bkg_calc first!
Parameters
----------
analytes : str or iterable
Which analyte(s) to subtract.
errtype : str
Which type of error to propagate. default is 'stderr'.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
"""
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
if focus_stage == 'despiked':
if 'despiked' not in self.stages_complete:
focus_stage = 'rawdata'
# make uncertainty-aware background interpolators
bkg_interps = {}
for a in analytes:
bkg_interps[a] = un_interp1d(x=self.bkg['calc']['uTime'],
y=un.uarray(self.bkg['calc'][a]['mean'],
self.bkg['calc'][a][errtype]))
self.bkg_interps = bkg_interps
# apply background corrections
with self.pbar.set(total=len(self.data), desc='Background Subtraction') as prog:
for d in self.data.values():
# [d.bkg_subtract(a, bkg_interps[a].new(d.uTime), None, focus_stage=focus_stage) for a in analytes]
[d.bkg_subtract(a, bkg_interps[a].new(d.uTime), ~d.sig, focus_stage=focus_stage) for a in analytes]
d.setfocus('bkgsub')
prog.update()
self.stages_complete.update(['bkgsub'])
self.focus_stage = 'bkgsub'
return
|
[
"Subtract",
"calculated",
"background",
"from",
"data",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L964-L1018
|
[
"def",
"bkg_subtract",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"errtype",
"=",
"'stderr'",
",",
"focus_stage",
"=",
"'despiked'",
")",
":",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"elif",
"isinstance",
"(",
"analytes",
",",
"str",
")",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"if",
"focus_stage",
"==",
"'despiked'",
":",
"if",
"'despiked'",
"not",
"in",
"self",
".",
"stages_complete",
":",
"focus_stage",
"=",
"'rawdata'",
"# make uncertainty-aware background interpolators",
"bkg_interps",
"=",
"{",
"}",
"for",
"a",
"in",
"analytes",
":",
"bkg_interps",
"[",
"a",
"]",
"=",
"un_interp1d",
"(",
"x",
"=",
"self",
".",
"bkg",
"[",
"'calc'",
"]",
"[",
"'uTime'",
"]",
",",
"y",
"=",
"un",
".",
"uarray",
"(",
"self",
".",
"bkg",
"[",
"'calc'",
"]",
"[",
"a",
"]",
"[",
"'mean'",
"]",
",",
"self",
".",
"bkg",
"[",
"'calc'",
"]",
"[",
"a",
"]",
"[",
"errtype",
"]",
")",
")",
"self",
".",
"bkg_interps",
"=",
"bkg_interps",
"# apply background corrections",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"self",
".",
"data",
")",
",",
"desc",
"=",
"'Background Subtraction'",
")",
"as",
"prog",
":",
"for",
"d",
"in",
"self",
".",
"data",
".",
"values",
"(",
")",
":",
"# [d.bkg_subtract(a, bkg_interps[a].new(d.uTime), None, focus_stage=focus_stage) for a in analytes]",
"[",
"d",
".",
"bkg_subtract",
"(",
"a",
",",
"bkg_interps",
"[",
"a",
"]",
".",
"new",
"(",
"d",
".",
"uTime",
")",
",",
"~",
"d",
".",
"sig",
",",
"focus_stage",
"=",
"focus_stage",
")",
"for",
"a",
"in",
"analytes",
"]",
"d",
".",
"setfocus",
"(",
"'bkgsub'",
")",
"prog",
".",
"update",
"(",
")",
"self",
".",
"stages_complete",
".",
"update",
"(",
"[",
"'bkgsub'",
"]",
")",
"self",
".",
"focus_stage",
"=",
"'bkgsub'",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.correct_spectral_interference
|
Correct spectral interference.
Subtract interference counts from target_analyte, based on the
intensity of a source_analayte and a known fractional contribution (f).
Correction takes the form:
target_analyte -= source_analyte * f
Only operates on background-corrected data ('bkgsub'). To undo a correction,
rerun `self.bkg_subtract()`.
Example
-------
To correct 44Ca+ for an 88Sr++ interference, where both 43.5 and 44 Da
peaks are known:
f = abundance(88Sr) / (abundance(87Sr)
counts(44Ca) = counts(44 Da) - counts(43.5 Da) * f
Parameters
----------
target_analyte : str
The name of the analyte to modify.
source_analyte : str
The name of the analyte to base the correction on.
f : float
The fraction of the intensity of the source_analyte to
subtract from the target_analyte. Correction is:
target_analyte - source_analyte * f
Returns
-------
None
|
latools/latools.py
|
def correct_spectral_interference(self, target_analyte, source_analyte, f):
"""
Correct spectral interference.
Subtract interference counts from target_analyte, based on the
intensity of a source_analayte and a known fractional contribution (f).
Correction takes the form:
target_analyte -= source_analyte * f
Only operates on background-corrected data ('bkgsub'). To undo a correction,
rerun `self.bkg_subtract()`.
Example
-------
To correct 44Ca+ for an 88Sr++ interference, where both 43.5 and 44 Da
peaks are known:
f = abundance(88Sr) / (abundance(87Sr)
counts(44Ca) = counts(44 Da) - counts(43.5 Da) * f
Parameters
----------
target_analyte : str
The name of the analyte to modify.
source_analyte : str
The name of the analyte to base the correction on.
f : float
The fraction of the intensity of the source_analyte to
subtract from the target_analyte. Correction is:
target_analyte - source_analyte * f
Returns
-------
None
"""
if target_analyte not in self.analytes:
raise ValueError('target_analyte: {:} not in available analytes ({:})'.format(target_analyte, ', '.join(self.analytes)))
if source_analyte not in self.analytes:
raise ValueError('source_analyte: {:} not in available analytes ({:})'.format(source_analyte, ', '.join(self.analytes)))
with self.pbar.set(total=len(self.data), desc='Interference Correction') as prog:
for d in self.data.values():
d.correct_spectral_interference(target_analyte, source_analyte, f)
prog.update()
|
def correct_spectral_interference(self, target_analyte, source_analyte, f):
"""
Correct spectral interference.
Subtract interference counts from target_analyte, based on the
intensity of a source_analayte and a known fractional contribution (f).
Correction takes the form:
target_analyte -= source_analyte * f
Only operates on background-corrected data ('bkgsub'). To undo a correction,
rerun `self.bkg_subtract()`.
Example
-------
To correct 44Ca+ for an 88Sr++ interference, where both 43.5 and 44 Da
peaks are known:
f = abundance(88Sr) / (abundance(87Sr)
counts(44Ca) = counts(44 Da) - counts(43.5 Da) * f
Parameters
----------
target_analyte : str
The name of the analyte to modify.
source_analyte : str
The name of the analyte to base the correction on.
f : float
The fraction of the intensity of the source_analyte to
subtract from the target_analyte. Correction is:
target_analyte - source_analyte * f
Returns
-------
None
"""
if target_analyte not in self.analytes:
raise ValueError('target_analyte: {:} not in available analytes ({:})'.format(target_analyte, ', '.join(self.analytes)))
if source_analyte not in self.analytes:
raise ValueError('source_analyte: {:} not in available analytes ({:})'.format(source_analyte, ', '.join(self.analytes)))
with self.pbar.set(total=len(self.data), desc='Interference Correction') as prog:
for d in self.data.values():
d.correct_spectral_interference(target_analyte, source_analyte, f)
prog.update()
|
[
"Correct",
"spectral",
"interference",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L1021-L1069
|
[
"def",
"correct_spectral_interference",
"(",
"self",
",",
"target_analyte",
",",
"source_analyte",
",",
"f",
")",
":",
"if",
"target_analyte",
"not",
"in",
"self",
".",
"analytes",
":",
"raise",
"ValueError",
"(",
"'target_analyte: {:} not in available analytes ({:})'",
".",
"format",
"(",
"target_analyte",
",",
"', '",
".",
"join",
"(",
"self",
".",
"analytes",
")",
")",
")",
"if",
"source_analyte",
"not",
"in",
"self",
".",
"analytes",
":",
"raise",
"ValueError",
"(",
"'source_analyte: {:} not in available analytes ({:})'",
".",
"format",
"(",
"source_analyte",
",",
"', '",
".",
"join",
"(",
"self",
".",
"analytes",
")",
")",
")",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"self",
".",
"data",
")",
",",
"desc",
"=",
"'Interference Correction'",
")",
"as",
"prog",
":",
"for",
"d",
"in",
"self",
".",
"data",
".",
"values",
"(",
")",
":",
"d",
".",
"correct_spectral_interference",
"(",
"target_analyte",
",",
"source_analyte",
",",
"f",
")",
"prog",
".",
"update",
"(",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.bkg_plot
|
Plot the calculated background.
Parameters
----------
analytes : str or iterable
Which analyte(s) to plot.
figsize : tuple
The (width, height) of the figure, in inches.
If None, calculated based on number of samples.
yscale : str
'log' (default) or 'linear'.
ylim : tuple
Manually specify the y scale.
err : str
What type of error to plot. Default is stderr.
save : bool
If True, figure is saved.
Returns
-------
fig, ax : matplotlib.figure, matplotlib.axes
|
latools/latools.py
|
def bkg_plot(self, analytes=None, figsize=None, yscale='log',
ylim=None, err='stderr', save=True):
"""
Plot the calculated background.
Parameters
----------
analytes : str or iterable
Which analyte(s) to plot.
figsize : tuple
The (width, height) of the figure, in inches.
If None, calculated based on number of samples.
yscale : str
'log' (default) or 'linear'.
ylim : tuple
Manually specify the y scale.
err : str
What type of error to plot. Default is stderr.
save : bool
If True, figure is saved.
Returns
-------
fig, ax : matplotlib.figure, matplotlib.axes
"""
if not hasattr(self, 'bkg'):
raise ValueError("\nPlease calculate a background before attempting to\n" +
"plot it... either:\n" +
" bkg_calc_interp1d\n" +
" bkg_calc_weightedmean\n")
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
if figsize is None:
if len(self.samples) > 50:
figsize = (len(self.samples) * 0.15, 5)
else:
figsize = (7.5, 5)
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([.07, .1, .84, .8])
with self.pbar.set(total=len(analytes), desc='Plotting backgrounds') as prog:
for a in analytes:
# draw data points
ax.scatter(self.bkg['raw'].uTime, self.bkg['raw'].loc[:, a],
alpha=0.5, s=3, c=self.cmaps[a],
lw=0.5)
# draw STD boxes
for i, r in self.bkg['summary'].iterrows():
x = (r.loc['uTime', 'mean'] - r.loc['uTime', 'std'] * 2,
r.loc['uTime', 'mean'] + r.loc['uTime', 'std'] * 2)
yl = [r.loc[a, 'mean'] - r.loc[a, err]] * 2
yu = [r.loc[a, 'mean'] + r.loc[a, err]] * 2
ax.fill_between(x, yl, yu, alpha=0.8, lw=0.5, color=self.cmaps[a], zorder=1)
prog.update()
if yscale == 'log':
ax.set_yscale('log')
if ylim is not None:
ax.set_ylim(ylim)
else:
ax.set_ylim(ax.get_ylim() * np.array([1, 10])) # x10 to make sample names readable.
for a in analytes:
# draw confidence intervals of calculated
x = self.bkg['calc']['uTime']
y = self.bkg['calc'][a]['mean']
yl = self.bkg['calc'][a]['mean'] - self.bkg['calc'][a][err]
yu = self.bkg['calc'][a]['mean'] + self.bkg['calc'][a][err]
# trim values below zero if log scale=
if yscale == 'log':
yl[yl < ax.get_ylim()[0]] = ax.get_ylim()[0]
ax.plot(x, y,
c=self.cmaps[a], zorder=2, label=a)
ax.fill_between(x, yl, yu,
color=self.cmaps[a], alpha=0.3, zorder=-1)
ax.set_xlabel('Time (s)')
ax.set_ylabel('Background Counts')
ax.set_title('Points = raw data; Bars = {:s}; Lines = Calculated Background; Envelope = Background {:s}'.format(err, err),
fontsize=10)
ha, la = ax.get_legend_handles_labels()
ax.legend(labels=la[:len(analytes)], handles=ha[:len(analytes)], bbox_to_anchor=(1, 1))
# scale x axis to range ± 2.5%
xlim = rangecalc(self.bkg['raw']['uTime'], 0.025)
ax.set_xlim(xlim)
# add sample labels
for s, d in self.data.items():
ax.axvline(d.uTime[0], alpha=0.2, color='k', zorder=-1)
ax.text(d.uTime[0], ax.get_ylim()[1], s, rotation=90,
va='top', ha='left', zorder=-1, fontsize=7)
if save:
fig.savefig(self.report_dir + '/background.png', dpi=200)
return fig, ax
|
def bkg_plot(self, analytes=None, figsize=None, yscale='log',
ylim=None, err='stderr', save=True):
"""
Plot the calculated background.
Parameters
----------
analytes : str or iterable
Which analyte(s) to plot.
figsize : tuple
The (width, height) of the figure, in inches.
If None, calculated based on number of samples.
yscale : str
'log' (default) or 'linear'.
ylim : tuple
Manually specify the y scale.
err : str
What type of error to plot. Default is stderr.
save : bool
If True, figure is saved.
Returns
-------
fig, ax : matplotlib.figure, matplotlib.axes
"""
if not hasattr(self, 'bkg'):
raise ValueError("\nPlease calculate a background before attempting to\n" +
"plot it... either:\n" +
" bkg_calc_interp1d\n" +
" bkg_calc_weightedmean\n")
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
if figsize is None:
if len(self.samples) > 50:
figsize = (len(self.samples) * 0.15, 5)
else:
figsize = (7.5, 5)
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([.07, .1, .84, .8])
with self.pbar.set(total=len(analytes), desc='Plotting backgrounds') as prog:
for a in analytes:
# draw data points
ax.scatter(self.bkg['raw'].uTime, self.bkg['raw'].loc[:, a],
alpha=0.5, s=3, c=self.cmaps[a],
lw=0.5)
# draw STD boxes
for i, r in self.bkg['summary'].iterrows():
x = (r.loc['uTime', 'mean'] - r.loc['uTime', 'std'] * 2,
r.loc['uTime', 'mean'] + r.loc['uTime', 'std'] * 2)
yl = [r.loc[a, 'mean'] - r.loc[a, err]] * 2
yu = [r.loc[a, 'mean'] + r.loc[a, err]] * 2
ax.fill_between(x, yl, yu, alpha=0.8, lw=0.5, color=self.cmaps[a], zorder=1)
prog.update()
if yscale == 'log':
ax.set_yscale('log')
if ylim is not None:
ax.set_ylim(ylim)
else:
ax.set_ylim(ax.get_ylim() * np.array([1, 10])) # x10 to make sample names readable.
for a in analytes:
# draw confidence intervals of calculated
x = self.bkg['calc']['uTime']
y = self.bkg['calc'][a]['mean']
yl = self.bkg['calc'][a]['mean'] - self.bkg['calc'][a][err]
yu = self.bkg['calc'][a]['mean'] + self.bkg['calc'][a][err]
# trim values below zero if log scale=
if yscale == 'log':
yl[yl < ax.get_ylim()[0]] = ax.get_ylim()[0]
ax.plot(x, y,
c=self.cmaps[a], zorder=2, label=a)
ax.fill_between(x, yl, yu,
color=self.cmaps[a], alpha=0.3, zorder=-1)
ax.set_xlabel('Time (s)')
ax.set_ylabel('Background Counts')
ax.set_title('Points = raw data; Bars = {:s}; Lines = Calculated Background; Envelope = Background {:s}'.format(err, err),
fontsize=10)
ha, la = ax.get_legend_handles_labels()
ax.legend(labels=la[:len(analytes)], handles=ha[:len(analytes)], bbox_to_anchor=(1, 1))
# scale x axis to range ± 2.5%
xlim = rangecalc(self.bkg['raw']['uTime'], 0.025)
ax.set_xlim(xlim)
# add sample labels
for s, d in self.data.items():
ax.axvline(d.uTime[0], alpha=0.2, color='k', zorder=-1)
ax.text(d.uTime[0], ax.get_ylim()[1], s, rotation=90,
va='top', ha='left', zorder=-1, fontsize=7)
if save:
fig.savefig(self.report_dir + '/background.png', dpi=200)
return fig, ax
|
[
"Plot",
"the",
"calculated",
"background",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L1072-L1181
|
[
"def",
"bkg_plot",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"figsize",
"=",
"None",
",",
"yscale",
"=",
"'log'",
",",
"ylim",
"=",
"None",
",",
"err",
"=",
"'stderr'",
",",
"save",
"=",
"True",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'bkg'",
")",
":",
"raise",
"ValueError",
"(",
"\"\\nPlease calculate a background before attempting to\\n\"",
"+",
"\"plot it... either:\\n\"",
"+",
"\" bkg_calc_interp1d\\n\"",
"+",
"\" bkg_calc_weightedmean\\n\"",
")",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"elif",
"isinstance",
"(",
"analytes",
",",
"str",
")",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"if",
"figsize",
"is",
"None",
":",
"if",
"len",
"(",
"self",
".",
"samples",
")",
">",
"50",
":",
"figsize",
"=",
"(",
"len",
"(",
"self",
".",
"samples",
")",
"*",
"0.15",
",",
"5",
")",
"else",
":",
"figsize",
"=",
"(",
"7.5",
",",
"5",
")",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"figsize",
")",
"ax",
"=",
"fig",
".",
"add_axes",
"(",
"[",
".07",
",",
".1",
",",
".84",
",",
".8",
"]",
")",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"analytes",
")",
",",
"desc",
"=",
"'Plotting backgrounds'",
")",
"as",
"prog",
":",
"for",
"a",
"in",
"analytes",
":",
"# draw data points",
"ax",
".",
"scatter",
"(",
"self",
".",
"bkg",
"[",
"'raw'",
"]",
".",
"uTime",
",",
"self",
".",
"bkg",
"[",
"'raw'",
"]",
".",
"loc",
"[",
":",
",",
"a",
"]",
",",
"alpha",
"=",
"0.5",
",",
"s",
"=",
"3",
",",
"c",
"=",
"self",
".",
"cmaps",
"[",
"a",
"]",
",",
"lw",
"=",
"0.5",
")",
"# draw STD boxes",
"for",
"i",
",",
"r",
"in",
"self",
".",
"bkg",
"[",
"'summary'",
"]",
".",
"iterrows",
"(",
")",
":",
"x",
"=",
"(",
"r",
".",
"loc",
"[",
"'uTime'",
",",
"'mean'",
"]",
"-",
"r",
".",
"loc",
"[",
"'uTime'",
",",
"'std'",
"]",
"*",
"2",
",",
"r",
".",
"loc",
"[",
"'uTime'",
",",
"'mean'",
"]",
"+",
"r",
".",
"loc",
"[",
"'uTime'",
",",
"'std'",
"]",
"*",
"2",
")",
"yl",
"=",
"[",
"r",
".",
"loc",
"[",
"a",
",",
"'mean'",
"]",
"-",
"r",
".",
"loc",
"[",
"a",
",",
"err",
"]",
"]",
"*",
"2",
"yu",
"=",
"[",
"r",
".",
"loc",
"[",
"a",
",",
"'mean'",
"]",
"+",
"r",
".",
"loc",
"[",
"a",
",",
"err",
"]",
"]",
"*",
"2",
"ax",
".",
"fill_between",
"(",
"x",
",",
"yl",
",",
"yu",
",",
"alpha",
"=",
"0.8",
",",
"lw",
"=",
"0.5",
",",
"color",
"=",
"self",
".",
"cmaps",
"[",
"a",
"]",
",",
"zorder",
"=",
"1",
")",
"prog",
".",
"update",
"(",
")",
"if",
"yscale",
"==",
"'log'",
":",
"ax",
".",
"set_yscale",
"(",
"'log'",
")",
"if",
"ylim",
"is",
"not",
"None",
":",
"ax",
".",
"set_ylim",
"(",
"ylim",
")",
"else",
":",
"ax",
".",
"set_ylim",
"(",
"ax",
".",
"get_ylim",
"(",
")",
"*",
"np",
".",
"array",
"(",
"[",
"1",
",",
"10",
"]",
")",
")",
"# x10 to make sample names readable.",
"for",
"a",
"in",
"analytes",
":",
"# draw confidence intervals of calculated",
"x",
"=",
"self",
".",
"bkg",
"[",
"'calc'",
"]",
"[",
"'uTime'",
"]",
"y",
"=",
"self",
".",
"bkg",
"[",
"'calc'",
"]",
"[",
"a",
"]",
"[",
"'mean'",
"]",
"yl",
"=",
"self",
".",
"bkg",
"[",
"'calc'",
"]",
"[",
"a",
"]",
"[",
"'mean'",
"]",
"-",
"self",
".",
"bkg",
"[",
"'calc'",
"]",
"[",
"a",
"]",
"[",
"err",
"]",
"yu",
"=",
"self",
".",
"bkg",
"[",
"'calc'",
"]",
"[",
"a",
"]",
"[",
"'mean'",
"]",
"+",
"self",
".",
"bkg",
"[",
"'calc'",
"]",
"[",
"a",
"]",
"[",
"err",
"]",
"# trim values below zero if log scale= ",
"if",
"yscale",
"==",
"'log'",
":",
"yl",
"[",
"yl",
"<",
"ax",
".",
"get_ylim",
"(",
")",
"[",
"0",
"]",
"]",
"=",
"ax",
".",
"get_ylim",
"(",
")",
"[",
"0",
"]",
"ax",
".",
"plot",
"(",
"x",
",",
"y",
",",
"c",
"=",
"self",
".",
"cmaps",
"[",
"a",
"]",
",",
"zorder",
"=",
"2",
",",
"label",
"=",
"a",
")",
"ax",
".",
"fill_between",
"(",
"x",
",",
"yl",
",",
"yu",
",",
"color",
"=",
"self",
".",
"cmaps",
"[",
"a",
"]",
",",
"alpha",
"=",
"0.3",
",",
"zorder",
"=",
"-",
"1",
")",
"ax",
".",
"set_xlabel",
"(",
"'Time (s)'",
")",
"ax",
".",
"set_ylabel",
"(",
"'Background Counts'",
")",
"ax",
".",
"set_title",
"(",
"'Points = raw data; Bars = {:s}; Lines = Calculated Background; Envelope = Background {:s}'",
".",
"format",
"(",
"err",
",",
"err",
")",
",",
"fontsize",
"=",
"10",
")",
"ha",
",",
"la",
"=",
"ax",
".",
"get_legend_handles_labels",
"(",
")",
"ax",
".",
"legend",
"(",
"labels",
"=",
"la",
"[",
":",
"len",
"(",
"analytes",
")",
"]",
",",
"handles",
"=",
"ha",
"[",
":",
"len",
"(",
"analytes",
")",
"]",
",",
"bbox_to_anchor",
"=",
"(",
"1",
",",
"1",
")",
")",
"# scale x axis to range ± 2.5%",
"xlim",
"=",
"rangecalc",
"(",
"self",
".",
"bkg",
"[",
"'raw'",
"]",
"[",
"'uTime'",
"]",
",",
"0.025",
")",
"ax",
".",
"set_xlim",
"(",
"xlim",
")",
"# add sample labels",
"for",
"s",
",",
"d",
"in",
"self",
".",
"data",
".",
"items",
"(",
")",
":",
"ax",
".",
"axvline",
"(",
"d",
".",
"uTime",
"[",
"0",
"]",
",",
"alpha",
"=",
"0.2",
",",
"color",
"=",
"'k'",
",",
"zorder",
"=",
"-",
"1",
")",
"ax",
".",
"text",
"(",
"d",
".",
"uTime",
"[",
"0",
"]",
",",
"ax",
".",
"get_ylim",
"(",
")",
"[",
"1",
"]",
",",
"s",
",",
"rotation",
"=",
"90",
",",
"va",
"=",
"'top'",
",",
"ha",
"=",
"'left'",
",",
"zorder",
"=",
"-",
"1",
",",
"fontsize",
"=",
"7",
")",
"if",
"save",
":",
"fig",
".",
"savefig",
"(",
"self",
".",
"report_dir",
"+",
"'/background.png'",
",",
"dpi",
"=",
"200",
")",
"return",
"fig",
",",
"ax"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.ratio
|
Calculates the ratio of all analytes to a single analyte.
Parameters
----------
internal_standard : str
The name of the analyte to divide all other analytes
by.
Returns
-------
None
|
latools/latools.py
|
def ratio(self, internal_standard=None):
"""
Calculates the ratio of all analytes to a single analyte.
Parameters
----------
internal_standard : str
The name of the analyte to divide all other analytes
by.
Returns
-------
None
"""
if 'bkgsub' not in self.stages_complete:
raise RuntimeError('Cannot calculate ratios before background subtraction.')
if internal_standard is not None:
self.internal_standard = internal_standard
self.minimal_analytes.update([internal_standard])
with self.pbar.set(total=len(self.data), desc='Ratio Calculation') as prog:
for s in self.data.values():
s.ratio(internal_standard=self.internal_standard)
prog.update()
self.stages_complete.update(['ratios'])
self.focus_stage = 'ratios'
return
|
def ratio(self, internal_standard=None):
"""
Calculates the ratio of all analytes to a single analyte.
Parameters
----------
internal_standard : str
The name of the analyte to divide all other analytes
by.
Returns
-------
None
"""
if 'bkgsub' not in self.stages_complete:
raise RuntimeError('Cannot calculate ratios before background subtraction.')
if internal_standard is not None:
self.internal_standard = internal_standard
self.minimal_analytes.update([internal_standard])
with self.pbar.set(total=len(self.data), desc='Ratio Calculation') as prog:
for s in self.data.values():
s.ratio(internal_standard=self.internal_standard)
prog.update()
self.stages_complete.update(['ratios'])
self.focus_stage = 'ratios'
return
|
[
"Calculates",
"the",
"ratio",
"of",
"all",
"analytes",
"to",
"a",
"single",
"analyte",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L1185-L1213
|
[
"def",
"ratio",
"(",
"self",
",",
"internal_standard",
"=",
"None",
")",
":",
"if",
"'bkgsub'",
"not",
"in",
"self",
".",
"stages_complete",
":",
"raise",
"RuntimeError",
"(",
"'Cannot calculate ratios before background subtraction.'",
")",
"if",
"internal_standard",
"is",
"not",
"None",
":",
"self",
".",
"internal_standard",
"=",
"internal_standard",
"self",
".",
"minimal_analytes",
".",
"update",
"(",
"[",
"internal_standard",
"]",
")",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"self",
".",
"data",
")",
",",
"desc",
"=",
"'Ratio Calculation'",
")",
"as",
"prog",
":",
"for",
"s",
"in",
"self",
".",
"data",
".",
"values",
"(",
")",
":",
"s",
".",
"ratio",
"(",
"internal_standard",
"=",
"self",
".",
"internal_standard",
")",
"prog",
".",
"update",
"(",
")",
"self",
".",
"stages_complete",
".",
"update",
"(",
"[",
"'ratios'",
"]",
")",
"self",
".",
"focus_stage",
"=",
"'ratios'",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.srm_id_auto
|
Function for automarically identifying SRMs
Parameters
----------
srms_used : iterable
Which SRMs have been used. Must match SRM names
in SRM database *exactly* (case sensitive!).
n_min : int
The minimum number of data points a SRM measurement
must contain to be included.
|
latools/latools.py
|
def srm_id_auto(self, srms_used=['NIST610', 'NIST612', 'NIST614'], n_min=10, reload_srm_database=False):
"""
Function for automarically identifying SRMs
Parameters
----------
srms_used : iterable
Which SRMs have been used. Must match SRM names
in SRM database *exactly* (case sensitive!).
n_min : int
The minimum number of data points a SRM measurement
must contain to be included.
"""
if isinstance(srms_used, str):
srms_used = [srms_used]
# get mean and standard deviations of measured standards
self.srm_compile_measured(n_min)
stdtab = self.stdtab.copy()
# load corresponding SRM database
self.srm_load_database(srms_used, reload_srm_database)
# create blank srm table
srm_tab = self.srmdat.loc[:, ['mol_ratio', 'element']].reset_index().pivot(index='SRM', columns='element', values='mol_ratio')
# Auto - ID STDs
# 1. identify elements in measured SRMS with biggest range of values
meas_tab = stdtab.loc[:, (slice(None), 'mean')] # isolate means of standards
meas_tab.columns = meas_tab.columns.droplevel(1) # drop 'mean' column names
meas_tab.columns = [re.findall('[A-Za-z]+', a)[0] for a in meas_tab.columns] # rename to element names
meas_tab = meas_tab.T.groupby(level=0).first().T # remove duplicate columns
ranges = nominal_values(meas_tab.apply(lambda a: np.ptp(a) / np.nanmean(a), 0)) # calculate relative ranges of all elements
# (used as weights later)
# 2. Work out which standard is which
# normalise all elements between 0-1
def normalise(a):
a = nominal_values(a)
if np.nanmin(a) < np.nanmax(a):
return (a - np.nanmin(a)) / np.nanmax(a - np.nanmin(a))
else:
return np.ones(a.shape)
nmeas = meas_tab.apply(normalise, 0)
nmeas.dropna(1, inplace=True) # remove elements with NaN values
# nmeas.replace(np.nan, 1, inplace=True)
nsrm_tab = srm_tab.apply(normalise, 0)
nsrm_tab.dropna(1, inplace=True)
# nsrm_tab.replace(np.nan, 1, inplace=True)
for uT, r in nmeas.iterrows(): # for each standard...
idx = np.nansum(((nsrm_tab - r) * ranges)**2, 1)
idx = abs((nsrm_tab - r) * ranges).sum(1)
# calculate the absolute difference between the normalised elemental
# values for each measured SRM and the SRM table. Each element is
# multiplied by the relative range seen in that element (i.e. range / mean
# measuerd value), so that elements with a large difference are given
# more importance in identifying the SRM.
# This produces a table, where wach row contains the difference between
# a known vs. measured SRM. The measured SRM is identified as the SRM that
# has the smallest weighted sum value.
stdtab.loc[uT, 'SRM'] = srm_tab.index[idx == min(idx)].values[0]
# calculate mean time for each SRM
# reset index and sort
stdtab.reset_index(inplace=True)
stdtab.sort_index(1, inplace=True)
# isolate STD and uTime
uT = stdtab.loc[:, ['gTime', 'STD']].set_index('STD')
uT.sort_index(inplace=True)
uTm = uT.groupby(level=0).mean() # mean uTime for each SRM
# replace uTime values with means
stdtab.set_index(['STD'], inplace=True)
stdtab.loc[:, 'gTime'] = uTm
# reset index
stdtab.reset_index(inplace=True)
stdtab.set_index(['STD', 'SRM', 'gTime'], inplace=True)
# combine to make SRM reference tables
srmtabs = Bunch()
for a in self.analytes:
el = re.findall('[A-Za-z]+', a)[0]
sub = stdtab.loc[:, a]
srmsub = self.srmdat.loc[self.srmdat.element == el, ['mol_ratio', 'mol_ratio_err']]
srmtab = sub.join(srmsub)
srmtab.columns = ['meas_err', 'meas_mean', 'srm_mean', 'srm_err']
srmtabs[a] = srmtab
self.srmtabs = pd.concat(srmtabs).apply(nominal_values).sort_index()
self.srmtabs.dropna(subset=['srm_mean'], inplace=True)
# replace any nan error values with zeros - nans cause problems later.
self.srmtabs.loc[:, ['meas_err', 'srm_err']] = self.srmtabs.loc[:, ['meas_err', 'srm_err']].replace(np.nan, 0)
# remove internal standard from calibration elements
self.srmtabs.drop(self.internal_standard, inplace=True)
self.srms_ided = True
return
|
def srm_id_auto(self, srms_used=['NIST610', 'NIST612', 'NIST614'], n_min=10, reload_srm_database=False):
"""
Function for automarically identifying SRMs
Parameters
----------
srms_used : iterable
Which SRMs have been used. Must match SRM names
in SRM database *exactly* (case sensitive!).
n_min : int
The minimum number of data points a SRM measurement
must contain to be included.
"""
if isinstance(srms_used, str):
srms_used = [srms_used]
# get mean and standard deviations of measured standards
self.srm_compile_measured(n_min)
stdtab = self.stdtab.copy()
# load corresponding SRM database
self.srm_load_database(srms_used, reload_srm_database)
# create blank srm table
srm_tab = self.srmdat.loc[:, ['mol_ratio', 'element']].reset_index().pivot(index='SRM', columns='element', values='mol_ratio')
# Auto - ID STDs
# 1. identify elements in measured SRMS with biggest range of values
meas_tab = stdtab.loc[:, (slice(None), 'mean')] # isolate means of standards
meas_tab.columns = meas_tab.columns.droplevel(1) # drop 'mean' column names
meas_tab.columns = [re.findall('[A-Za-z]+', a)[0] for a in meas_tab.columns] # rename to element names
meas_tab = meas_tab.T.groupby(level=0).first().T # remove duplicate columns
ranges = nominal_values(meas_tab.apply(lambda a: np.ptp(a) / np.nanmean(a), 0)) # calculate relative ranges of all elements
# (used as weights later)
# 2. Work out which standard is which
# normalise all elements between 0-1
def normalise(a):
a = nominal_values(a)
if np.nanmin(a) < np.nanmax(a):
return (a - np.nanmin(a)) / np.nanmax(a - np.nanmin(a))
else:
return np.ones(a.shape)
nmeas = meas_tab.apply(normalise, 0)
nmeas.dropna(1, inplace=True) # remove elements with NaN values
# nmeas.replace(np.nan, 1, inplace=True)
nsrm_tab = srm_tab.apply(normalise, 0)
nsrm_tab.dropna(1, inplace=True)
# nsrm_tab.replace(np.nan, 1, inplace=True)
for uT, r in nmeas.iterrows(): # for each standard...
idx = np.nansum(((nsrm_tab - r) * ranges)**2, 1)
idx = abs((nsrm_tab - r) * ranges).sum(1)
# calculate the absolute difference between the normalised elemental
# values for each measured SRM and the SRM table. Each element is
# multiplied by the relative range seen in that element (i.e. range / mean
# measuerd value), so that elements with a large difference are given
# more importance in identifying the SRM.
# This produces a table, where wach row contains the difference between
# a known vs. measured SRM. The measured SRM is identified as the SRM that
# has the smallest weighted sum value.
stdtab.loc[uT, 'SRM'] = srm_tab.index[idx == min(idx)].values[0]
# calculate mean time for each SRM
# reset index and sort
stdtab.reset_index(inplace=True)
stdtab.sort_index(1, inplace=True)
# isolate STD and uTime
uT = stdtab.loc[:, ['gTime', 'STD']].set_index('STD')
uT.sort_index(inplace=True)
uTm = uT.groupby(level=0).mean() # mean uTime for each SRM
# replace uTime values with means
stdtab.set_index(['STD'], inplace=True)
stdtab.loc[:, 'gTime'] = uTm
# reset index
stdtab.reset_index(inplace=True)
stdtab.set_index(['STD', 'SRM', 'gTime'], inplace=True)
# combine to make SRM reference tables
srmtabs = Bunch()
for a in self.analytes:
el = re.findall('[A-Za-z]+', a)[0]
sub = stdtab.loc[:, a]
srmsub = self.srmdat.loc[self.srmdat.element == el, ['mol_ratio', 'mol_ratio_err']]
srmtab = sub.join(srmsub)
srmtab.columns = ['meas_err', 'meas_mean', 'srm_mean', 'srm_err']
srmtabs[a] = srmtab
self.srmtabs = pd.concat(srmtabs).apply(nominal_values).sort_index()
self.srmtabs.dropna(subset=['srm_mean'], inplace=True)
# replace any nan error values with zeros - nans cause problems later.
self.srmtabs.loc[:, ['meas_err', 'srm_err']] = self.srmtabs.loc[:, ['meas_err', 'srm_err']].replace(np.nan, 0)
# remove internal standard from calibration elements
self.srmtabs.drop(self.internal_standard, inplace=True)
self.srms_ided = True
return
|
[
"Function",
"for",
"automarically",
"identifying",
"SRMs",
"Parameters",
"----------",
"srms_used",
":",
"iterable",
"Which",
"SRMs",
"have",
"been",
"used",
".",
"Must",
"match",
"SRM",
"names",
"in",
"SRM",
"database",
"*",
"exactly",
"*",
"(",
"case",
"sensitive!",
")",
".",
"n_min",
":",
"int",
"The",
"minimum",
"number",
"of",
"data",
"points",
"a",
"SRM",
"measurement",
"must",
"contain",
"to",
"be",
"included",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L1319-L1422
|
[
"def",
"srm_id_auto",
"(",
"self",
",",
"srms_used",
"=",
"[",
"'NIST610'",
",",
"'NIST612'",
",",
"'NIST614'",
"]",
",",
"n_min",
"=",
"10",
",",
"reload_srm_database",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"srms_used",
",",
"str",
")",
":",
"srms_used",
"=",
"[",
"srms_used",
"]",
"# get mean and standard deviations of measured standards",
"self",
".",
"srm_compile_measured",
"(",
"n_min",
")",
"stdtab",
"=",
"self",
".",
"stdtab",
".",
"copy",
"(",
")",
"# load corresponding SRM database",
"self",
".",
"srm_load_database",
"(",
"srms_used",
",",
"reload_srm_database",
")",
"# create blank srm table",
"srm_tab",
"=",
"self",
".",
"srmdat",
".",
"loc",
"[",
":",
",",
"[",
"'mol_ratio'",
",",
"'element'",
"]",
"]",
".",
"reset_index",
"(",
")",
".",
"pivot",
"(",
"index",
"=",
"'SRM'",
",",
"columns",
"=",
"'element'",
",",
"values",
"=",
"'mol_ratio'",
")",
"# Auto - ID STDs",
"# 1. identify elements in measured SRMS with biggest range of values",
"meas_tab",
"=",
"stdtab",
".",
"loc",
"[",
":",
",",
"(",
"slice",
"(",
"None",
")",
",",
"'mean'",
")",
"]",
"# isolate means of standards",
"meas_tab",
".",
"columns",
"=",
"meas_tab",
".",
"columns",
".",
"droplevel",
"(",
"1",
")",
"# drop 'mean' column names",
"meas_tab",
".",
"columns",
"=",
"[",
"re",
".",
"findall",
"(",
"'[A-Za-z]+'",
",",
"a",
")",
"[",
"0",
"]",
"for",
"a",
"in",
"meas_tab",
".",
"columns",
"]",
"# rename to element names",
"meas_tab",
"=",
"meas_tab",
".",
"T",
".",
"groupby",
"(",
"level",
"=",
"0",
")",
".",
"first",
"(",
")",
".",
"T",
"# remove duplicate columns",
"ranges",
"=",
"nominal_values",
"(",
"meas_tab",
".",
"apply",
"(",
"lambda",
"a",
":",
"np",
".",
"ptp",
"(",
"a",
")",
"/",
"np",
".",
"nanmean",
"(",
"a",
")",
",",
"0",
")",
")",
"# calculate relative ranges of all elements",
"# (used as weights later)",
"# 2. Work out which standard is which",
"# normalise all elements between 0-1",
"def",
"normalise",
"(",
"a",
")",
":",
"a",
"=",
"nominal_values",
"(",
"a",
")",
"if",
"np",
".",
"nanmin",
"(",
"a",
")",
"<",
"np",
".",
"nanmax",
"(",
"a",
")",
":",
"return",
"(",
"a",
"-",
"np",
".",
"nanmin",
"(",
"a",
")",
")",
"/",
"np",
".",
"nanmax",
"(",
"a",
"-",
"np",
".",
"nanmin",
"(",
"a",
")",
")",
"else",
":",
"return",
"np",
".",
"ones",
"(",
"a",
".",
"shape",
")",
"nmeas",
"=",
"meas_tab",
".",
"apply",
"(",
"normalise",
",",
"0",
")",
"nmeas",
".",
"dropna",
"(",
"1",
",",
"inplace",
"=",
"True",
")",
"# remove elements with NaN values",
"# nmeas.replace(np.nan, 1, inplace=True)",
"nsrm_tab",
"=",
"srm_tab",
".",
"apply",
"(",
"normalise",
",",
"0",
")",
"nsrm_tab",
".",
"dropna",
"(",
"1",
",",
"inplace",
"=",
"True",
")",
"# nsrm_tab.replace(np.nan, 1, inplace=True)",
"for",
"uT",
",",
"r",
"in",
"nmeas",
".",
"iterrows",
"(",
")",
":",
"# for each standard...",
"idx",
"=",
"np",
".",
"nansum",
"(",
"(",
"(",
"nsrm_tab",
"-",
"r",
")",
"*",
"ranges",
")",
"**",
"2",
",",
"1",
")",
"idx",
"=",
"abs",
"(",
"(",
"nsrm_tab",
"-",
"r",
")",
"*",
"ranges",
")",
".",
"sum",
"(",
"1",
")",
"# calculate the absolute difference between the normalised elemental",
"# values for each measured SRM and the SRM table. Each element is",
"# multiplied by the relative range seen in that element (i.e. range / mean",
"# measuerd value), so that elements with a large difference are given",
"# more importance in identifying the SRM. ",
"# This produces a table, where wach row contains the difference between",
"# a known vs. measured SRM. The measured SRM is identified as the SRM that",
"# has the smallest weighted sum value.",
"stdtab",
".",
"loc",
"[",
"uT",
",",
"'SRM'",
"]",
"=",
"srm_tab",
".",
"index",
"[",
"idx",
"==",
"min",
"(",
"idx",
")",
"]",
".",
"values",
"[",
"0",
"]",
"# calculate mean time for each SRM",
"# reset index and sort",
"stdtab",
".",
"reset_index",
"(",
"inplace",
"=",
"True",
")",
"stdtab",
".",
"sort_index",
"(",
"1",
",",
"inplace",
"=",
"True",
")",
"# isolate STD and uTime",
"uT",
"=",
"stdtab",
".",
"loc",
"[",
":",
",",
"[",
"'gTime'",
",",
"'STD'",
"]",
"]",
".",
"set_index",
"(",
"'STD'",
")",
"uT",
".",
"sort_index",
"(",
"inplace",
"=",
"True",
")",
"uTm",
"=",
"uT",
".",
"groupby",
"(",
"level",
"=",
"0",
")",
".",
"mean",
"(",
")",
"# mean uTime for each SRM",
"# replace uTime values with means",
"stdtab",
".",
"set_index",
"(",
"[",
"'STD'",
"]",
",",
"inplace",
"=",
"True",
")",
"stdtab",
".",
"loc",
"[",
":",
",",
"'gTime'",
"]",
"=",
"uTm",
"# reset index",
"stdtab",
".",
"reset_index",
"(",
"inplace",
"=",
"True",
")",
"stdtab",
".",
"set_index",
"(",
"[",
"'STD'",
",",
"'SRM'",
",",
"'gTime'",
"]",
",",
"inplace",
"=",
"True",
")",
"# combine to make SRM reference tables",
"srmtabs",
"=",
"Bunch",
"(",
")",
"for",
"a",
"in",
"self",
".",
"analytes",
":",
"el",
"=",
"re",
".",
"findall",
"(",
"'[A-Za-z]+'",
",",
"a",
")",
"[",
"0",
"]",
"sub",
"=",
"stdtab",
".",
"loc",
"[",
":",
",",
"a",
"]",
"srmsub",
"=",
"self",
".",
"srmdat",
".",
"loc",
"[",
"self",
".",
"srmdat",
".",
"element",
"==",
"el",
",",
"[",
"'mol_ratio'",
",",
"'mol_ratio_err'",
"]",
"]",
"srmtab",
"=",
"sub",
".",
"join",
"(",
"srmsub",
")",
"srmtab",
".",
"columns",
"=",
"[",
"'meas_err'",
",",
"'meas_mean'",
",",
"'srm_mean'",
",",
"'srm_err'",
"]",
"srmtabs",
"[",
"a",
"]",
"=",
"srmtab",
"self",
".",
"srmtabs",
"=",
"pd",
".",
"concat",
"(",
"srmtabs",
")",
".",
"apply",
"(",
"nominal_values",
")",
".",
"sort_index",
"(",
")",
"self",
".",
"srmtabs",
".",
"dropna",
"(",
"subset",
"=",
"[",
"'srm_mean'",
"]",
",",
"inplace",
"=",
"True",
")",
"# replace any nan error values with zeros - nans cause problems later.",
"self",
".",
"srmtabs",
".",
"loc",
"[",
":",
",",
"[",
"'meas_err'",
",",
"'srm_err'",
"]",
"]",
"=",
"self",
".",
"srmtabs",
".",
"loc",
"[",
":",
",",
"[",
"'meas_err'",
",",
"'srm_err'",
"]",
"]",
".",
"replace",
"(",
"np",
".",
"nan",
",",
"0",
")",
"# remove internal standard from calibration elements",
"self",
".",
"srmtabs",
".",
"drop",
"(",
"self",
".",
"internal_standard",
",",
"inplace",
"=",
"True",
")",
"self",
".",
"srms_ided",
"=",
"True",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.calibrate
|
Calibrates the data to measured SRM values.
Assumes that y intercept is zero.
Parameters
----------
analytes : str or iterable
Which analytes you'd like to calibrate. Defaults to all.
drift_correct : bool
Whether to pool all SRM measurements into a single calibration,
or vary the calibration through the run, interpolating
coefficients between measured SRMs.
srms_used : str or iterable
Which SRMs have been measured. Must match names given in
SRM data file *exactly*.
n_min : int
The minimum number of data points an SRM measurement
must have to be included.
Returns
-------
None
|
latools/latools.py
|
def calibrate(self, analytes=None, drift_correct=True,
srms_used=['NIST610', 'NIST612', 'NIST614'],
zero_intercept=True, n_min=10, reload_srm_database=False):
"""
Calibrates the data to measured SRM values.
Assumes that y intercept is zero.
Parameters
----------
analytes : str or iterable
Which analytes you'd like to calibrate. Defaults to all.
drift_correct : bool
Whether to pool all SRM measurements into a single calibration,
or vary the calibration through the run, interpolating
coefficients between measured SRMs.
srms_used : str or iterable
Which SRMs have been measured. Must match names given in
SRM data file *exactly*.
n_min : int
The minimum number of data points an SRM measurement
must have to be included.
Returns
-------
None
"""
if analytes is None:
analytes = self.analytes[self.analytes != self.internal_standard]
elif isinstance(analytes, str):
analytes = [analytes]
if not hasattr(self, 'srmtabs'):
self.srm_id_auto(srms_used=srms_used, n_min=n_min, reload_srm_database=reload_srm_database)
# make container for calibration params
if not hasattr(self, 'calib_params'):
gTime = self.stdtab.gTime.unique()
self.calib_params = pd.DataFrame(columns=pd.MultiIndex.from_product([analytes, ['m']]),
index=gTime)
calib_analytes = self.srmtabs.index.get_level_values(0).unique()
if zero_intercept:
fn = lambda x, m: x * m
else:
fn = lambda x, m, c: x * m + c
for a in calib_analytes:
if zero_intercept:
if (a, 'c') in self.calib_params:
self.calib_params.drop((a, 'c'), 1, inplace=True)
if drift_correct:
for g in self.stdtab.gTime.unique():
ind = idx[a, :, :, g]
if self.srmtabs.loc[ind].size == 0:
continue
# try:
meas = self.srmtabs.loc[ind, 'meas_mean']
srm = self.srmtabs.loc[ind, 'srm_mean']
# TODO: replace curve_fit with Sambridge's 2D likelihood function for better uncertainty incorporation.
merr = self.srmtabs.loc[ind, 'meas_err']
serr = self.srmtabs.loc[ind, 'srm_err']
sigma = np.sqrt(merr**2 + serr**2)
if len(meas) > 1:
# multiple SRMs - do a regression
p, cov = curve_fit(fn, meas, srm, sigma=sigma)
pe = unc.correlated_values(p, cov)
self.calib_params.loc[g, (a, 'm')] = pe[0]
if not zero_intercept:
self.calib_params.loc[g, (a, 'c')] = pe[1]
else:
# deal with case where there's only one datum
self.calib_params.loc[g, (a, 'm')] = (un.uarray(srm, serr) /
un.uarray(meas, merr))[0]
if not zero_intercept:
self.calib_params.loc[g, (a, 'c')] = 0
# This should be obsolete, because no-longer sourcing locator from calib_params index.
# except KeyError:
# # If the calibration is being recalculated, calib_params
# # will have t=0 and t=max(uTime) values that are outside
# # the srmtabs index.
# # If this happens, drop them, and re-fill them at the end.
# self.calib_params.drop(g, inplace=True)
else:
ind = idx[a, :, :, :]
meas = self.srmtabs.loc[ind, 'meas_mean']
srm = self.srmtabs.loc[ind, 'srm_mean']
merr = self.srmtabs.loc[ind, 'meas_err']
serr = self.srmtabs.loc[ind, 'srm_err']
sigma = np.sqrt(merr**2 + serr**2)
if len(meas) > 1:
p, cov = curve_fit(fn, meas, srm, sigma=sigma)
pe = unc.correlated_values(p, cov)
self.calib_params.loc[:, (a, 'm')] = pe[0]
if not zero_intercept:
self.calib_params.loc[:, (a, 'c')] = pe[1]
else:
self.calib_params.loc[:, (a, 'm')] = (un.uarray(srm, serr) /
un.uarray(meas, merr))[0]
if not zero_intercept:
self.calib_params.loc[:, (a, 'c')] = 0
# if fill:
# fill in uTime=0 and uTime = max cases for interpolation
if self.calib_params.index.min() == 0:
self.calib_params.drop(0, inplace=True)
self.calib_params.drop(self.calib_params.index.max(), inplace=True)
self.calib_params.loc[0, :] = self.calib_params.loc[self.calib_params.index.min(), :]
maxuT = np.max([d.uTime.max() for d in self.data.values()]) # calculate max uTime
self.calib_params.loc[maxuT, :] = self.calib_params.loc[self.calib_params.index.max(), :]
# sort indices for slice access
self.calib_params.sort_index(1, inplace=True)
self.calib_params.sort_index(0, inplace=True)
# calculcate interpolators for applying calibrations
self.calib_ps = Bunch()
for a in analytes:
# TODO: revisit un_interp1d to see whether it plays well with correlated values.
# Possible re-write to deal with covariance matrices?
self.calib_ps[a] = {'m': un_interp1d(self.calib_params.index.values,
self.calib_params.loc[:, (a, 'm')].values)}
if not zero_intercept:
self.calib_ps[a]['c'] = un_interp1d(self.calib_params.index.values,
self.calib_params.loc[:, (a, 'c')].values)
with self.pbar.set(total=len(self.data), desc='Applying Calibrations') as prog:
for d in self.data.values():
d.calibrate(self.calib_ps, analytes)
prog.update()
# record SRMs used for plotting
markers = 'osDsv<>PX' # for future implementation of SRM-specific markers.
if not hasattr(self, 'srms_used'):
self.srms_used = set(srms_used)
else:
self.srms_used.update(srms_used)
self.srm_mdict = {k: markers[i] for i, k in enumerate(self.srms_used)}
self.stages_complete.update(['calibrated'])
self.focus_stage = 'calibrated'
return
|
def calibrate(self, analytes=None, drift_correct=True,
srms_used=['NIST610', 'NIST612', 'NIST614'],
zero_intercept=True, n_min=10, reload_srm_database=False):
"""
Calibrates the data to measured SRM values.
Assumes that y intercept is zero.
Parameters
----------
analytes : str or iterable
Which analytes you'd like to calibrate. Defaults to all.
drift_correct : bool
Whether to pool all SRM measurements into a single calibration,
or vary the calibration through the run, interpolating
coefficients between measured SRMs.
srms_used : str or iterable
Which SRMs have been measured. Must match names given in
SRM data file *exactly*.
n_min : int
The minimum number of data points an SRM measurement
must have to be included.
Returns
-------
None
"""
if analytes is None:
analytes = self.analytes[self.analytes != self.internal_standard]
elif isinstance(analytes, str):
analytes = [analytes]
if not hasattr(self, 'srmtabs'):
self.srm_id_auto(srms_used=srms_used, n_min=n_min, reload_srm_database=reload_srm_database)
# make container for calibration params
if not hasattr(self, 'calib_params'):
gTime = self.stdtab.gTime.unique()
self.calib_params = pd.DataFrame(columns=pd.MultiIndex.from_product([analytes, ['m']]),
index=gTime)
calib_analytes = self.srmtabs.index.get_level_values(0).unique()
if zero_intercept:
fn = lambda x, m: x * m
else:
fn = lambda x, m, c: x * m + c
for a in calib_analytes:
if zero_intercept:
if (a, 'c') in self.calib_params:
self.calib_params.drop((a, 'c'), 1, inplace=True)
if drift_correct:
for g in self.stdtab.gTime.unique():
ind = idx[a, :, :, g]
if self.srmtabs.loc[ind].size == 0:
continue
# try:
meas = self.srmtabs.loc[ind, 'meas_mean']
srm = self.srmtabs.loc[ind, 'srm_mean']
# TODO: replace curve_fit with Sambridge's 2D likelihood function for better uncertainty incorporation.
merr = self.srmtabs.loc[ind, 'meas_err']
serr = self.srmtabs.loc[ind, 'srm_err']
sigma = np.sqrt(merr**2 + serr**2)
if len(meas) > 1:
# multiple SRMs - do a regression
p, cov = curve_fit(fn, meas, srm, sigma=sigma)
pe = unc.correlated_values(p, cov)
self.calib_params.loc[g, (a, 'm')] = pe[0]
if not zero_intercept:
self.calib_params.loc[g, (a, 'c')] = pe[1]
else:
# deal with case where there's only one datum
self.calib_params.loc[g, (a, 'm')] = (un.uarray(srm, serr) /
un.uarray(meas, merr))[0]
if not zero_intercept:
self.calib_params.loc[g, (a, 'c')] = 0
# This should be obsolete, because no-longer sourcing locator from calib_params index.
# except KeyError:
# # If the calibration is being recalculated, calib_params
# # will have t=0 and t=max(uTime) values that are outside
# # the srmtabs index.
# # If this happens, drop them, and re-fill them at the end.
# self.calib_params.drop(g, inplace=True)
else:
ind = idx[a, :, :, :]
meas = self.srmtabs.loc[ind, 'meas_mean']
srm = self.srmtabs.loc[ind, 'srm_mean']
merr = self.srmtabs.loc[ind, 'meas_err']
serr = self.srmtabs.loc[ind, 'srm_err']
sigma = np.sqrt(merr**2 + serr**2)
if len(meas) > 1:
p, cov = curve_fit(fn, meas, srm, sigma=sigma)
pe = unc.correlated_values(p, cov)
self.calib_params.loc[:, (a, 'm')] = pe[0]
if not zero_intercept:
self.calib_params.loc[:, (a, 'c')] = pe[1]
else:
self.calib_params.loc[:, (a, 'm')] = (un.uarray(srm, serr) /
un.uarray(meas, merr))[0]
if not zero_intercept:
self.calib_params.loc[:, (a, 'c')] = 0
# if fill:
# fill in uTime=0 and uTime = max cases for interpolation
if self.calib_params.index.min() == 0:
self.calib_params.drop(0, inplace=True)
self.calib_params.drop(self.calib_params.index.max(), inplace=True)
self.calib_params.loc[0, :] = self.calib_params.loc[self.calib_params.index.min(), :]
maxuT = np.max([d.uTime.max() for d in self.data.values()]) # calculate max uTime
self.calib_params.loc[maxuT, :] = self.calib_params.loc[self.calib_params.index.max(), :]
# sort indices for slice access
self.calib_params.sort_index(1, inplace=True)
self.calib_params.sort_index(0, inplace=True)
# calculcate interpolators for applying calibrations
self.calib_ps = Bunch()
for a in analytes:
# TODO: revisit un_interp1d to see whether it plays well with correlated values.
# Possible re-write to deal with covariance matrices?
self.calib_ps[a] = {'m': un_interp1d(self.calib_params.index.values,
self.calib_params.loc[:, (a, 'm')].values)}
if not zero_intercept:
self.calib_ps[a]['c'] = un_interp1d(self.calib_params.index.values,
self.calib_params.loc[:, (a, 'c')].values)
with self.pbar.set(total=len(self.data), desc='Applying Calibrations') as prog:
for d in self.data.values():
d.calibrate(self.calib_ps, analytes)
prog.update()
# record SRMs used for plotting
markers = 'osDsv<>PX' # for future implementation of SRM-specific markers.
if not hasattr(self, 'srms_used'):
self.srms_used = set(srms_used)
else:
self.srms_used.update(srms_used)
self.srm_mdict = {k: markers[i] for i, k in enumerate(self.srms_used)}
self.stages_complete.update(['calibrated'])
self.focus_stage = 'calibrated'
return
|
[
"Calibrates",
"the",
"data",
"to",
"measured",
"SRM",
"values",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L1431-L1576
|
[
"def",
"calibrate",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"drift_correct",
"=",
"True",
",",
"srms_used",
"=",
"[",
"'NIST610'",
",",
"'NIST612'",
",",
"'NIST614'",
"]",
",",
"zero_intercept",
"=",
"True",
",",
"n_min",
"=",
"10",
",",
"reload_srm_database",
"=",
"False",
")",
":",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"[",
"self",
".",
"analytes",
"!=",
"self",
".",
"internal_standard",
"]",
"elif",
"isinstance",
"(",
"analytes",
",",
"str",
")",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'srmtabs'",
")",
":",
"self",
".",
"srm_id_auto",
"(",
"srms_used",
"=",
"srms_used",
",",
"n_min",
"=",
"n_min",
",",
"reload_srm_database",
"=",
"reload_srm_database",
")",
"# make container for calibration params",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'calib_params'",
")",
":",
"gTime",
"=",
"self",
".",
"stdtab",
".",
"gTime",
".",
"unique",
"(",
")",
"self",
".",
"calib_params",
"=",
"pd",
".",
"DataFrame",
"(",
"columns",
"=",
"pd",
".",
"MultiIndex",
".",
"from_product",
"(",
"[",
"analytes",
",",
"[",
"'m'",
"]",
"]",
")",
",",
"index",
"=",
"gTime",
")",
"calib_analytes",
"=",
"self",
".",
"srmtabs",
".",
"index",
".",
"get_level_values",
"(",
"0",
")",
".",
"unique",
"(",
")",
"if",
"zero_intercept",
":",
"fn",
"=",
"lambda",
"x",
",",
"m",
":",
"x",
"*",
"m",
"else",
":",
"fn",
"=",
"lambda",
"x",
",",
"m",
",",
"c",
":",
"x",
"*",
"m",
"+",
"c",
"for",
"a",
"in",
"calib_analytes",
":",
"if",
"zero_intercept",
":",
"if",
"(",
"a",
",",
"'c'",
")",
"in",
"self",
".",
"calib_params",
":",
"self",
".",
"calib_params",
".",
"drop",
"(",
"(",
"a",
",",
"'c'",
")",
",",
"1",
",",
"inplace",
"=",
"True",
")",
"if",
"drift_correct",
":",
"for",
"g",
"in",
"self",
".",
"stdtab",
".",
"gTime",
".",
"unique",
"(",
")",
":",
"ind",
"=",
"idx",
"[",
"a",
",",
":",
",",
":",
",",
"g",
"]",
"if",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"ind",
"]",
".",
"size",
"==",
"0",
":",
"continue",
"# try:",
"meas",
"=",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"ind",
",",
"'meas_mean'",
"]",
"srm",
"=",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"ind",
",",
"'srm_mean'",
"]",
"# TODO: replace curve_fit with Sambridge's 2D likelihood function for better uncertainty incorporation.",
"merr",
"=",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"ind",
",",
"'meas_err'",
"]",
"serr",
"=",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"ind",
",",
"'srm_err'",
"]",
"sigma",
"=",
"np",
".",
"sqrt",
"(",
"merr",
"**",
"2",
"+",
"serr",
"**",
"2",
")",
"if",
"len",
"(",
"meas",
")",
">",
"1",
":",
"# multiple SRMs - do a regression",
"p",
",",
"cov",
"=",
"curve_fit",
"(",
"fn",
",",
"meas",
",",
"srm",
",",
"sigma",
"=",
"sigma",
")",
"pe",
"=",
"unc",
".",
"correlated_values",
"(",
"p",
",",
"cov",
")",
"self",
".",
"calib_params",
".",
"loc",
"[",
"g",
",",
"(",
"a",
",",
"'m'",
")",
"]",
"=",
"pe",
"[",
"0",
"]",
"if",
"not",
"zero_intercept",
":",
"self",
".",
"calib_params",
".",
"loc",
"[",
"g",
",",
"(",
"a",
",",
"'c'",
")",
"]",
"=",
"pe",
"[",
"1",
"]",
"else",
":",
"# deal with case where there's only one datum",
"self",
".",
"calib_params",
".",
"loc",
"[",
"g",
",",
"(",
"a",
",",
"'m'",
")",
"]",
"=",
"(",
"un",
".",
"uarray",
"(",
"srm",
",",
"serr",
")",
"/",
"un",
".",
"uarray",
"(",
"meas",
",",
"merr",
")",
")",
"[",
"0",
"]",
"if",
"not",
"zero_intercept",
":",
"self",
".",
"calib_params",
".",
"loc",
"[",
"g",
",",
"(",
"a",
",",
"'c'",
")",
"]",
"=",
"0",
"# This should be obsolete, because no-longer sourcing locator from calib_params index.",
"# except KeyError:",
"# # If the calibration is being recalculated, calib_params",
"# # will have t=0 and t=max(uTime) values that are outside",
"# # the srmtabs index.",
"# # If this happens, drop them, and re-fill them at the end.",
"# self.calib_params.drop(g, inplace=True)",
"else",
":",
"ind",
"=",
"idx",
"[",
"a",
",",
":",
",",
":",
",",
":",
"]",
"meas",
"=",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"ind",
",",
"'meas_mean'",
"]",
"srm",
"=",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"ind",
",",
"'srm_mean'",
"]",
"merr",
"=",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"ind",
",",
"'meas_err'",
"]",
"serr",
"=",
"self",
".",
"srmtabs",
".",
"loc",
"[",
"ind",
",",
"'srm_err'",
"]",
"sigma",
"=",
"np",
".",
"sqrt",
"(",
"merr",
"**",
"2",
"+",
"serr",
"**",
"2",
")",
"if",
"len",
"(",
"meas",
")",
">",
"1",
":",
"p",
",",
"cov",
"=",
"curve_fit",
"(",
"fn",
",",
"meas",
",",
"srm",
",",
"sigma",
"=",
"sigma",
")",
"pe",
"=",
"unc",
".",
"correlated_values",
"(",
"p",
",",
"cov",
")",
"self",
".",
"calib_params",
".",
"loc",
"[",
":",
",",
"(",
"a",
",",
"'m'",
")",
"]",
"=",
"pe",
"[",
"0",
"]",
"if",
"not",
"zero_intercept",
":",
"self",
".",
"calib_params",
".",
"loc",
"[",
":",
",",
"(",
"a",
",",
"'c'",
")",
"]",
"=",
"pe",
"[",
"1",
"]",
"else",
":",
"self",
".",
"calib_params",
".",
"loc",
"[",
":",
",",
"(",
"a",
",",
"'m'",
")",
"]",
"=",
"(",
"un",
".",
"uarray",
"(",
"srm",
",",
"serr",
")",
"/",
"un",
".",
"uarray",
"(",
"meas",
",",
"merr",
")",
")",
"[",
"0",
"]",
"if",
"not",
"zero_intercept",
":",
"self",
".",
"calib_params",
".",
"loc",
"[",
":",
",",
"(",
"a",
",",
"'c'",
")",
"]",
"=",
"0",
"# if fill:",
"# fill in uTime=0 and uTime = max cases for interpolation",
"if",
"self",
".",
"calib_params",
".",
"index",
".",
"min",
"(",
")",
"==",
"0",
":",
"self",
".",
"calib_params",
".",
"drop",
"(",
"0",
",",
"inplace",
"=",
"True",
")",
"self",
".",
"calib_params",
".",
"drop",
"(",
"self",
".",
"calib_params",
".",
"index",
".",
"max",
"(",
")",
",",
"inplace",
"=",
"True",
")",
"self",
".",
"calib_params",
".",
"loc",
"[",
"0",
",",
":",
"]",
"=",
"self",
".",
"calib_params",
".",
"loc",
"[",
"self",
".",
"calib_params",
".",
"index",
".",
"min",
"(",
")",
",",
":",
"]",
"maxuT",
"=",
"np",
".",
"max",
"(",
"[",
"d",
".",
"uTime",
".",
"max",
"(",
")",
"for",
"d",
"in",
"self",
".",
"data",
".",
"values",
"(",
")",
"]",
")",
"# calculate max uTime",
"self",
".",
"calib_params",
".",
"loc",
"[",
"maxuT",
",",
":",
"]",
"=",
"self",
".",
"calib_params",
".",
"loc",
"[",
"self",
".",
"calib_params",
".",
"index",
".",
"max",
"(",
")",
",",
":",
"]",
"# sort indices for slice access",
"self",
".",
"calib_params",
".",
"sort_index",
"(",
"1",
",",
"inplace",
"=",
"True",
")",
"self",
".",
"calib_params",
".",
"sort_index",
"(",
"0",
",",
"inplace",
"=",
"True",
")",
"# calculcate interpolators for applying calibrations",
"self",
".",
"calib_ps",
"=",
"Bunch",
"(",
")",
"for",
"a",
"in",
"analytes",
":",
"# TODO: revisit un_interp1d to see whether it plays well with correlated values. ",
"# Possible re-write to deal with covariance matrices?",
"self",
".",
"calib_ps",
"[",
"a",
"]",
"=",
"{",
"'m'",
":",
"un_interp1d",
"(",
"self",
".",
"calib_params",
".",
"index",
".",
"values",
",",
"self",
".",
"calib_params",
".",
"loc",
"[",
":",
",",
"(",
"a",
",",
"'m'",
")",
"]",
".",
"values",
")",
"}",
"if",
"not",
"zero_intercept",
":",
"self",
".",
"calib_ps",
"[",
"a",
"]",
"[",
"'c'",
"]",
"=",
"un_interp1d",
"(",
"self",
".",
"calib_params",
".",
"index",
".",
"values",
",",
"self",
".",
"calib_params",
".",
"loc",
"[",
":",
",",
"(",
"a",
",",
"'c'",
")",
"]",
".",
"values",
")",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"self",
".",
"data",
")",
",",
"desc",
"=",
"'Applying Calibrations'",
")",
"as",
"prog",
":",
"for",
"d",
"in",
"self",
".",
"data",
".",
"values",
"(",
")",
":",
"d",
".",
"calibrate",
"(",
"self",
".",
"calib_ps",
",",
"analytes",
")",
"prog",
".",
"update",
"(",
")",
"# record SRMs used for plotting",
"markers",
"=",
"'osDsv<>PX'",
"# for future implementation of SRM-specific markers.",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'srms_used'",
")",
":",
"self",
".",
"srms_used",
"=",
"set",
"(",
"srms_used",
")",
"else",
":",
"self",
".",
"srms_used",
".",
"update",
"(",
"srms_used",
")",
"self",
".",
"srm_mdict",
"=",
"{",
"k",
":",
"markers",
"[",
"i",
"]",
"for",
"i",
",",
"k",
"in",
"enumerate",
"(",
"self",
".",
"srms_used",
")",
"}",
"self",
".",
"stages_complete",
".",
"update",
"(",
"[",
"'calibrated'",
"]",
")",
"self",
".",
"focus_stage",
"=",
"'calibrated'",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.make_subset
|
Creates a subset of samples, which can be treated independently.
Parameters
----------
samples : str or array - like
Name of sample, or list of sample names.
name : (optional) str or number
The name of the sample group. Defaults to n + 1, where n is
the highest existing group number
|
latools/latools.py
|
def make_subset(self, samples=None, name=None):
"""
Creates a subset of samples, which can be treated independently.
Parameters
----------
samples : str or array - like
Name of sample, or list of sample names.
name : (optional) str or number
The name of the sample group. Defaults to n + 1, where n is
the highest existing group number
"""
# Check if a subset containing the same samples already exists.
for k, v in self.subsets.items():
if set(v) == set(samples) and k != 'not_in_set':
return k
if isinstance(samples, str):
samples = [samples]
not_exists = [s for s in samples if s not in self.subsets['All_Analyses']]
if len(not_exists) > 0:
raise ValueError(', '.join(not_exists) + ' not in the list of sample names.\nPlease check your sample names.\nNote: Sample names are stored in the .samples attribute of your analysis.')
if name is None:
name = max([-1] + [x for x in self.subsets.keys() if isinstance(x, int)]) + 1
self._subset_names.append(name)
if samples is not None:
self.subsets[name] = samples
for s in samples:
try:
self.subsets['not_in_set'].remove(s)
except ValueError:
pass
self._has_subsets = True
# for subset in np.unique(list(self.subsets.values())):
# self.subsets[subset] = sorted([k for k, v in self.subsets.items() if str(v) == subset])
return name
|
def make_subset(self, samples=None, name=None):
"""
Creates a subset of samples, which can be treated independently.
Parameters
----------
samples : str or array - like
Name of sample, or list of sample names.
name : (optional) str or number
The name of the sample group. Defaults to n + 1, where n is
the highest existing group number
"""
# Check if a subset containing the same samples already exists.
for k, v in self.subsets.items():
if set(v) == set(samples) and k != 'not_in_set':
return k
if isinstance(samples, str):
samples = [samples]
not_exists = [s for s in samples if s not in self.subsets['All_Analyses']]
if len(not_exists) > 0:
raise ValueError(', '.join(not_exists) + ' not in the list of sample names.\nPlease check your sample names.\nNote: Sample names are stored in the .samples attribute of your analysis.')
if name is None:
name = max([-1] + [x for x in self.subsets.keys() if isinstance(x, int)]) + 1
self._subset_names.append(name)
if samples is not None:
self.subsets[name] = samples
for s in samples:
try:
self.subsets['not_in_set'].remove(s)
except ValueError:
pass
self._has_subsets = True
# for subset in np.unique(list(self.subsets.values())):
# self.subsets[subset] = sorted([k for k, v in self.subsets.items() if str(v) == subset])
return name
|
[
"Creates",
"a",
"subset",
"of",
"samples",
"which",
"can",
"be",
"treated",
"independently",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L1711-L1753
|
[
"def",
"make_subset",
"(",
"self",
",",
"samples",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"# Check if a subset containing the same samples already exists.",
"for",
"k",
",",
"v",
"in",
"self",
".",
"subsets",
".",
"items",
"(",
")",
":",
"if",
"set",
"(",
"v",
")",
"==",
"set",
"(",
"samples",
")",
"and",
"k",
"!=",
"'not_in_set'",
":",
"return",
"k",
"if",
"isinstance",
"(",
"samples",
",",
"str",
")",
":",
"samples",
"=",
"[",
"samples",
"]",
"not_exists",
"=",
"[",
"s",
"for",
"s",
"in",
"samples",
"if",
"s",
"not",
"in",
"self",
".",
"subsets",
"[",
"'All_Analyses'",
"]",
"]",
"if",
"len",
"(",
"not_exists",
")",
">",
"0",
":",
"raise",
"ValueError",
"(",
"', '",
".",
"join",
"(",
"not_exists",
")",
"+",
"' not in the list of sample names.\\nPlease check your sample names.\\nNote: Sample names are stored in the .samples attribute of your analysis.'",
")",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"max",
"(",
"[",
"-",
"1",
"]",
"+",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"subsets",
".",
"keys",
"(",
")",
"if",
"isinstance",
"(",
"x",
",",
"int",
")",
"]",
")",
"+",
"1",
"self",
".",
"_subset_names",
".",
"append",
"(",
"name",
")",
"if",
"samples",
"is",
"not",
"None",
":",
"self",
".",
"subsets",
"[",
"name",
"]",
"=",
"samples",
"for",
"s",
"in",
"samples",
":",
"try",
":",
"self",
".",
"subsets",
"[",
"'not_in_set'",
"]",
".",
"remove",
"(",
"s",
")",
"except",
"ValueError",
":",
"pass",
"self",
".",
"_has_subsets",
"=",
"True",
"# for subset in np.unique(list(self.subsets.values())):",
"# self.subsets[subset] = sorted([k for k, v in self.subsets.items() if str(v) == subset])",
"return",
"name"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.zeroscreen
|
Remove all points containing data below zero (which are impossible!)
|
latools/latools.py
|
def zeroscreen(self, focus_stage=None):
"""
Remove all points containing data below zero (which are impossible!)
"""
if focus_stage is None:
focus_stage = self.focus_stage
for s in self.data.values():
ind = np.ones(len(s.Time), dtype=bool)
for v in s.data[focus_stage].values():
ind = ind & (nominal_values(v) > 0)
for k in s.data[focus_stage].keys():
s.data[focus_stage][k][~ind] = unc.ufloat(np.nan, np.nan)
self.set_focus(focus_stage)
return
|
def zeroscreen(self, focus_stage=None):
"""
Remove all points containing data below zero (which are impossible!)
"""
if focus_stage is None:
focus_stage = self.focus_stage
for s in self.data.values():
ind = np.ones(len(s.Time), dtype=bool)
for v in s.data[focus_stage].values():
ind = ind & (nominal_values(v) > 0)
for k in s.data[focus_stage].keys():
s.data[focus_stage][k][~ind] = unc.ufloat(np.nan, np.nan)
self.set_focus(focus_stage)
return
|
[
"Remove",
"all",
"points",
"containing",
"data",
"below",
"zero",
"(",
"which",
"are",
"impossible!",
")"
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L1756-L1773
|
[
"def",
"zeroscreen",
"(",
"self",
",",
"focus_stage",
"=",
"None",
")",
":",
"if",
"focus_stage",
"is",
"None",
":",
"focus_stage",
"=",
"self",
".",
"focus_stage",
"for",
"s",
"in",
"self",
".",
"data",
".",
"values",
"(",
")",
":",
"ind",
"=",
"np",
".",
"ones",
"(",
"len",
"(",
"s",
".",
"Time",
")",
",",
"dtype",
"=",
"bool",
")",
"for",
"v",
"in",
"s",
".",
"data",
"[",
"focus_stage",
"]",
".",
"values",
"(",
")",
":",
"ind",
"=",
"ind",
"&",
"(",
"nominal_values",
"(",
"v",
")",
">",
"0",
")",
"for",
"k",
"in",
"s",
".",
"data",
"[",
"focus_stage",
"]",
".",
"keys",
"(",
")",
":",
"s",
".",
"data",
"[",
"focus_stage",
"]",
"[",
"k",
"]",
"[",
"~",
"ind",
"]",
"=",
"unc",
".",
"ufloat",
"(",
"np",
".",
"nan",
",",
"np",
".",
"nan",
")",
"self",
".",
"set_focus",
"(",
"focus_stage",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.filter_threshold
|
Applies a threshold filter to the data.
Generates two filters above and below the threshold value for a
given analyte.
Parameters
----------
analyte : str
The analyte that the filter applies to.
threshold : float
The threshold value.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
|
latools/latools.py
|
def filter_threshold(self, analyte, threshold,
samples=None, subset=None):
"""
Applies a threshold filter to the data.
Generates two filters above and below the threshold value for a
given analyte.
Parameters
----------
analyte : str
The analyte that the filter applies to.
threshold : float
The threshold value.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
self.minimal_analytes.update([analyte])
with self.pbar.set(total=len(samples), desc='Threshold Filter') as prog:
for s in samples:
self.data[s].filter_threshold(analyte, threshold)
prog.update()
|
def filter_threshold(self, analyte, threshold,
samples=None, subset=None):
"""
Applies a threshold filter to the data.
Generates two filters above and below the threshold value for a
given analyte.
Parameters
----------
analyte : str
The analyte that the filter applies to.
threshold : float
The threshold value.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
self.minimal_analytes.update([analyte])
with self.pbar.set(total=len(samples), desc='Threshold Filter') as prog:
for s in samples:
self.data[s].filter_threshold(analyte, threshold)
prog.update()
|
[
"Applies",
"a",
"threshold",
"filter",
"to",
"the",
"data",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L1776-L1814
|
[
"def",
"filter_threshold",
"(",
"self",
",",
"analyte",
",",
"threshold",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
")",
":",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"self",
".",
"minimal_analytes",
".",
"update",
"(",
"[",
"analyte",
"]",
")",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"samples",
")",
",",
"desc",
"=",
"'Threshold Filter'",
")",
"as",
"prog",
":",
"for",
"s",
"in",
"samples",
":",
"self",
".",
"data",
"[",
"s",
"]",
".",
"filter_threshold",
"(",
"analyte",
",",
"threshold",
")",
"prog",
".",
"update",
"(",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.filter_threshold_percentile
|
Applies a threshold filter to the data.
Generates two filters above and below the threshold value for a
given analyte.
Parameters
----------
analyte : str
The analyte that the filter applies to.
percentiles : float or iterable of len=2
The percentile values.
level : str
Whether to calculate percentiles from the entire dataset
('population') or for each individual sample ('individual')
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
|
latools/latools.py
|
def filter_threshold_percentile(self, analyte, percentiles, level='population', filt=False,
samples=None, subset=None):
"""
Applies a threshold filter to the data.
Generates two filters above and below the threshold value for a
given analyte.
Parameters
----------
analyte : str
The analyte that the filter applies to.
percentiles : float or iterable of len=2
The percentile values.
level : str
Whether to calculate percentiles from the entire dataset
('population') or for each individual sample ('individual')
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
params = locals()
del(params['self'])
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
self.minimal_analytes.update([analyte])
if isinstance(percentiles, (int, float)):
percentiles = [percentiles]
if level == 'population':
# Get all samples
self.get_focus(filt=filt, subset=subset, nominal=True)
dat = self.focus[analyte][~np.isnan(self.focus[analyte])]
# calculate filter limits
lims = np.percentile(dat, percentiles)
# Calculate filter for individual samples
with self.pbar.set(total=len(samples), desc='Percentile theshold filter') as prog:
for s in samples:
d = self.data[s]
setn = d.filt.maxset + 1
g = d.focus[analyte]
if level == 'individual':
gt = nominal_values(g)
lims = np.percentile(gt[~np.isnan(gt)], percentiles)
if len(lims) == 1:
above = g >= lims[0]
below = g < lims[0]
d.filt.add(analyte + '_{:.1f}-pcnt_below'.format(percentiles[0]),
below,
'Values below {:.1f}th {:} percentile ({:.2e})'.format(percentiles[0], analyte, lims[0]),
params, setn=setn)
d.filt.add(analyte + '_{:.1f}-pcnt_above'.format(percentiles[0]),
above,
'Values above {:.1f}th {:} percentile ({:.2e})'.format(percentiles[0], analyte, lims[0]),
params, setn=setn)
elif len(lims) == 2:
inside = (g >= min(lims)) & (g <= max(lims))
outside = (g < min(lims)) | (g > max(lims))
lpc = '-'.join(['{:.1f}'.format(p) for p in percentiles])
d.filt.add(analyte + '_' + lpc + '-pcnt_inside',
inside,
'Values between ' + lpc + ' ' + analyte + 'percentiles',
params, setn=setn)
d.filt.add(analyte + '_' + lpc + '-pcnt_outside',
outside,
'Values outside ' + lpc + ' ' + analyte + 'percentiles',
params, setn=setn)
prog.update()
return
|
def filter_threshold_percentile(self, analyte, percentiles, level='population', filt=False,
samples=None, subset=None):
"""
Applies a threshold filter to the data.
Generates two filters above and below the threshold value for a
given analyte.
Parameters
----------
analyte : str
The analyte that the filter applies to.
percentiles : float or iterable of len=2
The percentile values.
level : str
Whether to calculate percentiles from the entire dataset
('population') or for each individual sample ('individual')
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
params = locals()
del(params['self'])
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
self.minimal_analytes.update([analyte])
if isinstance(percentiles, (int, float)):
percentiles = [percentiles]
if level == 'population':
# Get all samples
self.get_focus(filt=filt, subset=subset, nominal=True)
dat = self.focus[analyte][~np.isnan(self.focus[analyte])]
# calculate filter limits
lims = np.percentile(dat, percentiles)
# Calculate filter for individual samples
with self.pbar.set(total=len(samples), desc='Percentile theshold filter') as prog:
for s in samples:
d = self.data[s]
setn = d.filt.maxset + 1
g = d.focus[analyte]
if level == 'individual':
gt = nominal_values(g)
lims = np.percentile(gt[~np.isnan(gt)], percentiles)
if len(lims) == 1:
above = g >= lims[0]
below = g < lims[0]
d.filt.add(analyte + '_{:.1f}-pcnt_below'.format(percentiles[0]),
below,
'Values below {:.1f}th {:} percentile ({:.2e})'.format(percentiles[0], analyte, lims[0]),
params, setn=setn)
d.filt.add(analyte + '_{:.1f}-pcnt_above'.format(percentiles[0]),
above,
'Values above {:.1f}th {:} percentile ({:.2e})'.format(percentiles[0], analyte, lims[0]),
params, setn=setn)
elif len(lims) == 2:
inside = (g >= min(lims)) & (g <= max(lims))
outside = (g < min(lims)) | (g > max(lims))
lpc = '-'.join(['{:.1f}'.format(p) for p in percentiles])
d.filt.add(analyte + '_' + lpc + '-pcnt_inside',
inside,
'Values between ' + lpc + ' ' + analyte + 'percentiles',
params, setn=setn)
d.filt.add(analyte + '_' + lpc + '-pcnt_outside',
outside,
'Values outside ' + lpc + ' ' + analyte + 'percentiles',
params, setn=setn)
prog.update()
return
|
[
"Applies",
"a",
"threshold",
"filter",
"to",
"the",
"data",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L1817-L1907
|
[
"def",
"filter_threshold_percentile",
"(",
"self",
",",
"analyte",
",",
"percentiles",
",",
"level",
"=",
"'population'",
",",
"filt",
"=",
"False",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
")",
":",
"params",
"=",
"locals",
"(",
")",
"del",
"(",
"params",
"[",
"'self'",
"]",
")",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"self",
".",
"minimal_analytes",
".",
"update",
"(",
"[",
"analyte",
"]",
")",
"if",
"isinstance",
"(",
"percentiles",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"percentiles",
"=",
"[",
"percentiles",
"]",
"if",
"level",
"==",
"'population'",
":",
"# Get all samples",
"self",
".",
"get_focus",
"(",
"filt",
"=",
"filt",
",",
"subset",
"=",
"subset",
",",
"nominal",
"=",
"True",
")",
"dat",
"=",
"self",
".",
"focus",
"[",
"analyte",
"]",
"[",
"~",
"np",
".",
"isnan",
"(",
"self",
".",
"focus",
"[",
"analyte",
"]",
")",
"]",
"# calculate filter limits",
"lims",
"=",
"np",
".",
"percentile",
"(",
"dat",
",",
"percentiles",
")",
"# Calculate filter for individual samples",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"samples",
")",
",",
"desc",
"=",
"'Percentile theshold filter'",
")",
"as",
"prog",
":",
"for",
"s",
"in",
"samples",
":",
"d",
"=",
"self",
".",
"data",
"[",
"s",
"]",
"setn",
"=",
"d",
".",
"filt",
".",
"maxset",
"+",
"1",
"g",
"=",
"d",
".",
"focus",
"[",
"analyte",
"]",
"if",
"level",
"==",
"'individual'",
":",
"gt",
"=",
"nominal_values",
"(",
"g",
")",
"lims",
"=",
"np",
".",
"percentile",
"(",
"gt",
"[",
"~",
"np",
".",
"isnan",
"(",
"gt",
")",
"]",
",",
"percentiles",
")",
"if",
"len",
"(",
"lims",
")",
"==",
"1",
":",
"above",
"=",
"g",
">=",
"lims",
"[",
"0",
"]",
"below",
"=",
"g",
"<",
"lims",
"[",
"0",
"]",
"d",
".",
"filt",
".",
"add",
"(",
"analyte",
"+",
"'_{:.1f}-pcnt_below'",
".",
"format",
"(",
"percentiles",
"[",
"0",
"]",
")",
",",
"below",
",",
"'Values below {:.1f}th {:} percentile ({:.2e})'",
".",
"format",
"(",
"percentiles",
"[",
"0",
"]",
",",
"analyte",
",",
"lims",
"[",
"0",
"]",
")",
",",
"params",
",",
"setn",
"=",
"setn",
")",
"d",
".",
"filt",
".",
"add",
"(",
"analyte",
"+",
"'_{:.1f}-pcnt_above'",
".",
"format",
"(",
"percentiles",
"[",
"0",
"]",
")",
",",
"above",
",",
"'Values above {:.1f}th {:} percentile ({:.2e})'",
".",
"format",
"(",
"percentiles",
"[",
"0",
"]",
",",
"analyte",
",",
"lims",
"[",
"0",
"]",
")",
",",
"params",
",",
"setn",
"=",
"setn",
")",
"elif",
"len",
"(",
"lims",
")",
"==",
"2",
":",
"inside",
"=",
"(",
"g",
">=",
"min",
"(",
"lims",
")",
")",
"&",
"(",
"g",
"<=",
"max",
"(",
"lims",
")",
")",
"outside",
"=",
"(",
"g",
"<",
"min",
"(",
"lims",
")",
")",
"|",
"(",
"g",
">",
"max",
"(",
"lims",
")",
")",
"lpc",
"=",
"'-'",
".",
"join",
"(",
"[",
"'{:.1f}'",
".",
"format",
"(",
"p",
")",
"for",
"p",
"in",
"percentiles",
"]",
")",
"d",
".",
"filt",
".",
"add",
"(",
"analyte",
"+",
"'_'",
"+",
"lpc",
"+",
"'-pcnt_inside'",
",",
"inside",
",",
"'Values between '",
"+",
"lpc",
"+",
"' '",
"+",
"analyte",
"+",
"'percentiles'",
",",
"params",
",",
"setn",
"=",
"setn",
")",
"d",
".",
"filt",
".",
"add",
"(",
"analyte",
"+",
"'_'",
"+",
"lpc",
"+",
"'-pcnt_outside'",
",",
"outside",
",",
"'Values outside '",
"+",
"lpc",
"+",
"' '",
"+",
"analyte",
"+",
"'percentiles'",
",",
"params",
",",
"setn",
"=",
"setn",
")",
"prog",
".",
"update",
"(",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.filter_gradient_threshold_percentile
|
Calculate a gradient threshold filter to the data.
Generates two filters above and below the threshold value for a
given analyte.
Parameters
----------
analyte : str
The analyte that the filter applies to.
win : int
The window over which to calculate the moving gradient
percentiles : float or iterable of len=2
The percentile values.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
|
latools/latools.py
|
def filter_gradient_threshold_percentile(self, analyte, percentiles, level='population', win=15, filt=False,
samples=None, subset=None):
"""
Calculate a gradient threshold filter to the data.
Generates two filters above and below the threshold value for a
given analyte.
Parameters
----------
analyte : str
The analyte that the filter applies to.
win : int
The window over which to calculate the moving gradient
percentiles : float or iterable of len=2
The percentile values.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
params = locals()
del(params['self'])
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
self.minimal_analytes.update([analyte])
# Calculate gradients of all samples
self.get_gradients(analytes=[analyte], win=win, filt=filt, subset=subset)
grad = self.gradients[analyte][~np.isnan(self.gradients[analyte])]
if isinstance(percentiles, (int, float)):
percentiles = [percentiles]
if level == 'population':
# calculate filter limits
lims = np.percentile(grad, percentiles)
# Calculate filter for individual samples
with self.pbar.set(total=len(samples), desc='Percentile Threshold Filter') as prog:
for s in samples:
d = self.data[s]
setn = d.filt.maxset + 1
g = calc_grads(d.Time, d.focus, [analyte], win)[analyte]
if level == 'individual':
gt = nominal_values(g)
lims = np.percentile(gt[~np.isnan(gt)], percentiles)
if len(lims) == 1:
above = g >= lims[0]
below = g < lims[0]
d.filt.add(analyte + '_{:.1f}-grd-pcnt_below'.format(percentiles[0]),
below,
'Gradients below {:.1f}th {:} percentile ({:.2e})'.format(percentiles[0], analyte, lims[0]),
params, setn=setn)
d.filt.add(analyte + '_{:.1f}-grd-pcnt_above'.format(percentiles[0]),
above,
'Gradients above {:.1f}th {:} percentile ({:.2e})'.format(percentiles[0], analyte, lims[0]),
params, setn=setn)
elif len(lims) == 2:
inside = (g >= min(lims)) & (g <= max(lims))
outside = (g < min(lims)) | (g > max(lims))
lpc = '-'.join(['{:.1f}'.format(p) for p in percentiles])
d.filt.add(analyte + '_' + lpc + '-grd-pcnt_inside',
inside,
'Gradients between ' + lpc + ' ' + analyte + 'percentiles',
params, setn=setn)
d.filt.add(analyte + '_' + lpc + '-grd-pcnt_outside',
outside,
'Gradients outside ' + lpc + ' ' + analyte + 'percentiles',
params, setn=setn)
prog.update()
return
|
def filter_gradient_threshold_percentile(self, analyte, percentiles, level='population', win=15, filt=False,
samples=None, subset=None):
"""
Calculate a gradient threshold filter to the data.
Generates two filters above and below the threshold value for a
given analyte.
Parameters
----------
analyte : str
The analyte that the filter applies to.
win : int
The window over which to calculate the moving gradient
percentiles : float or iterable of len=2
The percentile values.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
params = locals()
del(params['self'])
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
self.minimal_analytes.update([analyte])
# Calculate gradients of all samples
self.get_gradients(analytes=[analyte], win=win, filt=filt, subset=subset)
grad = self.gradients[analyte][~np.isnan(self.gradients[analyte])]
if isinstance(percentiles, (int, float)):
percentiles = [percentiles]
if level == 'population':
# calculate filter limits
lims = np.percentile(grad, percentiles)
# Calculate filter for individual samples
with self.pbar.set(total=len(samples), desc='Percentile Threshold Filter') as prog:
for s in samples:
d = self.data[s]
setn = d.filt.maxset + 1
g = calc_grads(d.Time, d.focus, [analyte], win)[analyte]
if level == 'individual':
gt = nominal_values(g)
lims = np.percentile(gt[~np.isnan(gt)], percentiles)
if len(lims) == 1:
above = g >= lims[0]
below = g < lims[0]
d.filt.add(analyte + '_{:.1f}-grd-pcnt_below'.format(percentiles[0]),
below,
'Gradients below {:.1f}th {:} percentile ({:.2e})'.format(percentiles[0], analyte, lims[0]),
params, setn=setn)
d.filt.add(analyte + '_{:.1f}-grd-pcnt_above'.format(percentiles[0]),
above,
'Gradients above {:.1f}th {:} percentile ({:.2e})'.format(percentiles[0], analyte, lims[0]),
params, setn=setn)
elif len(lims) == 2:
inside = (g >= min(lims)) & (g <= max(lims))
outside = (g < min(lims)) | (g > max(lims))
lpc = '-'.join(['{:.1f}'.format(p) for p in percentiles])
d.filt.add(analyte + '_' + lpc + '-grd-pcnt_inside',
inside,
'Gradients between ' + lpc + ' ' + analyte + 'percentiles',
params, setn=setn)
d.filt.add(analyte + '_' + lpc + '-grd-pcnt_outside',
outside,
'Gradients outside ' + lpc + ' ' + analyte + 'percentiles',
params, setn=setn)
prog.update()
return
|
[
"Calculate",
"a",
"gradient",
"threshold",
"filter",
"to",
"the",
"data",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L1954-L2043
|
[
"def",
"filter_gradient_threshold_percentile",
"(",
"self",
",",
"analyte",
",",
"percentiles",
",",
"level",
"=",
"'population'",
",",
"win",
"=",
"15",
",",
"filt",
"=",
"False",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
")",
":",
"params",
"=",
"locals",
"(",
")",
"del",
"(",
"params",
"[",
"'self'",
"]",
")",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"self",
".",
"minimal_analytes",
".",
"update",
"(",
"[",
"analyte",
"]",
")",
"# Calculate gradients of all samples",
"self",
".",
"get_gradients",
"(",
"analytes",
"=",
"[",
"analyte",
"]",
",",
"win",
"=",
"win",
",",
"filt",
"=",
"filt",
",",
"subset",
"=",
"subset",
")",
"grad",
"=",
"self",
".",
"gradients",
"[",
"analyte",
"]",
"[",
"~",
"np",
".",
"isnan",
"(",
"self",
".",
"gradients",
"[",
"analyte",
"]",
")",
"]",
"if",
"isinstance",
"(",
"percentiles",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"percentiles",
"=",
"[",
"percentiles",
"]",
"if",
"level",
"==",
"'population'",
":",
"# calculate filter limits",
"lims",
"=",
"np",
".",
"percentile",
"(",
"grad",
",",
"percentiles",
")",
"# Calculate filter for individual samples",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"samples",
")",
",",
"desc",
"=",
"'Percentile Threshold Filter'",
")",
"as",
"prog",
":",
"for",
"s",
"in",
"samples",
":",
"d",
"=",
"self",
".",
"data",
"[",
"s",
"]",
"setn",
"=",
"d",
".",
"filt",
".",
"maxset",
"+",
"1",
"g",
"=",
"calc_grads",
"(",
"d",
".",
"Time",
",",
"d",
".",
"focus",
",",
"[",
"analyte",
"]",
",",
"win",
")",
"[",
"analyte",
"]",
"if",
"level",
"==",
"'individual'",
":",
"gt",
"=",
"nominal_values",
"(",
"g",
")",
"lims",
"=",
"np",
".",
"percentile",
"(",
"gt",
"[",
"~",
"np",
".",
"isnan",
"(",
"gt",
")",
"]",
",",
"percentiles",
")",
"if",
"len",
"(",
"lims",
")",
"==",
"1",
":",
"above",
"=",
"g",
">=",
"lims",
"[",
"0",
"]",
"below",
"=",
"g",
"<",
"lims",
"[",
"0",
"]",
"d",
".",
"filt",
".",
"add",
"(",
"analyte",
"+",
"'_{:.1f}-grd-pcnt_below'",
".",
"format",
"(",
"percentiles",
"[",
"0",
"]",
")",
",",
"below",
",",
"'Gradients below {:.1f}th {:} percentile ({:.2e})'",
".",
"format",
"(",
"percentiles",
"[",
"0",
"]",
",",
"analyte",
",",
"lims",
"[",
"0",
"]",
")",
",",
"params",
",",
"setn",
"=",
"setn",
")",
"d",
".",
"filt",
".",
"add",
"(",
"analyte",
"+",
"'_{:.1f}-grd-pcnt_above'",
".",
"format",
"(",
"percentiles",
"[",
"0",
"]",
")",
",",
"above",
",",
"'Gradients above {:.1f}th {:} percentile ({:.2e})'",
".",
"format",
"(",
"percentiles",
"[",
"0",
"]",
",",
"analyte",
",",
"lims",
"[",
"0",
"]",
")",
",",
"params",
",",
"setn",
"=",
"setn",
")",
"elif",
"len",
"(",
"lims",
")",
"==",
"2",
":",
"inside",
"=",
"(",
"g",
">=",
"min",
"(",
"lims",
")",
")",
"&",
"(",
"g",
"<=",
"max",
"(",
"lims",
")",
")",
"outside",
"=",
"(",
"g",
"<",
"min",
"(",
"lims",
")",
")",
"|",
"(",
"g",
">",
"max",
"(",
"lims",
")",
")",
"lpc",
"=",
"'-'",
".",
"join",
"(",
"[",
"'{:.1f}'",
".",
"format",
"(",
"p",
")",
"for",
"p",
"in",
"percentiles",
"]",
")",
"d",
".",
"filt",
".",
"add",
"(",
"analyte",
"+",
"'_'",
"+",
"lpc",
"+",
"'-grd-pcnt_inside'",
",",
"inside",
",",
"'Gradients between '",
"+",
"lpc",
"+",
"' '",
"+",
"analyte",
"+",
"'percentiles'",
",",
"params",
",",
"setn",
"=",
"setn",
")",
"d",
".",
"filt",
".",
"add",
"(",
"analyte",
"+",
"'_'",
"+",
"lpc",
"+",
"'-grd-pcnt_outside'",
",",
"outside",
",",
"'Gradients outside '",
"+",
"lpc",
"+",
"' '",
"+",
"analyte",
"+",
"'percentiles'",
",",
"params",
",",
"setn",
"=",
"setn",
")",
"prog",
".",
"update",
"(",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.filter_clustering
|
Applies an n - dimensional clustering filter to the data.
Parameters
----------
analytes : str
The analyte(s) that the filter applies to.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
normalise : bool
Whether or not to normalise the data to zero mean and unit
variance. Reccomended if clustering based on more than 1 analyte.
Uses `sklearn.preprocessing.scale`.
method : str
Which clustering algorithm to use:
* 'meanshift': The `sklearn.cluster.MeanShift` algorithm.
Automatically determines number of clusters
in data based on the `bandwidth` of expected
variation.
* 'kmeans': The `sklearn.cluster.KMeans` algorithm. Determines
the characteristics of a known number of clusters
within the data. Must provide `n_clusters` to specify
the expected number of clusters.
level : str
Whether to conduct the clustering analysis at the 'sample' or
'population' level.
include_time : bool
Whether or not to include the Time variable in the clustering
analysis. Useful if you're looking for spatially continuous
clusters in your data, i.e. this will identify each spot in your
analysis as an individual cluster.
samples : optional, array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
sort : bool
Whether or not you want the cluster labels to
be sorted by the mean magnitude of the signals
they are based on (0 = lowest)
min_data : int
The minimum number of data points that should be considered by
the filter. Default = 10.
**kwargs
Parameters passed to the clustering algorithm specified by
`method`.
Meanshift Parameters
bandwidth : str or float
The bandwith (float) or bandwidth method ('scott' or 'silverman')
used to estimate the data bandwidth.
bin_seeding : bool
Modifies the behaviour of the meanshift algorithm. Refer to
sklearn.cluster.meanshift documentation.
K-Means Parameters
n_clusters : int
The number of clusters expected in the data.
Returns
-------
None
|
latools/latools.py
|
def filter_clustering(self, analytes, filt=False, normalise=True,
method='kmeans', include_time=False, samples=None,
sort=True, subset=None, level='sample', min_data=10, **kwargs):
"""
Applies an n - dimensional clustering filter to the data.
Parameters
----------
analytes : str
The analyte(s) that the filter applies to.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
normalise : bool
Whether or not to normalise the data to zero mean and unit
variance. Reccomended if clustering based on more than 1 analyte.
Uses `sklearn.preprocessing.scale`.
method : str
Which clustering algorithm to use:
* 'meanshift': The `sklearn.cluster.MeanShift` algorithm.
Automatically determines number of clusters
in data based on the `bandwidth` of expected
variation.
* 'kmeans': The `sklearn.cluster.KMeans` algorithm. Determines
the characteristics of a known number of clusters
within the data. Must provide `n_clusters` to specify
the expected number of clusters.
level : str
Whether to conduct the clustering analysis at the 'sample' or
'population' level.
include_time : bool
Whether or not to include the Time variable in the clustering
analysis. Useful if you're looking for spatially continuous
clusters in your data, i.e. this will identify each spot in your
analysis as an individual cluster.
samples : optional, array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
sort : bool
Whether or not you want the cluster labels to
be sorted by the mean magnitude of the signals
they are based on (0 = lowest)
min_data : int
The minimum number of data points that should be considered by
the filter. Default = 10.
**kwargs
Parameters passed to the clustering algorithm specified by
`method`.
Meanshift Parameters
bandwidth : str or float
The bandwith (float) or bandwidth method ('scott' or 'silverman')
used to estimate the data bandwidth.
bin_seeding : bool
Modifies the behaviour of the meanshift algorithm. Refer to
sklearn.cluster.meanshift documentation.
K-Means Parameters
n_clusters : int
The number of clusters expected in the data.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
if isinstance(analytes, str):
analytes = [analytes]
self.minimal_analytes.update(analytes)
if level == 'sample':
with self.pbar.set(total=len(samples), desc='Clustering Filter') as prog:
for s in samples:
self.data[s].filter_clustering(analytes=analytes, filt=filt,
normalise=normalise,
method=method,
include_time=include_time,
min_data=min_data,
sort=sort,
**kwargs)
prog.update()
if level == 'population':
if isinstance(sort, bool):
sort_by = 0
else:
sort_by = sort
name = '_'.join(analytes) + '_{}'.format(method)
self.fit_classifier(name=name, analytes=analytes, method=method,
subset=subset, filt=filt, sort_by=sort_by, **kwargs)
self.apply_classifier(name=name, subset=subset)
|
def filter_clustering(self, analytes, filt=False, normalise=True,
method='kmeans', include_time=False, samples=None,
sort=True, subset=None, level='sample', min_data=10, **kwargs):
"""
Applies an n - dimensional clustering filter to the data.
Parameters
----------
analytes : str
The analyte(s) that the filter applies to.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
normalise : bool
Whether or not to normalise the data to zero mean and unit
variance. Reccomended if clustering based on more than 1 analyte.
Uses `sklearn.preprocessing.scale`.
method : str
Which clustering algorithm to use:
* 'meanshift': The `sklearn.cluster.MeanShift` algorithm.
Automatically determines number of clusters
in data based on the `bandwidth` of expected
variation.
* 'kmeans': The `sklearn.cluster.KMeans` algorithm. Determines
the characteristics of a known number of clusters
within the data. Must provide `n_clusters` to specify
the expected number of clusters.
level : str
Whether to conduct the clustering analysis at the 'sample' or
'population' level.
include_time : bool
Whether or not to include the Time variable in the clustering
analysis. Useful if you're looking for spatially continuous
clusters in your data, i.e. this will identify each spot in your
analysis as an individual cluster.
samples : optional, array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
sort : bool
Whether or not you want the cluster labels to
be sorted by the mean magnitude of the signals
they are based on (0 = lowest)
min_data : int
The minimum number of data points that should be considered by
the filter. Default = 10.
**kwargs
Parameters passed to the clustering algorithm specified by
`method`.
Meanshift Parameters
bandwidth : str or float
The bandwith (float) or bandwidth method ('scott' or 'silverman')
used to estimate the data bandwidth.
bin_seeding : bool
Modifies the behaviour of the meanshift algorithm. Refer to
sklearn.cluster.meanshift documentation.
K-Means Parameters
n_clusters : int
The number of clusters expected in the data.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
if isinstance(analytes, str):
analytes = [analytes]
self.minimal_analytes.update(analytes)
if level == 'sample':
with self.pbar.set(total=len(samples), desc='Clustering Filter') as prog:
for s in samples:
self.data[s].filter_clustering(analytes=analytes, filt=filt,
normalise=normalise,
method=method,
include_time=include_time,
min_data=min_data,
sort=sort,
**kwargs)
prog.update()
if level == 'population':
if isinstance(sort, bool):
sort_by = 0
else:
sort_by = sort
name = '_'.join(analytes) + '_{}'.format(method)
self.fit_classifier(name=name, analytes=analytes, method=method,
subset=subset, filt=filt, sort_by=sort_by, **kwargs)
self.apply_classifier(name=name, subset=subset)
|
[
"Applies",
"an",
"n",
"-",
"dimensional",
"clustering",
"filter",
"to",
"the",
"data",
".",
"Parameters",
"----------",
"analytes",
":",
"str",
"The",
"analyte",
"(",
"s",
")",
"that",
"the",
"filter",
"applies",
"to",
".",
"filt",
":",
"bool",
"Whether",
"or",
"not",
"to",
"apply",
"existing",
"filters",
"to",
"the",
"data",
"before",
"calculating",
"this",
"filter",
".",
"normalise",
":",
"bool",
"Whether",
"or",
"not",
"to",
"normalise",
"the",
"data",
"to",
"zero",
"mean",
"and",
"unit",
"variance",
".",
"Reccomended",
"if",
"clustering",
"based",
"on",
"more",
"than",
"1",
"analyte",
".",
"Uses",
"sklearn",
".",
"preprocessing",
".",
"scale",
".",
"method",
":",
"str",
"Which",
"clustering",
"algorithm",
"to",
"use",
":",
"*",
"meanshift",
":",
"The",
"sklearn",
".",
"cluster",
".",
"MeanShift",
"algorithm",
".",
"Automatically",
"determines",
"number",
"of",
"clusters",
"in",
"data",
"based",
"on",
"the",
"bandwidth",
"of",
"expected",
"variation",
".",
"*",
"kmeans",
":",
"The",
"sklearn",
".",
"cluster",
".",
"KMeans",
"algorithm",
".",
"Determines",
"the",
"characteristics",
"of",
"a",
"known",
"number",
"of",
"clusters",
"within",
"the",
"data",
".",
"Must",
"provide",
"n_clusters",
"to",
"specify",
"the",
"expected",
"number",
"of",
"clusters",
".",
"level",
":",
"str",
"Whether",
"to",
"conduct",
"the",
"clustering",
"analysis",
"at",
"the",
"sample",
"or",
"population",
"level",
".",
"include_time",
":",
"bool",
"Whether",
"or",
"not",
"to",
"include",
"the",
"Time",
"variable",
"in",
"the",
"clustering",
"analysis",
".",
"Useful",
"if",
"you",
"re",
"looking",
"for",
"spatially",
"continuous",
"clusters",
"in",
"your",
"data",
"i",
".",
"e",
".",
"this",
"will",
"identify",
"each",
"spot",
"in",
"your",
"analysis",
"as",
"an",
"individual",
"cluster",
".",
"samples",
":",
"optional",
"array_like",
"or",
"None",
"Which",
"samples",
"to",
"apply",
"this",
"filter",
"to",
".",
"If",
"None",
"applies",
"to",
"all",
"samples",
".",
"sort",
":",
"bool",
"Whether",
"or",
"not",
"you",
"want",
"the",
"cluster",
"labels",
"to",
"be",
"sorted",
"by",
"the",
"mean",
"magnitude",
"of",
"the",
"signals",
"they",
"are",
"based",
"on",
"(",
"0",
"=",
"lowest",
")",
"min_data",
":",
"int",
"The",
"minimum",
"number",
"of",
"data",
"points",
"that",
"should",
"be",
"considered",
"by",
"the",
"filter",
".",
"Default",
"=",
"10",
".",
"**",
"kwargs",
"Parameters",
"passed",
"to",
"the",
"clustering",
"algorithm",
"specified",
"by",
"method",
".",
"Meanshift",
"Parameters",
"bandwidth",
":",
"str",
"or",
"float",
"The",
"bandwith",
"(",
"float",
")",
"or",
"bandwidth",
"method",
"(",
"scott",
"or",
"silverman",
")",
"used",
"to",
"estimate",
"the",
"data",
"bandwidth",
".",
"bin_seeding",
":",
"bool",
"Modifies",
"the",
"behaviour",
"of",
"the",
"meanshift",
"algorithm",
".",
"Refer",
"to",
"sklearn",
".",
"cluster",
".",
"meanshift",
"documentation",
".",
"K",
"-",
"Means",
"Parameters",
"n_clusters",
":",
"int",
"The",
"number",
"of",
"clusters",
"expected",
"in",
"the",
"data",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2046-L2143
|
[
"def",
"filter_clustering",
"(",
"self",
",",
"analytes",
",",
"filt",
"=",
"False",
",",
"normalise",
"=",
"True",
",",
"method",
"=",
"'kmeans'",
",",
"include_time",
"=",
"False",
",",
"samples",
"=",
"None",
",",
"sort",
"=",
"True",
",",
"subset",
"=",
"None",
",",
"level",
"=",
"'sample'",
",",
"min_data",
"=",
"10",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"if",
"isinstance",
"(",
"analytes",
",",
"str",
")",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"self",
".",
"minimal_analytes",
".",
"update",
"(",
"analytes",
")",
"if",
"level",
"==",
"'sample'",
":",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"samples",
")",
",",
"desc",
"=",
"'Clustering Filter'",
")",
"as",
"prog",
":",
"for",
"s",
"in",
"samples",
":",
"self",
".",
"data",
"[",
"s",
"]",
".",
"filter_clustering",
"(",
"analytes",
"=",
"analytes",
",",
"filt",
"=",
"filt",
",",
"normalise",
"=",
"normalise",
",",
"method",
"=",
"method",
",",
"include_time",
"=",
"include_time",
",",
"min_data",
"=",
"min_data",
",",
"sort",
"=",
"sort",
",",
"*",
"*",
"kwargs",
")",
"prog",
".",
"update",
"(",
")",
"if",
"level",
"==",
"'population'",
":",
"if",
"isinstance",
"(",
"sort",
",",
"bool",
")",
":",
"sort_by",
"=",
"0",
"else",
":",
"sort_by",
"=",
"sort",
"name",
"=",
"'_'",
".",
"join",
"(",
"analytes",
")",
"+",
"'_{}'",
".",
"format",
"(",
"method",
")",
"self",
".",
"fit_classifier",
"(",
"name",
"=",
"name",
",",
"analytes",
"=",
"analytes",
",",
"method",
"=",
"method",
",",
"subset",
"=",
"subset",
",",
"filt",
"=",
"filt",
",",
"sort_by",
"=",
"sort_by",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"apply_classifier",
"(",
"name",
"=",
"name",
",",
"subset",
"=",
"subset",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.fit_classifier
|
Create a clustering classifier based on all samples, or a subset.
Parameters
----------
name : str
The name of the classifier.
analytes : str or iterable
Which analytes the clustering algorithm should consider.
method : str
Which clustering algorithm to use. Can be:
'meanshift'
The `sklearn.cluster.MeanShift` algorithm.
Automatically determines number of clusters
in data based on the `bandwidth` of expected
variation.
'kmeans'
The `sklearn.cluster.KMeans` algorithm. Determines
the characteristics of a known number of clusters
within the data. Must provide `n_clusters` to specify
the expected number of clusters.
samples : iterable
list of samples to consider. Overrides 'subset'.
subset : str
The subset of samples used to fit the classifier. Ignored if
'samples' is specified.
sort_by : int
Which analyte the resulting clusters should be sorted
by - defaults to 0, which is the first analyte.
**kwargs :
method-specific keyword parameters - see below.
Meanshift Parameters
bandwidth : str or float
The bandwith (float) or bandwidth method ('scott' or 'silverman')
used to estimate the data bandwidth.
bin_seeding : bool
Modifies the behaviour of the meanshift algorithm. Refer to
sklearn.cluster.meanshift documentation.
K - Means Parameters
n_clusters : int
The number of clusters expected in the data.
Returns
-------
name : str
|
latools/latools.py
|
def fit_classifier(self, name, analytes, method, samples=None,
subset=None, filt=True, sort_by=0, **kwargs):
"""
Create a clustering classifier based on all samples, or a subset.
Parameters
----------
name : str
The name of the classifier.
analytes : str or iterable
Which analytes the clustering algorithm should consider.
method : str
Which clustering algorithm to use. Can be:
'meanshift'
The `sklearn.cluster.MeanShift` algorithm.
Automatically determines number of clusters
in data based on the `bandwidth` of expected
variation.
'kmeans'
The `sklearn.cluster.KMeans` algorithm. Determines
the characteristics of a known number of clusters
within the data. Must provide `n_clusters` to specify
the expected number of clusters.
samples : iterable
list of samples to consider. Overrides 'subset'.
subset : str
The subset of samples used to fit the classifier. Ignored if
'samples' is specified.
sort_by : int
Which analyte the resulting clusters should be sorted
by - defaults to 0, which is the first analyte.
**kwargs :
method-specific keyword parameters - see below.
Meanshift Parameters
bandwidth : str or float
The bandwith (float) or bandwidth method ('scott' or 'silverman')
used to estimate the data bandwidth.
bin_seeding : bool
Modifies the behaviour of the meanshift algorithm. Refer to
sklearn.cluster.meanshift documentation.
K - Means Parameters
n_clusters : int
The number of clusters expected in the data.
Returns
-------
name : str
"""
# isolate data
if samples is not None:
subset = self.make_subset(samples)
self.get_focus(subset=subset, filt=filt)
# create classifer
c = classifier(analytes,
sort_by)
# fit classifier
c.fit(data=self.focus,
method=method,
**kwargs)
self.classifiers[name] = c
return name
|
def fit_classifier(self, name, analytes, method, samples=None,
subset=None, filt=True, sort_by=0, **kwargs):
"""
Create a clustering classifier based on all samples, or a subset.
Parameters
----------
name : str
The name of the classifier.
analytes : str or iterable
Which analytes the clustering algorithm should consider.
method : str
Which clustering algorithm to use. Can be:
'meanshift'
The `sklearn.cluster.MeanShift` algorithm.
Automatically determines number of clusters
in data based on the `bandwidth` of expected
variation.
'kmeans'
The `sklearn.cluster.KMeans` algorithm. Determines
the characteristics of a known number of clusters
within the data. Must provide `n_clusters` to specify
the expected number of clusters.
samples : iterable
list of samples to consider. Overrides 'subset'.
subset : str
The subset of samples used to fit the classifier. Ignored if
'samples' is specified.
sort_by : int
Which analyte the resulting clusters should be sorted
by - defaults to 0, which is the first analyte.
**kwargs :
method-specific keyword parameters - see below.
Meanshift Parameters
bandwidth : str or float
The bandwith (float) or bandwidth method ('scott' or 'silverman')
used to estimate the data bandwidth.
bin_seeding : bool
Modifies the behaviour of the meanshift algorithm. Refer to
sklearn.cluster.meanshift documentation.
K - Means Parameters
n_clusters : int
The number of clusters expected in the data.
Returns
-------
name : str
"""
# isolate data
if samples is not None:
subset = self.make_subset(samples)
self.get_focus(subset=subset, filt=filt)
# create classifer
c = classifier(analytes,
sort_by)
# fit classifier
c.fit(data=self.focus,
method=method,
**kwargs)
self.classifiers[name] = c
return name
|
[
"Create",
"a",
"clustering",
"classifier",
"based",
"on",
"all",
"samples",
"or",
"a",
"subset",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2146-L2211
|
[
"def",
"fit_classifier",
"(",
"self",
",",
"name",
",",
"analytes",
",",
"method",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
",",
"filt",
"=",
"True",
",",
"sort_by",
"=",
"0",
",",
"*",
"*",
"kwargs",
")",
":",
"# isolate data",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"self",
".",
"get_focus",
"(",
"subset",
"=",
"subset",
",",
"filt",
"=",
"filt",
")",
"# create classifer",
"c",
"=",
"classifier",
"(",
"analytes",
",",
"sort_by",
")",
"# fit classifier",
"c",
".",
"fit",
"(",
"data",
"=",
"self",
".",
"focus",
",",
"method",
"=",
"method",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"classifiers",
"[",
"name",
"]",
"=",
"c",
"return",
"name"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.apply_classifier
|
Apply a clustering classifier based on all samples, or a subset.
Parameters
----------
name : str
The name of the classifier to apply.
subset : str
The subset of samples to apply the classifier to.
Returns
-------
name : str
|
latools/latools.py
|
def apply_classifier(self, name, samples=None, subset=None):
"""
Apply a clustering classifier based on all samples, or a subset.
Parameters
----------
name : str
The name of the classifier to apply.
subset : str
The subset of samples to apply the classifier to.
Returns
-------
name : str
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
c = self.classifiers[name]
labs = c.classifier.ulabels_
with self.pbar.set(total=len(samples), desc='Applying ' + name + ' classifier') as prog:
for s in samples:
d = self.data[s]
try:
f = c.predict(d.focus)
except ValueError:
# in case there's no data
f = np.array([-2] * len(d.Time))
for l in labs:
ind = f == l
d.filt.add(name=name + '_{:.0f}'.format(l),
filt=ind,
info=name + ' ' + c.method + ' classifier',
params=(c.analytes, c.method))
prog.update()
return name
|
def apply_classifier(self, name, samples=None, subset=None):
"""
Apply a clustering classifier based on all samples, or a subset.
Parameters
----------
name : str
The name of the classifier to apply.
subset : str
The subset of samples to apply the classifier to.
Returns
-------
name : str
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
c = self.classifiers[name]
labs = c.classifier.ulabels_
with self.pbar.set(total=len(samples), desc='Applying ' + name + ' classifier') as prog:
for s in samples:
d = self.data[s]
try:
f = c.predict(d.focus)
except ValueError:
# in case there's no data
f = np.array([-2] * len(d.Time))
for l in labs:
ind = f == l
d.filt.add(name=name + '_{:.0f}'.format(l),
filt=ind,
info=name + ' ' + c.method + ' classifier',
params=(c.analytes, c.method))
prog.update()
return name
|
[
"Apply",
"a",
"clustering",
"classifier",
"based",
"on",
"all",
"samples",
"or",
"a",
"subset",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2214-L2252
|
[
"def",
"apply_classifier",
"(",
"self",
",",
"name",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
")",
":",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"c",
"=",
"self",
".",
"classifiers",
"[",
"name",
"]",
"labs",
"=",
"c",
".",
"classifier",
".",
"ulabels_",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"samples",
")",
",",
"desc",
"=",
"'Applying '",
"+",
"name",
"+",
"' classifier'",
")",
"as",
"prog",
":",
"for",
"s",
"in",
"samples",
":",
"d",
"=",
"self",
".",
"data",
"[",
"s",
"]",
"try",
":",
"f",
"=",
"c",
".",
"predict",
"(",
"d",
".",
"focus",
")",
"except",
"ValueError",
":",
"# in case there's no data",
"f",
"=",
"np",
".",
"array",
"(",
"[",
"-",
"2",
"]",
"*",
"len",
"(",
"d",
".",
"Time",
")",
")",
"for",
"l",
"in",
"labs",
":",
"ind",
"=",
"f",
"==",
"l",
"d",
".",
"filt",
".",
"add",
"(",
"name",
"=",
"name",
"+",
"'_{:.0f}'",
".",
"format",
"(",
"l",
")",
",",
"filt",
"=",
"ind",
",",
"info",
"=",
"name",
"+",
"' '",
"+",
"c",
".",
"method",
"+",
"' classifier'",
",",
"params",
"=",
"(",
"c",
".",
"analytes",
",",
"c",
".",
"method",
")",
")",
"prog",
".",
"update",
"(",
")",
"return",
"name"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.filter_correlation
|
Applies a correlation filter to the data.
Calculates a rolling correlation between every `window` points of
two analytes, and excludes data where their Pearson's R value is
above `r_threshold` and statistically significant.
Data will be excluded where their absolute R value is greater than
`r_threshold` AND the p - value associated with the correlation is
less than `p_threshold`. i.e. only correlations that are statistically
significant are considered.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
r_threshold : float
The correlation index above which to exclude data.
Note: the absolute pearson R value is considered, so
negative correlations below -`r_threshold` will also
be excluded.
p_threshold : float
The significant level below which data are excluded.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
Returns
-------
None
|
latools/latools.py
|
def filter_correlation(self, x_analyte, y_analyte, window=None,
r_threshold=0.9, p_threshold=0.05, filt=True,
samples=None, subset=None):
"""
Applies a correlation filter to the data.
Calculates a rolling correlation between every `window` points of
two analytes, and excludes data where their Pearson's R value is
above `r_threshold` and statistically significant.
Data will be excluded where their absolute R value is greater than
`r_threshold` AND the p - value associated with the correlation is
less than `p_threshold`. i.e. only correlations that are statistically
significant are considered.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
r_threshold : float
The correlation index above which to exclude data.
Note: the absolute pearson R value is considered, so
negative correlations below -`r_threshold` will also
be excluded.
p_threshold : float
The significant level below which data are excluded.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
self.minimal_analytes.update([x_analyte, y_analyte])
with self.pbar.set(total=len(samples), desc='Correlation Filter') as prog:
for s in samples:
self.data[s].filter_correlation(x_analyte, y_analyte,
window=window,
r_threshold=r_threshold,
p_threshold=p_threshold,
filt=filt)
prog.update()
|
def filter_correlation(self, x_analyte, y_analyte, window=None,
r_threshold=0.9, p_threshold=0.05, filt=True,
samples=None, subset=None):
"""
Applies a correlation filter to the data.
Calculates a rolling correlation between every `window` points of
two analytes, and excludes data where their Pearson's R value is
above `r_threshold` and statistically significant.
Data will be excluded where their absolute R value is greater than
`r_threshold` AND the p - value associated with the correlation is
less than `p_threshold`. i.e. only correlations that are statistically
significant are considered.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
r_threshold : float
The correlation index above which to exclude data.
Note: the absolute pearson R value is considered, so
negative correlations below -`r_threshold` will also
be excluded.
p_threshold : float
The significant level below which data are excluded.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
self.minimal_analytes.update([x_analyte, y_analyte])
with self.pbar.set(total=len(samples), desc='Correlation Filter') as prog:
for s in samples:
self.data[s].filter_correlation(x_analyte, y_analyte,
window=window,
r_threshold=r_threshold,
p_threshold=p_threshold,
filt=filt)
prog.update()
|
[
"Applies",
"a",
"correlation",
"filter",
"to",
"the",
"data",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2255-L2305
|
[
"def",
"filter_correlation",
"(",
"self",
",",
"x_analyte",
",",
"y_analyte",
",",
"window",
"=",
"None",
",",
"r_threshold",
"=",
"0.9",
",",
"p_threshold",
"=",
"0.05",
",",
"filt",
"=",
"True",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
")",
":",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"self",
".",
"minimal_analytes",
".",
"update",
"(",
"[",
"x_analyte",
",",
"y_analyte",
"]",
")",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"samples",
")",
",",
"desc",
"=",
"'Correlation Filter'",
")",
"as",
"prog",
":",
"for",
"s",
"in",
"samples",
":",
"self",
".",
"data",
"[",
"s",
"]",
".",
"filter_correlation",
"(",
"x_analyte",
",",
"y_analyte",
",",
"window",
"=",
"window",
",",
"r_threshold",
"=",
"r_threshold",
",",
"p_threshold",
"=",
"p_threshold",
",",
"filt",
"=",
"filt",
")",
"prog",
".",
"update",
"(",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.correlation_plots
|
Plot the local correlation between two analytes.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
recalc : bool
If True, the correlation is re-calculated, even if it is already present.
Returns
-------
None
|
latools/latools.py
|
def correlation_plots(self, x_analyte, y_analyte, window=15, filt=True, recalc=False, samples=None, subset=None, outdir=None):
"""
Plot the local correlation between two analytes.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
recalc : bool
If True, the correlation is re-calculated, even if it is already present.
Returns
-------
None
"""
if outdir is None:
outdir = self.report_dir + '/correlations/'
if not os.path.isdir(outdir):
os.mkdir(outdir)
if subset is not None:
samples = self._get_samples(subset)
elif samples is None:
samples = self.subsets['All_Analyses']
elif isinstance(samples, str):
samples = [samples]
with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog:
for s in samples:
f, a = self.data[s].correlation_plot(x_analyte=x_analyte, y_analyte=y_analyte,
window=window, filt=filt, recalc=recalc)
f.savefig('{}/{}_{}-{}.pdf'.format(outdir, s, x_analyte, y_analyte))
plt.close(f)
prog.update()
return
|
def correlation_plots(self, x_analyte, y_analyte, window=15, filt=True, recalc=False, samples=None, subset=None, outdir=None):
"""
Plot the local correlation between two analytes.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
recalc : bool
If True, the correlation is re-calculated, even if it is already present.
Returns
-------
None
"""
if outdir is None:
outdir = self.report_dir + '/correlations/'
if not os.path.isdir(outdir):
os.mkdir(outdir)
if subset is not None:
samples = self._get_samples(subset)
elif samples is None:
samples = self.subsets['All_Analyses']
elif isinstance(samples, str):
samples = [samples]
with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog:
for s in samples:
f, a = self.data[s].correlation_plot(x_analyte=x_analyte, y_analyte=y_analyte,
window=window, filt=filt, recalc=recalc)
f.savefig('{}/{}_{}-{}.pdf'.format(outdir, s, x_analyte, y_analyte))
plt.close(f)
prog.update()
return
|
[
"Plot",
"the",
"local",
"correlation",
"between",
"two",
"analytes",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2308-L2347
|
[
"def",
"correlation_plots",
"(",
"self",
",",
"x_analyte",
",",
"y_analyte",
",",
"window",
"=",
"15",
",",
"filt",
"=",
"True",
",",
"recalc",
"=",
"False",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
",",
"outdir",
"=",
"None",
")",
":",
"if",
"outdir",
"is",
"None",
":",
"outdir",
"=",
"self",
".",
"report_dir",
"+",
"'/correlations/'",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"outdir",
")",
":",
"os",
".",
"mkdir",
"(",
"outdir",
")",
"if",
"subset",
"is",
"not",
"None",
":",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"elif",
"samples",
"is",
"None",
":",
"samples",
"=",
"self",
".",
"subsets",
"[",
"'All_Analyses'",
"]",
"elif",
"isinstance",
"(",
"samples",
",",
"str",
")",
":",
"samples",
"=",
"[",
"samples",
"]",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"samples",
")",
",",
"desc",
"=",
"'Drawing Plots'",
")",
"as",
"prog",
":",
"for",
"s",
"in",
"samples",
":",
"f",
",",
"a",
"=",
"self",
".",
"data",
"[",
"s",
"]",
".",
"correlation_plot",
"(",
"x_analyte",
"=",
"x_analyte",
",",
"y_analyte",
"=",
"y_analyte",
",",
"window",
"=",
"window",
",",
"filt",
"=",
"filt",
",",
"recalc",
"=",
"recalc",
")",
"f",
".",
"savefig",
"(",
"'{}/{}_{}-{}.pdf'",
".",
"format",
"(",
"outdir",
",",
"s",
",",
"x_analyte",
",",
"y_analyte",
")",
")",
"plt",
".",
"close",
"(",
"f",
")",
"prog",
".",
"update",
"(",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.filter_on
|
Turns data filters on for particular analytes and samples.
Parameters
----------
filt : optional, str or array_like
Name, partial name or list of names of filters. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name are activated.
Defaults to all filters.
analyte : optional, str or array_like
Name or list of names of analytes. Defaults to all analytes.
samples : optional, array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
Returns
-------
None
|
latools/latools.py
|
def filter_on(self, filt=None, analyte=None, samples=None, subset=None, show_status=False):
"""
Turns data filters on for particular analytes and samples.
Parameters
----------
filt : optional, str or array_like
Name, partial name or list of names of filters. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name are activated.
Defaults to all filters.
analyte : optional, str or array_like
Name or list of names of analytes. Defaults to all analytes.
samples : optional, array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
for s in samples:
try:
self.data[s].filt.on(analyte, filt)
except:
warnings.warn("filt.on failure in sample " + s)
if show_status:
self.filter_status(subset=subset)
return
|
def filter_on(self, filt=None, analyte=None, samples=None, subset=None, show_status=False):
"""
Turns data filters on for particular analytes and samples.
Parameters
----------
filt : optional, str or array_like
Name, partial name or list of names of filters. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name are activated.
Defaults to all filters.
analyte : optional, str or array_like
Name or list of names of analytes. Defaults to all analytes.
samples : optional, array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
for s in samples:
try:
self.data[s].filt.on(analyte, filt)
except:
warnings.warn("filt.on failure in sample " + s)
if show_status:
self.filter_status(subset=subset)
return
|
[
"Turns",
"data",
"filters",
"on",
"for",
"particular",
"analytes",
"and",
"samples",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2352-L2386
|
[
"def",
"filter_on",
"(",
"self",
",",
"filt",
"=",
"None",
",",
"analyte",
"=",
"None",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
",",
"show_status",
"=",
"False",
")",
":",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"for",
"s",
"in",
"samples",
":",
"try",
":",
"self",
".",
"data",
"[",
"s",
"]",
".",
"filt",
".",
"on",
"(",
"analyte",
",",
"filt",
")",
"except",
":",
"warnings",
".",
"warn",
"(",
"\"filt.on failure in sample \"",
"+",
"s",
")",
"if",
"show_status",
":",
"self",
".",
"filter_status",
"(",
"subset",
"=",
"subset",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.filter_off
|
Turns data filters off for particular analytes and samples.
Parameters
----------
filt : optional, str or array_like
Name, partial name or list of names of filters. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name are activated.
Defaults to all filters.
analyte : optional, str or array_like
Name or list of names of analytes. Defaults to all analytes.
samples : optional, array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
Returns
-------
None
|
latools/latools.py
|
def filter_off(self, filt=None, analyte=None, samples=None, subset=None, show_status=False):
"""
Turns data filters off for particular analytes and samples.
Parameters
----------
filt : optional, str or array_like
Name, partial name or list of names of filters. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name are activated.
Defaults to all filters.
analyte : optional, str or array_like
Name or list of names of analytes. Defaults to all analytes.
samples : optional, array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
for s in samples:
try:
self.data[s].filt.off(analyte, filt)
except:
warnings.warn("filt.off failure in sample " + s)
if show_status:
self.filter_status(subset=subset)
return
|
def filter_off(self, filt=None, analyte=None, samples=None, subset=None, show_status=False):
"""
Turns data filters off for particular analytes and samples.
Parameters
----------
filt : optional, str or array_like
Name, partial name or list of names of filters. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name are activated.
Defaults to all filters.
analyte : optional, str or array_like
Name or list of names of analytes. Defaults to all analytes.
samples : optional, array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
for s in samples:
try:
self.data[s].filt.off(analyte, filt)
except:
warnings.warn("filt.off failure in sample " + s)
if show_status:
self.filter_status(subset=subset)
return
|
[
"Turns",
"data",
"filters",
"off",
"for",
"particular",
"analytes",
"and",
"samples",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2389-L2423
|
[
"def",
"filter_off",
"(",
"self",
",",
"filt",
"=",
"None",
",",
"analyte",
"=",
"None",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
",",
"show_status",
"=",
"False",
")",
":",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"for",
"s",
"in",
"samples",
":",
"try",
":",
"self",
".",
"data",
"[",
"s",
"]",
".",
"filt",
".",
"off",
"(",
"analyte",
",",
"filt",
")",
"except",
":",
"warnings",
".",
"warn",
"(",
"\"filt.off failure in sample \"",
"+",
"s",
")",
"if",
"show_status",
":",
"self",
".",
"filter_status",
"(",
"subset",
"=",
"subset",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.filter_status
|
Prints the current status of filters for specified samples.
Parameters
----------
sample : str
Which sample to print.
subset : str
Specify a subset
stds : bool
Whether or not to include standards.
|
latools/latools.py
|
def filter_status(self, sample=None, subset=None, stds=False):
"""
Prints the current status of filters for specified samples.
Parameters
----------
sample : str
Which sample to print.
subset : str
Specify a subset
stds : bool
Whether or not to include standards.
"""
s = ''
if sample is None and subset is None:
if not self._has_subsets:
s += 'Subset: All Samples\n\n'
s += self.data[self.subsets['All_Samples'][0]].filt.__repr__()
else:
for n in sorted(str(sn) for sn in self._subset_names):
if n in self.subsets:
pass
elif int(n) in self.subsets:
n = int(n)
pass
s += 'Subset: ' + str(n) + '\n'
s += 'Samples: ' + ', '.join(self.subsets[n]) + '\n\n'
s += self.data[self.subsets[n][0]].filt.__repr__()
if len(self.subsets['not_in_set']) > 0:
s += '\nNot in Subset:\n'
s += 'Samples: ' + ', '.join(self.subsets['not_in_set']) + '\n\n'
s += self.data[self.subsets['not_in_set'][0]].filt.__repr__()
print(s)
return
elif sample is not None:
s += 'Sample: ' + sample + '\n'
s += self.data[sample].filt.__repr__()
print(s)
return
elif subset is not None:
if isinstance(subset, (str, int, float)):
subset = [subset]
for n in subset:
s += 'Subset: ' + str(n) + '\n'
s += 'Samples: ' + ', '.join(self.subsets[n]) + '\n\n'
s += self.data[self.subsets[n][0]].filt.__repr__()
print(s)
return
|
def filter_status(self, sample=None, subset=None, stds=False):
"""
Prints the current status of filters for specified samples.
Parameters
----------
sample : str
Which sample to print.
subset : str
Specify a subset
stds : bool
Whether or not to include standards.
"""
s = ''
if sample is None and subset is None:
if not self._has_subsets:
s += 'Subset: All Samples\n\n'
s += self.data[self.subsets['All_Samples'][0]].filt.__repr__()
else:
for n in sorted(str(sn) for sn in self._subset_names):
if n in self.subsets:
pass
elif int(n) in self.subsets:
n = int(n)
pass
s += 'Subset: ' + str(n) + '\n'
s += 'Samples: ' + ', '.join(self.subsets[n]) + '\n\n'
s += self.data[self.subsets[n][0]].filt.__repr__()
if len(self.subsets['not_in_set']) > 0:
s += '\nNot in Subset:\n'
s += 'Samples: ' + ', '.join(self.subsets['not_in_set']) + '\n\n'
s += self.data[self.subsets['not_in_set'][0]].filt.__repr__()
print(s)
return
elif sample is not None:
s += 'Sample: ' + sample + '\n'
s += self.data[sample].filt.__repr__()
print(s)
return
elif subset is not None:
if isinstance(subset, (str, int, float)):
subset = [subset]
for n in subset:
s += 'Subset: ' + str(n) + '\n'
s += 'Samples: ' + ', '.join(self.subsets[n]) + '\n\n'
s += self.data[self.subsets[n][0]].filt.__repr__()
print(s)
return
|
[
"Prints",
"the",
"current",
"status",
"of",
"filters",
"for",
"specified",
"samples",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2425-L2474
|
[
"def",
"filter_status",
"(",
"self",
",",
"sample",
"=",
"None",
",",
"subset",
"=",
"None",
",",
"stds",
"=",
"False",
")",
":",
"s",
"=",
"''",
"if",
"sample",
"is",
"None",
"and",
"subset",
"is",
"None",
":",
"if",
"not",
"self",
".",
"_has_subsets",
":",
"s",
"+=",
"'Subset: All Samples\\n\\n'",
"s",
"+=",
"self",
".",
"data",
"[",
"self",
".",
"subsets",
"[",
"'All_Samples'",
"]",
"[",
"0",
"]",
"]",
".",
"filt",
".",
"__repr__",
"(",
")",
"else",
":",
"for",
"n",
"in",
"sorted",
"(",
"str",
"(",
"sn",
")",
"for",
"sn",
"in",
"self",
".",
"_subset_names",
")",
":",
"if",
"n",
"in",
"self",
".",
"subsets",
":",
"pass",
"elif",
"int",
"(",
"n",
")",
"in",
"self",
".",
"subsets",
":",
"n",
"=",
"int",
"(",
"n",
")",
"pass",
"s",
"+=",
"'Subset: '",
"+",
"str",
"(",
"n",
")",
"+",
"'\\n'",
"s",
"+=",
"'Samples: '",
"+",
"', '",
".",
"join",
"(",
"self",
".",
"subsets",
"[",
"n",
"]",
")",
"+",
"'\\n\\n'",
"s",
"+=",
"self",
".",
"data",
"[",
"self",
".",
"subsets",
"[",
"n",
"]",
"[",
"0",
"]",
"]",
".",
"filt",
".",
"__repr__",
"(",
")",
"if",
"len",
"(",
"self",
".",
"subsets",
"[",
"'not_in_set'",
"]",
")",
">",
"0",
":",
"s",
"+=",
"'\\nNot in Subset:\\n'",
"s",
"+=",
"'Samples: '",
"+",
"', '",
".",
"join",
"(",
"self",
".",
"subsets",
"[",
"'not_in_set'",
"]",
")",
"+",
"'\\n\\n'",
"s",
"+=",
"self",
".",
"data",
"[",
"self",
".",
"subsets",
"[",
"'not_in_set'",
"]",
"[",
"0",
"]",
"]",
".",
"filt",
".",
"__repr__",
"(",
")",
"print",
"(",
"s",
")",
"return",
"elif",
"sample",
"is",
"not",
"None",
":",
"s",
"+=",
"'Sample: '",
"+",
"sample",
"+",
"'\\n'",
"s",
"+=",
"self",
".",
"data",
"[",
"sample",
"]",
".",
"filt",
".",
"__repr__",
"(",
")",
"print",
"(",
"s",
")",
"return",
"elif",
"subset",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"subset",
",",
"(",
"str",
",",
"int",
",",
"float",
")",
")",
":",
"subset",
"=",
"[",
"subset",
"]",
"for",
"n",
"in",
"subset",
":",
"s",
"+=",
"'Subset: '",
"+",
"str",
"(",
"n",
")",
"+",
"'\\n'",
"s",
"+=",
"'Samples: '",
"+",
"', '",
".",
"join",
"(",
"self",
".",
"subsets",
"[",
"n",
"]",
")",
"+",
"'\\n\\n'",
"s",
"+=",
"self",
".",
"data",
"[",
"self",
".",
"subsets",
"[",
"n",
"]",
"[",
"0",
"]",
"]",
".",
"filt",
".",
"__repr__",
"(",
")",
"print",
"(",
"s",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.filter_clear
|
Clears (deletes) all data filters.
|
latools/latools.py
|
def filter_clear(self, samples=None, subset=None):
"""
Clears (deletes) all data filters.
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
for s in samples:
self.data[s].filt.clear()
|
def filter_clear(self, samples=None, subset=None):
"""
Clears (deletes) all data filters.
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
for s in samples:
self.data[s].filt.clear()
|
[
"Clears",
"(",
"deletes",
")",
"all",
"data",
"filters",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2477-L2487
|
[
"def",
"filter_clear",
"(",
"self",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
")",
":",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"for",
"s",
"in",
"samples",
":",
"self",
".",
"data",
"[",
"s",
"]",
".",
"filt",
".",
"clear",
"(",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.filter_defragment
|
Remove 'fragments' from the calculated filter
Parameters
----------
threshold : int
Contiguous data regions that contain this number
or fewer points are considered 'fragments'
mode : str
Specifies wither to 'include' or 'exclude' the identified
fragments.
filt : bool or filt string
Which filter to apply the defragmenter to. Defaults to True
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
|
latools/latools.py
|
def filter_defragment(self, threshold, mode='include', filt=True, samples=None, subset=None):
"""
Remove 'fragments' from the calculated filter
Parameters
----------
threshold : int
Contiguous data regions that contain this number
or fewer points are considered 'fragments'
mode : str
Specifies wither to 'include' or 'exclude' the identified
fragments.
filt : bool or filt string
Which filter to apply the defragmenter to. Defaults to True
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
for s in samples:
f = self.data[s].filt.grab_filt(filt)
self.data[s].filt.add(name='defrag_{:s}_{:.0f}'.format(mode, threshold),
filt=filters.defrag(f, threshold, mode),
info='Defrag {:s} filter with threshold {:.0f}'.format(mode, threshold),
params=(threshold, mode, filt, samples, subset))
|
def filter_defragment(self, threshold, mode='include', filt=True, samples=None, subset=None):
"""
Remove 'fragments' from the calculated filter
Parameters
----------
threshold : int
Contiguous data regions that contain this number
or fewer points are considered 'fragments'
mode : str
Specifies wither to 'include' or 'exclude' the identified
fragments.
filt : bool or filt string
Which filter to apply the defragmenter to. Defaults to True
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
for s in samples:
f = self.data[s].filt.grab_filt(filt)
self.data[s].filt.add(name='defrag_{:s}_{:.0f}'.format(mode, threshold),
filt=filters.defrag(f, threshold, mode),
info='Defrag {:s} filter with threshold {:.0f}'.format(mode, threshold),
params=(threshold, mode, filt, samples, subset))
|
[
"Remove",
"fragments",
"from",
"the",
"calculated",
"filter"
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2490-L2525
|
[
"def",
"filter_defragment",
"(",
"self",
",",
"threshold",
",",
"mode",
"=",
"'include'",
",",
"filt",
"=",
"True",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
")",
":",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"for",
"s",
"in",
"samples",
":",
"f",
"=",
"self",
".",
"data",
"[",
"s",
"]",
".",
"filt",
".",
"grab_filt",
"(",
"filt",
")",
"self",
".",
"data",
"[",
"s",
"]",
".",
"filt",
".",
"add",
"(",
"name",
"=",
"'defrag_{:s}_{:.0f}'",
".",
"format",
"(",
"mode",
",",
"threshold",
")",
",",
"filt",
"=",
"filters",
".",
"defrag",
"(",
"f",
",",
"threshold",
",",
"mode",
")",
",",
"info",
"=",
"'Defrag {:s} filter with threshold {:.0f}'",
".",
"format",
"(",
"mode",
",",
"threshold",
")",
",",
"params",
"=",
"(",
"threshold",
",",
"mode",
",",
"filt",
",",
"samples",
",",
"subset",
")",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.filter_exclude_downhole
|
Exclude all points down-hole (after) the first excluded data.
Parameters
----------
threhold : int
The minimum number of contiguous excluded data points
that must exist before downhole exclusion occurs.
file : valid filter string or bool
Which filter to consider. If True, applies to currently active
filters.
|
latools/latools.py
|
def filter_exclude_downhole(self, threshold, filt=True, samples=None, subset=None):
"""
Exclude all points down-hole (after) the first excluded data.
Parameters
----------
threhold : int
The minimum number of contiguous excluded data points
that must exist before downhole exclusion occurs.
file : valid filter string or bool
Which filter to consider. If True, applies to currently active
filters.
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
for s in samples:
self.data[s].filter_exclude_downhole(threshold, filt)
|
def filter_exclude_downhole(self, threshold, filt=True, samples=None, subset=None):
"""
Exclude all points down-hole (after) the first excluded data.
Parameters
----------
threhold : int
The minimum number of contiguous excluded data points
that must exist before downhole exclusion occurs.
file : valid filter string or bool
Which filter to consider. If True, applies to currently active
filters.
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
for s in samples:
self.data[s].filter_exclude_downhole(threshold, filt)
|
[
"Exclude",
"all",
"points",
"down",
"-",
"hole",
"(",
"after",
")",
"the",
"first",
"excluded",
"data",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2528-L2547
|
[
"def",
"filter_exclude_downhole",
"(",
"self",
",",
"threshold",
",",
"filt",
"=",
"True",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
")",
":",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"for",
"s",
"in",
"samples",
":",
"self",
".",
"data",
"[",
"s",
"]",
".",
"filter_exclude_downhole",
"(",
"threshold",
",",
"filt",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.filter_trim
|
Remove points from the start and end of filter regions.
Parameters
----------
start, end : int
The number of points to remove from the start and end of
the specified filter.
filt : valid filter string or bool
Which filter to trim. If True, applies to currently active
filters.
|
latools/latools.py
|
def filter_trim(self, start=1, end=1, filt=True, samples=None, subset=None):
"""
Remove points from the start and end of filter regions.
Parameters
----------
start, end : int
The number of points to remove from the start and end of
the specified filter.
filt : valid filter string or bool
Which filter to trim. If True, applies to currently active
filters.
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
for s in samples:
self.data[s].filter_trim(start, end, filt)
|
def filter_trim(self, start=1, end=1, filt=True, samples=None, subset=None):
"""
Remove points from the start and end of filter regions.
Parameters
----------
start, end : int
The number of points to remove from the start and end of
the specified filter.
filt : valid filter string or bool
Which filter to trim. If True, applies to currently active
filters.
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
for s in samples:
self.data[s].filter_trim(start, end, filt)
|
[
"Remove",
"points",
"from",
"the",
"start",
"and",
"end",
"of",
"filter",
"regions",
".",
"Parameters",
"----------",
"start",
"end",
":",
"int",
"The",
"number",
"of",
"points",
"to",
"remove",
"from",
"the",
"start",
"and",
"end",
"of",
"the",
"specified",
"filter",
".",
"filt",
":",
"valid",
"filter",
"string",
"or",
"bool",
"Which",
"filter",
"to",
"trim",
".",
"If",
"True",
"applies",
"to",
"currently",
"active",
"filters",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2550-L2569
|
[
"def",
"filter_trim",
"(",
"self",
",",
"start",
"=",
"1",
",",
"end",
"=",
"1",
",",
"filt",
"=",
"True",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
")",
":",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"for",
"s",
"in",
"samples",
":",
"self",
".",
"data",
"[",
"s",
"]",
".",
"filter_trim",
"(",
"start",
",",
"end",
",",
"filt",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.filter_nremoved
|
Report how many data are removed by the active filters.
|
latools/latools.py
|
def filter_nremoved(self, filt=True, quiet=False):
"""
Report how many data are removed by the active filters.
"""
rminfo = {}
for n in self.subsets['All_Samples']:
s = self.data[n]
rminfo[n] = s.filt_nremoved(filt)
if not quiet:
maxL = max([len(s) for s in rminfo.keys()])
print('{string:{number}s}'.format(string='Sample ', number=maxL + 3) +
'{total:4s}'.format(total='tot') +
'{removed:4s}'.format(removed='flt') +
'{percent:4s}'.format(percent='%rm'))
for k, (ntot, nfilt, pcrm) in rminfo.items():
print('{string:{number}s}'.format(string=k, number=maxL + 3) +
'{total:4.0f}'.format(total=ntot) +
'{removed:4.0f}'.format(removed=nfilt) +
'{percent:4.0f}'.format(percent=pcrm))
return rminfo
|
def filter_nremoved(self, filt=True, quiet=False):
"""
Report how many data are removed by the active filters.
"""
rminfo = {}
for n in self.subsets['All_Samples']:
s = self.data[n]
rminfo[n] = s.filt_nremoved(filt)
if not quiet:
maxL = max([len(s) for s in rminfo.keys()])
print('{string:{number}s}'.format(string='Sample ', number=maxL + 3) +
'{total:4s}'.format(total='tot') +
'{removed:4s}'.format(removed='flt') +
'{percent:4s}'.format(percent='%rm'))
for k, (ntot, nfilt, pcrm) in rminfo.items():
print('{string:{number}s}'.format(string=k, number=maxL + 3) +
'{total:4.0f}'.format(total=ntot) +
'{removed:4.0f}'.format(removed=nfilt) +
'{percent:4.0f}'.format(percent=pcrm))
return rminfo
|
[
"Report",
"how",
"many",
"data",
"are",
"removed",
"by",
"the",
"active",
"filters",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2571-L2591
|
[
"def",
"filter_nremoved",
"(",
"self",
",",
"filt",
"=",
"True",
",",
"quiet",
"=",
"False",
")",
":",
"rminfo",
"=",
"{",
"}",
"for",
"n",
"in",
"self",
".",
"subsets",
"[",
"'All_Samples'",
"]",
":",
"s",
"=",
"self",
".",
"data",
"[",
"n",
"]",
"rminfo",
"[",
"n",
"]",
"=",
"s",
".",
"filt_nremoved",
"(",
"filt",
")",
"if",
"not",
"quiet",
":",
"maxL",
"=",
"max",
"(",
"[",
"len",
"(",
"s",
")",
"for",
"s",
"in",
"rminfo",
".",
"keys",
"(",
")",
"]",
")",
"print",
"(",
"'{string:{number}s}'",
".",
"format",
"(",
"string",
"=",
"'Sample '",
",",
"number",
"=",
"maxL",
"+",
"3",
")",
"+",
"'{total:4s}'",
".",
"format",
"(",
"total",
"=",
"'tot'",
")",
"+",
"'{removed:4s}'",
".",
"format",
"(",
"removed",
"=",
"'flt'",
")",
"+",
"'{percent:4s}'",
".",
"format",
"(",
"percent",
"=",
"'%rm'",
")",
")",
"for",
"k",
",",
"(",
"ntot",
",",
"nfilt",
",",
"pcrm",
")",
"in",
"rminfo",
".",
"items",
"(",
")",
":",
"print",
"(",
"'{string:{number}s}'",
".",
"format",
"(",
"string",
"=",
"k",
",",
"number",
"=",
"maxL",
"+",
"3",
")",
"+",
"'{total:4.0f}'",
".",
"format",
"(",
"total",
"=",
"ntot",
")",
"+",
"'{removed:4.0f}'",
".",
"format",
"(",
"removed",
"=",
"nfilt",
")",
"+",
"'{percent:4.0f}'",
".",
"format",
"(",
"percent",
"=",
"pcrm",
")",
")",
"return",
"rminfo"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.optimise_signal
|
Optimise data selection based on specified analytes.
Identifies the longest possible contiguous data region in
the signal where the relative standard deviation (std) and
concentration of all analytes is minimised.
Optimisation is performed via a grid search of all possible
contiguous data regions. For each region, the mean std and
mean scaled analyte concentration ('amplitude') are calculated.
The size and position of the optimal data region are identified
using threshold std and amplitude values. Thresholds are derived
from all calculated stds and amplitudes using the method specified
by `threshold_mode`. For example, using the 'kde_max' method, a
probability density function (PDF) is calculated for std and
amplitude values, and the threshold is set as the maximum of the
PDF. These thresholds are then used to identify the size and position
of the longest contiguous region where the std is below the threshold,
and the amplitude is either below the threshold.
All possible regions of the data that have at least
`min_points` are considered.
For a graphical demonstration of the action of signal_optimiser,
use `optimisation_plot`.
Parameters
----------
d : latools.D object
An latools data object.
analytes : str or array-like
Which analytes to consider.
min_points : int
The minimum number of contiguous points to
consider.
threshold_mode : str
The method used to calculate the optimisation
thresholds. Can be 'mean', 'median', 'kde_max'
or 'bayes_mvs', or a custom function. If a
function, must take a 1D array, and return a
single, real number.
weights : array-like of length len(analytes)
An array of numbers specifying the importance of
each analyte considered. Larger number makes the
analyte have a greater effect on the optimisation.
Default is None.
|
latools/latools.py
|
def optimise_signal(self, analytes, min_points=5,
threshold_mode='kde_first_max',
threshold_mult=1., x_bias=0, filt=True,
weights=None, mode='minimise',
samples=None, subset=None):
"""
Optimise data selection based on specified analytes.
Identifies the longest possible contiguous data region in
the signal where the relative standard deviation (std) and
concentration of all analytes is minimised.
Optimisation is performed via a grid search of all possible
contiguous data regions. For each region, the mean std and
mean scaled analyte concentration ('amplitude') are calculated.
The size and position of the optimal data region are identified
using threshold std and amplitude values. Thresholds are derived
from all calculated stds and amplitudes using the method specified
by `threshold_mode`. For example, using the 'kde_max' method, a
probability density function (PDF) is calculated for std and
amplitude values, and the threshold is set as the maximum of the
PDF. These thresholds are then used to identify the size and position
of the longest contiguous region where the std is below the threshold,
and the amplitude is either below the threshold.
All possible regions of the data that have at least
`min_points` are considered.
For a graphical demonstration of the action of signal_optimiser,
use `optimisation_plot`.
Parameters
----------
d : latools.D object
An latools data object.
analytes : str or array-like
Which analytes to consider.
min_points : int
The minimum number of contiguous points to
consider.
threshold_mode : str
The method used to calculate the optimisation
thresholds. Can be 'mean', 'median', 'kde_max'
or 'bayes_mvs', or a custom function. If a
function, must take a 1D array, and return a
single, real number.
weights : array-like of length len(analytes)
An array of numbers specifying the importance of
each analyte considered. Larger number makes the
analyte have a greater effect on the optimisation.
Default is None.
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
if isinstance(analytes, str):
analytes = [analytes]
self.minimal_analytes.update(analytes)
errs = []
with self.pbar.set(total=len(samples), desc='Optimising Data selection') as prog:
for s in samples:
e = self.data[s].signal_optimiser(analytes=analytes, min_points=min_points,
threshold_mode=threshold_mode, threshold_mult=threshold_mult,
x_bias=x_bias, weights=weights, filt=filt, mode=mode)
if e != '':
errs.append(e)
prog.update()
if len(errs) > 0:
print('\nA Few Problems:\n' + '\n'.join(errs) + '\n\n *** Check Optimisation Plots ***')
|
def optimise_signal(self, analytes, min_points=5,
threshold_mode='kde_first_max',
threshold_mult=1., x_bias=0, filt=True,
weights=None, mode='minimise',
samples=None, subset=None):
"""
Optimise data selection based on specified analytes.
Identifies the longest possible contiguous data region in
the signal where the relative standard deviation (std) and
concentration of all analytes is minimised.
Optimisation is performed via a grid search of all possible
contiguous data regions. For each region, the mean std and
mean scaled analyte concentration ('amplitude') are calculated.
The size and position of the optimal data region are identified
using threshold std and amplitude values. Thresholds are derived
from all calculated stds and amplitudes using the method specified
by `threshold_mode`. For example, using the 'kde_max' method, a
probability density function (PDF) is calculated for std and
amplitude values, and the threshold is set as the maximum of the
PDF. These thresholds are then used to identify the size and position
of the longest contiguous region where the std is below the threshold,
and the amplitude is either below the threshold.
All possible regions of the data that have at least
`min_points` are considered.
For a graphical demonstration of the action of signal_optimiser,
use `optimisation_plot`.
Parameters
----------
d : latools.D object
An latools data object.
analytes : str or array-like
Which analytes to consider.
min_points : int
The minimum number of contiguous points to
consider.
threshold_mode : str
The method used to calculate the optimisation
thresholds. Can be 'mean', 'median', 'kde_max'
or 'bayes_mvs', or a custom function. If a
function, must take a 1D array, and return a
single, real number.
weights : array-like of length len(analytes)
An array of numbers specifying the importance of
each analyte considered. Larger number makes the
analyte have a greater effect on the optimisation.
Default is None.
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
if isinstance(analytes, str):
analytes = [analytes]
self.minimal_analytes.update(analytes)
errs = []
with self.pbar.set(total=len(samples), desc='Optimising Data selection') as prog:
for s in samples:
e = self.data[s].signal_optimiser(analytes=analytes, min_points=min_points,
threshold_mode=threshold_mode, threshold_mult=threshold_mult,
x_bias=x_bias, weights=weights, filt=filt, mode=mode)
if e != '':
errs.append(e)
prog.update()
if len(errs) > 0:
print('\nA Few Problems:\n' + '\n'.join(errs) + '\n\n *** Check Optimisation Plots ***')
|
[
"Optimise",
"data",
"selection",
"based",
"on",
"specified",
"analytes",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2594-L2668
|
[
"def",
"optimise_signal",
"(",
"self",
",",
"analytes",
",",
"min_points",
"=",
"5",
",",
"threshold_mode",
"=",
"'kde_first_max'",
",",
"threshold_mult",
"=",
"1.",
",",
"x_bias",
"=",
"0",
",",
"filt",
"=",
"True",
",",
"weights",
"=",
"None",
",",
"mode",
"=",
"'minimise'",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
")",
":",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"if",
"isinstance",
"(",
"analytes",
",",
"str",
")",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"self",
".",
"minimal_analytes",
".",
"update",
"(",
"analytes",
")",
"errs",
"=",
"[",
"]",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"samples",
")",
",",
"desc",
"=",
"'Optimising Data selection'",
")",
"as",
"prog",
":",
"for",
"s",
"in",
"samples",
":",
"e",
"=",
"self",
".",
"data",
"[",
"s",
"]",
".",
"signal_optimiser",
"(",
"analytes",
"=",
"analytes",
",",
"min_points",
"=",
"min_points",
",",
"threshold_mode",
"=",
"threshold_mode",
",",
"threshold_mult",
"=",
"threshold_mult",
",",
"x_bias",
"=",
"x_bias",
",",
"weights",
"=",
"weights",
",",
"filt",
"=",
"filt",
",",
"mode",
"=",
"mode",
")",
"if",
"e",
"!=",
"''",
":",
"errs",
".",
"append",
"(",
"e",
")",
"prog",
".",
"update",
"(",
")",
"if",
"len",
"(",
"errs",
")",
">",
"0",
":",
"print",
"(",
"'\\nA Few Problems:\\n'",
"+",
"'\\n'",
".",
"join",
"(",
"errs",
")",
"+",
"'\\n\\n *** Check Optimisation Plots ***'",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.optimisation_plots
|
Plot the result of signal_optimise.
`signal_optimiser` must be run first, and the output
stored in the `opt` attribute of the latools.D object.
Parameters
----------
d : latools.D object
A latools data object.
overlay_alpha : float
The opacity of the threshold overlays. Between 0 and 1.
**kwargs
Passed to `tplot`
|
latools/latools.py
|
def optimisation_plots(self, overlay_alpha=0.5, samples=None, subset=None, **kwargs):
"""
Plot the result of signal_optimise.
`signal_optimiser` must be run first, and the output
stored in the `opt` attribute of the latools.D object.
Parameters
----------
d : latools.D object
A latools data object.
overlay_alpha : float
The opacity of the threshold overlays. Between 0 and 1.
**kwargs
Passed to `tplot`
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
outdir=self.report_dir + '/optimisation_plots/'
if not os.path.isdir(outdir):
os.mkdir(outdir)
with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog:
for s in samples:
figs = self.data[s].optimisation_plot(overlay_alpha, **kwargs)
n = 1
for f, _ in figs:
if f is not None:
f.savefig(outdir + '/' + s + '_optim_{:.0f}.pdf'.format(n))
plt.close(f)
n += 1
prog.update()
return
|
def optimisation_plots(self, overlay_alpha=0.5, samples=None, subset=None, **kwargs):
"""
Plot the result of signal_optimise.
`signal_optimiser` must be run first, and the output
stored in the `opt` attribute of the latools.D object.
Parameters
----------
d : latools.D object
A latools data object.
overlay_alpha : float
The opacity of the threshold overlays. Between 0 and 1.
**kwargs
Passed to `tplot`
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
outdir=self.report_dir + '/optimisation_plots/'
if not os.path.isdir(outdir):
os.mkdir(outdir)
with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog:
for s in samples:
figs = self.data[s].optimisation_plot(overlay_alpha, **kwargs)
n = 1
for f, _ in figs:
if f is not None:
f.savefig(outdir + '/' + s + '_optim_{:.0f}.pdf'.format(n))
plt.close(f)
n += 1
prog.update()
return
|
[
"Plot",
"the",
"result",
"of",
"signal_optimise",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2671-L2706
|
[
"def",
"optimisation_plots",
"(",
"self",
",",
"overlay_alpha",
"=",
"0.5",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"outdir",
"=",
"self",
".",
"report_dir",
"+",
"'/optimisation_plots/'",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"outdir",
")",
":",
"os",
".",
"mkdir",
"(",
"outdir",
")",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"samples",
")",
",",
"desc",
"=",
"'Drawing Plots'",
")",
"as",
"prog",
":",
"for",
"s",
"in",
"samples",
":",
"figs",
"=",
"self",
".",
"data",
"[",
"s",
"]",
".",
"optimisation_plot",
"(",
"overlay_alpha",
",",
"*",
"*",
"kwargs",
")",
"n",
"=",
"1",
"for",
"f",
",",
"_",
"in",
"figs",
":",
"if",
"f",
"is",
"not",
"None",
":",
"f",
".",
"savefig",
"(",
"outdir",
"+",
"'/'",
"+",
"s",
"+",
"'_optim_{:.0f}.pdf'",
".",
"format",
"(",
"n",
")",
")",
"plt",
".",
"close",
"(",
"f",
")",
"n",
"+=",
"1",
"prog",
".",
"update",
"(",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.set_focus
|
Set the 'focus' attribute of the data file.
The 'focus' attribute of the object points towards data from a
particular stage of analysis. It is used to identify the 'working
stage' of the data. Processing functions operate on the 'focus'
stage, so if steps are done out of sequence, things will break.
Names of analysis stages:
* 'rawdata': raw data, loaded from csv file when object
is initialised.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data,
padded with np.nan. Created by self.separate, after
signal and background regions have been identified by
self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by
self.calibrate.
Parameters
----------
focus : str
The name of the analysis stage desired.
Returns
-------
None
|
latools/latools.py
|
def set_focus(self, focus_stage=None, samples=None, subset=None):
"""
Set the 'focus' attribute of the data file.
The 'focus' attribute of the object points towards data from a
particular stage of analysis. It is used to identify the 'working
stage' of the data. Processing functions operate on the 'focus'
stage, so if steps are done out of sequence, things will break.
Names of analysis stages:
* 'rawdata': raw data, loaded from csv file when object
is initialised.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data,
padded with np.nan. Created by self.separate, after
signal and background regions have been identified by
self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by
self.calibrate.
Parameters
----------
focus : str
The name of the analysis stage desired.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
if subset is None:
subset = 'All_Analyses'
samples = self._get_samples(subset)
if focus_stage is None:
focus_stage = self.focus_stage
else:
self.focus_stage = focus_stage
for s in samples:
self.data[s].setfocus(focus_stage)
|
def set_focus(self, focus_stage=None, samples=None, subset=None):
"""
Set the 'focus' attribute of the data file.
The 'focus' attribute of the object points towards data from a
particular stage of analysis. It is used to identify the 'working
stage' of the data. Processing functions operate on the 'focus'
stage, so if steps are done out of sequence, things will break.
Names of analysis stages:
* 'rawdata': raw data, loaded from csv file when object
is initialised.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data,
padded with np.nan. Created by self.separate, after
signal and background regions have been identified by
self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by
self.calibrate.
Parameters
----------
focus : str
The name of the analysis stage desired.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
if subset is None:
subset = 'All_Analyses'
samples = self._get_samples(subset)
if focus_stage is None:
focus_stage = self.focus_stage
else:
self.focus_stage = focus_stage
for s in samples:
self.data[s].setfocus(focus_stage)
|
[
"Set",
"the",
"focus",
"attribute",
"of",
"the",
"data",
"file",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2716-L2763
|
[
"def",
"set_focus",
"(",
"self",
",",
"focus_stage",
"=",
"None",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
")",
":",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"if",
"subset",
"is",
"None",
":",
"subset",
"=",
"'All_Analyses'",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"if",
"focus_stage",
"is",
"None",
":",
"focus_stage",
"=",
"self",
".",
"focus_stage",
"else",
":",
"self",
".",
"focus_stage",
"=",
"focus_stage",
"for",
"s",
"in",
"samples",
":",
"self",
".",
"data",
"[",
"s",
"]",
".",
"setfocus",
"(",
"focus_stage",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.get_focus
|
Collect all data from all samples into a single array.
Data from standards is not collected.
Parameters
----------
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
samples : str or list
which samples to get
subset : str or int
which subset to get
Returns
-------
None
|
latools/latools.py
|
def get_focus(self, filt=False, samples=None, subset=None, nominal=False):
"""
Collect all data from all samples into a single array.
Data from standards is not collected.
Parameters
----------
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
samples : str or list
which samples to get
subset : str or int
which subset to get
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
# t = 0
focus = {'uTime': []}
focus.update({a: [] for a in self.analytes})
for sa in samples:
s = self.data[sa]
focus['uTime'].append(s.uTime)
ind = s.filt.grab_filt(filt)
for a in self.analytes:
tmp = s.focus[a].copy()
tmp[~ind] = np.nan
focus[a].append(tmp)
if nominal:
self.focus.update({k: nominal_values(np.concatenate(v)) for k, v, in focus.items()})
else:
self.focus.update({k: np.concatenate(v) for k, v, in focus.items()})
return
|
def get_focus(self, filt=False, samples=None, subset=None, nominal=False):
"""
Collect all data from all samples into a single array.
Data from standards is not collected.
Parameters
----------
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
samples : str or list
which samples to get
subset : str or int
which subset to get
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
# t = 0
focus = {'uTime': []}
focus.update({a: [] for a in self.analytes})
for sa in samples:
s = self.data[sa]
focus['uTime'].append(s.uTime)
ind = s.filt.grab_filt(filt)
for a in self.analytes:
tmp = s.focus[a].copy()
tmp[~ind] = np.nan
focus[a].append(tmp)
if nominal:
self.focus.update({k: nominal_values(np.concatenate(v)) for k, v, in focus.items()})
else:
self.focus.update({k: np.concatenate(v) for k, v, in focus.items()})
return
|
[
"Collect",
"all",
"data",
"from",
"all",
"samples",
"into",
"a",
"single",
"array",
".",
"Data",
"from",
"standards",
"is",
"not",
"collected",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2766-L2810
|
[
"def",
"get_focus",
"(",
"self",
",",
"filt",
"=",
"False",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
",",
"nominal",
"=",
"False",
")",
":",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"# t = 0",
"focus",
"=",
"{",
"'uTime'",
":",
"[",
"]",
"}",
"focus",
".",
"update",
"(",
"{",
"a",
":",
"[",
"]",
"for",
"a",
"in",
"self",
".",
"analytes",
"}",
")",
"for",
"sa",
"in",
"samples",
":",
"s",
"=",
"self",
".",
"data",
"[",
"sa",
"]",
"focus",
"[",
"'uTime'",
"]",
".",
"append",
"(",
"s",
".",
"uTime",
")",
"ind",
"=",
"s",
".",
"filt",
".",
"grab_filt",
"(",
"filt",
")",
"for",
"a",
"in",
"self",
".",
"analytes",
":",
"tmp",
"=",
"s",
".",
"focus",
"[",
"a",
"]",
".",
"copy",
"(",
")",
"tmp",
"[",
"~",
"ind",
"]",
"=",
"np",
".",
"nan",
"focus",
"[",
"a",
"]",
".",
"append",
"(",
"tmp",
")",
"if",
"nominal",
":",
"self",
".",
"focus",
".",
"update",
"(",
"{",
"k",
":",
"nominal_values",
"(",
"np",
".",
"concatenate",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
",",
"in",
"focus",
".",
"items",
"(",
")",
"}",
")",
"else",
":",
"self",
".",
"focus",
".",
"update",
"(",
"{",
"k",
":",
"np",
".",
"concatenate",
"(",
"v",
")",
"for",
"k",
",",
"v",
",",
"in",
"focus",
".",
"items",
"(",
")",
"}",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.get_gradients
|
Collect all data from all samples into a single array.
Data from standards is not collected.
Parameters
----------
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
samples : str or list
which samples to get
subset : str or int
which subset to get
Returns
-------
None
|
latools/latools.py
|
def get_gradients(self, analytes=None, win=15, filt=False, samples=None, subset=None, recalc=True):
"""
Collect all data from all samples into a single array.
Data from standards is not collected.
Parameters
----------
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
samples : str or list
which samples to get
subset : str or int
which subset to get
Returns
-------
None
"""
if analytes is None:
analytes = self.analytes
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
# check if gradients already calculated
if all([self.data[s].grads_calced for s in samples]) and hasattr(self, 'gradients'):
if not recalc:
print("Using existing gradients. Set recalc=True to re-calculate.")
return
if not hasattr(self, 'gradients'):
self.gradients = Bunch()
# t = 0
focus = {'uTime': []}
focus.update({a: [] for a in analytes})
with self.pbar.set(total=len(samples), desc='Calculating Gradients') as prog:
for sa in samples:
s = self.data[sa]
focus['uTime'].append(s.uTime)
ind = s.filt.grab_filt(filt)
grads = calc_grads(s.uTime, s.focus, keys=analytes, win=win)
for a in analytes:
tmp = grads[a]
tmp[~ind] = np.nan
focus[a].append(tmp)
s.grads = tmp
s.grads_calced = True
prog.update()
self.gradients.update({k: np.concatenate(v) for k, v, in focus.items()})
return
|
def get_gradients(self, analytes=None, win=15, filt=False, samples=None, subset=None, recalc=True):
"""
Collect all data from all samples into a single array.
Data from standards is not collected.
Parameters
----------
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
samples : str or list
which samples to get
subset : str or int
which subset to get
Returns
-------
None
"""
if analytes is None:
analytes = self.analytes
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
# check if gradients already calculated
if all([self.data[s].grads_calced for s in samples]) and hasattr(self, 'gradients'):
if not recalc:
print("Using existing gradients. Set recalc=True to re-calculate.")
return
if not hasattr(self, 'gradients'):
self.gradients = Bunch()
# t = 0
focus = {'uTime': []}
focus.update({a: [] for a in analytes})
with self.pbar.set(total=len(samples), desc='Calculating Gradients') as prog:
for sa in samples:
s = self.data[sa]
focus['uTime'].append(s.uTime)
ind = s.filt.grab_filt(filt)
grads = calc_grads(s.uTime, s.focus, keys=analytes, win=win)
for a in analytes:
tmp = grads[a]
tmp[~ind] = np.nan
focus[a].append(tmp)
s.grads = tmp
s.grads_calced = True
prog.update()
self.gradients.update({k: np.concatenate(v) for k, v, in focus.items()})
return
|
[
"Collect",
"all",
"data",
"from",
"all",
"samples",
"into",
"a",
"single",
"array",
".",
"Data",
"from",
"standards",
"is",
"not",
"collected",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2813-L2870
|
[
"def",
"get_gradients",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"win",
"=",
"15",
",",
"filt",
"=",
"False",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
",",
"recalc",
"=",
"True",
")",
":",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"# check if gradients already calculated",
"if",
"all",
"(",
"[",
"self",
".",
"data",
"[",
"s",
"]",
".",
"grads_calced",
"for",
"s",
"in",
"samples",
"]",
")",
"and",
"hasattr",
"(",
"self",
",",
"'gradients'",
")",
":",
"if",
"not",
"recalc",
":",
"print",
"(",
"\"Using existing gradients. Set recalc=True to re-calculate.\"",
")",
"return",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'gradients'",
")",
":",
"self",
".",
"gradients",
"=",
"Bunch",
"(",
")",
"# t = 0",
"focus",
"=",
"{",
"'uTime'",
":",
"[",
"]",
"}",
"focus",
".",
"update",
"(",
"{",
"a",
":",
"[",
"]",
"for",
"a",
"in",
"analytes",
"}",
")",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"samples",
")",
",",
"desc",
"=",
"'Calculating Gradients'",
")",
"as",
"prog",
":",
"for",
"sa",
"in",
"samples",
":",
"s",
"=",
"self",
".",
"data",
"[",
"sa",
"]",
"focus",
"[",
"'uTime'",
"]",
".",
"append",
"(",
"s",
".",
"uTime",
")",
"ind",
"=",
"s",
".",
"filt",
".",
"grab_filt",
"(",
"filt",
")",
"grads",
"=",
"calc_grads",
"(",
"s",
".",
"uTime",
",",
"s",
".",
"focus",
",",
"keys",
"=",
"analytes",
",",
"win",
"=",
"win",
")",
"for",
"a",
"in",
"analytes",
":",
"tmp",
"=",
"grads",
"[",
"a",
"]",
"tmp",
"[",
"~",
"ind",
"]",
"=",
"np",
".",
"nan",
"focus",
"[",
"a",
"]",
".",
"append",
"(",
"tmp",
")",
"s",
".",
"grads",
"=",
"tmp",
"s",
".",
"grads_calced",
"=",
"True",
"prog",
".",
"update",
"(",
")",
"self",
".",
"gradients",
".",
"update",
"(",
"{",
"k",
":",
"np",
".",
"concatenate",
"(",
"v",
")",
"for",
"k",
",",
"v",
",",
"in",
"focus",
".",
"items",
"(",
")",
"}",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.gradient_histogram
|
Plot a histogram of the gradients in all samples.
Parameters
----------
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
bins : None or array-like
The bins to use in the histogram
samples : str or list
which samples to get
subset : str or int
which subset to get
recalc : bool
Whether to re-calculate the gradients, or use existing gradients.
Returns
-------
fig, ax
|
latools/latools.py
|
def gradient_histogram(self, analytes=None, win=15, filt=False, bins=None, samples=None, subset=None, recalc=True, ncol=4):
"""
Plot a histogram of the gradients in all samples.
Parameters
----------
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
bins : None or array-like
The bins to use in the histogram
samples : str or list
which samples to get
subset : str or int
which subset to get
recalc : bool
Whether to re-calculate the gradients, or use existing gradients.
Returns
-------
fig, ax
"""
if analytes is None:
analytes = [a for a in self.analytes if self.internal_standard not in a]
if not hasattr(self, 'gradients'):
self.gradients = Bunch()
ncol = int(ncol)
n = len(analytes)
nrow = plot.calc_nrow(n, ncol)
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
self.get_gradients(analytes=analytes, win=win, filt=filt, subset=subset, recalc=recalc)
fig, axs = plt.subplots(nrow, ncol, figsize=[3. * ncol, 2.5 * nrow])
if not isinstance(axs, np.ndarray):
axs = [axs]
i = 0
for a, ax in zip(analytes, axs.flatten()):
d = nominal_values(self.gradients[a])
d = d[~np.isnan(d)]
m, u = unitpicker(d, focus_stage=self.focus_stage, denominator=self.internal_standard)
if bins is None:
ibins = np.linspace(*np.percentile(d * m, [1, 99]), 50)
else:
ibins = bins
ax.hist(d * m, bins=ibins, color=self.cmaps[a])
ax.axvline(0, ls='dashed', lw=1, c=(0,0,0,0.7))
ax.set_title(a, loc='left')
if ax.is_first_col():
ax.set_ylabel('N')
ax.set_xlabel(u + '/s')
i += 1
if i < ncol * nrow:
for ax in axs.flatten()[i:]:
ax.set_visible(False)
fig.tight_layout()
return fig, axs
|
def gradient_histogram(self, analytes=None, win=15, filt=False, bins=None, samples=None, subset=None, recalc=True, ncol=4):
"""
Plot a histogram of the gradients in all samples.
Parameters
----------
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
bins : None or array-like
The bins to use in the histogram
samples : str or list
which samples to get
subset : str or int
which subset to get
recalc : bool
Whether to re-calculate the gradients, or use existing gradients.
Returns
-------
fig, ax
"""
if analytes is None:
analytes = [a for a in self.analytes if self.internal_standard not in a]
if not hasattr(self, 'gradients'):
self.gradients = Bunch()
ncol = int(ncol)
n = len(analytes)
nrow = plot.calc_nrow(n, ncol)
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
self.get_gradients(analytes=analytes, win=win, filt=filt, subset=subset, recalc=recalc)
fig, axs = plt.subplots(nrow, ncol, figsize=[3. * ncol, 2.5 * nrow])
if not isinstance(axs, np.ndarray):
axs = [axs]
i = 0
for a, ax in zip(analytes, axs.flatten()):
d = nominal_values(self.gradients[a])
d = d[~np.isnan(d)]
m, u = unitpicker(d, focus_stage=self.focus_stage, denominator=self.internal_standard)
if bins is None:
ibins = np.linspace(*np.percentile(d * m, [1, 99]), 50)
else:
ibins = bins
ax.hist(d * m, bins=ibins, color=self.cmaps[a])
ax.axvline(0, ls='dashed', lw=1, c=(0,0,0,0.7))
ax.set_title(a, loc='left')
if ax.is_first_col():
ax.set_ylabel('N')
ax.set_xlabel(u + '/s')
i += 1
if i < ncol * nrow:
for ax in axs.flatten()[i:]:
ax.set_visible(False)
fig.tight_layout()
return fig, axs
|
[
"Plot",
"a",
"histogram",
"of",
"the",
"gradients",
"in",
"all",
"samples",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2872-L2944
|
[
"def",
"gradient_histogram",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"win",
"=",
"15",
",",
"filt",
"=",
"False",
",",
"bins",
"=",
"None",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
",",
"recalc",
"=",
"True",
",",
"ncol",
"=",
"4",
")",
":",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"[",
"a",
"for",
"a",
"in",
"self",
".",
"analytes",
"if",
"self",
".",
"internal_standard",
"not",
"in",
"a",
"]",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'gradients'",
")",
":",
"self",
".",
"gradients",
"=",
"Bunch",
"(",
")",
"ncol",
"=",
"int",
"(",
"ncol",
")",
"n",
"=",
"len",
"(",
"analytes",
")",
"nrow",
"=",
"plot",
".",
"calc_nrow",
"(",
"n",
",",
"ncol",
")",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"self",
".",
"get_gradients",
"(",
"analytes",
"=",
"analytes",
",",
"win",
"=",
"win",
",",
"filt",
"=",
"filt",
",",
"subset",
"=",
"subset",
",",
"recalc",
"=",
"recalc",
")",
"fig",
",",
"axs",
"=",
"plt",
".",
"subplots",
"(",
"nrow",
",",
"ncol",
",",
"figsize",
"=",
"[",
"3.",
"*",
"ncol",
",",
"2.5",
"*",
"nrow",
"]",
")",
"if",
"not",
"isinstance",
"(",
"axs",
",",
"np",
".",
"ndarray",
")",
":",
"axs",
"=",
"[",
"axs",
"]",
"i",
"=",
"0",
"for",
"a",
",",
"ax",
"in",
"zip",
"(",
"analytes",
",",
"axs",
".",
"flatten",
"(",
")",
")",
":",
"d",
"=",
"nominal_values",
"(",
"self",
".",
"gradients",
"[",
"a",
"]",
")",
"d",
"=",
"d",
"[",
"~",
"np",
".",
"isnan",
"(",
"d",
")",
"]",
"m",
",",
"u",
"=",
"unitpicker",
"(",
"d",
",",
"focus_stage",
"=",
"self",
".",
"focus_stage",
",",
"denominator",
"=",
"self",
".",
"internal_standard",
")",
"if",
"bins",
"is",
"None",
":",
"ibins",
"=",
"np",
".",
"linspace",
"(",
"*",
"np",
".",
"percentile",
"(",
"d",
"*",
"m",
",",
"[",
"1",
",",
"99",
"]",
")",
",",
"50",
")",
"else",
":",
"ibins",
"=",
"bins",
"ax",
".",
"hist",
"(",
"d",
"*",
"m",
",",
"bins",
"=",
"ibins",
",",
"color",
"=",
"self",
".",
"cmaps",
"[",
"a",
"]",
")",
"ax",
".",
"axvline",
"(",
"0",
",",
"ls",
"=",
"'dashed'",
",",
"lw",
"=",
"1",
",",
"c",
"=",
"(",
"0",
",",
"0",
",",
"0",
",",
"0.7",
")",
")",
"ax",
".",
"set_title",
"(",
"a",
",",
"loc",
"=",
"'left'",
")",
"if",
"ax",
".",
"is_first_col",
"(",
")",
":",
"ax",
".",
"set_ylabel",
"(",
"'N'",
")",
"ax",
".",
"set_xlabel",
"(",
"u",
"+",
"'/s'",
")",
"i",
"+=",
"1",
"if",
"i",
"<",
"ncol",
"*",
"nrow",
":",
"for",
"ax",
"in",
"axs",
".",
"flatten",
"(",
")",
"[",
"i",
":",
"]",
":",
"ax",
".",
"set_visible",
"(",
"False",
")",
"fig",
".",
"tight_layout",
"(",
")",
"return",
"fig",
",",
"axs"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.crossplot
|
Plot analytes against each other.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
lognorm : bool
Whether or not to log normalise the colour scale
of the 2D histogram.
bins : int
The number of bins in the 2D histogram.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
figsize : tuple
Figure size (width, height) in inches.
save : bool or str
If True, plot is saves as 'crossplot.png', if str plot is
saves as str.
colourful : bool
Whether or not the plot should be colourful :).
mode : str
'hist2d' (default) or 'scatter'
Returns
-------
(fig, axes)
|
latools/latools.py
|
def crossplot(self, analytes=None, lognorm=True,
bins=25, filt=False, samples=None,
subset=None, figsize=(12, 12), save=False,
colourful=True, mode='hist2d', **kwargs):
"""
Plot analytes against each other.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
lognorm : bool
Whether or not to log normalise the colour scale
of the 2D histogram.
bins : int
The number of bins in the 2D histogram.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
figsize : tuple
Figure size (width, height) in inches.
save : bool or str
If True, plot is saves as 'crossplot.png', if str plot is
saves as str.
colourful : bool
Whether or not the plot should be colourful :).
mode : str
'hist2d' (default) or 'scatter'
Returns
-------
(fig, axes)
"""
if analytes is None:
analytes = self.analytes
if self.focus_stage in ['ratio', 'calibrated']:
analytes = [a for a in analytes if self.internal_standard not in a]
# sort analytes
try:
analytes = sorted(analytes, key=lambda x: float(re.findall('[0-9.-]+', x)[0]))
except IndexError:
analytes = sorted(analytes)
self.get_focus(filt=filt, samples=samples, subset=subset)
fig, axes = plot.crossplot(dat=self.focus, keys=analytes, lognorm=lognorm,
bins=bins, figsize=figsize, colourful=colourful,
focus_stage=self.focus_stage, cmap=self.cmaps,
denominator=self.internal_standard, mode=mode)
if save or isinstance(save, str):
if isinstance(save, str):
fig.savefig(self.report_dir + '/' + save, dpi=200)
else:
fig.savefig(self.report_dir + '/crossplot.png', dpi=200)
return fig, axes
|
def crossplot(self, analytes=None, lognorm=True,
bins=25, filt=False, samples=None,
subset=None, figsize=(12, 12), save=False,
colourful=True, mode='hist2d', **kwargs):
"""
Plot analytes against each other.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
lognorm : bool
Whether or not to log normalise the colour scale
of the 2D histogram.
bins : int
The number of bins in the 2D histogram.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
figsize : tuple
Figure size (width, height) in inches.
save : bool or str
If True, plot is saves as 'crossplot.png', if str plot is
saves as str.
colourful : bool
Whether or not the plot should be colourful :).
mode : str
'hist2d' (default) or 'scatter'
Returns
-------
(fig, axes)
"""
if analytes is None:
analytes = self.analytes
if self.focus_stage in ['ratio', 'calibrated']:
analytes = [a for a in analytes if self.internal_standard not in a]
# sort analytes
try:
analytes = sorted(analytes, key=lambda x: float(re.findall('[0-9.-]+', x)[0]))
except IndexError:
analytes = sorted(analytes)
self.get_focus(filt=filt, samples=samples, subset=subset)
fig, axes = plot.crossplot(dat=self.focus, keys=analytes, lognorm=lognorm,
bins=bins, figsize=figsize, colourful=colourful,
focus_stage=self.focus_stage, cmap=self.cmaps,
denominator=self.internal_standard, mode=mode)
if save or isinstance(save, str):
if isinstance(save, str):
fig.savefig(self.report_dir + '/' + save, dpi=200)
else:
fig.savefig(self.report_dir + '/crossplot.png', dpi=200)
return fig, axes
|
[
"Plot",
"analytes",
"against",
"each",
"other",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2948-L3006
|
[
"def",
"crossplot",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"lognorm",
"=",
"True",
",",
"bins",
"=",
"25",
",",
"filt",
"=",
"False",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
",",
"figsize",
"=",
"(",
"12",
",",
"12",
")",
",",
"save",
"=",
"False",
",",
"colourful",
"=",
"True",
",",
"mode",
"=",
"'hist2d'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"if",
"self",
".",
"focus_stage",
"in",
"[",
"'ratio'",
",",
"'calibrated'",
"]",
":",
"analytes",
"=",
"[",
"a",
"for",
"a",
"in",
"analytes",
"if",
"self",
".",
"internal_standard",
"not",
"in",
"a",
"]",
"# sort analytes",
"try",
":",
"analytes",
"=",
"sorted",
"(",
"analytes",
",",
"key",
"=",
"lambda",
"x",
":",
"float",
"(",
"re",
".",
"findall",
"(",
"'[0-9.-]+'",
",",
"x",
")",
"[",
"0",
"]",
")",
")",
"except",
"IndexError",
":",
"analytes",
"=",
"sorted",
"(",
"analytes",
")",
"self",
".",
"get_focus",
"(",
"filt",
"=",
"filt",
",",
"samples",
"=",
"samples",
",",
"subset",
"=",
"subset",
")",
"fig",
",",
"axes",
"=",
"plot",
".",
"crossplot",
"(",
"dat",
"=",
"self",
".",
"focus",
",",
"keys",
"=",
"analytes",
",",
"lognorm",
"=",
"lognorm",
",",
"bins",
"=",
"bins",
",",
"figsize",
"=",
"figsize",
",",
"colourful",
"=",
"colourful",
",",
"focus_stage",
"=",
"self",
".",
"focus_stage",
",",
"cmap",
"=",
"self",
".",
"cmaps",
",",
"denominator",
"=",
"self",
".",
"internal_standard",
",",
"mode",
"=",
"mode",
")",
"if",
"save",
"or",
"isinstance",
"(",
"save",
",",
"str",
")",
":",
"if",
"isinstance",
"(",
"save",
",",
"str",
")",
":",
"fig",
".",
"savefig",
"(",
"self",
".",
"report_dir",
"+",
"'/'",
"+",
"save",
",",
"dpi",
"=",
"200",
")",
"else",
":",
"fig",
".",
"savefig",
"(",
"self",
".",
"report_dir",
"+",
"'/crossplot.png'",
",",
"dpi",
"=",
"200",
")",
"return",
"fig",
",",
"axes"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.gradient_crossplot
|
Plot analyte gradients against each other.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
lognorm : bool
Whether or not to log normalise the colour scale
of the 2D histogram.
bins : int
The number of bins in the 2D histogram.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
figsize : tuple
Figure size (width, height) in inches.
save : bool or str
If True, plot is saves as 'crossplot.png', if str plot is
saves as str.
colourful : bool
Whether or not the plot should be colourful :).
mode : str
'hist2d' (default) or 'scatter'
recalc : bool
Whether to re-calculate the gradients, or use existing gradients.
Returns
-------
(fig, axes)
|
latools/latools.py
|
def gradient_crossplot(self, analytes=None, win=15, lognorm=True,
bins=25, filt=False, samples=None,
subset=None, figsize=(12, 12), save=False,
colourful=True, mode='hist2d', recalc=True, **kwargs):
"""
Plot analyte gradients against each other.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
lognorm : bool
Whether or not to log normalise the colour scale
of the 2D histogram.
bins : int
The number of bins in the 2D histogram.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
figsize : tuple
Figure size (width, height) in inches.
save : bool or str
If True, plot is saves as 'crossplot.png', if str plot is
saves as str.
colourful : bool
Whether or not the plot should be colourful :).
mode : str
'hist2d' (default) or 'scatter'
recalc : bool
Whether to re-calculate the gradients, or use existing gradients.
Returns
-------
(fig, axes)
"""
if analytes is None:
analytes = self.analytes
if self.focus_stage in ['ratio', 'calibrated']:
analytes = [a for a in analytes if self.internal_standard not in a]
# sort analytes
try:
analytes = sorted(analytes, key=lambda x: float(re.findall('[0-9.-]+', x)[0]))
except IndexError:
analytes = sorted(analytes)
samples = self._get_samples(subset)
# calculate gradients
self.get_gradients(analytes=analytes, win=win, filt=filt, subset=subset, recalc=recalc)
# self.get_focus(filt=filt, samples=samples, subset=subset)
# grads = calc_grads(self.focus.uTime, self.focus, analytes, win)
fig, axes = plot.crossplot(dat=self.gradients, keys=analytes, lognorm=lognorm,
bins=bins, figsize=figsize, colourful=colourful,
focus_stage=self.focus_stage, cmap=self.cmaps,
denominator=self.internal_standard, mode=mode)
if save:
fig.savefig(self.report_dir + '/g_crossplot.png', dpi=200)
return fig, axes
|
def gradient_crossplot(self, analytes=None, win=15, lognorm=True,
bins=25, filt=False, samples=None,
subset=None, figsize=(12, 12), save=False,
colourful=True, mode='hist2d', recalc=True, **kwargs):
"""
Plot analyte gradients against each other.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
lognorm : bool
Whether or not to log normalise the colour scale
of the 2D histogram.
bins : int
The number of bins in the 2D histogram.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
figsize : tuple
Figure size (width, height) in inches.
save : bool or str
If True, plot is saves as 'crossplot.png', if str plot is
saves as str.
colourful : bool
Whether or not the plot should be colourful :).
mode : str
'hist2d' (default) or 'scatter'
recalc : bool
Whether to re-calculate the gradients, or use existing gradients.
Returns
-------
(fig, axes)
"""
if analytes is None:
analytes = self.analytes
if self.focus_stage in ['ratio', 'calibrated']:
analytes = [a for a in analytes if self.internal_standard not in a]
# sort analytes
try:
analytes = sorted(analytes, key=lambda x: float(re.findall('[0-9.-]+', x)[0]))
except IndexError:
analytes = sorted(analytes)
samples = self._get_samples(subset)
# calculate gradients
self.get_gradients(analytes=analytes, win=win, filt=filt, subset=subset, recalc=recalc)
# self.get_focus(filt=filt, samples=samples, subset=subset)
# grads = calc_grads(self.focus.uTime, self.focus, analytes, win)
fig, axes = plot.crossplot(dat=self.gradients, keys=analytes, lognorm=lognorm,
bins=bins, figsize=figsize, colourful=colourful,
focus_stage=self.focus_stage, cmap=self.cmaps,
denominator=self.internal_standard, mode=mode)
if save:
fig.savefig(self.report_dir + '/g_crossplot.png', dpi=200)
return fig, axes
|
[
"Plot",
"analyte",
"gradients",
"against",
"each",
"other",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L3009-L3073
|
[
"def",
"gradient_crossplot",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"win",
"=",
"15",
",",
"lognorm",
"=",
"True",
",",
"bins",
"=",
"25",
",",
"filt",
"=",
"False",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
",",
"figsize",
"=",
"(",
"12",
",",
"12",
")",
",",
"save",
"=",
"False",
",",
"colourful",
"=",
"True",
",",
"mode",
"=",
"'hist2d'",
",",
"recalc",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"if",
"self",
".",
"focus_stage",
"in",
"[",
"'ratio'",
",",
"'calibrated'",
"]",
":",
"analytes",
"=",
"[",
"a",
"for",
"a",
"in",
"analytes",
"if",
"self",
".",
"internal_standard",
"not",
"in",
"a",
"]",
"# sort analytes",
"try",
":",
"analytes",
"=",
"sorted",
"(",
"analytes",
",",
"key",
"=",
"lambda",
"x",
":",
"float",
"(",
"re",
".",
"findall",
"(",
"'[0-9.-]+'",
",",
"x",
")",
"[",
"0",
"]",
")",
")",
"except",
"IndexError",
":",
"analytes",
"=",
"sorted",
"(",
"analytes",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"# calculate gradients",
"self",
".",
"get_gradients",
"(",
"analytes",
"=",
"analytes",
",",
"win",
"=",
"win",
",",
"filt",
"=",
"filt",
",",
"subset",
"=",
"subset",
",",
"recalc",
"=",
"recalc",
")",
"# self.get_focus(filt=filt, samples=samples, subset=subset)",
"# grads = calc_grads(self.focus.uTime, self.focus, analytes, win)",
"fig",
",",
"axes",
"=",
"plot",
".",
"crossplot",
"(",
"dat",
"=",
"self",
".",
"gradients",
",",
"keys",
"=",
"analytes",
",",
"lognorm",
"=",
"lognorm",
",",
"bins",
"=",
"bins",
",",
"figsize",
"=",
"figsize",
",",
"colourful",
"=",
"colourful",
",",
"focus_stage",
"=",
"self",
".",
"focus_stage",
",",
"cmap",
"=",
"self",
".",
"cmaps",
",",
"denominator",
"=",
"self",
".",
"internal_standard",
",",
"mode",
"=",
"mode",
")",
"if",
"save",
":",
"fig",
".",
"savefig",
"(",
"self",
".",
"report_dir",
"+",
"'/g_crossplot.png'",
",",
"dpi",
"=",
"200",
")",
"return",
"fig",
",",
"axes"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.histograms
|
Plot histograms of analytes.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
bins : int
The number of bins in each histogram (default = 25)
logy : bool
If true, y axis is a log scale.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
colourful : bool
If True, histograms are colourful :)
Returns
-------
(fig, axes)
|
latools/latools.py
|
def histograms(self, analytes=None, bins=25, logy=False,
filt=False, colourful=True):
"""
Plot histograms of analytes.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
bins : int
The number of bins in each histogram (default = 25)
logy : bool
If true, y axis is a log scale.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
colourful : bool
If True, histograms are colourful :)
Returns
-------
(fig, axes)
"""
if analytes is None:
analytes = self.analytes
if self.focus_stage in ['ratio', 'calibrated']:
analytes = [a for a in analytes if self.internal_standard not in a]
if colourful:
cmap = self.cmaps
else:
cmap = None
self.get_focus(filt=filt)
fig, axes = plot.histograms(self.focus, keys=analytes,
bins=bins, logy=logy, cmap=cmap)
return fig, axes
|
def histograms(self, analytes=None, bins=25, logy=False,
filt=False, colourful=True):
"""
Plot histograms of analytes.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
bins : int
The number of bins in each histogram (default = 25)
logy : bool
If true, y axis is a log scale.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
colourful : bool
If True, histograms are colourful :)
Returns
-------
(fig, axes)
"""
if analytes is None:
analytes = self.analytes
if self.focus_stage in ['ratio', 'calibrated']:
analytes = [a for a in analytes if self.internal_standard not in a]
if colourful:
cmap = self.cmaps
else:
cmap = None
self.get_focus(filt=filt)
fig, axes = plot.histograms(self.focus, keys=analytes,
bins=bins, logy=logy, cmap=cmap)
return fig, axes
|
[
"Plot",
"histograms",
"of",
"analytes",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L3075-L3112
|
[
"def",
"histograms",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"bins",
"=",
"25",
",",
"logy",
"=",
"False",
",",
"filt",
"=",
"False",
",",
"colourful",
"=",
"True",
")",
":",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"if",
"self",
".",
"focus_stage",
"in",
"[",
"'ratio'",
",",
"'calibrated'",
"]",
":",
"analytes",
"=",
"[",
"a",
"for",
"a",
"in",
"analytes",
"if",
"self",
".",
"internal_standard",
"not",
"in",
"a",
"]",
"if",
"colourful",
":",
"cmap",
"=",
"self",
".",
"cmaps",
"else",
":",
"cmap",
"=",
"None",
"self",
".",
"get_focus",
"(",
"filt",
"=",
"filt",
")",
"fig",
",",
"axes",
"=",
"plot",
".",
"histograms",
"(",
"self",
".",
"focus",
",",
"keys",
"=",
"analytes",
",",
"bins",
"=",
"bins",
",",
"logy",
"=",
"logy",
",",
"cmap",
"=",
"cmap",
")",
"return",
"fig",
",",
"axes"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.filter_effect
|
Quantify the effects of the active filters.
Parameters
----------
analytes : str or list
Which analytes to consider.
stats : list
Which statistics to calculate.
file : valid filter string or bool
Which filter to consider. If True, applies all
active filters.
Returns
-------
pandas.DataFrame
Contains statistics calculated for filtered and
unfiltered data, and the filtered/unfiltered ratio.
|
latools/latools.py
|
def filter_effect(self, analytes=None, stats=['mean', 'std'], filt=True):
"""
Quantify the effects of the active filters.
Parameters
----------
analytes : str or list
Which analytes to consider.
stats : list
Which statistics to calculate.
file : valid filter string or bool
Which filter to consider. If True, applies all
active filters.
Returns
-------
pandas.DataFrame
Contains statistics calculated for filtered and
unfiltered data, and the filtered/unfiltered ratio.
"""
if analytes is None:
analytes = self.analytes
if isinstance(analytes, str):
analytes = [analytes]
# calculate filtered and unfiltered stats
self.sample_stats(['La139', 'Ti49'], stats=stats, filt=False)
suf = self.stats.copy()
self.sample_stats(['La139', 'Ti49'], stats=stats, filt=filt)
sf = self.stats.copy()
# create dataframe for results
cols = []
for s in self.stats_calced:
cols += ['unfiltered_{:}'.format(s), 'filtered_{:}'.format(s)]
comp = pd.DataFrame(index=self.samples,
columns=pd.MultiIndex.from_arrays([cols, [None] * len(cols)]))
# collate stats
for k, v in suf.items():
vf = sf[k]
for i, a in enumerate(v['analytes']):
for s in self.stats_calced:
comp.loc[k, ('unfiltered_{:}'.format(s), a)] = v[s][i,0]
comp.loc[k, ('filtered_{:}'.format(s), a)] = vf[s][i,0]
comp.dropna(0, 'all', inplace=True)
comp.dropna(1, 'all', inplace=True)
comp.sort_index(1, inplace=True)
# calculate filtered/unfiltered ratios
rats = []
for s in self.stats_calced:
rat = comp.loc[:, 'filtered_{:}'.format(s)] / comp.loc[:, 'unfiltered_{:}'.format(s)]
rat.columns = pd.MultiIndex.from_product([['{:}_ratio'.format(s)], rat.columns])
rats.append(rat)
# join it all up
comp = comp.join(pd.concat(rats, 1))
comp.sort_index(1, inplace=True)
return comp.loc[:, (pd.IndexSlice[:], pd.IndexSlice[analytes])]
|
def filter_effect(self, analytes=None, stats=['mean', 'std'], filt=True):
"""
Quantify the effects of the active filters.
Parameters
----------
analytes : str or list
Which analytes to consider.
stats : list
Which statistics to calculate.
file : valid filter string or bool
Which filter to consider. If True, applies all
active filters.
Returns
-------
pandas.DataFrame
Contains statistics calculated for filtered and
unfiltered data, and the filtered/unfiltered ratio.
"""
if analytes is None:
analytes = self.analytes
if isinstance(analytes, str):
analytes = [analytes]
# calculate filtered and unfiltered stats
self.sample_stats(['La139', 'Ti49'], stats=stats, filt=False)
suf = self.stats.copy()
self.sample_stats(['La139', 'Ti49'], stats=stats, filt=filt)
sf = self.stats.copy()
# create dataframe for results
cols = []
for s in self.stats_calced:
cols += ['unfiltered_{:}'.format(s), 'filtered_{:}'.format(s)]
comp = pd.DataFrame(index=self.samples,
columns=pd.MultiIndex.from_arrays([cols, [None] * len(cols)]))
# collate stats
for k, v in suf.items():
vf = sf[k]
for i, a in enumerate(v['analytes']):
for s in self.stats_calced:
comp.loc[k, ('unfiltered_{:}'.format(s), a)] = v[s][i,0]
comp.loc[k, ('filtered_{:}'.format(s), a)] = vf[s][i,0]
comp.dropna(0, 'all', inplace=True)
comp.dropna(1, 'all', inplace=True)
comp.sort_index(1, inplace=True)
# calculate filtered/unfiltered ratios
rats = []
for s in self.stats_calced:
rat = comp.loc[:, 'filtered_{:}'.format(s)] / comp.loc[:, 'unfiltered_{:}'.format(s)]
rat.columns = pd.MultiIndex.from_product([['{:}_ratio'.format(s)], rat.columns])
rats.append(rat)
# join it all up
comp = comp.join(pd.concat(rats, 1))
comp.sort_index(1, inplace=True)
return comp.loc[:, (pd.IndexSlice[:], pd.IndexSlice[analytes])]
|
[
"Quantify",
"the",
"effects",
"of",
"the",
"active",
"filters",
".",
"Parameters",
"----------",
"analytes",
":",
"str",
"or",
"list",
"Which",
"analytes",
"to",
"consider",
".",
"stats",
":",
"list",
"Which",
"statistics",
"to",
"calculate",
".",
"file",
":",
"valid",
"filter",
"string",
"or",
"bool",
"Which",
"filter",
"to",
"consider",
".",
"If",
"True",
"applies",
"all",
"active",
"filters",
".",
"Returns",
"-------",
"pandas",
".",
"DataFrame",
"Contains",
"statistics",
"calculated",
"for",
"filtered",
"and",
"unfiltered",
"data",
"and",
"the",
"filtered",
"/",
"unfiltered",
"ratio",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L3114-L3175
|
[
"def",
"filter_effect",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"stats",
"=",
"[",
"'mean'",
",",
"'std'",
"]",
",",
"filt",
"=",
"True",
")",
":",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"if",
"isinstance",
"(",
"analytes",
",",
"str",
")",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"# calculate filtered and unfiltered stats",
"self",
".",
"sample_stats",
"(",
"[",
"'La139'",
",",
"'Ti49'",
"]",
",",
"stats",
"=",
"stats",
",",
"filt",
"=",
"False",
")",
"suf",
"=",
"self",
".",
"stats",
".",
"copy",
"(",
")",
"self",
".",
"sample_stats",
"(",
"[",
"'La139'",
",",
"'Ti49'",
"]",
",",
"stats",
"=",
"stats",
",",
"filt",
"=",
"filt",
")",
"sf",
"=",
"self",
".",
"stats",
".",
"copy",
"(",
")",
"# create dataframe for results",
"cols",
"=",
"[",
"]",
"for",
"s",
"in",
"self",
".",
"stats_calced",
":",
"cols",
"+=",
"[",
"'unfiltered_{:}'",
".",
"format",
"(",
"s",
")",
",",
"'filtered_{:}'",
".",
"format",
"(",
"s",
")",
"]",
"comp",
"=",
"pd",
".",
"DataFrame",
"(",
"index",
"=",
"self",
".",
"samples",
",",
"columns",
"=",
"pd",
".",
"MultiIndex",
".",
"from_arrays",
"(",
"[",
"cols",
",",
"[",
"None",
"]",
"*",
"len",
"(",
"cols",
")",
"]",
")",
")",
"# collate stats",
"for",
"k",
",",
"v",
"in",
"suf",
".",
"items",
"(",
")",
":",
"vf",
"=",
"sf",
"[",
"k",
"]",
"for",
"i",
",",
"a",
"in",
"enumerate",
"(",
"v",
"[",
"'analytes'",
"]",
")",
":",
"for",
"s",
"in",
"self",
".",
"stats_calced",
":",
"comp",
".",
"loc",
"[",
"k",
",",
"(",
"'unfiltered_{:}'",
".",
"format",
"(",
"s",
")",
",",
"a",
")",
"]",
"=",
"v",
"[",
"s",
"]",
"[",
"i",
",",
"0",
"]",
"comp",
".",
"loc",
"[",
"k",
",",
"(",
"'filtered_{:}'",
".",
"format",
"(",
"s",
")",
",",
"a",
")",
"]",
"=",
"vf",
"[",
"s",
"]",
"[",
"i",
",",
"0",
"]",
"comp",
".",
"dropna",
"(",
"0",
",",
"'all'",
",",
"inplace",
"=",
"True",
")",
"comp",
".",
"dropna",
"(",
"1",
",",
"'all'",
",",
"inplace",
"=",
"True",
")",
"comp",
".",
"sort_index",
"(",
"1",
",",
"inplace",
"=",
"True",
")",
"# calculate filtered/unfiltered ratios",
"rats",
"=",
"[",
"]",
"for",
"s",
"in",
"self",
".",
"stats_calced",
":",
"rat",
"=",
"comp",
".",
"loc",
"[",
":",
",",
"'filtered_{:}'",
".",
"format",
"(",
"s",
")",
"]",
"/",
"comp",
".",
"loc",
"[",
":",
",",
"'unfiltered_{:}'",
".",
"format",
"(",
"s",
")",
"]",
"rat",
".",
"columns",
"=",
"pd",
".",
"MultiIndex",
".",
"from_product",
"(",
"[",
"[",
"'{:}_ratio'",
".",
"format",
"(",
"s",
")",
"]",
",",
"rat",
".",
"columns",
"]",
")",
"rats",
".",
"append",
"(",
"rat",
")",
"# join it all up",
"comp",
"=",
"comp",
".",
"join",
"(",
"pd",
".",
"concat",
"(",
"rats",
",",
"1",
")",
")",
"comp",
".",
"sort_index",
"(",
"1",
",",
"inplace",
"=",
"True",
")",
"return",
"comp",
".",
"loc",
"[",
":",
",",
"(",
"pd",
".",
"IndexSlice",
"[",
":",
"]",
",",
"pd",
".",
"IndexSlice",
"[",
"analytes",
"]",
")",
"]"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.trace_plots
|
Plot analytes as a function of time.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
samples: optional, array_like or str
The sample(s) to plot. Defaults to all samples.
ranges : bool
Whether or not to show the signal/backgroudn regions
identified by 'autorange'.
focus : str
The focus 'stage' of the analysis to plot. Can be
'rawdata', 'despiked':, 'signal', 'background',
'bkgsub', 'ratios' or 'calibrated'.
outdir : str
Path to a directory where you'd like the plots to be
saved. Defaults to 'reports/[focus]' in your data directory.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
scale : str
If 'log', plots the data on a log scale.
figsize : array_like
Array of length 2 specifying figure [width, height] in
inches.
stats : bool
Whether or not to overlay the mean and standard deviations
for each trace.
stat, err: str
The names of the statistic and error components to plot.
Deafaults to 'nanmean' and 'nanstd'.
Returns
-------
None
|
latools/latools.py
|
def trace_plots(self, analytes=None, samples=None, ranges=False,
focus=None, outdir=None, filt=None, scale='log',
figsize=[10, 4], stats=False, stat='nanmean',
err='nanstd', subset='All_Analyses'):
"""
Plot analytes as a function of time.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
samples: optional, array_like or str
The sample(s) to plot. Defaults to all samples.
ranges : bool
Whether or not to show the signal/backgroudn regions
identified by 'autorange'.
focus : str
The focus 'stage' of the analysis to plot. Can be
'rawdata', 'despiked':, 'signal', 'background',
'bkgsub', 'ratios' or 'calibrated'.
outdir : str
Path to a directory where you'd like the plots to be
saved. Defaults to 'reports/[focus]' in your data directory.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
scale : str
If 'log', plots the data on a log scale.
figsize : array_like
Array of length 2 specifying figure [width, height] in
inches.
stats : bool
Whether or not to overlay the mean and standard deviations
for each trace.
stat, err: str
The names of the statistic and error components to plot.
Deafaults to 'nanmean' and 'nanstd'.
Returns
-------
None
"""
if focus is None:
focus = self.focus_stage
if outdir is None:
outdir = self.report_dir + '/' + focus
if not os.path.isdir(outdir):
os.mkdir(outdir)
# if samples is not None:
# subset = self.make_subset(samples)
if subset is not None:
samples = self._get_samples(subset)
elif samples is None:
samples = self.subsets['All_Analyses']
elif isinstance(samples, str):
samples = [samples]
with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog:
for s in samples:
f, a = self.data[s].tplot(analytes=analytes, figsize=figsize,
scale=scale, filt=filt,
ranges=ranges, stats=stats,
stat=stat, err=err, focus_stage=focus)
# ax = fig.axes[0]
# for l, u in s.sigrng:
# ax.axvspan(l, u, color='r', alpha=0.1)
# for l, u in s.bkgrng:
# ax.axvspan(l, u, color='k', alpha=0.1)
f.savefig(outdir + '/' + s + '_traces.pdf')
# TODO: on older(?) computers raises
# 'OSError: [Errno 24] Too many open files'
plt.close(f)
prog.update()
return
|
def trace_plots(self, analytes=None, samples=None, ranges=False,
focus=None, outdir=None, filt=None, scale='log',
figsize=[10, 4], stats=False, stat='nanmean',
err='nanstd', subset='All_Analyses'):
"""
Plot analytes as a function of time.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
samples: optional, array_like or str
The sample(s) to plot. Defaults to all samples.
ranges : bool
Whether or not to show the signal/backgroudn regions
identified by 'autorange'.
focus : str
The focus 'stage' of the analysis to plot. Can be
'rawdata', 'despiked':, 'signal', 'background',
'bkgsub', 'ratios' or 'calibrated'.
outdir : str
Path to a directory where you'd like the plots to be
saved. Defaults to 'reports/[focus]' in your data directory.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
scale : str
If 'log', plots the data on a log scale.
figsize : array_like
Array of length 2 specifying figure [width, height] in
inches.
stats : bool
Whether or not to overlay the mean and standard deviations
for each trace.
stat, err: str
The names of the statistic and error components to plot.
Deafaults to 'nanmean' and 'nanstd'.
Returns
-------
None
"""
if focus is None:
focus = self.focus_stage
if outdir is None:
outdir = self.report_dir + '/' + focus
if not os.path.isdir(outdir):
os.mkdir(outdir)
# if samples is not None:
# subset = self.make_subset(samples)
if subset is not None:
samples = self._get_samples(subset)
elif samples is None:
samples = self.subsets['All_Analyses']
elif isinstance(samples, str):
samples = [samples]
with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog:
for s in samples:
f, a = self.data[s].tplot(analytes=analytes, figsize=figsize,
scale=scale, filt=filt,
ranges=ranges, stats=stats,
stat=stat, err=err, focus_stage=focus)
# ax = fig.axes[0]
# for l, u in s.sigrng:
# ax.axvspan(l, u, color='r', alpha=0.1)
# for l, u in s.bkgrng:
# ax.axvspan(l, u, color='k', alpha=0.1)
f.savefig(outdir + '/' + s + '_traces.pdf')
# TODO: on older(?) computers raises
# 'OSError: [Errno 24] Too many open files'
plt.close(f)
prog.update()
return
|
[
"Plot",
"analytes",
"as",
"a",
"function",
"of",
"time",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L3290-L3367
|
[
"def",
"trace_plots",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"samples",
"=",
"None",
",",
"ranges",
"=",
"False",
",",
"focus",
"=",
"None",
",",
"outdir",
"=",
"None",
",",
"filt",
"=",
"None",
",",
"scale",
"=",
"'log'",
",",
"figsize",
"=",
"[",
"10",
",",
"4",
"]",
",",
"stats",
"=",
"False",
",",
"stat",
"=",
"'nanmean'",
",",
"err",
"=",
"'nanstd'",
",",
"subset",
"=",
"'All_Analyses'",
")",
":",
"if",
"focus",
"is",
"None",
":",
"focus",
"=",
"self",
".",
"focus_stage",
"if",
"outdir",
"is",
"None",
":",
"outdir",
"=",
"self",
".",
"report_dir",
"+",
"'/'",
"+",
"focus",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"outdir",
")",
":",
"os",
".",
"mkdir",
"(",
"outdir",
")",
"# if samples is not None:",
"# subset = self.make_subset(samples)",
"if",
"subset",
"is",
"not",
"None",
":",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"elif",
"samples",
"is",
"None",
":",
"samples",
"=",
"self",
".",
"subsets",
"[",
"'All_Analyses'",
"]",
"elif",
"isinstance",
"(",
"samples",
",",
"str",
")",
":",
"samples",
"=",
"[",
"samples",
"]",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"samples",
")",
",",
"desc",
"=",
"'Drawing Plots'",
")",
"as",
"prog",
":",
"for",
"s",
"in",
"samples",
":",
"f",
",",
"a",
"=",
"self",
".",
"data",
"[",
"s",
"]",
".",
"tplot",
"(",
"analytes",
"=",
"analytes",
",",
"figsize",
"=",
"figsize",
",",
"scale",
"=",
"scale",
",",
"filt",
"=",
"filt",
",",
"ranges",
"=",
"ranges",
",",
"stats",
"=",
"stats",
",",
"stat",
"=",
"stat",
",",
"err",
"=",
"err",
",",
"focus_stage",
"=",
"focus",
")",
"# ax = fig.axes[0]",
"# for l, u in s.sigrng:",
"# ax.axvspan(l, u, color='r', alpha=0.1)",
"# for l, u in s.bkgrng:",
"# ax.axvspan(l, u, color='k', alpha=0.1)",
"f",
".",
"savefig",
"(",
"outdir",
"+",
"'/'",
"+",
"s",
"+",
"'_traces.pdf'",
")",
"# TODO: on older(?) computers raises",
"# 'OSError: [Errno 24] Too many open files'",
"plt",
".",
"close",
"(",
"f",
")",
"prog",
".",
"update",
"(",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.gradient_plots
|
Plot analyte gradients as a function of time.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
samples: optional, array_like or str
The sample(s) to plot. Defaults to all samples.
ranges : bool
Whether or not to show the signal/backgroudn regions
identified by 'autorange'.
focus : str
The focus 'stage' of the analysis to plot. Can be
'rawdata', 'despiked':, 'signal', 'background',
'bkgsub', 'ratios' or 'calibrated'.
outdir : str
Path to a directory where you'd like the plots to be
saved. Defaults to 'reports/[focus]' in your data directory.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
scale : str
If 'log', plots the data on a log scale.
figsize : array_like
Array of length 2 specifying figure [width, height] in
inches.
stats : bool
Whether or not to overlay the mean and standard deviations
for each trace.
stat, err: str
The names of the statistic and error components to plot.
Deafaults to 'nanmean' and 'nanstd'.
Returns
-------
None
|
latools/latools.py
|
def gradient_plots(self, analytes=None, win=15, samples=None, ranges=False,
focus=None, outdir=None,
figsize=[10, 4], subset='All_Analyses'):
"""
Plot analyte gradients as a function of time.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
samples: optional, array_like or str
The sample(s) to plot. Defaults to all samples.
ranges : bool
Whether or not to show the signal/backgroudn regions
identified by 'autorange'.
focus : str
The focus 'stage' of the analysis to plot. Can be
'rawdata', 'despiked':, 'signal', 'background',
'bkgsub', 'ratios' or 'calibrated'.
outdir : str
Path to a directory where you'd like the plots to be
saved. Defaults to 'reports/[focus]' in your data directory.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
scale : str
If 'log', plots the data on a log scale.
figsize : array_like
Array of length 2 specifying figure [width, height] in
inches.
stats : bool
Whether or not to overlay the mean and standard deviations
for each trace.
stat, err: str
The names of the statistic and error components to plot.
Deafaults to 'nanmean' and 'nanstd'.
Returns
-------
None
"""
if focus is None:
focus = self.focus_stage
if outdir is None:
outdir = self.report_dir + '/' + focus + '_gradient'
if not os.path.isdir(outdir):
os.mkdir(outdir)
# if samples is not None:
# subset = self.make_subset(samples)
if subset is not None:
samples = self._get_samples(subset)
elif samples is None:
samples = self.subsets['All_Analyses']
elif isinstance(samples, str):
samples = [samples]
with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog:
for s in samples:
f, a = self.data[s].gplot(analytes=analytes, win=win, figsize=figsize,
ranges=ranges, focus_stage=focus)
# ax = fig.axes[0]
# for l, u in s.sigrng:
# ax.axvspan(l, u, color='r', alpha=0.1)
# for l, u in s.bkgrng:
# ax.axvspan(l, u, color='k', alpha=0.1)
f.savefig(outdir + '/' + s + '_gradients.pdf')
# TODO: on older(?) computers raises
# 'OSError: [Errno 24] Too many open files'
plt.close(f)
prog.update()
return
|
def gradient_plots(self, analytes=None, win=15, samples=None, ranges=False,
focus=None, outdir=None,
figsize=[10, 4], subset='All_Analyses'):
"""
Plot analyte gradients as a function of time.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
samples: optional, array_like or str
The sample(s) to plot. Defaults to all samples.
ranges : bool
Whether or not to show the signal/backgroudn regions
identified by 'autorange'.
focus : str
The focus 'stage' of the analysis to plot. Can be
'rawdata', 'despiked':, 'signal', 'background',
'bkgsub', 'ratios' or 'calibrated'.
outdir : str
Path to a directory where you'd like the plots to be
saved. Defaults to 'reports/[focus]' in your data directory.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
scale : str
If 'log', plots the data on a log scale.
figsize : array_like
Array of length 2 specifying figure [width, height] in
inches.
stats : bool
Whether or not to overlay the mean and standard deviations
for each trace.
stat, err: str
The names of the statistic and error components to plot.
Deafaults to 'nanmean' and 'nanstd'.
Returns
-------
None
"""
if focus is None:
focus = self.focus_stage
if outdir is None:
outdir = self.report_dir + '/' + focus + '_gradient'
if not os.path.isdir(outdir):
os.mkdir(outdir)
# if samples is not None:
# subset = self.make_subset(samples)
if subset is not None:
samples = self._get_samples(subset)
elif samples is None:
samples = self.subsets['All_Analyses']
elif isinstance(samples, str):
samples = [samples]
with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog:
for s in samples:
f, a = self.data[s].gplot(analytes=analytes, win=win, figsize=figsize,
ranges=ranges, focus_stage=focus)
# ax = fig.axes[0]
# for l, u in s.sigrng:
# ax.axvspan(l, u, color='r', alpha=0.1)
# for l, u in s.bkgrng:
# ax.axvspan(l, u, color='k', alpha=0.1)
f.savefig(outdir + '/' + s + '_gradients.pdf')
# TODO: on older(?) computers raises
# 'OSError: [Errno 24] Too many open files'
plt.close(f)
prog.update()
return
|
[
"Plot",
"analyte",
"gradients",
"as",
"a",
"function",
"of",
"time",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L3371-L3445
|
[
"def",
"gradient_plots",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"win",
"=",
"15",
",",
"samples",
"=",
"None",
",",
"ranges",
"=",
"False",
",",
"focus",
"=",
"None",
",",
"outdir",
"=",
"None",
",",
"figsize",
"=",
"[",
"10",
",",
"4",
"]",
",",
"subset",
"=",
"'All_Analyses'",
")",
":",
"if",
"focus",
"is",
"None",
":",
"focus",
"=",
"self",
".",
"focus_stage",
"if",
"outdir",
"is",
"None",
":",
"outdir",
"=",
"self",
".",
"report_dir",
"+",
"'/'",
"+",
"focus",
"+",
"'_gradient'",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"outdir",
")",
":",
"os",
".",
"mkdir",
"(",
"outdir",
")",
"# if samples is not None:",
"# subset = self.make_subset(samples)",
"if",
"subset",
"is",
"not",
"None",
":",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"elif",
"samples",
"is",
"None",
":",
"samples",
"=",
"self",
".",
"subsets",
"[",
"'All_Analyses'",
"]",
"elif",
"isinstance",
"(",
"samples",
",",
"str",
")",
":",
"samples",
"=",
"[",
"samples",
"]",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"samples",
")",
",",
"desc",
"=",
"'Drawing Plots'",
")",
"as",
"prog",
":",
"for",
"s",
"in",
"samples",
":",
"f",
",",
"a",
"=",
"self",
".",
"data",
"[",
"s",
"]",
".",
"gplot",
"(",
"analytes",
"=",
"analytes",
",",
"win",
"=",
"win",
",",
"figsize",
"=",
"figsize",
",",
"ranges",
"=",
"ranges",
",",
"focus_stage",
"=",
"focus",
")",
"# ax = fig.axes[0]",
"# for l, u in s.sigrng:",
"# ax.axvspan(l, u, color='r', alpha=0.1)",
"# for l, u in s.bkgrng:",
"# ax.axvspan(l, u, color='k', alpha=0.1)",
"f",
".",
"savefig",
"(",
"outdir",
"+",
"'/'",
"+",
"s",
"+",
"'_gradients.pdf'",
")",
"# TODO: on older(?) computers raises",
"# 'OSError: [Errno 24] Too many open files'",
"plt",
".",
"close",
"(",
"f",
")",
"prog",
".",
"update",
"(",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.filter_reports
|
Plot filter reports for all filters that contain ``filt_str``
in the name.
|
latools/latools.py
|
def filter_reports(self, analytes, filt_str='all', nbin=5, samples=None,
outdir=None, subset='All_Samples'):
"""
Plot filter reports for all filters that contain ``filt_str``
in the name.
"""
if outdir is None:
outdir = self.report_dir + '/filters/' + filt_str
if not os.path.isdir(self.report_dir + '/filters'):
os.mkdir(self.report_dir + '/filters')
if not os.path.isdir(outdir):
os.mkdir(outdir)
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog:
for s in samples:
_ = self.data[s].filter_report(filt=filt_str,
analytes=analytes,
savedir=outdir,
nbin=nbin)
prog.update()
# plt.close(fig)
return
|
def filter_reports(self, analytes, filt_str='all', nbin=5, samples=None,
outdir=None, subset='All_Samples'):
"""
Plot filter reports for all filters that contain ``filt_str``
in the name.
"""
if outdir is None:
outdir = self.report_dir + '/filters/' + filt_str
if not os.path.isdir(self.report_dir + '/filters'):
os.mkdir(self.report_dir + '/filters')
if not os.path.isdir(outdir):
os.mkdir(outdir)
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog:
for s in samples:
_ = self.data[s].filter_report(filt=filt_str,
analytes=analytes,
savedir=outdir,
nbin=nbin)
prog.update()
# plt.close(fig)
return
|
[
"Plot",
"filter",
"reports",
"for",
"all",
"filters",
"that",
"contain",
"filt_str",
"in",
"the",
"name",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L3449-L3475
|
[
"def",
"filter_reports",
"(",
"self",
",",
"analytes",
",",
"filt_str",
"=",
"'all'",
",",
"nbin",
"=",
"5",
",",
"samples",
"=",
"None",
",",
"outdir",
"=",
"None",
",",
"subset",
"=",
"'All_Samples'",
")",
":",
"if",
"outdir",
"is",
"None",
":",
"outdir",
"=",
"self",
".",
"report_dir",
"+",
"'/filters/'",
"+",
"filt_str",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"report_dir",
"+",
"'/filters'",
")",
":",
"os",
".",
"mkdir",
"(",
"self",
".",
"report_dir",
"+",
"'/filters'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"outdir",
")",
":",
"os",
".",
"mkdir",
"(",
"outdir",
")",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"samples",
")",
",",
"desc",
"=",
"'Drawing Plots'",
")",
"as",
"prog",
":",
"for",
"s",
"in",
"samples",
":",
"_",
"=",
"self",
".",
"data",
"[",
"s",
"]",
".",
"filter_report",
"(",
"filt",
"=",
"filt_str",
",",
"analytes",
"=",
"analytes",
",",
"savedir",
"=",
"outdir",
",",
"nbin",
"=",
"nbin",
")",
"prog",
".",
"update",
"(",
")",
"# plt.close(fig)",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.sample_stats
|
Calculate sample statistics.
Returns samples, analytes, and arrays of statistics
of shape (samples, analytes). Statistics are calculated
from the 'focus' data variable, so output depends on how
the data have been processed.
Included stat functions:
* :func:`~latools.stat_fns.mean`: arithmetic mean
* :func:`~latools.stat_fns.std`: arithmetic standard deviation
* :func:`~latools.stat_fns.se`: arithmetic standard error
* :func:`~latools.stat_fns.H15_mean`: Huber mean (outlier removal)
* :func:`~latools.stat_fns.H15_std`: Huber standard deviation (outlier removal)
* :func:`~latools.stat_fns.H15_se`: Huber standard error (outlier removal)
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to calculate statistics for. Defaults to
all analytes.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
stats : array_like
take a single array_like input, and return a single statistic.
list of functions or names (see above) or functions that
Function should be able to cope with NaN values.
eachtrace : bool
Whether to calculate the statistics for each analysis
spot individually, or to produce per - sample means.
Default is True.
Returns
-------
None
Adds dict to analyse object containing samples, analytes and
functions and data.
|
latools/latools.py
|
def sample_stats(self, analytes=None, filt=True,
stats=['mean', 'std'],
eachtrace=True, csf_dict={}):
"""
Calculate sample statistics.
Returns samples, analytes, and arrays of statistics
of shape (samples, analytes). Statistics are calculated
from the 'focus' data variable, so output depends on how
the data have been processed.
Included stat functions:
* :func:`~latools.stat_fns.mean`: arithmetic mean
* :func:`~latools.stat_fns.std`: arithmetic standard deviation
* :func:`~latools.stat_fns.se`: arithmetic standard error
* :func:`~latools.stat_fns.H15_mean`: Huber mean (outlier removal)
* :func:`~latools.stat_fns.H15_std`: Huber standard deviation (outlier removal)
* :func:`~latools.stat_fns.H15_se`: Huber standard error (outlier removal)
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to calculate statistics for. Defaults to
all analytes.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
stats : array_like
take a single array_like input, and return a single statistic.
list of functions or names (see above) or functions that
Function should be able to cope with NaN values.
eachtrace : bool
Whether to calculate the statistics for each analysis
spot individually, or to produce per - sample means.
Default is True.
Returns
-------
None
Adds dict to analyse object containing samples, analytes and
functions and data.
"""
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
self.stats = Bunch()
self.stats_calced = []
stat_fns = Bunch()
stat_dict = {'mean': np.nanmean,
'std': np.nanstd,
'nanmean': np.nanmean,
'nanstd': np.nanstd,
'se': stderr,
'H15_mean': H15_mean,
'H15_std': H15_std,
'H15_se': H15_se}
for s in stats:
if isinstance(s, str):
if s in stat_dict.keys():
self.stats_calced.append(s)
stat_fns[s] = stat_dict[s]
if s in csf_dict.keys():
self.stats_calced.append(s)
exec(csf_dict[s])
stat_fns[s] = eval(s)
elif callable(s):
self.stats_calced.append(s.__name__)
stat_fns[s.__name__] = s
if not hasattr(self, 'custom_stat_functions'):
self.custom_stat_functions = ''
self.custom_stat_functions += inspect.getsource(s) + '\n\n\n\n'
# calculate stats for each sample
with self.pbar.set(total=len(self.samples), desc='Calculating Stats') as prog:
for s in self.samples:
if self.srm_identifier not in s:
self.data[s].sample_stats(analytes, filt=filt,
stat_fns=stat_fns,
eachtrace=eachtrace)
self.stats[s] = self.data[s].stats
prog.update()
|
def sample_stats(self, analytes=None, filt=True,
stats=['mean', 'std'],
eachtrace=True, csf_dict={}):
"""
Calculate sample statistics.
Returns samples, analytes, and arrays of statistics
of shape (samples, analytes). Statistics are calculated
from the 'focus' data variable, so output depends on how
the data have been processed.
Included stat functions:
* :func:`~latools.stat_fns.mean`: arithmetic mean
* :func:`~latools.stat_fns.std`: arithmetic standard deviation
* :func:`~latools.stat_fns.se`: arithmetic standard error
* :func:`~latools.stat_fns.H15_mean`: Huber mean (outlier removal)
* :func:`~latools.stat_fns.H15_std`: Huber standard deviation (outlier removal)
* :func:`~latools.stat_fns.H15_se`: Huber standard error (outlier removal)
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to calculate statistics for. Defaults to
all analytes.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
stats : array_like
take a single array_like input, and return a single statistic.
list of functions or names (see above) or functions that
Function should be able to cope with NaN values.
eachtrace : bool
Whether to calculate the statistics for each analysis
spot individually, or to produce per - sample means.
Default is True.
Returns
-------
None
Adds dict to analyse object containing samples, analytes and
functions and data.
"""
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
self.stats = Bunch()
self.stats_calced = []
stat_fns = Bunch()
stat_dict = {'mean': np.nanmean,
'std': np.nanstd,
'nanmean': np.nanmean,
'nanstd': np.nanstd,
'se': stderr,
'H15_mean': H15_mean,
'H15_std': H15_std,
'H15_se': H15_se}
for s in stats:
if isinstance(s, str):
if s in stat_dict.keys():
self.stats_calced.append(s)
stat_fns[s] = stat_dict[s]
if s in csf_dict.keys():
self.stats_calced.append(s)
exec(csf_dict[s])
stat_fns[s] = eval(s)
elif callable(s):
self.stats_calced.append(s.__name__)
stat_fns[s.__name__] = s
if not hasattr(self, 'custom_stat_functions'):
self.custom_stat_functions = ''
self.custom_stat_functions += inspect.getsource(s) + '\n\n\n\n'
# calculate stats for each sample
with self.pbar.set(total=len(self.samples), desc='Calculating Stats') as prog:
for s in self.samples:
if self.srm_identifier not in s:
self.data[s].sample_stats(analytes, filt=filt,
stat_fns=stat_fns,
eachtrace=eachtrace)
self.stats[s] = self.data[s].stats
prog.update()
|
[
"Calculate",
"sample",
"statistics",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L3506-L3594
|
[
"def",
"sample_stats",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"filt",
"=",
"True",
",",
"stats",
"=",
"[",
"'mean'",
",",
"'std'",
"]",
",",
"eachtrace",
"=",
"True",
",",
"csf_dict",
"=",
"{",
"}",
")",
":",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"elif",
"isinstance",
"(",
"analytes",
",",
"str",
")",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"self",
".",
"stats",
"=",
"Bunch",
"(",
")",
"self",
".",
"stats_calced",
"=",
"[",
"]",
"stat_fns",
"=",
"Bunch",
"(",
")",
"stat_dict",
"=",
"{",
"'mean'",
":",
"np",
".",
"nanmean",
",",
"'std'",
":",
"np",
".",
"nanstd",
",",
"'nanmean'",
":",
"np",
".",
"nanmean",
",",
"'nanstd'",
":",
"np",
".",
"nanstd",
",",
"'se'",
":",
"stderr",
",",
"'H15_mean'",
":",
"H15_mean",
",",
"'H15_std'",
":",
"H15_std",
",",
"'H15_se'",
":",
"H15_se",
"}",
"for",
"s",
"in",
"stats",
":",
"if",
"isinstance",
"(",
"s",
",",
"str",
")",
":",
"if",
"s",
"in",
"stat_dict",
".",
"keys",
"(",
")",
":",
"self",
".",
"stats_calced",
".",
"append",
"(",
"s",
")",
"stat_fns",
"[",
"s",
"]",
"=",
"stat_dict",
"[",
"s",
"]",
"if",
"s",
"in",
"csf_dict",
".",
"keys",
"(",
")",
":",
"self",
".",
"stats_calced",
".",
"append",
"(",
"s",
")",
"exec",
"(",
"csf_dict",
"[",
"s",
"]",
")",
"stat_fns",
"[",
"s",
"]",
"=",
"eval",
"(",
"s",
")",
"elif",
"callable",
"(",
"s",
")",
":",
"self",
".",
"stats_calced",
".",
"append",
"(",
"s",
".",
"__name__",
")",
"stat_fns",
"[",
"s",
".",
"__name__",
"]",
"=",
"s",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'custom_stat_functions'",
")",
":",
"self",
".",
"custom_stat_functions",
"=",
"''",
"self",
".",
"custom_stat_functions",
"+=",
"inspect",
".",
"getsource",
"(",
"s",
")",
"+",
"'\\n\\n\\n\\n'",
"# calculate stats for each sample",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"self",
".",
"samples",
")",
",",
"desc",
"=",
"'Calculating Stats'",
")",
"as",
"prog",
":",
"for",
"s",
"in",
"self",
".",
"samples",
":",
"if",
"self",
".",
"srm_identifier",
"not",
"in",
"s",
":",
"self",
".",
"data",
"[",
"s",
"]",
".",
"sample_stats",
"(",
"analytes",
",",
"filt",
"=",
"filt",
",",
"stat_fns",
"=",
"stat_fns",
",",
"eachtrace",
"=",
"eachtrace",
")",
"self",
".",
"stats",
"[",
"s",
"]",
"=",
"self",
".",
"data",
"[",
"s",
"]",
".",
"stats",
"prog",
".",
"update",
"(",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.statplot
|
Function for visualising per-ablation and per-sample means.
Parameters
----------
analytes : str or iterable
Which analyte(s) to plot
samples : str or iterable
Which sample(s) to plot
figsize : tuple
Figure (width, height) in inches
stat : str
Which statistic to plot. Must match
the name of the functions used in
'sample_stats'.
err : str
Which uncertainty to plot.
subset : str
Which subset of samples to plot.
|
latools/latools.py
|
def statplot(self, analytes=None, samples=None, figsize=None,
stat='mean', err='std', subset=None):
"""
Function for visualising per-ablation and per-sample means.
Parameters
----------
analytes : str or iterable
Which analyte(s) to plot
samples : str or iterable
Which sample(s) to plot
figsize : tuple
Figure (width, height) in inches
stat : str
Which statistic to plot. Must match
the name of the functions used in
'sample_stats'.
err : str
Which uncertainty to plot.
subset : str
Which subset of samples to plot.
"""
if not hasattr(self, 'stats'):
self.sample_stats()
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
analytes = [a for a in analytes if a !=
self.internal_standard]
if figsize is None:
figsize = (1.5 * len(self.stats), 3 * len(analytes))
fig, axs = plt.subplots(len(analytes), 1, figsize=figsize)
for ax, an in zip(axs, analytes):
i = 0
stab = self.getstats()
m, u = unitpicker(np.percentile(stab.loc[:, an].dropna(), 25), 0.1,
focus_stage='calibrated',
denominator=self.internal_standard)
for s in samples:
if self.srm_identifier not in s:
d = self.stats[s]
if d[stat].ndim == 2:
n = d[stat].shape[-1]
x = np.linspace(i - .1 * n / 2, i + .1 * n / 2, n)
else:
x = [i]
a_ind = d['analytes'] == an
# plot individual ablations with error bars
ax.errorbar(x, d[stat][a_ind][0] * m,
yerr=d[err][a_ind][0] * m,
marker='o', color=self.cmaps[an],
lw=0, elinewidth=1)
ax.set_ylabel('%s / %s (%s )' % (pretty_element(an),
pretty_element(self.internal_standard),
u))
# plot whole - sample mean
if len(x) > 1:
# mean calculation with error propagation?
# umean = un.uarray(d[stat][a_ind][0] * m, d[err][a_ind][0] * m).mean()
# std = un.std_devs(umean)
# mean = un.nominal_values(umean)
mean = np.nanmean(d[stat][a_ind][0] * m)
std = np.nanstd(d[stat][a_ind][0] * m)
ax.plot(x, [mean] * len(x), c=self.cmaps[an], lw=2)
ax.fill_between(x, [mean + std] * len(x),
[mean - std] * len(x),
lw=0, alpha=0.2, color=self.cmaps[an])
# highlight each sample
if i % 2 == 1:
ax.axvspan(i - .5, i + .5, color=(0, 0, 0, 0.05), lw=0)
i += 1
ax.set_xticks(np.arange(0, len(self.stats)))
ax.set_xlim(-0.5, len(self.stats) - .5)
ax.set_xticklabels(samples)
return fig, ax
|
def statplot(self, analytes=None, samples=None, figsize=None,
stat='mean', err='std', subset=None):
"""
Function for visualising per-ablation and per-sample means.
Parameters
----------
analytes : str or iterable
Which analyte(s) to plot
samples : str or iterable
Which sample(s) to plot
figsize : tuple
Figure (width, height) in inches
stat : str
Which statistic to plot. Must match
the name of the functions used in
'sample_stats'.
err : str
Which uncertainty to plot.
subset : str
Which subset of samples to plot.
"""
if not hasattr(self, 'stats'):
self.sample_stats()
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
analytes = [a for a in analytes if a !=
self.internal_standard]
if figsize is None:
figsize = (1.5 * len(self.stats), 3 * len(analytes))
fig, axs = plt.subplots(len(analytes), 1, figsize=figsize)
for ax, an in zip(axs, analytes):
i = 0
stab = self.getstats()
m, u = unitpicker(np.percentile(stab.loc[:, an].dropna(), 25), 0.1,
focus_stage='calibrated',
denominator=self.internal_standard)
for s in samples:
if self.srm_identifier not in s:
d = self.stats[s]
if d[stat].ndim == 2:
n = d[stat].shape[-1]
x = np.linspace(i - .1 * n / 2, i + .1 * n / 2, n)
else:
x = [i]
a_ind = d['analytes'] == an
# plot individual ablations with error bars
ax.errorbar(x, d[stat][a_ind][0] * m,
yerr=d[err][a_ind][0] * m,
marker='o', color=self.cmaps[an],
lw=0, elinewidth=1)
ax.set_ylabel('%s / %s (%s )' % (pretty_element(an),
pretty_element(self.internal_standard),
u))
# plot whole - sample mean
if len(x) > 1:
# mean calculation with error propagation?
# umean = un.uarray(d[stat][a_ind][0] * m, d[err][a_ind][0] * m).mean()
# std = un.std_devs(umean)
# mean = un.nominal_values(umean)
mean = np.nanmean(d[stat][a_ind][0] * m)
std = np.nanstd(d[stat][a_ind][0] * m)
ax.plot(x, [mean] * len(x), c=self.cmaps[an], lw=2)
ax.fill_between(x, [mean + std] * len(x),
[mean - std] * len(x),
lw=0, alpha=0.2, color=self.cmaps[an])
# highlight each sample
if i % 2 == 1:
ax.axvspan(i - .5, i + .5, color=(0, 0, 0, 0.05), lw=0)
i += 1
ax.set_xticks(np.arange(0, len(self.stats)))
ax.set_xlim(-0.5, len(self.stats) - .5)
ax.set_xticklabels(samples)
return fig, ax
|
[
"Function",
"for",
"visualising",
"per",
"-",
"ablation",
"and",
"per",
"-",
"sample",
"means",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L3621-L3714
|
[
"def",
"statplot",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"samples",
"=",
"None",
",",
"figsize",
"=",
"None",
",",
"stat",
"=",
"'mean'",
",",
"err",
"=",
"'std'",
",",
"subset",
"=",
"None",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'stats'",
")",
":",
"self",
".",
"sample_stats",
"(",
")",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"elif",
"isinstance",
"(",
"analytes",
",",
"str",
")",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"analytes",
"=",
"[",
"a",
"for",
"a",
"in",
"analytes",
"if",
"a",
"!=",
"self",
".",
"internal_standard",
"]",
"if",
"figsize",
"is",
"None",
":",
"figsize",
"=",
"(",
"1.5",
"*",
"len",
"(",
"self",
".",
"stats",
")",
",",
"3",
"*",
"len",
"(",
"analytes",
")",
")",
"fig",
",",
"axs",
"=",
"plt",
".",
"subplots",
"(",
"len",
"(",
"analytes",
")",
",",
"1",
",",
"figsize",
"=",
"figsize",
")",
"for",
"ax",
",",
"an",
"in",
"zip",
"(",
"axs",
",",
"analytes",
")",
":",
"i",
"=",
"0",
"stab",
"=",
"self",
".",
"getstats",
"(",
")",
"m",
",",
"u",
"=",
"unitpicker",
"(",
"np",
".",
"percentile",
"(",
"stab",
".",
"loc",
"[",
":",
",",
"an",
"]",
".",
"dropna",
"(",
")",
",",
"25",
")",
",",
"0.1",
",",
"focus_stage",
"=",
"'calibrated'",
",",
"denominator",
"=",
"self",
".",
"internal_standard",
")",
"for",
"s",
"in",
"samples",
":",
"if",
"self",
".",
"srm_identifier",
"not",
"in",
"s",
":",
"d",
"=",
"self",
".",
"stats",
"[",
"s",
"]",
"if",
"d",
"[",
"stat",
"]",
".",
"ndim",
"==",
"2",
":",
"n",
"=",
"d",
"[",
"stat",
"]",
".",
"shape",
"[",
"-",
"1",
"]",
"x",
"=",
"np",
".",
"linspace",
"(",
"i",
"-",
".1",
"*",
"n",
"/",
"2",
",",
"i",
"+",
".1",
"*",
"n",
"/",
"2",
",",
"n",
")",
"else",
":",
"x",
"=",
"[",
"i",
"]",
"a_ind",
"=",
"d",
"[",
"'analytes'",
"]",
"==",
"an",
"# plot individual ablations with error bars",
"ax",
".",
"errorbar",
"(",
"x",
",",
"d",
"[",
"stat",
"]",
"[",
"a_ind",
"]",
"[",
"0",
"]",
"*",
"m",
",",
"yerr",
"=",
"d",
"[",
"err",
"]",
"[",
"a_ind",
"]",
"[",
"0",
"]",
"*",
"m",
",",
"marker",
"=",
"'o'",
",",
"color",
"=",
"self",
".",
"cmaps",
"[",
"an",
"]",
",",
"lw",
"=",
"0",
",",
"elinewidth",
"=",
"1",
")",
"ax",
".",
"set_ylabel",
"(",
"'%s / %s (%s )'",
"%",
"(",
"pretty_element",
"(",
"an",
")",
",",
"pretty_element",
"(",
"self",
".",
"internal_standard",
")",
",",
"u",
")",
")",
"# plot whole - sample mean",
"if",
"len",
"(",
"x",
")",
">",
"1",
":",
"# mean calculation with error propagation?",
"# umean = un.uarray(d[stat][a_ind][0] * m, d[err][a_ind][0] * m).mean()",
"# std = un.std_devs(umean)",
"# mean = un.nominal_values(umean)",
"mean",
"=",
"np",
".",
"nanmean",
"(",
"d",
"[",
"stat",
"]",
"[",
"a_ind",
"]",
"[",
"0",
"]",
"*",
"m",
")",
"std",
"=",
"np",
".",
"nanstd",
"(",
"d",
"[",
"stat",
"]",
"[",
"a_ind",
"]",
"[",
"0",
"]",
"*",
"m",
")",
"ax",
".",
"plot",
"(",
"x",
",",
"[",
"mean",
"]",
"*",
"len",
"(",
"x",
")",
",",
"c",
"=",
"self",
".",
"cmaps",
"[",
"an",
"]",
",",
"lw",
"=",
"2",
")",
"ax",
".",
"fill_between",
"(",
"x",
",",
"[",
"mean",
"+",
"std",
"]",
"*",
"len",
"(",
"x",
")",
",",
"[",
"mean",
"-",
"std",
"]",
"*",
"len",
"(",
"x",
")",
",",
"lw",
"=",
"0",
",",
"alpha",
"=",
"0.2",
",",
"color",
"=",
"self",
".",
"cmaps",
"[",
"an",
"]",
")",
"# highlight each sample",
"if",
"i",
"%",
"2",
"==",
"1",
":",
"ax",
".",
"axvspan",
"(",
"i",
"-",
".5",
",",
"i",
"+",
".5",
",",
"color",
"=",
"(",
"0",
",",
"0",
",",
"0",
",",
"0.05",
")",
",",
"lw",
"=",
"0",
")",
"i",
"+=",
"1",
"ax",
".",
"set_xticks",
"(",
"np",
".",
"arange",
"(",
"0",
",",
"len",
"(",
"self",
".",
"stats",
")",
")",
")",
"ax",
".",
"set_xlim",
"(",
"-",
"0.5",
",",
"len",
"(",
"self",
".",
"stats",
")",
"-",
".5",
")",
"ax",
".",
"set_xticklabels",
"(",
"samples",
")",
"return",
"fig",
",",
"ax"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.getstats
|
Return pandas dataframe of all sample statistics.
|
latools/latools.py
|
def getstats(self, save=True, filename=None, samples=None, subset=None, ablation_time=False):
"""
Return pandas dataframe of all sample statistics.
"""
slst = []
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
for s in self.stats_calced:
for nm in [n for n in samples if self.srm_identifier
not in n]:
if self.stats[nm][s].ndim == 2:
# make multi - index
reps = np.arange(self.stats[nm][s].shape[-1])
ss = np.array([s] * reps.size)
nms = np.array([nm] * reps.size)
# make sub - dataframe
stdf = pd.DataFrame(self.stats[nm][s].T,
columns=self.stats[nm]['analytes'],
index=[ss, nms, reps])
stdf.index.set_names(['statistic', 'sample', 'rep'],
inplace=True)
else:
stdf = pd.DataFrame(self.stats[nm][s],
index=self.stats[nm]['analytes'],
columns=[[s], [nm]]).T
stdf.index.set_names(['statistic', 'sample'],
inplace=True)
slst.append(stdf)
out = pd.concat(slst)
if ablation_time:
ats = self.ablation_times(samples=samples, subset=subset)
ats['statistic'] = 'nanmean'
ats.set_index('statistic', append=True, inplace=True)
ats = ats.reorder_levels(['statistic', 'sample', 'rep'])
out = out.join(ats)
out.drop(self.internal_standard, 1, inplace=True)
if save:
if filename is None:
filename = 'stat_export.csv'
out.to_csv(self.export_dir + '/' + filename)
self.stats_df = out
return out
|
def getstats(self, save=True, filename=None, samples=None, subset=None, ablation_time=False):
"""
Return pandas dataframe of all sample statistics.
"""
slst = []
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
for s in self.stats_calced:
for nm in [n for n in samples if self.srm_identifier
not in n]:
if self.stats[nm][s].ndim == 2:
# make multi - index
reps = np.arange(self.stats[nm][s].shape[-1])
ss = np.array([s] * reps.size)
nms = np.array([nm] * reps.size)
# make sub - dataframe
stdf = pd.DataFrame(self.stats[nm][s].T,
columns=self.stats[nm]['analytes'],
index=[ss, nms, reps])
stdf.index.set_names(['statistic', 'sample', 'rep'],
inplace=True)
else:
stdf = pd.DataFrame(self.stats[nm][s],
index=self.stats[nm]['analytes'],
columns=[[s], [nm]]).T
stdf.index.set_names(['statistic', 'sample'],
inplace=True)
slst.append(stdf)
out = pd.concat(slst)
if ablation_time:
ats = self.ablation_times(samples=samples, subset=subset)
ats['statistic'] = 'nanmean'
ats.set_index('statistic', append=True, inplace=True)
ats = ats.reorder_levels(['statistic', 'sample', 'rep'])
out = out.join(ats)
out.drop(self.internal_standard, 1, inplace=True)
if save:
if filename is None:
filename = 'stat_export.csv'
out.to_csv(self.export_dir + '/' + filename)
self.stats_df = out
return out
|
[
"Return",
"pandas",
"dataframe",
"of",
"all",
"sample",
"statistics",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L3717-L3769
|
[
"def",
"getstats",
"(",
"self",
",",
"save",
"=",
"True",
",",
"filename",
"=",
"None",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
",",
"ablation_time",
"=",
"False",
")",
":",
"slst",
"=",
"[",
"]",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"for",
"s",
"in",
"self",
".",
"stats_calced",
":",
"for",
"nm",
"in",
"[",
"n",
"for",
"n",
"in",
"samples",
"if",
"self",
".",
"srm_identifier",
"not",
"in",
"n",
"]",
":",
"if",
"self",
".",
"stats",
"[",
"nm",
"]",
"[",
"s",
"]",
".",
"ndim",
"==",
"2",
":",
"# make multi - index",
"reps",
"=",
"np",
".",
"arange",
"(",
"self",
".",
"stats",
"[",
"nm",
"]",
"[",
"s",
"]",
".",
"shape",
"[",
"-",
"1",
"]",
")",
"ss",
"=",
"np",
".",
"array",
"(",
"[",
"s",
"]",
"*",
"reps",
".",
"size",
")",
"nms",
"=",
"np",
".",
"array",
"(",
"[",
"nm",
"]",
"*",
"reps",
".",
"size",
")",
"# make sub - dataframe",
"stdf",
"=",
"pd",
".",
"DataFrame",
"(",
"self",
".",
"stats",
"[",
"nm",
"]",
"[",
"s",
"]",
".",
"T",
",",
"columns",
"=",
"self",
".",
"stats",
"[",
"nm",
"]",
"[",
"'analytes'",
"]",
",",
"index",
"=",
"[",
"ss",
",",
"nms",
",",
"reps",
"]",
")",
"stdf",
".",
"index",
".",
"set_names",
"(",
"[",
"'statistic'",
",",
"'sample'",
",",
"'rep'",
"]",
",",
"inplace",
"=",
"True",
")",
"else",
":",
"stdf",
"=",
"pd",
".",
"DataFrame",
"(",
"self",
".",
"stats",
"[",
"nm",
"]",
"[",
"s",
"]",
",",
"index",
"=",
"self",
".",
"stats",
"[",
"nm",
"]",
"[",
"'analytes'",
"]",
",",
"columns",
"=",
"[",
"[",
"s",
"]",
",",
"[",
"nm",
"]",
"]",
")",
".",
"T",
"stdf",
".",
"index",
".",
"set_names",
"(",
"[",
"'statistic'",
",",
"'sample'",
"]",
",",
"inplace",
"=",
"True",
")",
"slst",
".",
"append",
"(",
"stdf",
")",
"out",
"=",
"pd",
".",
"concat",
"(",
"slst",
")",
"if",
"ablation_time",
":",
"ats",
"=",
"self",
".",
"ablation_times",
"(",
"samples",
"=",
"samples",
",",
"subset",
"=",
"subset",
")",
"ats",
"[",
"'statistic'",
"]",
"=",
"'nanmean'",
"ats",
".",
"set_index",
"(",
"'statistic'",
",",
"append",
"=",
"True",
",",
"inplace",
"=",
"True",
")",
"ats",
"=",
"ats",
".",
"reorder_levels",
"(",
"[",
"'statistic'",
",",
"'sample'",
",",
"'rep'",
"]",
")",
"out",
"=",
"out",
".",
"join",
"(",
"ats",
")",
"out",
".",
"drop",
"(",
"self",
".",
"internal_standard",
",",
"1",
",",
"inplace",
"=",
"True",
")",
"if",
"save",
":",
"if",
"filename",
"is",
"None",
":",
"filename",
"=",
"'stat_export.csv'",
"out",
".",
"to_csv",
"(",
"self",
".",
"export_dir",
"+",
"'/'",
"+",
"filename",
")",
"self",
".",
"stats_df",
"=",
"out",
"return",
"out"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse._minimal_export_traces
|
Used for exporting minimal dataset. DON'T USE.
|
latools/latools.py
|
def _minimal_export_traces(self, outdir=None, analytes=None,
samples=None, subset='All_Analyses'):
"""
Used for exporting minimal dataset. DON'T USE.
"""
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
focus_stage = 'rawdata'
# ud = 'counts'
if not os.path.isdir(outdir):
os.mkdir(outdir)
for s in samples:
d = self.data[s].data[focus_stage]
out = Bunch()
for a in analytes:
out[a] = d[a]
out = pd.DataFrame(out, index=self.data[s].Time)
out.index.name = 'Time'
d = dateutil.parser.parse(self.data[s].meta['date'])
header = ['# Minimal Reproduction Dataset Exported from LATOOLS on %s' %
(time.strftime('%Y:%m:%d %H:%M:%S')),
"# Analysis described in '../analysis.lalog'",
'# Run latools.reproduce to import analysis.',
'#',
'# Sample: %s' % (s),
'# Analysis Time: ' + d.strftime('%Y-%m-%d %H:%M:%S')]
header = '\n'.join(header) + '\n'
csv = out.to_csv()
with open('%s/%s.csv' % (outdir, s), 'w') as f:
f.write(header)
f.write(csv)
return
|
def _minimal_export_traces(self, outdir=None, analytes=None,
samples=None, subset='All_Analyses'):
"""
Used for exporting minimal dataset. DON'T USE.
"""
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
focus_stage = 'rawdata'
# ud = 'counts'
if not os.path.isdir(outdir):
os.mkdir(outdir)
for s in samples:
d = self.data[s].data[focus_stage]
out = Bunch()
for a in analytes:
out[a] = d[a]
out = pd.DataFrame(out, index=self.data[s].Time)
out.index.name = 'Time'
d = dateutil.parser.parse(self.data[s].meta['date'])
header = ['# Minimal Reproduction Dataset Exported from LATOOLS on %s' %
(time.strftime('%Y:%m:%d %H:%M:%S')),
"# Analysis described in '../analysis.lalog'",
'# Run latools.reproduce to import analysis.',
'#',
'# Sample: %s' % (s),
'# Analysis Time: ' + d.strftime('%Y-%m-%d %H:%M:%S')]
header = '\n'.join(header) + '\n'
csv = out.to_csv()
with open('%s/%s.csv' % (outdir, s), 'w') as f:
f.write(header)
f.write(csv)
return
|
[
"Used",
"for",
"exporting",
"minimal",
"dataset",
".",
"DON",
"T",
"USE",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L3772-L3819
|
[
"def",
"_minimal_export_traces",
"(",
"self",
",",
"outdir",
"=",
"None",
",",
"analytes",
"=",
"None",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"'All_Analyses'",
")",
":",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"elif",
"isinstance",
"(",
"analytes",
",",
"str",
")",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"focus_stage",
"=",
"'rawdata'",
"# ud = 'counts'",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"outdir",
")",
":",
"os",
".",
"mkdir",
"(",
"outdir",
")",
"for",
"s",
"in",
"samples",
":",
"d",
"=",
"self",
".",
"data",
"[",
"s",
"]",
".",
"data",
"[",
"focus_stage",
"]",
"out",
"=",
"Bunch",
"(",
")",
"for",
"a",
"in",
"analytes",
":",
"out",
"[",
"a",
"]",
"=",
"d",
"[",
"a",
"]",
"out",
"=",
"pd",
".",
"DataFrame",
"(",
"out",
",",
"index",
"=",
"self",
".",
"data",
"[",
"s",
"]",
".",
"Time",
")",
"out",
".",
"index",
".",
"name",
"=",
"'Time'",
"d",
"=",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"self",
".",
"data",
"[",
"s",
"]",
".",
"meta",
"[",
"'date'",
"]",
")",
"header",
"=",
"[",
"'# Minimal Reproduction Dataset Exported from LATOOLS on %s'",
"%",
"(",
"time",
".",
"strftime",
"(",
"'%Y:%m:%d %H:%M:%S'",
")",
")",
",",
"\"# Analysis described in '../analysis.lalog'\"",
",",
"'# Run latools.reproduce to import analysis.'",
",",
"'#'",
",",
"'# Sample: %s'",
"%",
"(",
"s",
")",
",",
"'# Analysis Time: '",
"+",
"d",
".",
"strftime",
"(",
"'%Y-%m-%d %H:%M:%S'",
")",
"]",
"header",
"=",
"'\\n'",
".",
"join",
"(",
"header",
")",
"+",
"'\\n'",
"csv",
"=",
"out",
".",
"to_csv",
"(",
")",
"with",
"open",
"(",
"'%s/%s.csv'",
"%",
"(",
"outdir",
",",
"s",
")",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"header",
")",
"f",
".",
"write",
"(",
"csv",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.export_traces
|
Function to export raw data.
Parameters
----------
outdir : str
directory to save toe traces. Defaults to 'main-dir-name_export'.
focus_stage : str
The name of the analysis stage to export.
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Defaults to the most recent stage of analysis.
analytes : str or array - like
Either a single analyte, or list of analytes to export.
Defaults to all analytes.
samples : str or array - like
Either a single sample name, or list of samples to export.
Defaults to all samples.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
|
latools/latools.py
|
def export_traces(self, outdir=None, focus_stage=None, analytes=None,
samples=None, subset='All_Analyses', filt=False, zip_archive=False):
"""
Function to export raw data.
Parameters
----------
outdir : str
directory to save toe traces. Defaults to 'main-dir-name_export'.
focus_stage : str
The name of the analysis stage to export.
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Defaults to the most recent stage of analysis.
analytes : str or array - like
Either a single analyte, or list of analytes to export.
Defaults to all analytes.
samples : str or array - like
Either a single sample name, or list of samples to export.
Defaults to all samples.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
"""
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
if focus_stage is None:
focus_stage = self.focus_stage
if focus_stage in ['ratios', 'calibrated']:
analytes = [a for a in analytes if a != self.internal_standard]
if outdir is None:
outdir = os.path.join(self.export_dir, 'trace_export')
ud = {'rawdata': 'counts',
'despiked': 'counts',
'bkgsub': 'background corrected counts',
'ratios': 'counts/count {:s}',
'calibrated': 'mol/mol {:s}'}
if focus_stage in ['ratios', 'calibrated']:
ud[focus_stage] = ud[focus_stage].format(self.internal_standard)
if not os.path.isdir(outdir):
os.mkdir(outdir)
for s in samples:
d = self.data[s].data[focus_stage]
ind = self.data[s].filt.grab_filt(filt)
out = Bunch()
for a in analytes:
out[a] = nominal_values(d[a][ind])
if focus_stage not in ['rawdata', 'despiked']:
out[a + '_std'] = std_devs(d[a][ind])
out[a + '_std'][out[a + '_std'] == 0] = np.nan
out = pd.DataFrame(out, index=self.data[s].Time[ind])
out.index.name = 'Time'
header = ['# Sample: %s' % (s),
'# Data Exported from LATOOLS on %s' %
(time.strftime('%Y:%m:%d %H:%M:%S')),
'# Processed using %s configuration' % (self.config['config']),
'# Analysis Stage: %s' % (focus_stage),
'# Unit: %s' % ud[focus_stage]]
header = '\n'.join(header) + '\n'
csv = out.to_csv()
with open('%s/%s_%s.csv' % (outdir, s, focus_stage), 'w') as f:
f.write(header)
f.write(csv)
if zip_archive:
utils.zipdir(outdir, delete=True)
return
|
def export_traces(self, outdir=None, focus_stage=None, analytes=None,
samples=None, subset='All_Analyses', filt=False, zip_archive=False):
"""
Function to export raw data.
Parameters
----------
outdir : str
directory to save toe traces. Defaults to 'main-dir-name_export'.
focus_stage : str
The name of the analysis stage to export.
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Defaults to the most recent stage of analysis.
analytes : str or array - like
Either a single analyte, or list of analytes to export.
Defaults to all analytes.
samples : str or array - like
Either a single sample name, or list of samples to export.
Defaults to all samples.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
"""
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
if focus_stage is None:
focus_stage = self.focus_stage
if focus_stage in ['ratios', 'calibrated']:
analytes = [a for a in analytes if a != self.internal_standard]
if outdir is None:
outdir = os.path.join(self.export_dir, 'trace_export')
ud = {'rawdata': 'counts',
'despiked': 'counts',
'bkgsub': 'background corrected counts',
'ratios': 'counts/count {:s}',
'calibrated': 'mol/mol {:s}'}
if focus_stage in ['ratios', 'calibrated']:
ud[focus_stage] = ud[focus_stage].format(self.internal_standard)
if not os.path.isdir(outdir):
os.mkdir(outdir)
for s in samples:
d = self.data[s].data[focus_stage]
ind = self.data[s].filt.grab_filt(filt)
out = Bunch()
for a in analytes:
out[a] = nominal_values(d[a][ind])
if focus_stage not in ['rawdata', 'despiked']:
out[a + '_std'] = std_devs(d[a][ind])
out[a + '_std'][out[a + '_std'] == 0] = np.nan
out = pd.DataFrame(out, index=self.data[s].Time[ind])
out.index.name = 'Time'
header = ['# Sample: %s' % (s),
'# Data Exported from LATOOLS on %s' %
(time.strftime('%Y:%m:%d %H:%M:%S')),
'# Processed using %s configuration' % (self.config['config']),
'# Analysis Stage: %s' % (focus_stage),
'# Unit: %s' % ud[focus_stage]]
header = '\n'.join(header) + '\n'
csv = out.to_csv()
with open('%s/%s_%s.csv' % (outdir, s, focus_stage), 'w') as f:
f.write(header)
f.write(csv)
if zip_archive:
utils.zipdir(outdir, delete=True)
return
|
[
"Function",
"to",
"export",
"raw",
"data",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L3822-L3918
|
[
"def",
"export_traces",
"(",
"self",
",",
"outdir",
"=",
"None",
",",
"focus_stage",
"=",
"None",
",",
"analytes",
"=",
"None",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"'All_Analyses'",
",",
"filt",
"=",
"False",
",",
"zip_archive",
"=",
"False",
")",
":",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"elif",
"isinstance",
"(",
"analytes",
",",
"str",
")",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"if",
"focus_stage",
"is",
"None",
":",
"focus_stage",
"=",
"self",
".",
"focus_stage",
"if",
"focus_stage",
"in",
"[",
"'ratios'",
",",
"'calibrated'",
"]",
":",
"analytes",
"=",
"[",
"a",
"for",
"a",
"in",
"analytes",
"if",
"a",
"!=",
"self",
".",
"internal_standard",
"]",
"if",
"outdir",
"is",
"None",
":",
"outdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"export_dir",
",",
"'trace_export'",
")",
"ud",
"=",
"{",
"'rawdata'",
":",
"'counts'",
",",
"'despiked'",
":",
"'counts'",
",",
"'bkgsub'",
":",
"'background corrected counts'",
",",
"'ratios'",
":",
"'counts/count {:s}'",
",",
"'calibrated'",
":",
"'mol/mol {:s}'",
"}",
"if",
"focus_stage",
"in",
"[",
"'ratios'",
",",
"'calibrated'",
"]",
":",
"ud",
"[",
"focus_stage",
"]",
"=",
"ud",
"[",
"focus_stage",
"]",
".",
"format",
"(",
"self",
".",
"internal_standard",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"outdir",
")",
":",
"os",
".",
"mkdir",
"(",
"outdir",
")",
"for",
"s",
"in",
"samples",
":",
"d",
"=",
"self",
".",
"data",
"[",
"s",
"]",
".",
"data",
"[",
"focus_stage",
"]",
"ind",
"=",
"self",
".",
"data",
"[",
"s",
"]",
".",
"filt",
".",
"grab_filt",
"(",
"filt",
")",
"out",
"=",
"Bunch",
"(",
")",
"for",
"a",
"in",
"analytes",
":",
"out",
"[",
"a",
"]",
"=",
"nominal_values",
"(",
"d",
"[",
"a",
"]",
"[",
"ind",
"]",
")",
"if",
"focus_stage",
"not",
"in",
"[",
"'rawdata'",
",",
"'despiked'",
"]",
":",
"out",
"[",
"a",
"+",
"'_std'",
"]",
"=",
"std_devs",
"(",
"d",
"[",
"a",
"]",
"[",
"ind",
"]",
")",
"out",
"[",
"a",
"+",
"'_std'",
"]",
"[",
"out",
"[",
"a",
"+",
"'_std'",
"]",
"==",
"0",
"]",
"=",
"np",
".",
"nan",
"out",
"=",
"pd",
".",
"DataFrame",
"(",
"out",
",",
"index",
"=",
"self",
".",
"data",
"[",
"s",
"]",
".",
"Time",
"[",
"ind",
"]",
")",
"out",
".",
"index",
".",
"name",
"=",
"'Time'",
"header",
"=",
"[",
"'# Sample: %s'",
"%",
"(",
"s",
")",
",",
"'# Data Exported from LATOOLS on %s'",
"%",
"(",
"time",
".",
"strftime",
"(",
"'%Y:%m:%d %H:%M:%S'",
")",
")",
",",
"'# Processed using %s configuration'",
"%",
"(",
"self",
".",
"config",
"[",
"'config'",
"]",
")",
",",
"'# Analysis Stage: %s'",
"%",
"(",
"focus_stage",
")",
",",
"'# Unit: %s'",
"%",
"ud",
"[",
"focus_stage",
"]",
"]",
"header",
"=",
"'\\n'",
".",
"join",
"(",
"header",
")",
"+",
"'\\n'",
"csv",
"=",
"out",
".",
"to_csv",
"(",
")",
"with",
"open",
"(",
"'%s/%s_%s.csv'",
"%",
"(",
"outdir",
",",
"s",
",",
"focus_stage",
")",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"header",
")",
"f",
".",
"write",
"(",
"csv",
")",
"if",
"zip_archive",
":",
"utils",
".",
"zipdir",
"(",
"outdir",
",",
"delete",
"=",
"True",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.save_log
|
Save analysis.lalog in specified location
|
latools/latools.py
|
def save_log(self, directory=None, logname=None, header=None):
"""
Save analysis.lalog in specified location
"""
if directory is None:
directory = self.export_dir
if not os.path.isdir(directory):
directory = os.path.dirname(directory)
if logname is None:
logname = 'analysis.lalog'
if header is None:
header = self._log_header()
loc = logging.write_logfile(self.log, header,
os.path.join(directory, logname))
return loc
|
def save_log(self, directory=None, logname=None, header=None):
"""
Save analysis.lalog in specified location
"""
if directory is None:
directory = self.export_dir
if not os.path.isdir(directory):
directory = os.path.dirname(directory)
if logname is None:
logname = 'analysis.lalog'
if header is None:
header = self._log_header()
loc = logging.write_logfile(self.log, header,
os.path.join(directory, logname))
return loc
|
[
"Save",
"analysis",
".",
"lalog",
"in",
"specified",
"location"
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L3920-L3938
|
[
"def",
"save_log",
"(",
"self",
",",
"directory",
"=",
"None",
",",
"logname",
"=",
"None",
",",
"header",
"=",
"None",
")",
":",
"if",
"directory",
"is",
"None",
":",
"directory",
"=",
"self",
".",
"export_dir",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"directory",
")",
":",
"directory",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"directory",
")",
"if",
"logname",
"is",
"None",
":",
"logname",
"=",
"'analysis.lalog'",
"if",
"header",
"is",
"None",
":",
"header",
"=",
"self",
".",
"_log_header",
"(",
")",
"loc",
"=",
"logging",
".",
"write_logfile",
"(",
"self",
".",
"log",
",",
"header",
",",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"logname",
")",
")",
"return",
"loc"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyse.minimal_export
|
Exports a analysis parameters, standard info and a minimal dataset,
which can be imported by another user.
Parameters
----------
target_analytes : str or iterable
Which analytes to include in the export. If specified, the export
will contain these analytes, and all other analytes used during
data processing (e.g. during filtering). If not specified,
all analytes are exported.
path : str
Where to save the minimal export.
If it ends with .zip, a zip file is created.
If it's a folder, all data are exported to a folder.
|
latools/latools.py
|
def minimal_export(self, target_analytes=None, path=None):
"""
Exports a analysis parameters, standard info and a minimal dataset,
which can be imported by another user.
Parameters
----------
target_analytes : str or iterable
Which analytes to include in the export. If specified, the export
will contain these analytes, and all other analytes used during
data processing (e.g. during filtering). If not specified,
all analytes are exported.
path : str
Where to save the minimal export.
If it ends with .zip, a zip file is created.
If it's a folder, all data are exported to a folder.
"""
if target_analytes is None:
target_analytes = self.analytes
if isinstance(target_analytes, str):
target_analytes = [target_analytes]
self.minimal_analytes.update(target_analytes)
zip_archive = False
# set up data path
if path is None:
path = self.export_dir + '/minimal_export.zip'
if path.endswith('.zip'):
path = path.replace('.zip', '')
zip_archive = True
if not os.path.isdir(path):
os.mkdir(path)
# export data
self._minimal_export_traces(path + '/data', analytes=self.minimal_analytes)
# define analysis_log header
log_header = ['# Minimal Reproduction Dataset Exported from LATOOLS on %s' %
(time.strftime('%Y:%m:%d %H:%M:%S')),
'data_folder :: ./data/']
if hasattr(self, 'srmdat'):
log_header.append('srm_table :: ./srm.table')
# export srm table
els = np.unique([re.sub('[0-9]', '', a) for a in self.minimal_analytes])
srmdat = []
for e in els:
srmdat.append(self.srmdat.loc[self.srmdat.element == e, :])
srmdat = pd.concat(srmdat)
with open(path + '/srm.table', 'w') as f:
f.write(srmdat.to_csv())
# save custom functions (of defined)
if hasattr(self, 'custom_stat_functions'):
with open(path + '/custom_stat_fns.py', 'w') as f:
f.write(self.custom_stat_functions)
log_header.append('custom_stat_functions :: ./custom_stat_fns.py')
log_header.append('# Analysis Log Start: \n')
# format sample_stats correctly
lss = [(i, l) for i, l in enumerate(self.log) if 'sample_stats' in l]
rep = re.compile("(.*'stats': )(\[.*?\])(.*)")
for i, l in lss:
self.log[i] = rep.sub(r'\1' + str(self.stats_calced) + r'\3', l)
# save log
self.save_log(path, 'analysis.lalog', header=log_header)
if zip_archive:
utils.zipdir(directory=path, delete=True)
return
|
def minimal_export(self, target_analytes=None, path=None):
"""
Exports a analysis parameters, standard info and a minimal dataset,
which can be imported by another user.
Parameters
----------
target_analytes : str or iterable
Which analytes to include in the export. If specified, the export
will contain these analytes, and all other analytes used during
data processing (e.g. during filtering). If not specified,
all analytes are exported.
path : str
Where to save the minimal export.
If it ends with .zip, a zip file is created.
If it's a folder, all data are exported to a folder.
"""
if target_analytes is None:
target_analytes = self.analytes
if isinstance(target_analytes, str):
target_analytes = [target_analytes]
self.minimal_analytes.update(target_analytes)
zip_archive = False
# set up data path
if path is None:
path = self.export_dir + '/minimal_export.zip'
if path.endswith('.zip'):
path = path.replace('.zip', '')
zip_archive = True
if not os.path.isdir(path):
os.mkdir(path)
# export data
self._minimal_export_traces(path + '/data', analytes=self.minimal_analytes)
# define analysis_log header
log_header = ['# Minimal Reproduction Dataset Exported from LATOOLS on %s' %
(time.strftime('%Y:%m:%d %H:%M:%S')),
'data_folder :: ./data/']
if hasattr(self, 'srmdat'):
log_header.append('srm_table :: ./srm.table')
# export srm table
els = np.unique([re.sub('[0-9]', '', a) for a in self.minimal_analytes])
srmdat = []
for e in els:
srmdat.append(self.srmdat.loc[self.srmdat.element == e, :])
srmdat = pd.concat(srmdat)
with open(path + '/srm.table', 'w') as f:
f.write(srmdat.to_csv())
# save custom functions (of defined)
if hasattr(self, 'custom_stat_functions'):
with open(path + '/custom_stat_fns.py', 'w') as f:
f.write(self.custom_stat_functions)
log_header.append('custom_stat_functions :: ./custom_stat_fns.py')
log_header.append('# Analysis Log Start: \n')
# format sample_stats correctly
lss = [(i, l) for i, l in enumerate(self.log) if 'sample_stats' in l]
rep = re.compile("(.*'stats': )(\[.*?\])(.*)")
for i, l in lss:
self.log[i] = rep.sub(r'\1' + str(self.stats_calced) + r'\3', l)
# save log
self.save_log(path, 'analysis.lalog', header=log_header)
if zip_archive:
utils.zipdir(directory=path, delete=True)
return
|
[
"Exports",
"a",
"analysis",
"parameters",
"standard",
"info",
"and",
"a",
"minimal",
"dataset",
"which",
"can",
"be",
"imported",
"by",
"another",
"user",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L3940-L4015
|
[
"def",
"minimal_export",
"(",
"self",
",",
"target_analytes",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"if",
"target_analytes",
"is",
"None",
":",
"target_analytes",
"=",
"self",
".",
"analytes",
"if",
"isinstance",
"(",
"target_analytes",
",",
"str",
")",
":",
"target_analytes",
"=",
"[",
"target_analytes",
"]",
"self",
".",
"minimal_analytes",
".",
"update",
"(",
"target_analytes",
")",
"zip_archive",
"=",
"False",
"# set up data path",
"if",
"path",
"is",
"None",
":",
"path",
"=",
"self",
".",
"export_dir",
"+",
"'/minimal_export.zip'",
"if",
"path",
".",
"endswith",
"(",
"'.zip'",
")",
":",
"path",
"=",
"path",
".",
"replace",
"(",
"'.zip'",
",",
"''",
")",
"zip_archive",
"=",
"True",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"os",
".",
"mkdir",
"(",
"path",
")",
"# export data",
"self",
".",
"_minimal_export_traces",
"(",
"path",
"+",
"'/data'",
",",
"analytes",
"=",
"self",
".",
"minimal_analytes",
")",
"# define analysis_log header",
"log_header",
"=",
"[",
"'# Minimal Reproduction Dataset Exported from LATOOLS on %s'",
"%",
"(",
"time",
".",
"strftime",
"(",
"'%Y:%m:%d %H:%M:%S'",
")",
")",
",",
"'data_folder :: ./data/'",
"]",
"if",
"hasattr",
"(",
"self",
",",
"'srmdat'",
")",
":",
"log_header",
".",
"append",
"(",
"'srm_table :: ./srm.table'",
")",
"# export srm table",
"els",
"=",
"np",
".",
"unique",
"(",
"[",
"re",
".",
"sub",
"(",
"'[0-9]'",
",",
"''",
",",
"a",
")",
"for",
"a",
"in",
"self",
".",
"minimal_analytes",
"]",
")",
"srmdat",
"=",
"[",
"]",
"for",
"e",
"in",
"els",
":",
"srmdat",
".",
"append",
"(",
"self",
".",
"srmdat",
".",
"loc",
"[",
"self",
".",
"srmdat",
".",
"element",
"==",
"e",
",",
":",
"]",
")",
"srmdat",
"=",
"pd",
".",
"concat",
"(",
"srmdat",
")",
"with",
"open",
"(",
"path",
"+",
"'/srm.table'",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"srmdat",
".",
"to_csv",
"(",
")",
")",
"# save custom functions (of defined)",
"if",
"hasattr",
"(",
"self",
",",
"'custom_stat_functions'",
")",
":",
"with",
"open",
"(",
"path",
"+",
"'/custom_stat_fns.py'",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"self",
".",
"custom_stat_functions",
")",
"log_header",
".",
"append",
"(",
"'custom_stat_functions :: ./custom_stat_fns.py'",
")",
"log_header",
".",
"append",
"(",
"'# Analysis Log Start: \\n'",
")",
"# format sample_stats correctly",
"lss",
"=",
"[",
"(",
"i",
",",
"l",
")",
"for",
"i",
",",
"l",
"in",
"enumerate",
"(",
"self",
".",
"log",
")",
"if",
"'sample_stats'",
"in",
"l",
"]",
"rep",
"=",
"re",
".",
"compile",
"(",
"\"(.*'stats': )(\\[.*?\\])(.*)\"",
")",
"for",
"i",
",",
"l",
"in",
"lss",
":",
"self",
".",
"log",
"[",
"i",
"]",
"=",
"rep",
".",
"sub",
"(",
"r'\\1'",
"+",
"str",
"(",
"self",
".",
"stats_calced",
")",
"+",
"r'\\3'",
",",
"l",
")",
"# save log",
"self",
".",
"save_log",
"(",
"path",
",",
"'analysis.lalog'",
",",
"header",
"=",
"log_header",
")",
"if",
"zip_archive",
":",
"utils",
".",
"zipdir",
"(",
"directory",
"=",
"path",
",",
"delete",
"=",
"True",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
by_regex
|
Split one long analysis file into multiple smaller ones.
Parameters
----------
file : str
The path to the file you want to split.
outdir : str
The directory to save the split files to.
If None, files are saved to a new directory
called 'split', which is created inside the
data directory.
split_pattern : regex string
A regular expression that will match lines in the
file that mark the start of a new section. Does
not have to match the whole line, but must provide
a positive match to the lines containing the pattern.
global_header_rows : int
How many rows at the start of the file to include
in each new sub-file.
fname_pattern : regex string
A regular expression that identifies a new file name
in the lines identified by split_pattern. If none,
files will be called 'noname_N'. The extension of the
main file will be used for all sub-files.
trim_head_lines : int
If greater than zero, this many lines are removed from the start of each segment
trim_tail_lines : int
If greater than zero, this many lines are removed from the end of each segment
Returns
-------
Path to new directory : str
|
latools/preprocessing/split.py
|
def by_regex(file, outdir=None, split_pattern=None, global_header_rows=0, fname_pattern=None, trim_tail_lines=0, trim_head_lines=0):
"""
Split one long analysis file into multiple smaller ones.
Parameters
----------
file : str
The path to the file you want to split.
outdir : str
The directory to save the split files to.
If None, files are saved to a new directory
called 'split', which is created inside the
data directory.
split_pattern : regex string
A regular expression that will match lines in the
file that mark the start of a new section. Does
not have to match the whole line, but must provide
a positive match to the lines containing the pattern.
global_header_rows : int
How many rows at the start of the file to include
in each new sub-file.
fname_pattern : regex string
A regular expression that identifies a new file name
in the lines identified by split_pattern. If none,
files will be called 'noname_N'. The extension of the
main file will be used for all sub-files.
trim_head_lines : int
If greater than zero, this many lines are removed from the start of each segment
trim_tail_lines : int
If greater than zero, this many lines are removed from the end of each segment
Returns
-------
Path to new directory : str
"""
# create output sirectory
if outdir is None:
outdir = os.path.join(os.path.dirname(file), 'split')
if not os.path.exists(outdir):
os.mkdir(outdir)
# read input file
with open(file, 'r') as f:
lines = f.readlines()
# get file extension
extension = os.path.splitext(file)[-1]
# grab global header rows
global_header = lines[:global_header_rows]
# find indices of lines containing split_pattern
starts = []
for i, line in enumerate(lines):
if re.search(split_pattern, line):
starts.append(i)
starts.append(len(lines)) # get length of lines
# split lines into segments based on positions of regex
splits = {}
for i in range(len(starts) - 1):
m = re.search(fname_pattern, lines[starts[i]])
if m:
fname = m.groups()[0].strip()
else:
fname = 'no_name_{:}'.format(i)
splits[fname] = global_header + lines[starts[i]:starts[i+1]][trim_head_lines:trim_tail_lines]
# write files
print('Writing files to: {:}'.format(outdir))
for k, v in splits.items():
fname = (k + extension).replace(' ', '_')
with open(os.path.join(outdir, fname), 'w') as f:
f.writelines(v)
print(' {:}'.format(fname))
print('Done.')
return outdir
|
def by_regex(file, outdir=None, split_pattern=None, global_header_rows=0, fname_pattern=None, trim_tail_lines=0, trim_head_lines=0):
"""
Split one long analysis file into multiple smaller ones.
Parameters
----------
file : str
The path to the file you want to split.
outdir : str
The directory to save the split files to.
If None, files are saved to a new directory
called 'split', which is created inside the
data directory.
split_pattern : regex string
A regular expression that will match lines in the
file that mark the start of a new section. Does
not have to match the whole line, but must provide
a positive match to the lines containing the pattern.
global_header_rows : int
How many rows at the start of the file to include
in each new sub-file.
fname_pattern : regex string
A regular expression that identifies a new file name
in the lines identified by split_pattern. If none,
files will be called 'noname_N'. The extension of the
main file will be used for all sub-files.
trim_head_lines : int
If greater than zero, this many lines are removed from the start of each segment
trim_tail_lines : int
If greater than zero, this many lines are removed from the end of each segment
Returns
-------
Path to new directory : str
"""
# create output sirectory
if outdir is None:
outdir = os.path.join(os.path.dirname(file), 'split')
if not os.path.exists(outdir):
os.mkdir(outdir)
# read input file
with open(file, 'r') as f:
lines = f.readlines()
# get file extension
extension = os.path.splitext(file)[-1]
# grab global header rows
global_header = lines[:global_header_rows]
# find indices of lines containing split_pattern
starts = []
for i, line in enumerate(lines):
if re.search(split_pattern, line):
starts.append(i)
starts.append(len(lines)) # get length of lines
# split lines into segments based on positions of regex
splits = {}
for i in range(len(starts) - 1):
m = re.search(fname_pattern, lines[starts[i]])
if m:
fname = m.groups()[0].strip()
else:
fname = 'no_name_{:}'.format(i)
splits[fname] = global_header + lines[starts[i]:starts[i+1]][trim_head_lines:trim_tail_lines]
# write files
print('Writing files to: {:}'.format(outdir))
for k, v in splits.items():
fname = (k + extension).replace(' ', '_')
with open(os.path.join(outdir, fname), 'w') as f:
f.writelines(v)
print(' {:}'.format(fname))
print('Done.')
return outdir
|
[
"Split",
"one",
"long",
"analysis",
"file",
"into",
"multiple",
"smaller",
"ones",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/preprocessing/split.py#L11-L90
|
[
"def",
"by_regex",
"(",
"file",
",",
"outdir",
"=",
"None",
",",
"split_pattern",
"=",
"None",
",",
"global_header_rows",
"=",
"0",
",",
"fname_pattern",
"=",
"None",
",",
"trim_tail_lines",
"=",
"0",
",",
"trim_head_lines",
"=",
"0",
")",
":",
"# create output sirectory",
"if",
"outdir",
"is",
"None",
":",
"outdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"file",
")",
",",
"'split'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"outdir",
")",
":",
"os",
".",
"mkdir",
"(",
"outdir",
")",
"# read input file",
"with",
"open",
"(",
"file",
",",
"'r'",
")",
"as",
"f",
":",
"lines",
"=",
"f",
".",
"readlines",
"(",
")",
"# get file extension",
"extension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"file",
")",
"[",
"-",
"1",
"]",
"# grab global header rows",
"global_header",
"=",
"lines",
"[",
":",
"global_header_rows",
"]",
"# find indices of lines containing split_pattern",
"starts",
"=",
"[",
"]",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"if",
"re",
".",
"search",
"(",
"split_pattern",
",",
"line",
")",
":",
"starts",
".",
"append",
"(",
"i",
")",
"starts",
".",
"append",
"(",
"len",
"(",
"lines",
")",
")",
"# get length of lines",
"# split lines into segments based on positions of regex",
"splits",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"starts",
")",
"-",
"1",
")",
":",
"m",
"=",
"re",
".",
"search",
"(",
"fname_pattern",
",",
"lines",
"[",
"starts",
"[",
"i",
"]",
"]",
")",
"if",
"m",
":",
"fname",
"=",
"m",
".",
"groups",
"(",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"else",
":",
"fname",
"=",
"'no_name_{:}'",
".",
"format",
"(",
"i",
")",
"splits",
"[",
"fname",
"]",
"=",
"global_header",
"+",
"lines",
"[",
"starts",
"[",
"i",
"]",
":",
"starts",
"[",
"i",
"+",
"1",
"]",
"]",
"[",
"trim_head_lines",
":",
"trim_tail_lines",
"]",
"# write files",
"print",
"(",
"'Writing files to: {:}'",
".",
"format",
"(",
"outdir",
")",
")",
"for",
"k",
",",
"v",
"in",
"splits",
".",
"items",
"(",
")",
":",
"fname",
"=",
"(",
"k",
"+",
"extension",
")",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"outdir",
",",
"fname",
")",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"writelines",
"(",
"v",
")",
"print",
"(",
"' {:}'",
".",
"format",
"(",
"fname",
")",
")",
"print",
"(",
"'Done.'",
")",
"return",
"outdir"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
long_file
|
TODO: Check for existing files in savedir, don't overwrite?
|
latools/preprocessing/split.py
|
def long_file(data_file, dataformat, sample_list, savedir=None, srm_id=None, **autorange_args):
"""
TODO: Check for existing files in savedir, don't overwrite?
"""
if isinstance(sample_list, str):
if os.path.exists(sample_list):
sample_list = np.genfromtxt(sample_list, dtype=str)
else:
raise ValueError('File {} not found.')
elif not isinstance(sample_list, (list, np.ndarray)):
raise ValueError('sample_list should be an array_like or a file.')
if srm_id is not None:
srm_replace = []
for s in sample_list:
if srm_id in s:
s = srm_id
srm_replace.append(s)
sample_list = srm_replace
_, _, dat, meta = read_data(data_file, dataformat=dataformat, name_mode='file')
if 'date' in meta:
d = dateutil.parser.parse(meta['date'])
else:
d = datetime.datetime.now()
# autorange
bkg, sig, trn, _ = autorange(dat['Time'], dat['total_counts'], **autorange_args)
ns = np.zeros(sig.size)
ns[sig] = np.cumsum((sig ^ np.roll(sig, 1)) & sig)[sig]
n = int(max(ns))
if len(sample_list) != n:
warn('Length of sample list does not match number of ablations in file.\n' +
'We will continue, but please make sure the assignments are correct.')
# calculate split boundaries
bounds = []
lower = 0
sn = 0
next_sample = ''
for ni in range(n-1):
sample = sample_list[sn]
next_sample = sample_list[sn + 1]
if sample != next_sample:
current_end = np.argwhere(dat['Time'] == dat['Time'][ns == ni + 1].max())[0]
next_start = np.argwhere(dat['Time'] == dat['Time'][ns == ni + 2].min())[0]
upper = (current_end + next_start) // 2
bounds.append((sample, (int(lower), int(upper))))
lower = upper + 1
sn += 1
bounds.append((sample_list[-1], (int(upper) + 1, len(ns))))
# split up data
sections = {}
seen = {}
for s, (lo, hi) in bounds:
if s not in seen:
seen[s] = 0
else:
seen[s] += 1
s += '_{}'.format(seen[s])
sections[s] = {'oTime': dat['Time'][lo:hi]}
sections[s]['Time'] = sections[s]['oTime'] - np.nanmin(sections[s]['oTime'])
sections[s]['rawdata'] = {}
for k, v in dat['rawdata'].items():
sections[s]['rawdata'][k] = v[lo:hi]
sections[s]['starttime'] = d + datetime.timedelta(seconds=np.nanmin(sections[s]['oTime']))
# save output
if savedir is None:
savedir = os.path.join(os.path.dirname(os.path.abspath(data_file)), os.path.splitext(os.path.basename(data_file))[0] + '_split')
if not os.path.isdir(savedir):
os.makedirs(savedir)
header = ['# Long data file split by latools on {}'.format(datetime.datetime.now().strftime('%Y:%m:%d %H:%M:%S'))]
if 'date' not in meta:
header.append('# Warning: No date specified in file - Analysis Times are date file was split. ')
else:
header.append('# ')
header.append('# ')
header.append('# ')
flist = [savedir]
for s, dat in sections.items():
iheader = header.copy()
iheader.append('# Sample: {}'.format(s))
iheader.append('# Analysis Time: {}'.format(dat['starttime'].strftime('%Y-%m-%d %H:%M:%S')))
iheader = '\n'.join(iheader) + '\n'
out = pd.DataFrame({analyte_2_namemass(k): v for k, v in dat['rawdata'].items()}, index=dat['Time'])
out.index.name = 'Time'
csv = out.to_csv()
with open('{}/{}.csv'.format(savedir, s), 'w') as f:
f.write(iheader)
f.write(csv)
flist.append(' {}.csv'.format(s))
print("File split into {} sections.\n Saved to: {}\n\n Import using the 'REPRODUCE' configuration.".format(n, '\n'.join(flist)))
return None
|
def long_file(data_file, dataformat, sample_list, savedir=None, srm_id=None, **autorange_args):
"""
TODO: Check for existing files in savedir, don't overwrite?
"""
if isinstance(sample_list, str):
if os.path.exists(sample_list):
sample_list = np.genfromtxt(sample_list, dtype=str)
else:
raise ValueError('File {} not found.')
elif not isinstance(sample_list, (list, np.ndarray)):
raise ValueError('sample_list should be an array_like or a file.')
if srm_id is not None:
srm_replace = []
for s in sample_list:
if srm_id in s:
s = srm_id
srm_replace.append(s)
sample_list = srm_replace
_, _, dat, meta = read_data(data_file, dataformat=dataformat, name_mode='file')
if 'date' in meta:
d = dateutil.parser.parse(meta['date'])
else:
d = datetime.datetime.now()
# autorange
bkg, sig, trn, _ = autorange(dat['Time'], dat['total_counts'], **autorange_args)
ns = np.zeros(sig.size)
ns[sig] = np.cumsum((sig ^ np.roll(sig, 1)) & sig)[sig]
n = int(max(ns))
if len(sample_list) != n:
warn('Length of sample list does not match number of ablations in file.\n' +
'We will continue, but please make sure the assignments are correct.')
# calculate split boundaries
bounds = []
lower = 0
sn = 0
next_sample = ''
for ni in range(n-1):
sample = sample_list[sn]
next_sample = sample_list[sn + 1]
if sample != next_sample:
current_end = np.argwhere(dat['Time'] == dat['Time'][ns == ni + 1].max())[0]
next_start = np.argwhere(dat['Time'] == dat['Time'][ns == ni + 2].min())[0]
upper = (current_end + next_start) // 2
bounds.append((sample, (int(lower), int(upper))))
lower = upper + 1
sn += 1
bounds.append((sample_list[-1], (int(upper) + 1, len(ns))))
# split up data
sections = {}
seen = {}
for s, (lo, hi) in bounds:
if s not in seen:
seen[s] = 0
else:
seen[s] += 1
s += '_{}'.format(seen[s])
sections[s] = {'oTime': dat['Time'][lo:hi]}
sections[s]['Time'] = sections[s]['oTime'] - np.nanmin(sections[s]['oTime'])
sections[s]['rawdata'] = {}
for k, v in dat['rawdata'].items():
sections[s]['rawdata'][k] = v[lo:hi]
sections[s]['starttime'] = d + datetime.timedelta(seconds=np.nanmin(sections[s]['oTime']))
# save output
if savedir is None:
savedir = os.path.join(os.path.dirname(os.path.abspath(data_file)), os.path.splitext(os.path.basename(data_file))[0] + '_split')
if not os.path.isdir(savedir):
os.makedirs(savedir)
header = ['# Long data file split by latools on {}'.format(datetime.datetime.now().strftime('%Y:%m:%d %H:%M:%S'))]
if 'date' not in meta:
header.append('# Warning: No date specified in file - Analysis Times are date file was split. ')
else:
header.append('# ')
header.append('# ')
header.append('# ')
flist = [savedir]
for s, dat in sections.items():
iheader = header.copy()
iheader.append('# Sample: {}'.format(s))
iheader.append('# Analysis Time: {}'.format(dat['starttime'].strftime('%Y-%m-%d %H:%M:%S')))
iheader = '\n'.join(iheader) + '\n'
out = pd.DataFrame({analyte_2_namemass(k): v for k, v in dat['rawdata'].items()}, index=dat['Time'])
out.index.name = 'Time'
csv = out.to_csv()
with open('{}/{}.csv'.format(savedir, s), 'w') as f:
f.write(iheader)
f.write(csv)
flist.append(' {}.csv'.format(s))
print("File split into {} sections.\n Saved to: {}\n\n Import using the 'REPRODUCE' configuration.".format(n, '\n'.join(flist)))
return None
|
[
"TODO",
":",
"Check",
"for",
"existing",
"files",
"in",
"savedir",
"don",
"t",
"overwrite?"
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/preprocessing/split.py#L92-L200
|
[
"def",
"long_file",
"(",
"data_file",
",",
"dataformat",
",",
"sample_list",
",",
"savedir",
"=",
"None",
",",
"srm_id",
"=",
"None",
",",
"*",
"*",
"autorange_args",
")",
":",
"if",
"isinstance",
"(",
"sample_list",
",",
"str",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"sample_list",
")",
":",
"sample_list",
"=",
"np",
".",
"genfromtxt",
"(",
"sample_list",
",",
"dtype",
"=",
"str",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'File {} not found.'",
")",
"elif",
"not",
"isinstance",
"(",
"sample_list",
",",
"(",
"list",
",",
"np",
".",
"ndarray",
")",
")",
":",
"raise",
"ValueError",
"(",
"'sample_list should be an array_like or a file.'",
")",
"if",
"srm_id",
"is",
"not",
"None",
":",
"srm_replace",
"=",
"[",
"]",
"for",
"s",
"in",
"sample_list",
":",
"if",
"srm_id",
"in",
"s",
":",
"s",
"=",
"srm_id",
"srm_replace",
".",
"append",
"(",
"s",
")",
"sample_list",
"=",
"srm_replace",
"_",
",",
"_",
",",
"dat",
",",
"meta",
"=",
"read_data",
"(",
"data_file",
",",
"dataformat",
"=",
"dataformat",
",",
"name_mode",
"=",
"'file'",
")",
"if",
"'date'",
"in",
"meta",
":",
"d",
"=",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"meta",
"[",
"'date'",
"]",
")",
"else",
":",
"d",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"# autorange",
"bkg",
",",
"sig",
",",
"trn",
",",
"_",
"=",
"autorange",
"(",
"dat",
"[",
"'Time'",
"]",
",",
"dat",
"[",
"'total_counts'",
"]",
",",
"*",
"*",
"autorange_args",
")",
"ns",
"=",
"np",
".",
"zeros",
"(",
"sig",
".",
"size",
")",
"ns",
"[",
"sig",
"]",
"=",
"np",
".",
"cumsum",
"(",
"(",
"sig",
"^",
"np",
".",
"roll",
"(",
"sig",
",",
"1",
")",
")",
"&",
"sig",
")",
"[",
"sig",
"]",
"n",
"=",
"int",
"(",
"max",
"(",
"ns",
")",
")",
"if",
"len",
"(",
"sample_list",
")",
"!=",
"n",
":",
"warn",
"(",
"'Length of sample list does not match number of ablations in file.\\n'",
"+",
"'We will continue, but please make sure the assignments are correct.'",
")",
"# calculate split boundaries",
"bounds",
"=",
"[",
"]",
"lower",
"=",
"0",
"sn",
"=",
"0",
"next_sample",
"=",
"''",
"for",
"ni",
"in",
"range",
"(",
"n",
"-",
"1",
")",
":",
"sample",
"=",
"sample_list",
"[",
"sn",
"]",
"next_sample",
"=",
"sample_list",
"[",
"sn",
"+",
"1",
"]",
"if",
"sample",
"!=",
"next_sample",
":",
"current_end",
"=",
"np",
".",
"argwhere",
"(",
"dat",
"[",
"'Time'",
"]",
"==",
"dat",
"[",
"'Time'",
"]",
"[",
"ns",
"==",
"ni",
"+",
"1",
"]",
".",
"max",
"(",
")",
")",
"[",
"0",
"]",
"next_start",
"=",
"np",
".",
"argwhere",
"(",
"dat",
"[",
"'Time'",
"]",
"==",
"dat",
"[",
"'Time'",
"]",
"[",
"ns",
"==",
"ni",
"+",
"2",
"]",
".",
"min",
"(",
")",
")",
"[",
"0",
"]",
"upper",
"=",
"(",
"current_end",
"+",
"next_start",
")",
"//",
"2",
"bounds",
".",
"append",
"(",
"(",
"sample",
",",
"(",
"int",
"(",
"lower",
")",
",",
"int",
"(",
"upper",
")",
")",
")",
")",
"lower",
"=",
"upper",
"+",
"1",
"sn",
"+=",
"1",
"bounds",
".",
"append",
"(",
"(",
"sample_list",
"[",
"-",
"1",
"]",
",",
"(",
"int",
"(",
"upper",
")",
"+",
"1",
",",
"len",
"(",
"ns",
")",
")",
")",
")",
"# split up data",
"sections",
"=",
"{",
"}",
"seen",
"=",
"{",
"}",
"for",
"s",
",",
"(",
"lo",
",",
"hi",
")",
"in",
"bounds",
":",
"if",
"s",
"not",
"in",
"seen",
":",
"seen",
"[",
"s",
"]",
"=",
"0",
"else",
":",
"seen",
"[",
"s",
"]",
"+=",
"1",
"s",
"+=",
"'_{}'",
".",
"format",
"(",
"seen",
"[",
"s",
"]",
")",
"sections",
"[",
"s",
"]",
"=",
"{",
"'oTime'",
":",
"dat",
"[",
"'Time'",
"]",
"[",
"lo",
":",
"hi",
"]",
"}",
"sections",
"[",
"s",
"]",
"[",
"'Time'",
"]",
"=",
"sections",
"[",
"s",
"]",
"[",
"'oTime'",
"]",
"-",
"np",
".",
"nanmin",
"(",
"sections",
"[",
"s",
"]",
"[",
"'oTime'",
"]",
")",
"sections",
"[",
"s",
"]",
"[",
"'rawdata'",
"]",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"dat",
"[",
"'rawdata'",
"]",
".",
"items",
"(",
")",
":",
"sections",
"[",
"s",
"]",
"[",
"'rawdata'",
"]",
"[",
"k",
"]",
"=",
"v",
"[",
"lo",
":",
"hi",
"]",
"sections",
"[",
"s",
"]",
"[",
"'starttime'",
"]",
"=",
"d",
"+",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"np",
".",
"nanmin",
"(",
"sections",
"[",
"s",
"]",
"[",
"'oTime'",
"]",
")",
")",
"# save output",
"if",
"savedir",
"is",
"None",
":",
"savedir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"data_file",
")",
")",
",",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"data_file",
")",
")",
"[",
"0",
"]",
"+",
"'_split'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"savedir",
")",
":",
"os",
".",
"makedirs",
"(",
"savedir",
")",
"header",
"=",
"[",
"'# Long data file split by latools on {}'",
".",
"format",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%Y:%m:%d %H:%M:%S'",
")",
")",
"]",
"if",
"'date'",
"not",
"in",
"meta",
":",
"header",
".",
"append",
"(",
"'# Warning: No date specified in file - Analysis Times are date file was split. '",
")",
"else",
":",
"header",
".",
"append",
"(",
"'# '",
")",
"header",
".",
"append",
"(",
"'# '",
")",
"header",
".",
"append",
"(",
"'# '",
")",
"flist",
"=",
"[",
"savedir",
"]",
"for",
"s",
",",
"dat",
"in",
"sections",
".",
"items",
"(",
")",
":",
"iheader",
"=",
"header",
".",
"copy",
"(",
")",
"iheader",
".",
"append",
"(",
"'# Sample: {}'",
".",
"format",
"(",
"s",
")",
")",
"iheader",
".",
"append",
"(",
"'# Analysis Time: {}'",
".",
"format",
"(",
"dat",
"[",
"'starttime'",
"]",
".",
"strftime",
"(",
"'%Y-%m-%d %H:%M:%S'",
")",
")",
")",
"iheader",
"=",
"'\\n'",
".",
"join",
"(",
"iheader",
")",
"+",
"'\\n'",
"out",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"analyte_2_namemass",
"(",
"k",
")",
":",
"v",
"for",
"k",
",",
"v",
"in",
"dat",
"[",
"'rawdata'",
"]",
".",
"items",
"(",
")",
"}",
",",
"index",
"=",
"dat",
"[",
"'Time'",
"]",
")",
"out",
".",
"index",
".",
"name",
"=",
"'Time'",
"csv",
"=",
"out",
".",
"to_csv",
"(",
")",
"with",
"open",
"(",
"'{}/{}.csv'",
".",
"format",
"(",
"savedir",
",",
"s",
")",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"iheader",
")",
"f",
".",
"write",
"(",
"csv",
")",
"flist",
".",
"append",
"(",
"' {}.csv'",
".",
"format",
"(",
"s",
")",
")",
"print",
"(",
"\"File split into {} sections.\\n Saved to: {}\\n\\n Import using the 'REPRODUCE' configuration.\"",
".",
"format",
"(",
"n",
",",
"'\\n'",
".",
"join",
"(",
"flist",
")",
")",
")",
"return",
"None"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
Foldable.fold_map
|
map `f` over the traversable, then fold over the result
using the supplied initial element `z` and operation `g`,
defaulting to addition for the latter.
|
amino/tc/foldable.py
|
def fold_map(self, fa: F[A], z: B, f: Callable[[A], B], g: Callable[[Z, B], Z]=operator.add) -> Z:
''' map `f` over the traversable, then fold over the result
using the supplied initial element `z` and operation `g`,
defaulting to addition for the latter.
'''
mapped = Functor.fatal(type(fa)).map(fa, f)
return self.fold_left(mapped)(z)(g)
|
def fold_map(self, fa: F[A], z: B, f: Callable[[A], B], g: Callable[[Z, B], Z]=operator.add) -> Z:
''' map `f` over the traversable, then fold over the result
using the supplied initial element `z` and operation `g`,
defaulting to addition for the latter.
'''
mapped = Functor.fatal(type(fa)).map(fa, f)
return self.fold_left(mapped)(z)(g)
|
[
"map",
"f",
"over",
"the",
"traversable",
"then",
"fold",
"over",
"the",
"result",
"using",
"the",
"supplied",
"initial",
"element",
"z",
"and",
"operation",
"g",
"defaulting",
"to",
"addition",
"for",
"the",
"latter",
"."
] |
tek/amino
|
python
|
https://github.com/tek/amino/blob/51b314933e047a45587a24ecff02c836706d27ff/amino/tc/foldable.py#L64-L70
|
[
"def",
"fold_map",
"(",
"self",
",",
"fa",
":",
"F",
"[",
"A",
"]",
",",
"z",
":",
"B",
",",
"f",
":",
"Callable",
"[",
"[",
"A",
"]",
",",
"B",
"]",
",",
"g",
":",
"Callable",
"[",
"[",
"Z",
",",
"B",
"]",
",",
"Z",
"]",
"=",
"operator",
".",
"add",
")",
"->",
"Z",
":",
"mapped",
"=",
"Functor",
".",
"fatal",
"(",
"type",
"(",
"fa",
")",
")",
".",
"map",
"(",
"fa",
",",
"f",
")",
"return",
"self",
".",
"fold_left",
"(",
"mapped",
")",
"(",
"z",
")",
"(",
"g",
")"
] |
51b314933e047a45587a24ecff02c836706d27ff
|
test
|
pca_calc
|
Calculates pca of d.
Parameters
----------
nc : int
Number of components
d : np.ndarray
An NxM array, containing M observations of N variables.
Data must be floats. Can contain NaN values.
Returns
-------
pca, dt : tuple
fitted PCA object, and transformed d (same size as d).
|
latools/filtering/pca.py
|
def pca_calc(nc, d):
"""
Calculates pca of d.
Parameters
----------
nc : int
Number of components
d : np.ndarray
An NxM array, containing M observations of N variables.
Data must be floats. Can contain NaN values.
Returns
-------
pca, dt : tuple
fitted PCA object, and transformed d (same size as d).
"""
# check for and remove nans
ind = ~np.apply_along_axis(any, 1, np.isnan(d))
if any(~ind):
pcs = np.full((d.shape[0], nc), np.nan)
d = d[ind, :]
pca = PCA(nc).fit(d)
if any(~ind):
pcs[ind, :] = pca.transform(d)
else:
pcs = pca.transform(d)
return pca, pcs
|
def pca_calc(nc, d):
"""
Calculates pca of d.
Parameters
----------
nc : int
Number of components
d : np.ndarray
An NxM array, containing M observations of N variables.
Data must be floats. Can contain NaN values.
Returns
-------
pca, dt : tuple
fitted PCA object, and transformed d (same size as d).
"""
# check for and remove nans
ind = ~np.apply_along_axis(any, 1, np.isnan(d))
if any(~ind):
pcs = np.full((d.shape[0], nc), np.nan)
d = d[ind, :]
pca = PCA(nc).fit(d)
if any(~ind):
pcs[ind, :] = pca.transform(d)
else:
pcs = pca.transform(d)
return pca, pcs
|
[
"Calculates",
"pca",
"of",
"d",
".",
"Parameters",
"----------",
"nc",
":",
"int",
"Number",
"of",
"components",
"d",
":",
"np",
".",
"ndarray",
"An",
"NxM",
"array",
"containing",
"M",
"observations",
"of",
"N",
"variables",
".",
"Data",
"must",
"be",
"floats",
".",
"Can",
"contain",
"NaN",
"values",
".",
"Returns",
"-------",
"pca",
"dt",
":",
"tuple",
"fitted",
"PCA",
"object",
"and",
"transformed",
"d",
"(",
"same",
"size",
"as",
"d",
")",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/pca.py#L10-L42
|
[
"def",
"pca_calc",
"(",
"nc",
",",
"d",
")",
":",
"# check for and remove nans",
"ind",
"=",
"~",
"np",
".",
"apply_along_axis",
"(",
"any",
",",
"1",
",",
"np",
".",
"isnan",
"(",
"d",
")",
")",
"if",
"any",
"(",
"~",
"ind",
")",
":",
"pcs",
"=",
"np",
".",
"full",
"(",
"(",
"d",
".",
"shape",
"[",
"0",
"]",
",",
"nc",
")",
",",
"np",
".",
"nan",
")",
"d",
"=",
"d",
"[",
"ind",
",",
":",
"]",
"pca",
"=",
"PCA",
"(",
"nc",
")",
".",
"fit",
"(",
"d",
")",
"if",
"any",
"(",
"~",
"ind",
")",
":",
"pcs",
"[",
"ind",
",",
":",
"]",
"=",
"pca",
".",
"transform",
"(",
"d",
")",
"else",
":",
"pcs",
"=",
"pca",
".",
"transform",
"(",
"d",
")",
"return",
"pca",
",",
"pcs"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
pca_plot
|
Plot a fitted PCA, and all components.
|
latools/filtering/pca.py
|
def pca_plot(pca, dt, xlabs=None, mode='scatter', lognorm=True):
"""
Plot a fitted PCA, and all components.
"""
nc = pca.n_components
f = np.arange(pca.n_features_)
cs = list(itertools.combinations(range(nc), 2))
ind = ~np.apply_along_axis(any, 1, np.isnan(dt))
cylim = (pca.components_.min(), pca.components_.max())
yd = cylim[1] - cylim[0]
# Make figure
fig, axs = plt.subplots(nc, nc, figsize=[3 * nc, nc * 3], tight_layout=True)
for x, y in zip(*np.triu_indices(nc)):
if x == y:
tax = axs[x, y]
tax.bar(f, pca.components_[x], 0.8)
tax.set_xticks([])
tax.axhline(0, zorder=-1, c=(0,0,0,0.6))
# labels
tax.set_ylim(cylim[0] - 0.2 * yd,
cylim[1] + 0.2 * yd)
for xi, yi, lab in zip(f, pca.components_[x], xlabs):
if yi > 0:
yo = yd * 0.03
va = 'bottom'
else:
yo = yd * -0.02
va = 'top'
tax.text(xi, yi + yo, lab, ha='center', va=va, rotation=90, fontsize=8)
else:
xv = dt[ind, x]
yv = dt[ind, y]
if mode == 'scatter':
axs[x, y].scatter(xv, yv, alpha=0.2)
axs[y, x].scatter(yv, xv, alpha=0.2)
if mode == 'hist2d':
if lognorm:
norm = mpl.colors.LogNorm()
else:
norm = None
axs[x, y].hist2d(xv, yv, 50, cmap=plt.cm.Blues, norm=norm)
axs[y, x].hist2d(yv, xv, 50, cmap=plt.cm.Blues, norm=norm)
if x == 0:
axs[y, x].set_ylabel('PC{:.0f}'.format(y + 1))
if y == nc - 1:
axs[y, x].set_xlabel('PC{:.0f}'.format(x + 1))
return fig, axs, xv, yv
|
def pca_plot(pca, dt, xlabs=None, mode='scatter', lognorm=True):
"""
Plot a fitted PCA, and all components.
"""
nc = pca.n_components
f = np.arange(pca.n_features_)
cs = list(itertools.combinations(range(nc), 2))
ind = ~np.apply_along_axis(any, 1, np.isnan(dt))
cylim = (pca.components_.min(), pca.components_.max())
yd = cylim[1] - cylim[0]
# Make figure
fig, axs = plt.subplots(nc, nc, figsize=[3 * nc, nc * 3], tight_layout=True)
for x, y in zip(*np.triu_indices(nc)):
if x == y:
tax = axs[x, y]
tax.bar(f, pca.components_[x], 0.8)
tax.set_xticks([])
tax.axhline(0, zorder=-1, c=(0,0,0,0.6))
# labels
tax.set_ylim(cylim[0] - 0.2 * yd,
cylim[1] + 0.2 * yd)
for xi, yi, lab in zip(f, pca.components_[x], xlabs):
if yi > 0:
yo = yd * 0.03
va = 'bottom'
else:
yo = yd * -0.02
va = 'top'
tax.text(xi, yi + yo, lab, ha='center', va=va, rotation=90, fontsize=8)
else:
xv = dt[ind, x]
yv = dt[ind, y]
if mode == 'scatter':
axs[x, y].scatter(xv, yv, alpha=0.2)
axs[y, x].scatter(yv, xv, alpha=0.2)
if mode == 'hist2d':
if lognorm:
norm = mpl.colors.LogNorm()
else:
norm = None
axs[x, y].hist2d(xv, yv, 50, cmap=plt.cm.Blues, norm=norm)
axs[y, x].hist2d(yv, xv, 50, cmap=plt.cm.Blues, norm=norm)
if x == 0:
axs[y, x].set_ylabel('PC{:.0f}'.format(y + 1))
if y == nc - 1:
axs[y, x].set_xlabel('PC{:.0f}'.format(x + 1))
return fig, axs, xv, yv
|
[
"Plot",
"a",
"fitted",
"PCA",
"and",
"all",
"components",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/pca.py#L45-L103
|
[
"def",
"pca_plot",
"(",
"pca",
",",
"dt",
",",
"xlabs",
"=",
"None",
",",
"mode",
"=",
"'scatter'",
",",
"lognorm",
"=",
"True",
")",
":",
"nc",
"=",
"pca",
".",
"n_components",
"f",
"=",
"np",
".",
"arange",
"(",
"pca",
".",
"n_features_",
")",
"cs",
"=",
"list",
"(",
"itertools",
".",
"combinations",
"(",
"range",
"(",
"nc",
")",
",",
"2",
")",
")",
"ind",
"=",
"~",
"np",
".",
"apply_along_axis",
"(",
"any",
",",
"1",
",",
"np",
".",
"isnan",
"(",
"dt",
")",
")",
"cylim",
"=",
"(",
"pca",
".",
"components_",
".",
"min",
"(",
")",
",",
"pca",
".",
"components_",
".",
"max",
"(",
")",
")",
"yd",
"=",
"cylim",
"[",
"1",
"]",
"-",
"cylim",
"[",
"0",
"]",
"# Make figure",
"fig",
",",
"axs",
"=",
"plt",
".",
"subplots",
"(",
"nc",
",",
"nc",
",",
"figsize",
"=",
"[",
"3",
"*",
"nc",
",",
"nc",
"*",
"3",
"]",
",",
"tight_layout",
"=",
"True",
")",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"*",
"np",
".",
"triu_indices",
"(",
"nc",
")",
")",
":",
"if",
"x",
"==",
"y",
":",
"tax",
"=",
"axs",
"[",
"x",
",",
"y",
"]",
"tax",
".",
"bar",
"(",
"f",
",",
"pca",
".",
"components_",
"[",
"x",
"]",
",",
"0.8",
")",
"tax",
".",
"set_xticks",
"(",
"[",
"]",
")",
"tax",
".",
"axhline",
"(",
"0",
",",
"zorder",
"=",
"-",
"1",
",",
"c",
"=",
"(",
"0",
",",
"0",
",",
"0",
",",
"0.6",
")",
")",
"# labels ",
"tax",
".",
"set_ylim",
"(",
"cylim",
"[",
"0",
"]",
"-",
"0.2",
"*",
"yd",
",",
"cylim",
"[",
"1",
"]",
"+",
"0.2",
"*",
"yd",
")",
"for",
"xi",
",",
"yi",
",",
"lab",
"in",
"zip",
"(",
"f",
",",
"pca",
".",
"components_",
"[",
"x",
"]",
",",
"xlabs",
")",
":",
"if",
"yi",
">",
"0",
":",
"yo",
"=",
"yd",
"*",
"0.03",
"va",
"=",
"'bottom'",
"else",
":",
"yo",
"=",
"yd",
"*",
"-",
"0.02",
"va",
"=",
"'top'",
"tax",
".",
"text",
"(",
"xi",
",",
"yi",
"+",
"yo",
",",
"lab",
",",
"ha",
"=",
"'center'",
",",
"va",
"=",
"va",
",",
"rotation",
"=",
"90",
",",
"fontsize",
"=",
"8",
")",
"else",
":",
"xv",
"=",
"dt",
"[",
"ind",
",",
"x",
"]",
"yv",
"=",
"dt",
"[",
"ind",
",",
"y",
"]",
"if",
"mode",
"==",
"'scatter'",
":",
"axs",
"[",
"x",
",",
"y",
"]",
".",
"scatter",
"(",
"xv",
",",
"yv",
",",
"alpha",
"=",
"0.2",
")",
"axs",
"[",
"y",
",",
"x",
"]",
".",
"scatter",
"(",
"yv",
",",
"xv",
",",
"alpha",
"=",
"0.2",
")",
"if",
"mode",
"==",
"'hist2d'",
":",
"if",
"lognorm",
":",
"norm",
"=",
"mpl",
".",
"colors",
".",
"LogNorm",
"(",
")",
"else",
":",
"norm",
"=",
"None",
"axs",
"[",
"x",
",",
"y",
"]",
".",
"hist2d",
"(",
"xv",
",",
"yv",
",",
"50",
",",
"cmap",
"=",
"plt",
".",
"cm",
".",
"Blues",
",",
"norm",
"=",
"norm",
")",
"axs",
"[",
"y",
",",
"x",
"]",
".",
"hist2d",
"(",
"yv",
",",
"xv",
",",
"50",
",",
"cmap",
"=",
"plt",
".",
"cm",
".",
"Blues",
",",
"norm",
"=",
"norm",
")",
"if",
"x",
"==",
"0",
":",
"axs",
"[",
"y",
",",
"x",
"]",
".",
"set_ylabel",
"(",
"'PC{:.0f}'",
".",
"format",
"(",
"y",
"+",
"1",
")",
")",
"if",
"y",
"==",
"nc",
"-",
"1",
":",
"axs",
"[",
"y",
",",
"x",
"]",
".",
"set_xlabel",
"(",
"'PC{:.0f}'",
".",
"format",
"(",
"x",
"+",
"1",
")",
")",
"return",
"fig",
",",
"axs",
",",
"xv",
",",
"yv"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
calc_windows
|
Apply fn to all contiguous regions in s that have at least min_points.
|
latools/filtering/signal_optimiser.py
|
def calc_windows(fn, s, min_points):
"""
Apply fn to all contiguous regions in s that have at least min_points.
"""
max_points = np.sum(~np.isnan(s))
n_points = max_points - min_points
out = np.full((n_points, s.size), np.nan)
# skip nans, for speed
ind = ~np.isnan(s)
s = s[ind]
for i, w in enumerate(range(min_points, s.size)):
r = rolling_window(s, w, pad=np.nan)
out[i, ind] = np.apply_along_axis(fn, 1, r)
return out
|
def calc_windows(fn, s, min_points):
"""
Apply fn to all contiguous regions in s that have at least min_points.
"""
max_points = np.sum(~np.isnan(s))
n_points = max_points - min_points
out = np.full((n_points, s.size), np.nan)
# skip nans, for speed
ind = ~np.isnan(s)
s = s[ind]
for i, w in enumerate(range(min_points, s.size)):
r = rolling_window(s, w, pad=np.nan)
out[i, ind] = np.apply_along_axis(fn, 1, r)
return out
|
[
"Apply",
"fn",
"to",
"all",
"contiguous",
"regions",
"in",
"s",
"that",
"have",
"at",
"least",
"min_points",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/signal_optimiser.py#L14-L31
|
[
"def",
"calc_windows",
"(",
"fn",
",",
"s",
",",
"min_points",
")",
":",
"max_points",
"=",
"np",
".",
"sum",
"(",
"~",
"np",
".",
"isnan",
"(",
"s",
")",
")",
"n_points",
"=",
"max_points",
"-",
"min_points",
"out",
"=",
"np",
".",
"full",
"(",
"(",
"n_points",
",",
"s",
".",
"size",
")",
",",
"np",
".",
"nan",
")",
"# skip nans, for speed",
"ind",
"=",
"~",
"np",
".",
"isnan",
"(",
"s",
")",
"s",
"=",
"s",
"[",
"ind",
"]",
"for",
"i",
",",
"w",
"in",
"enumerate",
"(",
"range",
"(",
"min_points",
",",
"s",
".",
"size",
")",
")",
":",
"r",
"=",
"rolling_window",
"(",
"s",
",",
"w",
",",
"pad",
"=",
"np",
".",
"nan",
")",
"out",
"[",
"i",
",",
"ind",
"]",
"=",
"np",
".",
"apply_along_axis",
"(",
"fn",
",",
"1",
",",
"r",
")",
"return",
"out"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
calc_window_mean_std
|
Apply fn to all contiguous regions in s that have at least min_points.
|
latools/filtering/signal_optimiser.py
|
def calc_window_mean_std(s, min_points, ind=None):
"""
Apply fn to all contiguous regions in s that have at least min_points.
"""
max_points = np.sum(~np.isnan(s))
n_points = max_points - min_points
mean = np.full((n_points, s.size), np.nan)
std = np.full((n_points, s.size), np.nan)
# skip nans, for speed
if ind is None:
ind = ~np.isnan(s)
else:
ind = ind & ~np.isnan(s)
s = s[ind]
for i, w in enumerate(range(min_points, s.size)):
r = rolling_window(s, w, pad=np.nan)
mean[i, ind] = r.sum(1) / w
std[i, ind] = (((r - mean[i, ind][:, np.newaxis])**2).sum(1) / (w - 1))**0.5
# mean[i, ind] = np.apply_along_axis(np.nanmean, 1, r)
# std[i, ind] = np.apply_along_axis(np.nanstd, 1, r)
return mean, std
|
def calc_window_mean_std(s, min_points, ind=None):
"""
Apply fn to all contiguous regions in s that have at least min_points.
"""
max_points = np.sum(~np.isnan(s))
n_points = max_points - min_points
mean = np.full((n_points, s.size), np.nan)
std = np.full((n_points, s.size), np.nan)
# skip nans, for speed
if ind is None:
ind = ~np.isnan(s)
else:
ind = ind & ~np.isnan(s)
s = s[ind]
for i, w in enumerate(range(min_points, s.size)):
r = rolling_window(s, w, pad=np.nan)
mean[i, ind] = r.sum(1) / w
std[i, ind] = (((r - mean[i, ind][:, np.newaxis])**2).sum(1) / (w - 1))**0.5
# mean[i, ind] = np.apply_along_axis(np.nanmean, 1, r)
# std[i, ind] = np.apply_along_axis(np.nanstd, 1, r)
return mean, std
|
[
"Apply",
"fn",
"to",
"all",
"contiguous",
"regions",
"in",
"s",
"that",
"have",
"at",
"least",
"min_points",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/signal_optimiser.py#L33-L57
|
[
"def",
"calc_window_mean_std",
"(",
"s",
",",
"min_points",
",",
"ind",
"=",
"None",
")",
":",
"max_points",
"=",
"np",
".",
"sum",
"(",
"~",
"np",
".",
"isnan",
"(",
"s",
")",
")",
"n_points",
"=",
"max_points",
"-",
"min_points",
"mean",
"=",
"np",
".",
"full",
"(",
"(",
"n_points",
",",
"s",
".",
"size",
")",
",",
"np",
".",
"nan",
")",
"std",
"=",
"np",
".",
"full",
"(",
"(",
"n_points",
",",
"s",
".",
"size",
")",
",",
"np",
".",
"nan",
")",
"# skip nans, for speed",
"if",
"ind",
"is",
"None",
":",
"ind",
"=",
"~",
"np",
".",
"isnan",
"(",
"s",
")",
"else",
":",
"ind",
"=",
"ind",
"&",
"~",
"np",
".",
"isnan",
"(",
"s",
")",
"s",
"=",
"s",
"[",
"ind",
"]",
"for",
"i",
",",
"w",
"in",
"enumerate",
"(",
"range",
"(",
"min_points",
",",
"s",
".",
"size",
")",
")",
":",
"r",
"=",
"rolling_window",
"(",
"s",
",",
"w",
",",
"pad",
"=",
"np",
".",
"nan",
")",
"mean",
"[",
"i",
",",
"ind",
"]",
"=",
"r",
".",
"sum",
"(",
"1",
")",
"/",
"w",
"std",
"[",
"i",
",",
"ind",
"]",
"=",
"(",
"(",
"(",
"r",
"-",
"mean",
"[",
"i",
",",
"ind",
"]",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
")",
"**",
"2",
")",
".",
"sum",
"(",
"1",
")",
"/",
"(",
"w",
"-",
"1",
")",
")",
"**",
"0.5",
"# mean[i, ind] = np.apply_along_axis(np.nanmean, 1, r)",
"# std[i, ind] = np.apply_along_axis(np.nanstd, 1, r)",
"return",
"mean",
",",
"std"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
bayes_scale
|
Remove mean and divide by standard deviation, using bayes_kvm statistics.
|
latools/filtering/signal_optimiser.py
|
def bayes_scale(s):
"""
Remove mean and divide by standard deviation, using bayes_kvm statistics.
"""
if sum(~np.isnan(s)) > 1:
bm, bv, bs = bayes_mvs(s[~np.isnan(s)])
return (s - bm.statistic) / bs.statistic
else:
return np.full(s.shape, np.nan)
|
def bayes_scale(s):
"""
Remove mean and divide by standard deviation, using bayes_kvm statistics.
"""
if sum(~np.isnan(s)) > 1:
bm, bv, bs = bayes_mvs(s[~np.isnan(s)])
return (s - bm.statistic) / bs.statistic
else:
return np.full(s.shape, np.nan)
|
[
"Remove",
"mean",
"and",
"divide",
"by",
"standard",
"deviation",
"using",
"bayes_kvm",
"statistics",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/signal_optimiser.py#L65-L73
|
[
"def",
"bayes_scale",
"(",
"s",
")",
":",
"if",
"sum",
"(",
"~",
"np",
".",
"isnan",
"(",
"s",
")",
")",
">",
"1",
":",
"bm",
",",
"bv",
",",
"bs",
"=",
"bayes_mvs",
"(",
"s",
"[",
"~",
"np",
".",
"isnan",
"(",
"s",
")",
"]",
")",
"return",
"(",
"s",
"-",
"bm",
".",
"statistic",
")",
"/",
"bs",
".",
"statistic",
"else",
":",
"return",
"np",
".",
"full",
"(",
"s",
".",
"shape",
",",
"np",
".",
"nan",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
median_scaler
|
Remove median, divide by IQR.
|
latools/filtering/signal_optimiser.py
|
def median_scaler(s):
"""
Remove median, divide by IQR.
"""
if sum(~np.isnan(s)) > 2:
ss = s[~np.isnan(s)]
median = np.median(ss)
IQR = np.diff(np.percentile(ss, [25, 75]))
return (s - median) / IQR
else:
return np.full(s.shape, np.nan)
|
def median_scaler(s):
"""
Remove median, divide by IQR.
"""
if sum(~np.isnan(s)) > 2:
ss = s[~np.isnan(s)]
median = np.median(ss)
IQR = np.diff(np.percentile(ss, [25, 75]))
return (s - median) / IQR
else:
return np.full(s.shape, np.nan)
|
[
"Remove",
"median",
"divide",
"by",
"IQR",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/signal_optimiser.py#L75-L85
|
[
"def",
"median_scaler",
"(",
"s",
")",
":",
"if",
"sum",
"(",
"~",
"np",
".",
"isnan",
"(",
"s",
")",
")",
">",
"2",
":",
"ss",
"=",
"s",
"[",
"~",
"np",
".",
"isnan",
"(",
"s",
")",
"]",
"median",
"=",
"np",
".",
"median",
"(",
"ss",
")",
"IQR",
"=",
"np",
".",
"diff",
"(",
"np",
".",
"percentile",
"(",
"ss",
",",
"[",
"25",
",",
"75",
"]",
")",
")",
"return",
"(",
"s",
"-",
"median",
")",
"/",
"IQR",
"else",
":",
"return",
"np",
".",
"full",
"(",
"s",
".",
"shape",
",",
"np",
".",
"nan",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
signal_optimiser
|
Optimise data selection based on specified analytes.
Identifies the longest possible contiguous data region in
the signal where the relative standard deviation (std) and
concentration of all analytes is minimised.
Optimisation is performed via a grid search of all possible
contiguous data regions. For each region, the mean std and
mean scaled analyte concentration ('amplitude') are calculated.
The size and position of the optimal data region are identified
using threshold std and amplitude values. Thresholds are derived
from all calculated stds and amplitudes using the method specified
by `threshold_mode`. For example, using the 'kde_max' method, a
probability density function (PDF) is calculated for std and
amplitude values, and the threshold is set as the maximum of the
PDF. These thresholds are then used to identify the size and position
of the longest contiguous region where the std is below the threshold,
and the amplitude is either below the threshold.
All possible regions of the data that have at least
`min_points` are considered.
For a graphical demonstration of the action of signal_optimiser,
use `optimisation_plot`.
Parameters
----------
d : latools.D object
An latools data object.
analytes : str or array-like
Which analytes to consider.
min_points : int
The minimum number of contiguous points to
consider.
threshold_mode : str
The method used to calculate the optimisation
thresholds. Can be 'mean', 'median', 'kde_max'
or 'bayes_mvs', or a custom function. If a
function, must take a 1D array, and return a
single, real number.
threshood_mult : float or tuple
A multiplier applied to the calculated threshold
before use. If a tuple, the first value is applied
to the mean threshold, and the second is applied to
the standard deviation threshold. Reduce this to make
data selection more stringent.
x_bias : float
If non-zero, a bias is applied to the calculated statistics
to prefer the beginning (if > 0) or end (if < 0) of the
signal. Should be between zero and 1.
weights : array-like of length len(analytes)
An array of numbers specifying the importance of
each analyte considered. Larger number makes the
analyte have a greater effect on the optimisation.
Default is None.
ind : boolean array
A boolean array the same length as the data. Where
false, data will not be included.
mode : str
Whether to 'minimise' or 'maximise' the concentration
of the elements.
Returns
-------
dict, str : optimisation result, error message
|
latools/filtering/signal_optimiser.py
|
def signal_optimiser(d, analytes, min_points=5,
threshold_mode='kde_first_max',
threshold_mult=1., x_bias=0,
weights=None, ind=None, mode='minimise'):
"""
Optimise data selection based on specified analytes.
Identifies the longest possible contiguous data region in
the signal where the relative standard deviation (std) and
concentration of all analytes is minimised.
Optimisation is performed via a grid search of all possible
contiguous data regions. For each region, the mean std and
mean scaled analyte concentration ('amplitude') are calculated.
The size and position of the optimal data region are identified
using threshold std and amplitude values. Thresholds are derived
from all calculated stds and amplitudes using the method specified
by `threshold_mode`. For example, using the 'kde_max' method, a
probability density function (PDF) is calculated for std and
amplitude values, and the threshold is set as the maximum of the
PDF. These thresholds are then used to identify the size and position
of the longest contiguous region where the std is below the threshold,
and the amplitude is either below the threshold.
All possible regions of the data that have at least
`min_points` are considered.
For a graphical demonstration of the action of signal_optimiser,
use `optimisation_plot`.
Parameters
----------
d : latools.D object
An latools data object.
analytes : str or array-like
Which analytes to consider.
min_points : int
The minimum number of contiguous points to
consider.
threshold_mode : str
The method used to calculate the optimisation
thresholds. Can be 'mean', 'median', 'kde_max'
or 'bayes_mvs', or a custom function. If a
function, must take a 1D array, and return a
single, real number.
threshood_mult : float or tuple
A multiplier applied to the calculated threshold
before use. If a tuple, the first value is applied
to the mean threshold, and the second is applied to
the standard deviation threshold. Reduce this to make
data selection more stringent.
x_bias : float
If non-zero, a bias is applied to the calculated statistics
to prefer the beginning (if > 0) or end (if < 0) of the
signal. Should be between zero and 1.
weights : array-like of length len(analytes)
An array of numbers specifying the importance of
each analyte considered. Larger number makes the
analyte have a greater effect on the optimisation.
Default is None.
ind : boolean array
A boolean array the same length as the data. Where
false, data will not be included.
mode : str
Whether to 'minimise' or 'maximise' the concentration
of the elements.
Returns
-------
dict, str : optimisation result, error message
"""
errmsg = ''
if isinstance(analytes, str):
analytes = [analytes]
if ind is None:
ind = np.full(len(d.Time), True)
# initial catch
if not any(ind) or (np.diff(bool_2_indices(ind)).max() < min_points):
errmsg = 'Optmisation failed. No contiguous data regions longer than {:.0f} points.'.format(min_points)
return Bunch({'means': np.nan,
'stds': np.nan,
'mean_threshold': np.nan,
'std_threshold': np.nan,
'lims': np.nan,
'filt': ind,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': np.nan,
'opt_n_points': np.nan,
'weights': weights,
'optimisation_success': False,
'errmsg': errmsg}), errmsg
msmeans, msstds = calculate_optimisation_stats(d, analytes, min_points, weights, ind, x_bias)
# second catch
if all(np.isnan(msmeans).flat) or all(np.isnan(msmeans).flat):
errmsg = 'Optmisation failed. No contiguous data regions longer than {:.0f} points.'.format(min_points)
return Bunch({'means': np.nan,
'stds': np.nan,
'mean_threshold': np.nan,
'std_threshold': np.nan,
'lims': np.nan,
'filt': ind,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': np.nan,
'opt_n_points': np.nan,
'weights': weights,
'optimisation_success': False,
'errmsg': errmsg}), errmsg
# define thresholds
valid = ['kde_first_max', 'kde_max', 'median', 'bayes_mvs', 'mean']
n_under = 0
i = np.argwhere(np.array(valid) == threshold_mode)[0, 0]
o_threshold_mode = threshold_mode
while (n_under <= 0) & (i < len(valid)):
if threshold_mode == 'median':
# median - OK, but best?
std_threshold = np.nanmedian(msstds)
mean_threshold = np.nanmedian(msmeans)
elif threshold_mode == 'mean':
# mean
std_threshold = np.nanmean(msstds)
mean_threshold = np.nanmean(msmeans)
elif threshold_mode == 'kde_max':
# maximum of gaussian kernel density estimator
mkd = gaussian_kde(msmeans[~np.isnan(msmeans)].flat)
xm = np.linspace(*np.percentile(msmeans.flatten()[~np.isnan(msmeans.flatten())], (1, 99)), 100)
mdf = mkd.pdf(xm)
mean_threshold = xm[np.argmax(mdf)]
rkd = gaussian_kde(msstds[~np.isnan(msstds)])
xr = np.linspace(*np.percentile(msstds.flatten()[~np.isnan(msstds.flatten())], (1, 99)), 100)
rdf = rkd.pdf(xr)
std_threshold = xr[np.argmax(rdf)]
elif threshold_mode == 'kde_first_max':
# first local maximum of gaussian kernel density estimator
mkd = gaussian_kde(msmeans[~np.isnan(msmeans)].flat)
xm = np.linspace(*np.percentile(msmeans.flatten()[~np.isnan(msmeans.flatten())], (1, 99)), 100)
mdf = mkd.pdf(xm)
inds = np.argwhere(np.r_[False, mdf[1:] > mdf[:-1]] &
np.r_[mdf[:-1] > mdf[1:], False] &
(mdf > 0.25 * mdf.max()))
mean_threshold = xm[np.min(inds)]
rkd = gaussian_kde(msstds[~np.isnan(msstds)])
xr = np.linspace(*np.percentile(msstds.flatten()[~np.isnan(msstds.flatten())], (1, 99)), 100)
rdf = rkd.pdf(xr)
inds = np.argwhere(np.r_[False, rdf[1:] > rdf[:-1]] &
np.r_[rdf[:-1] > rdf[1:], False] &
(rdf > 0.25 * rdf.max()))
std_threshold = xr[np.min(inds)]
elif threshold_mode == 'bayes_mvs':
# bayesian mvs.
bm, _, bs = bayes_mvs(msstds[~np.isnan(msstds)])
std_threshold = bm.statistic
bm, _, bs = bayes_mvs(msmeans[~np.isnan(msmeans)])
mean_threshold = bm.statistic
elif callable(threshold_mode):
std_threshold = threshold_mode(msstds[~np.isnan(msstds)].flatten())
mean_threshold = threshold_mode(msmeans[~np.isnan(msmeans)].flatten())
else:
try:
mean_threshold, std_threshold = threshold_mode
except:
raise ValueError('\nthreshold_mode must be one of:\n ' + ', '.join(valid) + ',\na custom function, or a \n(mean_threshold, std_threshold) tuple.')
# apply threshold_mult
if isinstance(threshold_mult, (int, float)):
std_threshold *= threshold_mult
mean_threshold *= threshold_mult
elif len(threshold_mult) == 2:
mean_threshold *= threshold_mult[0]
std_threshold *= threshold_mult[1]
else:
raise ValueError('\nthreshold_mult must be a float, int or tuple of length 2.')
rind = (msstds < std_threshold)
if mode == 'minimise':
mind = (msmeans < mean_threshold)
else:
mind = (msmeans > mean_threshold)
ind = rind & mind
n_under = ind.sum()
if n_under == 0:
i += 1
if i <= len(valid) - 1:
threshold_mode = valid[i]
else:
errmsg = 'Optimisation failed. No of the threshold_mode would work. Try reducting min_points.'
return Bunch({'means': np.nan,
'stds': np.nan,
'mean_threshold': np.nan,
'std_threshold': np.nan,
'lims': np.nan,
'filt': ind,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': np.nan,
'opt_n_points': np.nan,
'weights': weights,
'optimisation_success': False,
'errmsg': errmsg}), errmsg
if i > 0:
errmsg = "optimisation failed using threshold_mode='{:}', falling back to '{:}'".format(o_threshold_mode, threshold_mode)
# identify max number of points within thresholds
passing = np.argwhere(ind)
opt_n_points = passing[:, 0].max()
opt_centre = passing[passing[:, 0] == opt_n_points, 1].min()
opt_n_points += min_points
# centres, npoints = np.meshgrid(np.arange(msmeans.shape[1]),
# np.arange(min_points, min_points + msmeans.shape[0]))
# opt_n_points = npoints[ind].max()
# plus/minus one point to allow some freedom to shift selection window.
# cind = ind & (npoints == opt_n_points)
# opt_centre = centres[cind].min()
if opt_n_points % 2 == 0:
lims = (opt_centre - opt_n_points // 2,
opt_centre + opt_n_points // 2)
else:
lims = (opt_centre - opt_n_points // 2,
opt_centre + opt_n_points // 2 + 1)
filt = np.zeros(d.Time.shape, dtype=bool)
filt[lims[0]:lims[1]] = True
return Bunch({'means': msmeans,
'stds': msstds,
'mean_threshold': mean_threshold,
'std_threshold': std_threshold,
'lims': lims,
'filt': filt,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': opt_centre,
'opt_n_points': opt_n_points,
'weights': weights,
'optimisation_success': True,
'errmsg': errmsg}), errmsg
|
def signal_optimiser(d, analytes, min_points=5,
threshold_mode='kde_first_max',
threshold_mult=1., x_bias=0,
weights=None, ind=None, mode='minimise'):
"""
Optimise data selection based on specified analytes.
Identifies the longest possible contiguous data region in
the signal where the relative standard deviation (std) and
concentration of all analytes is minimised.
Optimisation is performed via a grid search of all possible
contiguous data regions. For each region, the mean std and
mean scaled analyte concentration ('amplitude') are calculated.
The size and position of the optimal data region are identified
using threshold std and amplitude values. Thresholds are derived
from all calculated stds and amplitudes using the method specified
by `threshold_mode`. For example, using the 'kde_max' method, a
probability density function (PDF) is calculated for std and
amplitude values, and the threshold is set as the maximum of the
PDF. These thresholds are then used to identify the size and position
of the longest contiguous region where the std is below the threshold,
and the amplitude is either below the threshold.
All possible regions of the data that have at least
`min_points` are considered.
For a graphical demonstration of the action of signal_optimiser,
use `optimisation_plot`.
Parameters
----------
d : latools.D object
An latools data object.
analytes : str or array-like
Which analytes to consider.
min_points : int
The minimum number of contiguous points to
consider.
threshold_mode : str
The method used to calculate the optimisation
thresholds. Can be 'mean', 'median', 'kde_max'
or 'bayes_mvs', or a custom function. If a
function, must take a 1D array, and return a
single, real number.
threshood_mult : float or tuple
A multiplier applied to the calculated threshold
before use. If a tuple, the first value is applied
to the mean threshold, and the second is applied to
the standard deviation threshold. Reduce this to make
data selection more stringent.
x_bias : float
If non-zero, a bias is applied to the calculated statistics
to prefer the beginning (if > 0) or end (if < 0) of the
signal. Should be between zero and 1.
weights : array-like of length len(analytes)
An array of numbers specifying the importance of
each analyte considered. Larger number makes the
analyte have a greater effect on the optimisation.
Default is None.
ind : boolean array
A boolean array the same length as the data. Where
false, data will not be included.
mode : str
Whether to 'minimise' or 'maximise' the concentration
of the elements.
Returns
-------
dict, str : optimisation result, error message
"""
errmsg = ''
if isinstance(analytes, str):
analytes = [analytes]
if ind is None:
ind = np.full(len(d.Time), True)
# initial catch
if not any(ind) or (np.diff(bool_2_indices(ind)).max() < min_points):
errmsg = 'Optmisation failed. No contiguous data regions longer than {:.0f} points.'.format(min_points)
return Bunch({'means': np.nan,
'stds': np.nan,
'mean_threshold': np.nan,
'std_threshold': np.nan,
'lims': np.nan,
'filt': ind,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': np.nan,
'opt_n_points': np.nan,
'weights': weights,
'optimisation_success': False,
'errmsg': errmsg}), errmsg
msmeans, msstds = calculate_optimisation_stats(d, analytes, min_points, weights, ind, x_bias)
# second catch
if all(np.isnan(msmeans).flat) or all(np.isnan(msmeans).flat):
errmsg = 'Optmisation failed. No contiguous data regions longer than {:.0f} points.'.format(min_points)
return Bunch({'means': np.nan,
'stds': np.nan,
'mean_threshold': np.nan,
'std_threshold': np.nan,
'lims': np.nan,
'filt': ind,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': np.nan,
'opt_n_points': np.nan,
'weights': weights,
'optimisation_success': False,
'errmsg': errmsg}), errmsg
# define thresholds
valid = ['kde_first_max', 'kde_max', 'median', 'bayes_mvs', 'mean']
n_under = 0
i = np.argwhere(np.array(valid) == threshold_mode)[0, 0]
o_threshold_mode = threshold_mode
while (n_under <= 0) & (i < len(valid)):
if threshold_mode == 'median':
# median - OK, but best?
std_threshold = np.nanmedian(msstds)
mean_threshold = np.nanmedian(msmeans)
elif threshold_mode == 'mean':
# mean
std_threshold = np.nanmean(msstds)
mean_threshold = np.nanmean(msmeans)
elif threshold_mode == 'kde_max':
# maximum of gaussian kernel density estimator
mkd = gaussian_kde(msmeans[~np.isnan(msmeans)].flat)
xm = np.linspace(*np.percentile(msmeans.flatten()[~np.isnan(msmeans.flatten())], (1, 99)), 100)
mdf = mkd.pdf(xm)
mean_threshold = xm[np.argmax(mdf)]
rkd = gaussian_kde(msstds[~np.isnan(msstds)])
xr = np.linspace(*np.percentile(msstds.flatten()[~np.isnan(msstds.flatten())], (1, 99)), 100)
rdf = rkd.pdf(xr)
std_threshold = xr[np.argmax(rdf)]
elif threshold_mode == 'kde_first_max':
# first local maximum of gaussian kernel density estimator
mkd = gaussian_kde(msmeans[~np.isnan(msmeans)].flat)
xm = np.linspace(*np.percentile(msmeans.flatten()[~np.isnan(msmeans.flatten())], (1, 99)), 100)
mdf = mkd.pdf(xm)
inds = np.argwhere(np.r_[False, mdf[1:] > mdf[:-1]] &
np.r_[mdf[:-1] > mdf[1:], False] &
(mdf > 0.25 * mdf.max()))
mean_threshold = xm[np.min(inds)]
rkd = gaussian_kde(msstds[~np.isnan(msstds)])
xr = np.linspace(*np.percentile(msstds.flatten()[~np.isnan(msstds.flatten())], (1, 99)), 100)
rdf = rkd.pdf(xr)
inds = np.argwhere(np.r_[False, rdf[1:] > rdf[:-1]] &
np.r_[rdf[:-1] > rdf[1:], False] &
(rdf > 0.25 * rdf.max()))
std_threshold = xr[np.min(inds)]
elif threshold_mode == 'bayes_mvs':
# bayesian mvs.
bm, _, bs = bayes_mvs(msstds[~np.isnan(msstds)])
std_threshold = bm.statistic
bm, _, bs = bayes_mvs(msmeans[~np.isnan(msmeans)])
mean_threshold = bm.statistic
elif callable(threshold_mode):
std_threshold = threshold_mode(msstds[~np.isnan(msstds)].flatten())
mean_threshold = threshold_mode(msmeans[~np.isnan(msmeans)].flatten())
else:
try:
mean_threshold, std_threshold = threshold_mode
except:
raise ValueError('\nthreshold_mode must be one of:\n ' + ', '.join(valid) + ',\na custom function, or a \n(mean_threshold, std_threshold) tuple.')
# apply threshold_mult
if isinstance(threshold_mult, (int, float)):
std_threshold *= threshold_mult
mean_threshold *= threshold_mult
elif len(threshold_mult) == 2:
mean_threshold *= threshold_mult[0]
std_threshold *= threshold_mult[1]
else:
raise ValueError('\nthreshold_mult must be a float, int or tuple of length 2.')
rind = (msstds < std_threshold)
if mode == 'minimise':
mind = (msmeans < mean_threshold)
else:
mind = (msmeans > mean_threshold)
ind = rind & mind
n_under = ind.sum()
if n_under == 0:
i += 1
if i <= len(valid) - 1:
threshold_mode = valid[i]
else:
errmsg = 'Optimisation failed. No of the threshold_mode would work. Try reducting min_points.'
return Bunch({'means': np.nan,
'stds': np.nan,
'mean_threshold': np.nan,
'std_threshold': np.nan,
'lims': np.nan,
'filt': ind,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': np.nan,
'opt_n_points': np.nan,
'weights': weights,
'optimisation_success': False,
'errmsg': errmsg}), errmsg
if i > 0:
errmsg = "optimisation failed using threshold_mode='{:}', falling back to '{:}'".format(o_threshold_mode, threshold_mode)
# identify max number of points within thresholds
passing = np.argwhere(ind)
opt_n_points = passing[:, 0].max()
opt_centre = passing[passing[:, 0] == opt_n_points, 1].min()
opt_n_points += min_points
# centres, npoints = np.meshgrid(np.arange(msmeans.shape[1]),
# np.arange(min_points, min_points + msmeans.shape[0]))
# opt_n_points = npoints[ind].max()
# plus/minus one point to allow some freedom to shift selection window.
# cind = ind & (npoints == opt_n_points)
# opt_centre = centres[cind].min()
if opt_n_points % 2 == 0:
lims = (opt_centre - opt_n_points // 2,
opt_centre + opt_n_points // 2)
else:
lims = (opt_centre - opt_n_points // 2,
opt_centre + opt_n_points // 2 + 1)
filt = np.zeros(d.Time.shape, dtype=bool)
filt[lims[0]:lims[1]] = True
return Bunch({'means': msmeans,
'stds': msstds,
'mean_threshold': mean_threshold,
'std_threshold': std_threshold,
'lims': lims,
'filt': filt,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': opt_centre,
'opt_n_points': opt_n_points,
'weights': weights,
'optimisation_success': True,
'errmsg': errmsg}), errmsg
|
[
"Optimise",
"data",
"selection",
"based",
"on",
"specified",
"analytes",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/signal_optimiser.py#L132-L389
|
[
"def",
"signal_optimiser",
"(",
"d",
",",
"analytes",
",",
"min_points",
"=",
"5",
",",
"threshold_mode",
"=",
"'kde_first_max'",
",",
"threshold_mult",
"=",
"1.",
",",
"x_bias",
"=",
"0",
",",
"weights",
"=",
"None",
",",
"ind",
"=",
"None",
",",
"mode",
"=",
"'minimise'",
")",
":",
"errmsg",
"=",
"''",
"if",
"isinstance",
"(",
"analytes",
",",
"str",
")",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"if",
"ind",
"is",
"None",
":",
"ind",
"=",
"np",
".",
"full",
"(",
"len",
"(",
"d",
".",
"Time",
")",
",",
"True",
")",
"# initial catch",
"if",
"not",
"any",
"(",
"ind",
")",
"or",
"(",
"np",
".",
"diff",
"(",
"bool_2_indices",
"(",
"ind",
")",
")",
".",
"max",
"(",
")",
"<",
"min_points",
")",
":",
"errmsg",
"=",
"'Optmisation failed. No contiguous data regions longer than {:.0f} points.'",
".",
"format",
"(",
"min_points",
")",
"return",
"Bunch",
"(",
"{",
"'means'",
":",
"np",
".",
"nan",
",",
"'stds'",
":",
"np",
".",
"nan",
",",
"'mean_threshold'",
":",
"np",
".",
"nan",
",",
"'std_threshold'",
":",
"np",
".",
"nan",
",",
"'lims'",
":",
"np",
".",
"nan",
",",
"'filt'",
":",
"ind",
",",
"'threshold_mode'",
":",
"threshold_mode",
",",
"'min_points'",
":",
"min_points",
",",
"'analytes'",
":",
"analytes",
",",
"'opt_centre'",
":",
"np",
".",
"nan",
",",
"'opt_n_points'",
":",
"np",
".",
"nan",
",",
"'weights'",
":",
"weights",
",",
"'optimisation_success'",
":",
"False",
",",
"'errmsg'",
":",
"errmsg",
"}",
")",
",",
"errmsg",
"msmeans",
",",
"msstds",
"=",
"calculate_optimisation_stats",
"(",
"d",
",",
"analytes",
",",
"min_points",
",",
"weights",
",",
"ind",
",",
"x_bias",
")",
"# second catch",
"if",
"all",
"(",
"np",
".",
"isnan",
"(",
"msmeans",
")",
".",
"flat",
")",
"or",
"all",
"(",
"np",
".",
"isnan",
"(",
"msmeans",
")",
".",
"flat",
")",
":",
"errmsg",
"=",
"'Optmisation failed. No contiguous data regions longer than {:.0f} points.'",
".",
"format",
"(",
"min_points",
")",
"return",
"Bunch",
"(",
"{",
"'means'",
":",
"np",
".",
"nan",
",",
"'stds'",
":",
"np",
".",
"nan",
",",
"'mean_threshold'",
":",
"np",
".",
"nan",
",",
"'std_threshold'",
":",
"np",
".",
"nan",
",",
"'lims'",
":",
"np",
".",
"nan",
",",
"'filt'",
":",
"ind",
",",
"'threshold_mode'",
":",
"threshold_mode",
",",
"'min_points'",
":",
"min_points",
",",
"'analytes'",
":",
"analytes",
",",
"'opt_centre'",
":",
"np",
".",
"nan",
",",
"'opt_n_points'",
":",
"np",
".",
"nan",
",",
"'weights'",
":",
"weights",
",",
"'optimisation_success'",
":",
"False",
",",
"'errmsg'",
":",
"errmsg",
"}",
")",
",",
"errmsg",
"# define thresholds",
"valid",
"=",
"[",
"'kde_first_max'",
",",
"'kde_max'",
",",
"'median'",
",",
"'bayes_mvs'",
",",
"'mean'",
"]",
"n_under",
"=",
"0",
"i",
"=",
"np",
".",
"argwhere",
"(",
"np",
".",
"array",
"(",
"valid",
")",
"==",
"threshold_mode",
")",
"[",
"0",
",",
"0",
"]",
"o_threshold_mode",
"=",
"threshold_mode",
"while",
"(",
"n_under",
"<=",
"0",
")",
"&",
"(",
"i",
"<",
"len",
"(",
"valid",
")",
")",
":",
"if",
"threshold_mode",
"==",
"'median'",
":",
"# median - OK, but best?",
"std_threshold",
"=",
"np",
".",
"nanmedian",
"(",
"msstds",
")",
"mean_threshold",
"=",
"np",
".",
"nanmedian",
"(",
"msmeans",
")",
"elif",
"threshold_mode",
"==",
"'mean'",
":",
"# mean",
"std_threshold",
"=",
"np",
".",
"nanmean",
"(",
"msstds",
")",
"mean_threshold",
"=",
"np",
".",
"nanmean",
"(",
"msmeans",
")",
"elif",
"threshold_mode",
"==",
"'kde_max'",
":",
"# maximum of gaussian kernel density estimator",
"mkd",
"=",
"gaussian_kde",
"(",
"msmeans",
"[",
"~",
"np",
".",
"isnan",
"(",
"msmeans",
")",
"]",
".",
"flat",
")",
"xm",
"=",
"np",
".",
"linspace",
"(",
"*",
"np",
".",
"percentile",
"(",
"msmeans",
".",
"flatten",
"(",
")",
"[",
"~",
"np",
".",
"isnan",
"(",
"msmeans",
".",
"flatten",
"(",
")",
")",
"]",
",",
"(",
"1",
",",
"99",
")",
")",
",",
"100",
")",
"mdf",
"=",
"mkd",
".",
"pdf",
"(",
"xm",
")",
"mean_threshold",
"=",
"xm",
"[",
"np",
".",
"argmax",
"(",
"mdf",
")",
"]",
"rkd",
"=",
"gaussian_kde",
"(",
"msstds",
"[",
"~",
"np",
".",
"isnan",
"(",
"msstds",
")",
"]",
")",
"xr",
"=",
"np",
".",
"linspace",
"(",
"*",
"np",
".",
"percentile",
"(",
"msstds",
".",
"flatten",
"(",
")",
"[",
"~",
"np",
".",
"isnan",
"(",
"msstds",
".",
"flatten",
"(",
")",
")",
"]",
",",
"(",
"1",
",",
"99",
")",
")",
",",
"100",
")",
"rdf",
"=",
"rkd",
".",
"pdf",
"(",
"xr",
")",
"std_threshold",
"=",
"xr",
"[",
"np",
".",
"argmax",
"(",
"rdf",
")",
"]",
"elif",
"threshold_mode",
"==",
"'kde_first_max'",
":",
"# first local maximum of gaussian kernel density estimator",
"mkd",
"=",
"gaussian_kde",
"(",
"msmeans",
"[",
"~",
"np",
".",
"isnan",
"(",
"msmeans",
")",
"]",
".",
"flat",
")",
"xm",
"=",
"np",
".",
"linspace",
"(",
"*",
"np",
".",
"percentile",
"(",
"msmeans",
".",
"flatten",
"(",
")",
"[",
"~",
"np",
".",
"isnan",
"(",
"msmeans",
".",
"flatten",
"(",
")",
")",
"]",
",",
"(",
"1",
",",
"99",
")",
")",
",",
"100",
")",
"mdf",
"=",
"mkd",
".",
"pdf",
"(",
"xm",
")",
"inds",
"=",
"np",
".",
"argwhere",
"(",
"np",
".",
"r_",
"[",
"False",
",",
"mdf",
"[",
"1",
":",
"]",
">",
"mdf",
"[",
":",
"-",
"1",
"]",
"]",
"&",
"np",
".",
"r_",
"[",
"mdf",
"[",
":",
"-",
"1",
"]",
">",
"mdf",
"[",
"1",
":",
"]",
",",
"False",
"]",
"&",
"(",
"mdf",
">",
"0.25",
"*",
"mdf",
".",
"max",
"(",
")",
")",
")",
"mean_threshold",
"=",
"xm",
"[",
"np",
".",
"min",
"(",
"inds",
")",
"]",
"rkd",
"=",
"gaussian_kde",
"(",
"msstds",
"[",
"~",
"np",
".",
"isnan",
"(",
"msstds",
")",
"]",
")",
"xr",
"=",
"np",
".",
"linspace",
"(",
"*",
"np",
".",
"percentile",
"(",
"msstds",
".",
"flatten",
"(",
")",
"[",
"~",
"np",
".",
"isnan",
"(",
"msstds",
".",
"flatten",
"(",
")",
")",
"]",
",",
"(",
"1",
",",
"99",
")",
")",
",",
"100",
")",
"rdf",
"=",
"rkd",
".",
"pdf",
"(",
"xr",
")",
"inds",
"=",
"np",
".",
"argwhere",
"(",
"np",
".",
"r_",
"[",
"False",
",",
"rdf",
"[",
"1",
":",
"]",
">",
"rdf",
"[",
":",
"-",
"1",
"]",
"]",
"&",
"np",
".",
"r_",
"[",
"rdf",
"[",
":",
"-",
"1",
"]",
">",
"rdf",
"[",
"1",
":",
"]",
",",
"False",
"]",
"&",
"(",
"rdf",
">",
"0.25",
"*",
"rdf",
".",
"max",
"(",
")",
")",
")",
"std_threshold",
"=",
"xr",
"[",
"np",
".",
"min",
"(",
"inds",
")",
"]",
"elif",
"threshold_mode",
"==",
"'bayes_mvs'",
":",
"# bayesian mvs.",
"bm",
",",
"_",
",",
"bs",
"=",
"bayes_mvs",
"(",
"msstds",
"[",
"~",
"np",
".",
"isnan",
"(",
"msstds",
")",
"]",
")",
"std_threshold",
"=",
"bm",
".",
"statistic",
"bm",
",",
"_",
",",
"bs",
"=",
"bayes_mvs",
"(",
"msmeans",
"[",
"~",
"np",
".",
"isnan",
"(",
"msmeans",
")",
"]",
")",
"mean_threshold",
"=",
"bm",
".",
"statistic",
"elif",
"callable",
"(",
"threshold_mode",
")",
":",
"std_threshold",
"=",
"threshold_mode",
"(",
"msstds",
"[",
"~",
"np",
".",
"isnan",
"(",
"msstds",
")",
"]",
".",
"flatten",
"(",
")",
")",
"mean_threshold",
"=",
"threshold_mode",
"(",
"msmeans",
"[",
"~",
"np",
".",
"isnan",
"(",
"msmeans",
")",
"]",
".",
"flatten",
"(",
")",
")",
"else",
":",
"try",
":",
"mean_threshold",
",",
"std_threshold",
"=",
"threshold_mode",
"except",
":",
"raise",
"ValueError",
"(",
"'\\nthreshold_mode must be one of:\\n '",
"+",
"', '",
".",
"join",
"(",
"valid",
")",
"+",
"',\\na custom function, or a \\n(mean_threshold, std_threshold) tuple.'",
")",
"# apply threshold_mult",
"if",
"isinstance",
"(",
"threshold_mult",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"std_threshold",
"*=",
"threshold_mult",
"mean_threshold",
"*=",
"threshold_mult",
"elif",
"len",
"(",
"threshold_mult",
")",
"==",
"2",
":",
"mean_threshold",
"*=",
"threshold_mult",
"[",
"0",
"]",
"std_threshold",
"*=",
"threshold_mult",
"[",
"1",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'\\nthreshold_mult must be a float, int or tuple of length 2.'",
")",
"rind",
"=",
"(",
"msstds",
"<",
"std_threshold",
")",
"if",
"mode",
"==",
"'minimise'",
":",
"mind",
"=",
"(",
"msmeans",
"<",
"mean_threshold",
")",
"else",
":",
"mind",
"=",
"(",
"msmeans",
">",
"mean_threshold",
")",
"ind",
"=",
"rind",
"&",
"mind",
"n_under",
"=",
"ind",
".",
"sum",
"(",
")",
"if",
"n_under",
"==",
"0",
":",
"i",
"+=",
"1",
"if",
"i",
"<=",
"len",
"(",
"valid",
")",
"-",
"1",
":",
"threshold_mode",
"=",
"valid",
"[",
"i",
"]",
"else",
":",
"errmsg",
"=",
"'Optimisation failed. No of the threshold_mode would work. Try reducting min_points.'",
"return",
"Bunch",
"(",
"{",
"'means'",
":",
"np",
".",
"nan",
",",
"'stds'",
":",
"np",
".",
"nan",
",",
"'mean_threshold'",
":",
"np",
".",
"nan",
",",
"'std_threshold'",
":",
"np",
".",
"nan",
",",
"'lims'",
":",
"np",
".",
"nan",
",",
"'filt'",
":",
"ind",
",",
"'threshold_mode'",
":",
"threshold_mode",
",",
"'min_points'",
":",
"min_points",
",",
"'analytes'",
":",
"analytes",
",",
"'opt_centre'",
":",
"np",
".",
"nan",
",",
"'opt_n_points'",
":",
"np",
".",
"nan",
",",
"'weights'",
":",
"weights",
",",
"'optimisation_success'",
":",
"False",
",",
"'errmsg'",
":",
"errmsg",
"}",
")",
",",
"errmsg",
"if",
"i",
">",
"0",
":",
"errmsg",
"=",
"\"optimisation failed using threshold_mode='{:}', falling back to '{:}'\"",
".",
"format",
"(",
"o_threshold_mode",
",",
"threshold_mode",
")",
"# identify max number of points within thresholds",
"passing",
"=",
"np",
".",
"argwhere",
"(",
"ind",
")",
"opt_n_points",
"=",
"passing",
"[",
":",
",",
"0",
"]",
".",
"max",
"(",
")",
"opt_centre",
"=",
"passing",
"[",
"passing",
"[",
":",
",",
"0",
"]",
"==",
"opt_n_points",
",",
"1",
"]",
".",
"min",
"(",
")",
"opt_n_points",
"+=",
"min_points",
"# centres, npoints = np.meshgrid(np.arange(msmeans.shape[1]),",
"# np.arange(min_points, min_points + msmeans.shape[0]))",
"# opt_n_points = npoints[ind].max()",
"# plus/minus one point to allow some freedom to shift selection window.",
"# cind = ind & (npoints == opt_n_points)",
"# opt_centre = centres[cind].min()",
"if",
"opt_n_points",
"%",
"2",
"==",
"0",
":",
"lims",
"=",
"(",
"opt_centre",
"-",
"opt_n_points",
"//",
"2",
",",
"opt_centre",
"+",
"opt_n_points",
"//",
"2",
")",
"else",
":",
"lims",
"=",
"(",
"opt_centre",
"-",
"opt_n_points",
"//",
"2",
",",
"opt_centre",
"+",
"opt_n_points",
"//",
"2",
"+",
"1",
")",
"filt",
"=",
"np",
".",
"zeros",
"(",
"d",
".",
"Time",
".",
"shape",
",",
"dtype",
"=",
"bool",
")",
"filt",
"[",
"lims",
"[",
"0",
"]",
":",
"lims",
"[",
"1",
"]",
"]",
"=",
"True",
"return",
"Bunch",
"(",
"{",
"'means'",
":",
"msmeans",
",",
"'stds'",
":",
"msstds",
",",
"'mean_threshold'",
":",
"mean_threshold",
",",
"'std_threshold'",
":",
"std_threshold",
",",
"'lims'",
":",
"lims",
",",
"'filt'",
":",
"filt",
",",
"'threshold_mode'",
":",
"threshold_mode",
",",
"'min_points'",
":",
"min_points",
",",
"'analytes'",
":",
"analytes",
",",
"'opt_centre'",
":",
"opt_centre",
",",
"'opt_n_points'",
":",
"opt_n_points",
",",
"'weights'",
":",
"weights",
",",
"'optimisation_success'",
":",
"True",
",",
"'errmsg'",
":",
"errmsg",
"}",
")",
",",
"errmsg"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
optimisation_plot
|
Plot the result of signal_optimise.
`signal_optimiser` must be run first, and the output
stored in the `opt` attribute of the latools.D object.
Parameters
----------
d : latools.D object
A latools data object.
overlay_alpha : float
The opacity of the threshold overlays. Between 0 and 1.
**kwargs
Passed to `tplot`
|
latools/filtering/signal_optimiser.py
|
def optimisation_plot(d, overlay_alpha=0.5, **kwargs):
"""
Plot the result of signal_optimise.
`signal_optimiser` must be run first, and the output
stored in the `opt` attribute of the latools.D object.
Parameters
----------
d : latools.D object
A latools data object.
overlay_alpha : float
The opacity of the threshold overlays. Between 0 and 1.
**kwargs
Passed to `tplot`
"""
if not hasattr(d, 'opt'):
raise ValueError('Please run `signal_optimiser` before trying to plot its results.')
out = []
for n, opt in d.opt.items():
if not opt['optimisation_success']:
out.append((None, None))
else:
# unpack variables
means = opt['means']
stds = opt['stds']
min_points = opt['min_points']
mean_threshold = opt['mean_threshold']
std_threshold = opt['std_threshold']
opt_centre = opt['opt_centre']
opt_n_points = opt['opt_n_points']
centres, npoints = np.meshgrid(np.arange(means.shape[1]), np.arange(min_points, min_points + means.shape[0]))
rind = (stds < std_threshold)
mind = (means < mean_threshold)
# color scale and histogram limits
mlim = np.percentile(means.flatten()[~np.isnan(means.flatten())], (0, 99))
rlim = np.percentile(stds.flatten()[~np.isnan(stds.flatten())], (0, 99))
cmr = plt.cm.Blues
cmr.set_bad((0,0,0,0.3))
cmm = plt.cm.Reds
cmm.set_bad((0,0,0,0.3))
# create figure
fig = plt.figure(figsize=[7,7])
ma = fig.add_subplot(3, 2, 1)
ra = fig.add_subplot(3, 2, 2)
# work out image limits
nonan = np.argwhere(~np.isnan(means))
xdif = np.ptp(nonan[:, 1])
ydif = np.ptp(nonan[:, 0])
extent = (nonan[:, 1].min() - np.ceil(0.1 * xdif), # x min
nonan[:, 1].max() + np.ceil(0.1 * xdif), # x max
nonan[:, 0].min() + min_points, # y min
nonan[:, 0].max() + np.ceil(0.1 * ydif) + min_points) # y max
mm = ma.imshow(means, origin='bottomleft', cmap=cmm, vmin=mlim[0], vmax=mlim[1],
extent=(centres.min(), centres.max(), npoints.min(), npoints.max()))
ma.set_ylabel('N points')
ma.set_xlabel('Center')
fig.colorbar(mm, ax=ma, label='Amplitude')
mr = ra.imshow(stds, origin='bottomleft', cmap=cmr, vmin=rlim[0], vmax=rlim[1],
extent=(centres.min(), centres.max(), npoints.min(), npoints.max()))
ra.set_xlabel('Center')
fig.colorbar(mr, ax=ra, label='std')
# view limits
ra.imshow(~rind, origin='bottomleft', cmap=plt.cm.Greys, alpha=overlay_alpha,
extent=(centres.min(), centres.max(), npoints.min(), npoints.max()))
ma.imshow(~mind, origin='bottomleft', cmap=plt.cm.Greys, alpha=overlay_alpha,
extent=(centres.min(), centres.max(), npoints.min(), npoints.max()))
for ax in [ma, ra]:
ax.scatter(opt_centre, opt_n_points, c=(1,1,1,0.7), edgecolor='k',marker='o')
ax.set_xlim(extent[:2])
ax.set_ylim(extent[-2:])
# draw histograms
mah = fig.add_subplot(3, 2, 3)
rah = fig.add_subplot(3, 2, 4)
mah.set_xlim(mlim)
mbin = np.linspace(*mah.get_xlim(), 50)
mah.hist(means.flatten()[~np.isnan(means.flatten())], mbin)
mah.axvspan(mean_threshold, mah.get_xlim()[1], color=(0,0,0,overlay_alpha))
mah.axvline(mean_threshold, c='r')
mah.set_xlabel('Scaled Mean Analyte Conc')
mah.set_ylabel('N')
rah.set_xlim(rlim)
rbin = np.linspace(*rah.get_xlim(), 50)
rah.hist(stds.flatten()[~np.isnan(stds.flatten())], rbin)
rah.axvspan(std_threshold, rah.get_xlim()[1], color=(0,0,0,0.4))
rah.axvline(std_threshold, c='r')
rah.set_xlabel('std')
tax = fig.add_subplot(3,1,3)
tplot(d, opt.analytes, ax=tax, **kwargs)
tax.axvspan(*d.Time[[opt.lims[0], opt.lims[1]]], alpha=0.2)
tax.set_xlim(d.Time[d.ns == n].min() - 3, d.Time[d.ns == n].max() + 3)
fig.tight_layout()
out.append((fig, (ma, ra, mah, rah, tax)))
return out
|
def optimisation_plot(d, overlay_alpha=0.5, **kwargs):
"""
Plot the result of signal_optimise.
`signal_optimiser` must be run first, and the output
stored in the `opt` attribute of the latools.D object.
Parameters
----------
d : latools.D object
A latools data object.
overlay_alpha : float
The opacity of the threshold overlays. Between 0 and 1.
**kwargs
Passed to `tplot`
"""
if not hasattr(d, 'opt'):
raise ValueError('Please run `signal_optimiser` before trying to plot its results.')
out = []
for n, opt in d.opt.items():
if not opt['optimisation_success']:
out.append((None, None))
else:
# unpack variables
means = opt['means']
stds = opt['stds']
min_points = opt['min_points']
mean_threshold = opt['mean_threshold']
std_threshold = opt['std_threshold']
opt_centre = opt['opt_centre']
opt_n_points = opt['opt_n_points']
centres, npoints = np.meshgrid(np.arange(means.shape[1]), np.arange(min_points, min_points + means.shape[0]))
rind = (stds < std_threshold)
mind = (means < mean_threshold)
# color scale and histogram limits
mlim = np.percentile(means.flatten()[~np.isnan(means.flatten())], (0, 99))
rlim = np.percentile(stds.flatten()[~np.isnan(stds.flatten())], (0, 99))
cmr = plt.cm.Blues
cmr.set_bad((0,0,0,0.3))
cmm = plt.cm.Reds
cmm.set_bad((0,0,0,0.3))
# create figure
fig = plt.figure(figsize=[7,7])
ma = fig.add_subplot(3, 2, 1)
ra = fig.add_subplot(3, 2, 2)
# work out image limits
nonan = np.argwhere(~np.isnan(means))
xdif = np.ptp(nonan[:, 1])
ydif = np.ptp(nonan[:, 0])
extent = (nonan[:, 1].min() - np.ceil(0.1 * xdif), # x min
nonan[:, 1].max() + np.ceil(0.1 * xdif), # x max
nonan[:, 0].min() + min_points, # y min
nonan[:, 0].max() + np.ceil(0.1 * ydif) + min_points) # y max
mm = ma.imshow(means, origin='bottomleft', cmap=cmm, vmin=mlim[0], vmax=mlim[1],
extent=(centres.min(), centres.max(), npoints.min(), npoints.max()))
ma.set_ylabel('N points')
ma.set_xlabel('Center')
fig.colorbar(mm, ax=ma, label='Amplitude')
mr = ra.imshow(stds, origin='bottomleft', cmap=cmr, vmin=rlim[0], vmax=rlim[1],
extent=(centres.min(), centres.max(), npoints.min(), npoints.max()))
ra.set_xlabel('Center')
fig.colorbar(mr, ax=ra, label='std')
# view limits
ra.imshow(~rind, origin='bottomleft', cmap=plt.cm.Greys, alpha=overlay_alpha,
extent=(centres.min(), centres.max(), npoints.min(), npoints.max()))
ma.imshow(~mind, origin='bottomleft', cmap=plt.cm.Greys, alpha=overlay_alpha,
extent=(centres.min(), centres.max(), npoints.min(), npoints.max()))
for ax in [ma, ra]:
ax.scatter(opt_centre, opt_n_points, c=(1,1,1,0.7), edgecolor='k',marker='o')
ax.set_xlim(extent[:2])
ax.set_ylim(extent[-2:])
# draw histograms
mah = fig.add_subplot(3, 2, 3)
rah = fig.add_subplot(3, 2, 4)
mah.set_xlim(mlim)
mbin = np.linspace(*mah.get_xlim(), 50)
mah.hist(means.flatten()[~np.isnan(means.flatten())], mbin)
mah.axvspan(mean_threshold, mah.get_xlim()[1], color=(0,0,0,overlay_alpha))
mah.axvline(mean_threshold, c='r')
mah.set_xlabel('Scaled Mean Analyte Conc')
mah.set_ylabel('N')
rah.set_xlim(rlim)
rbin = np.linspace(*rah.get_xlim(), 50)
rah.hist(stds.flatten()[~np.isnan(stds.flatten())], rbin)
rah.axvspan(std_threshold, rah.get_xlim()[1], color=(0,0,0,0.4))
rah.axvline(std_threshold, c='r')
rah.set_xlabel('std')
tax = fig.add_subplot(3,1,3)
tplot(d, opt.analytes, ax=tax, **kwargs)
tax.axvspan(*d.Time[[opt.lims[0], opt.lims[1]]], alpha=0.2)
tax.set_xlim(d.Time[d.ns == n].min() - 3, d.Time[d.ns == n].max() + 3)
fig.tight_layout()
out.append((fig, (ma, ra, mah, rah, tax)))
return out
|
[
"Plot",
"the",
"result",
"of",
"signal_optimise",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/signal_optimiser.py#L392-L508
|
[
"def",
"optimisation_plot",
"(",
"d",
",",
"overlay_alpha",
"=",
"0.5",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"hasattr",
"(",
"d",
",",
"'opt'",
")",
":",
"raise",
"ValueError",
"(",
"'Please run `signal_optimiser` before trying to plot its results.'",
")",
"out",
"=",
"[",
"]",
"for",
"n",
",",
"opt",
"in",
"d",
".",
"opt",
".",
"items",
"(",
")",
":",
"if",
"not",
"opt",
"[",
"'optimisation_success'",
"]",
":",
"out",
".",
"append",
"(",
"(",
"None",
",",
"None",
")",
")",
"else",
":",
"# unpack variables",
"means",
"=",
"opt",
"[",
"'means'",
"]",
"stds",
"=",
"opt",
"[",
"'stds'",
"]",
"min_points",
"=",
"opt",
"[",
"'min_points'",
"]",
"mean_threshold",
"=",
"opt",
"[",
"'mean_threshold'",
"]",
"std_threshold",
"=",
"opt",
"[",
"'std_threshold'",
"]",
"opt_centre",
"=",
"opt",
"[",
"'opt_centre'",
"]",
"opt_n_points",
"=",
"opt",
"[",
"'opt_n_points'",
"]",
"centres",
",",
"npoints",
"=",
"np",
".",
"meshgrid",
"(",
"np",
".",
"arange",
"(",
"means",
".",
"shape",
"[",
"1",
"]",
")",
",",
"np",
".",
"arange",
"(",
"min_points",
",",
"min_points",
"+",
"means",
".",
"shape",
"[",
"0",
"]",
")",
")",
"rind",
"=",
"(",
"stds",
"<",
"std_threshold",
")",
"mind",
"=",
"(",
"means",
"<",
"mean_threshold",
")",
"# color scale and histogram limits",
"mlim",
"=",
"np",
".",
"percentile",
"(",
"means",
".",
"flatten",
"(",
")",
"[",
"~",
"np",
".",
"isnan",
"(",
"means",
".",
"flatten",
"(",
")",
")",
"]",
",",
"(",
"0",
",",
"99",
")",
")",
"rlim",
"=",
"np",
".",
"percentile",
"(",
"stds",
".",
"flatten",
"(",
")",
"[",
"~",
"np",
".",
"isnan",
"(",
"stds",
".",
"flatten",
"(",
")",
")",
"]",
",",
"(",
"0",
",",
"99",
")",
")",
"cmr",
"=",
"plt",
".",
"cm",
".",
"Blues",
"cmr",
".",
"set_bad",
"(",
"(",
"0",
",",
"0",
",",
"0",
",",
"0.3",
")",
")",
"cmm",
"=",
"plt",
".",
"cm",
".",
"Reds",
"cmm",
".",
"set_bad",
"(",
"(",
"0",
",",
"0",
",",
"0",
",",
"0.3",
")",
")",
"# create figure",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"[",
"7",
",",
"7",
"]",
")",
"ma",
"=",
"fig",
".",
"add_subplot",
"(",
"3",
",",
"2",
",",
"1",
")",
"ra",
"=",
"fig",
".",
"add_subplot",
"(",
"3",
",",
"2",
",",
"2",
")",
"# work out image limits",
"nonan",
"=",
"np",
".",
"argwhere",
"(",
"~",
"np",
".",
"isnan",
"(",
"means",
")",
")",
"xdif",
"=",
"np",
".",
"ptp",
"(",
"nonan",
"[",
":",
",",
"1",
"]",
")",
"ydif",
"=",
"np",
".",
"ptp",
"(",
"nonan",
"[",
":",
",",
"0",
"]",
")",
"extent",
"=",
"(",
"nonan",
"[",
":",
",",
"1",
"]",
".",
"min",
"(",
")",
"-",
"np",
".",
"ceil",
"(",
"0.1",
"*",
"xdif",
")",
",",
"# x min",
"nonan",
"[",
":",
",",
"1",
"]",
".",
"max",
"(",
")",
"+",
"np",
".",
"ceil",
"(",
"0.1",
"*",
"xdif",
")",
",",
"# x max",
"nonan",
"[",
":",
",",
"0",
"]",
".",
"min",
"(",
")",
"+",
"min_points",
",",
"# y min",
"nonan",
"[",
":",
",",
"0",
"]",
".",
"max",
"(",
")",
"+",
"np",
".",
"ceil",
"(",
"0.1",
"*",
"ydif",
")",
"+",
"min_points",
")",
"# y max",
"mm",
"=",
"ma",
".",
"imshow",
"(",
"means",
",",
"origin",
"=",
"'bottomleft'",
",",
"cmap",
"=",
"cmm",
",",
"vmin",
"=",
"mlim",
"[",
"0",
"]",
",",
"vmax",
"=",
"mlim",
"[",
"1",
"]",
",",
"extent",
"=",
"(",
"centres",
".",
"min",
"(",
")",
",",
"centres",
".",
"max",
"(",
")",
",",
"npoints",
".",
"min",
"(",
")",
",",
"npoints",
".",
"max",
"(",
")",
")",
")",
"ma",
".",
"set_ylabel",
"(",
"'N points'",
")",
"ma",
".",
"set_xlabel",
"(",
"'Center'",
")",
"fig",
".",
"colorbar",
"(",
"mm",
",",
"ax",
"=",
"ma",
",",
"label",
"=",
"'Amplitude'",
")",
"mr",
"=",
"ra",
".",
"imshow",
"(",
"stds",
",",
"origin",
"=",
"'bottomleft'",
",",
"cmap",
"=",
"cmr",
",",
"vmin",
"=",
"rlim",
"[",
"0",
"]",
",",
"vmax",
"=",
"rlim",
"[",
"1",
"]",
",",
"extent",
"=",
"(",
"centres",
".",
"min",
"(",
")",
",",
"centres",
".",
"max",
"(",
")",
",",
"npoints",
".",
"min",
"(",
")",
",",
"npoints",
".",
"max",
"(",
")",
")",
")",
"ra",
".",
"set_xlabel",
"(",
"'Center'",
")",
"fig",
".",
"colorbar",
"(",
"mr",
",",
"ax",
"=",
"ra",
",",
"label",
"=",
"'std'",
")",
"# view limits",
"ra",
".",
"imshow",
"(",
"~",
"rind",
",",
"origin",
"=",
"'bottomleft'",
",",
"cmap",
"=",
"plt",
".",
"cm",
".",
"Greys",
",",
"alpha",
"=",
"overlay_alpha",
",",
"extent",
"=",
"(",
"centres",
".",
"min",
"(",
")",
",",
"centres",
".",
"max",
"(",
")",
",",
"npoints",
".",
"min",
"(",
")",
",",
"npoints",
".",
"max",
"(",
")",
")",
")",
"ma",
".",
"imshow",
"(",
"~",
"mind",
",",
"origin",
"=",
"'bottomleft'",
",",
"cmap",
"=",
"plt",
".",
"cm",
".",
"Greys",
",",
"alpha",
"=",
"overlay_alpha",
",",
"extent",
"=",
"(",
"centres",
".",
"min",
"(",
")",
",",
"centres",
".",
"max",
"(",
")",
",",
"npoints",
".",
"min",
"(",
")",
",",
"npoints",
".",
"max",
"(",
")",
")",
")",
"for",
"ax",
"in",
"[",
"ma",
",",
"ra",
"]",
":",
"ax",
".",
"scatter",
"(",
"opt_centre",
",",
"opt_n_points",
",",
"c",
"=",
"(",
"1",
",",
"1",
",",
"1",
",",
"0.7",
")",
",",
"edgecolor",
"=",
"'k'",
",",
"marker",
"=",
"'o'",
")",
"ax",
".",
"set_xlim",
"(",
"extent",
"[",
":",
"2",
"]",
")",
"ax",
".",
"set_ylim",
"(",
"extent",
"[",
"-",
"2",
":",
"]",
")",
"# draw histograms",
"mah",
"=",
"fig",
".",
"add_subplot",
"(",
"3",
",",
"2",
",",
"3",
")",
"rah",
"=",
"fig",
".",
"add_subplot",
"(",
"3",
",",
"2",
",",
"4",
")",
"mah",
".",
"set_xlim",
"(",
"mlim",
")",
"mbin",
"=",
"np",
".",
"linspace",
"(",
"*",
"mah",
".",
"get_xlim",
"(",
")",
",",
"50",
")",
"mah",
".",
"hist",
"(",
"means",
".",
"flatten",
"(",
")",
"[",
"~",
"np",
".",
"isnan",
"(",
"means",
".",
"flatten",
"(",
")",
")",
"]",
",",
"mbin",
")",
"mah",
".",
"axvspan",
"(",
"mean_threshold",
",",
"mah",
".",
"get_xlim",
"(",
")",
"[",
"1",
"]",
",",
"color",
"=",
"(",
"0",
",",
"0",
",",
"0",
",",
"overlay_alpha",
")",
")",
"mah",
".",
"axvline",
"(",
"mean_threshold",
",",
"c",
"=",
"'r'",
")",
"mah",
".",
"set_xlabel",
"(",
"'Scaled Mean Analyte Conc'",
")",
"mah",
".",
"set_ylabel",
"(",
"'N'",
")",
"rah",
".",
"set_xlim",
"(",
"rlim",
")",
"rbin",
"=",
"np",
".",
"linspace",
"(",
"*",
"rah",
".",
"get_xlim",
"(",
")",
",",
"50",
")",
"rah",
".",
"hist",
"(",
"stds",
".",
"flatten",
"(",
")",
"[",
"~",
"np",
".",
"isnan",
"(",
"stds",
".",
"flatten",
"(",
")",
")",
"]",
",",
"rbin",
")",
"rah",
".",
"axvspan",
"(",
"std_threshold",
",",
"rah",
".",
"get_xlim",
"(",
")",
"[",
"1",
"]",
",",
"color",
"=",
"(",
"0",
",",
"0",
",",
"0",
",",
"0.4",
")",
")",
"rah",
".",
"axvline",
"(",
"std_threshold",
",",
"c",
"=",
"'r'",
")",
"rah",
".",
"set_xlabel",
"(",
"'std'",
")",
"tax",
"=",
"fig",
".",
"add_subplot",
"(",
"3",
",",
"1",
",",
"3",
")",
"tplot",
"(",
"d",
",",
"opt",
".",
"analytes",
",",
"ax",
"=",
"tax",
",",
"*",
"*",
"kwargs",
")",
"tax",
".",
"axvspan",
"(",
"*",
"d",
".",
"Time",
"[",
"[",
"opt",
".",
"lims",
"[",
"0",
"]",
",",
"opt",
".",
"lims",
"[",
"1",
"]",
"]",
"]",
",",
"alpha",
"=",
"0.2",
")",
"tax",
".",
"set_xlim",
"(",
"d",
".",
"Time",
"[",
"d",
".",
"ns",
"==",
"n",
"]",
".",
"min",
"(",
")",
"-",
"3",
",",
"d",
".",
"Time",
"[",
"d",
".",
"ns",
"==",
"n",
"]",
".",
"max",
"(",
")",
"+",
"3",
")",
"fig",
".",
"tight_layout",
"(",
")",
"out",
".",
"append",
"(",
"(",
"fig",
",",
"(",
"ma",
",",
"ra",
",",
"mah",
",",
"rah",
",",
"tax",
")",
")",
")",
"return",
"out"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.