signature | body | docstring | id |
|---|---|---|---|
def summary_df(df_in, **kwargs): | true_values = kwargs.pop('<STR_LIT>', None)<EOL>include_true_values = kwargs.pop('<STR_LIT>', False)<EOL>include_rmse = kwargs.pop('<STR_LIT>', False)<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>if true_values is not None:<EOL><INDENT>assert true_values.shape[<NUM_LIT:0>] == df_in.shape[<NUM_LIT:1>], (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' + str(true_values.shape) + '<STR_LIT:U+002CU+0020>'<EOL>'<STR_LIT>' + str(df_in.shape))<EOL><DEDENT>df = pd.DataFrame([df_in.mean(axis=<NUM_LIT:0>), df_in.std(axis=<NUM_LIT:0>, ddof=<NUM_LIT:1>)],<EOL>index=['<STR_LIT>', '<STR_LIT>'])<EOL>if include_true_values:<EOL><INDENT>assert true_values is not None<EOL>df.loc['<STR_LIT>'] = true_values<EOL><DEDENT>df.index = pd.CategoricalIndex(df.index.values, ordered=True,<EOL>categories=['<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>'],<EOL>name='<STR_LIT>')<EOL>num_cals = df_in.shape[<NUM_LIT:0>]<EOL>mean_unc = df.loc['<STR_LIT>'] / np.sqrt(num_cals)<EOL>std_unc = df.loc['<STR_LIT>'] * np.sqrt(<NUM_LIT:1> / (<NUM_LIT:2> * (num_cals - <NUM_LIT:1>)))<EOL>df['<STR_LIT>'] = pd.Categorical(['<STR_LIT:value>'] * df.shape[<NUM_LIT:0>], ordered=True,<EOL>categories=['<STR_LIT:value>', '<STR_LIT>'])<EOL>df.set_index(['<STR_LIT>'], drop=True, append=True, inplace=True)<EOL>df.loc[('<STR_LIT>', '<STR_LIT>'), :] = mean_unc.values<EOL>df.loc[('<STR_LIT>', '<STR_LIT>'), :] = std_unc.values<EOL>if include_rmse:<EOL><INDENT>assert true_values is not None,'<STR_LIT>'<EOL>rmse, rmse_unc = rmse_and_unc(df_in.values, true_values)<EOL>df.loc[('<STR_LIT>', '<STR_LIT:value>'), :] = rmse<EOL>df.loc[('<STR_LIT>', '<STR_LIT>'), :] = rmse_unc<EOL><DEDENT>df.sort_index(inplace=True)<EOL>df.set_index(<EOL>[df.index.get_level_values('<STR_LIT>').astype(str),<EOL>df.index.get_level_values('<STR_LIT>')],<EOL>inplace=True)<EOL>return df<EOL> | Make a pandas data frame of the means and standard deviations of an array of results,
including the uncertainties on the values.
This is similar to pandas.DataFrame.describe but also includes estimates of
the numerical uncertainties.
The output DataFrame has multiindex levels:
'calculation type': mean and standard deviations of the data.
'result type': value and uncertainty for each quantity.
calculation type result type column_1 column_2 ...
mean value
mean uncertainty
std value
std uncertainty
Parameters
----------
df_in: pandas DataFrame
true_values: array
Analytical values if known for comparison with mean. Used to
calculate root mean squared errors (RMSE).
include_true_values: bool, optional
Whether or not to include true values in the output DataFrame.
include_rmse: bool, optional
Whether or not to include root-mean-squared-errors in the output
DataFrame.
Returns
-------
df: MultiIndex DataFrame | f15346:m3 |
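The statistics this row computes can be reproduced standalone. A minimal sketch, assuming a toy table of repeated calculation results (the column names are invented for illustration): the uncertainty on the mean is the standard error std/sqrt(n), and the uncertainty on the sample standard deviation is std*sqrt(1/(2(n-1))), matching the tokenized body above.

```python
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
# Toy input: 100 repeated calculations of 2 quantities.
df_in = pd.DataFrame(rng.normal(size=(100, 2)), columns=['logz', 'param_mean'])

n = df_in.shape[0]
std = df_in.std(axis=0, ddof=1)
summary = pd.DataFrame(
    {'mean value': df_in.mean(axis=0),
     'mean uncertainty': std / np.sqrt(n),  # standard error on the mean
     'std value': std,
     'std uncertainty': std * np.sqrt(1 / (2 * (n - 1)))}).T
print(summary)
```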
def efficiency_gain_df(method_names, method_values, est_names, **kwargs): | true_values = kwargs.pop('<STR_LIT>', None)<EOL>include_true_values = kwargs.pop('<STR_LIT>', False)<EOL>include_rmse = kwargs.pop('<STR_LIT>', False)<EOL>adjust_nsamp = kwargs.pop('<STR_LIT>', None)<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>if adjust_nsamp is not None:<EOL><INDENT>assert adjust_nsamp.shape == (len(method_names),)<EOL><DEDENT>assert len(method_names) == len(method_values)<EOL>df_dict = {}<EOL>for i, method_name in enumerate(method_names):<EOL><INDENT>df = summary_df_from_list(<EOL>method_values[i], est_names, true_values=true_values,<EOL>include_true_values=False, include_rmse=include_rmse)<EOL>if i != <NUM_LIT:0>:<EOL><INDENT>stats = ['<STR_LIT>']<EOL>if include_rmse:<EOL><INDENT>stats.append('<STR_LIT>')<EOL><DEDENT>if adjust_nsamp is not None:<EOL><INDENT>adjust = (adjust_nsamp[<NUM_LIT:0>] / adjust_nsamp[i])<EOL><DEDENT>else:<EOL><INDENT>adjust = <NUM_LIT:1><EOL><DEDENT>for stat in stats:<EOL><INDENT>gain, gain_unc = get_eff_gain(<EOL>df_dict[method_names[<NUM_LIT:0>]].loc[(stat, '<STR_LIT:value>')],<EOL>df_dict[method_names[<NUM_LIT:0>]].loc[(stat, '<STR_LIT>')],<EOL>df.loc[(stat, '<STR_LIT:value>')],<EOL>df.loc[(stat, '<STR_LIT>')], adjust=adjust)<EOL>key = stat + '<STR_LIT>'<EOL>df.loc[(key, '<STR_LIT:value>'), :] = gain<EOL>df.loc[(key, '<STR_LIT>'), :] = gain_unc<EOL><DEDENT><DEDENT>df_dict[method_name] = df<EOL><DEDENT>results = pd.concat(df_dict)<EOL>results.index.rename('<STR_LIT>', level=<NUM_LIT:0>, inplace=True)<EOL>new_ind = []<EOL>new_ind.append(pd.CategoricalIndex(<EOL>results.index.get_level_values('<STR_LIT>'), ordered=True,<EOL>categories=['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>']))<EOL>new_ind.append(pd.CategoricalIndex(<EOL>results.index.get_level_values('<STR_LIT>'),<EOL>ordered=True, categories=['<STR_LIT>'] + method_names))<EOL>new_ind.append(results.index.get_level_values('<STR_LIT>'))<EOL>results.set_index(new_ind, inplace=True)<EOL>if include_true_values:<EOL><INDENT>with warnings.catch_warnings():<EOL><INDENT>warnings.filterwarnings('<STR_LIT:ignore>', message=(<EOL>'<STR_LIT>'))<EOL>results.loc[('<STR_LIT>', '<STR_LIT>', '<STR_LIT:value>'), :] = true_values<EOL><DEDENT><DEDENT>results.sort_index(inplace=True)<EOL>return results<EOL> | r"""Calculate a data frame showing
.. math::
\mathrm{efficiency\,gain}
=
\frac{\mathrm{Var[base\,method]}}{\mathrm{Var[new\,method]}}
See the dynamic nested sampling paper (Higson et al. 2019) for more
details.
The standard method on which to base the gain is assumed to be the first
method input.
The output DataFrame will contain rows:
mean [dynamic goal]: mean calculation result for standard nested
sampling and dynamic nested sampling with each input dynamic
goal.
std [dynamic goal]: standard deviation of results for standard
nested sampling and dynamic nested sampling with each input
dynamic goal.
gain [dynamic goal]: the efficiency gain (computational speedup)
from dynamic nested sampling compared to standard nested
sampling. This equals (variance of standard results) /
(variance of dynamic results); see the dynamic nested
sampling paper for more details.
Parameters
----------
method_names: list of strs
method_values: list
Each element is a list of 1d arrays of results for the method. Each
array must have shape (len(est_names),).
est_names: list of strs
Provide column titles for output df.
true_values: iterable of same length as estimators list
True values of the estimators for the given likelihood and prior.
Returns
-------
results: pandas data frame
Results data frame. | f15346:m4 |
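A hedged usage sketch of this row's function: the method names, toy per-run result arrays and estimator names below are invented for illustration, and the import assumes nestcheck's pandas_functions module (the module name referenced elsewhere in this table).

```python
import numpy as np
import nestcheck.pandas_functions as pf  # module name as referenced above

rng = np.random.default_rng(1)
method_names = ['standard', 'dynamic']
# One list per method; each element is a 1d array of estimator results for
# a single run, with shape (len(est_names),).
method_values = [[rng.normal(0.0, sigma, size=2) for _ in range(50)]
                 for sigma in (1.0, 0.5)]
gains = pf.efficiency_gain_df(method_names, method_values,
                              est_names=['logz', 'param_mean'])
# 'dynamic' should show a gain of roughly (1.0 / 0.5) ** 2 = 4.
```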
def paper_format_efficiency_gain_df(eff_gain_df): | idxs = pd.IndexSlice[['<STR_LIT>', '<STR_LIT>'], :, :]<EOL>paper_df = copy.deepcopy(eff_gain_df.loc[idxs, :])<EOL>means = (eff_gain_df.xs('<STR_LIT>', level='<STR_LIT>')<EOL>.xs('<STR_LIT:value>', level='<STR_LIT>'))<EOL>for col in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>try:<EOL><INDENT>col_vals = []<EOL>for val in means[col].values:<EOL><INDENT>col_vals += [int(np.rint(val)), np.nan]<EOL><DEDENT>col_vals += [np.nan] * (paper_df.shape[<NUM_LIT:0>] - len(col_vals))<EOL>paper_df[col] = col_vals<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>row_name_map = {'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'}<EOL>row_names = (paper_df.index.get_level_values(<NUM_LIT:0>).astype(str) + '<STR_LIT:U+0020>' +<EOL>paper_df.index.get_level_values(<NUM_LIT:1>).astype(str))<EOL>for key, value in row_name_map.items():<EOL><INDENT>row_names = row_names.str.replace(key, value)<EOL><DEDENT>paper_df.index = [row_names, paper_df.index.get_level_values(<NUM_LIT:2>)]<EOL>return paper_df<EOL> | Transform efficiency gain data frames output by nestcheck into the
format shown in the dynamic nested sampling paper (Higson et al. 2019).
Parameters
----------
eff_gain_df: pandas DataFrame
DataFrame of the form produced by efficiency_gain_df.
Returns
-------
paper_df: pandas DataFrame | f15346:m5 |
def get_eff_gain(base_std, base_std_unc, meth_std, meth_std_unc, adjust=<NUM_LIT:1>): | ratio = base_std / meth_std<EOL>ratio_unc = array_ratio_std(<EOL>base_std, base_std_unc, meth_std, meth_std_unc)<EOL>gain = ratio ** <NUM_LIT:2><EOL>gain_unc = <NUM_LIT:2> * ratio * ratio_unc<EOL>gain *= adjust<EOL>gain_unc *= adjust<EOL>return gain, gain_unc<EOL> | r"""Calculates efficiency gain for a new method compared to a base method.
Given the variation in repeated calculations' results using the two
methods, the efficiency gain is:
.. math::
\mathrm{efficiency\,gain}
=
\frac{\mathrm{Var[base\,method]}}{\mathrm{Var[new\,method]}}
The uncertainty on the efficiency gain is also calculated.
See the dynamic nested sampling paper (Higson et al. 2019) for more
details.
Parameters
----------
base_std: 1d numpy array
base_std_unc: 1d numpy array
Uncertainties on base_std.
meth_std: 1d numpy array
meth_std_unc: 1d numpy array
Uncertainties on meth_std.
Returns
-------
gain: 1d numpy array
gain_unc: 1d numpy array
Uncertainties on gain. | f15346:m6 |
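The propagation above follows from d(r^2) = 2 r dr. A standalone numeric sketch with toy numbers, inlining the ratio uncertainty from array_ratio_std (defined in the next row):

```python
import numpy as np

base_std, base_std_unc = np.array([1.0]), np.array([0.05])
meth_std, meth_std_unc = np.array([0.5]), np.array([0.02])

ratio = base_std / meth_std
# Fractional errors added in quadrature (see array_ratio_std below).
ratio_unc = ratio * np.sqrt((base_std_unc / base_std) ** 2
                            + (meth_std_unc / meth_std) ** 2)
gain = ratio ** 2                  # [4.]: the new method is 4x as efficient
gain_unc = 2 * ratio * ratio_unc   # first-order error on the squared ratio
print(gain, gain_unc)
```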
def rmse_and_unc(values_array, true_values): | assert true_values.shape == (values_array.shape[<NUM_LIT:1>],)<EOL>errors = values_array - true_values[np.newaxis, :]<EOL>sq_errors = errors ** <NUM_LIT:2><EOL>sq_errors_mean = np.mean(sq_errors, axis=<NUM_LIT:0>)<EOL>sq_errors_mean_unc = (np.std(sq_errors, axis=<NUM_LIT:0>, ddof=<NUM_LIT:1>) /<EOL>np.sqrt(sq_errors.shape[<NUM_LIT:0>]))<EOL>rmse = np.sqrt(sq_errors_mean)<EOL>rmse_unc = <NUM_LIT:0.5> * (<NUM_LIT:1> / rmse) * sq_errors_mean_unc<EOL>return rmse, rmse_unc<EOL> | r"""Calculate the root mean squared error and its numerical uncertainty.
With a reasonably large number of values in values_array the uncertainty
on sq_errors should be approximately normal (from the central limit
theorem).
Uncertainties are calculated via error propagation: if :math:`\sigma`
is the error on :math:`X` then the error on :math:`\sqrt{X}`
is :math:`\frac{\sigma}{2 \sqrt{X}}`.
Parameters
----------
values_array: 2d numpy array
Array of results: each row corresponds to a different estimate of the
quantities considered.
true_values: 1d numpy array
Correct values for the quantities considered.
Returns
-------
rmse: 1d numpy array
Root-mean-squared-error for each quantity.
rmse_unc: 1d numpy array
Numerical uncertainties on each element of rmse. | f15346:m7 |
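A minimal standalone check of the propagation described above, using toy unit-variance errors so the RMSE should come out close to 1:

```python
import numpy as np

rng = np.random.default_rng(2)
true_values = np.zeros(3)
values_array = rng.normal(true_values, 1.0, size=(200, 3))

sq_errors = (values_array - true_values[np.newaxis, :]) ** 2
mean_sq = np.mean(sq_errors, axis=0)
mean_sq_unc = np.std(sq_errors, axis=0, ddof=1) / np.sqrt(sq_errors.shape[0])
rmse = np.sqrt(mean_sq)
rmse_unc = 0.5 * mean_sq_unc / rmse  # error on sqrt(X) is sigma / (2 sqrt(X))
print(rmse, rmse_unc)
```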
def array_ratio_std(values_n, sigmas_n, values_d, sigmas_d): | std = np.sqrt((sigmas_n / values_n) ** <NUM_LIT:2> + (sigmas_d / values_d) ** <NUM_LIT:2>)<EOL>std *= (values_n / values_d)<EOL>return std<EOL> | r"""Gives error on the ratio of 2 floats or 2 1-dimensional arrays given
their values and uncertainties. This assumes the covariance = 0, and that
the input uncertainties are small compared to the corresponding input
values. _n and _d denote the numerator and denominator respectively.
Parameters
----------
values_n: float or numpy array
Numerator values.
sigmas_n: float or numpy array
:math:`1\sigma` uncertainties on values_n.
values_d: float or numpy array
Denominator values.
sigmas_d: float or numpy array
:math:`1\sigma` uncertainties on values_d.
Returns
-------
std: float or numpy array
:math:`1\sigma` uncertainty on values_n / values_d. | f15346:m8 |
@nestcheck.io_utils.save_load_result<EOL>def run_list_error_values(run_list, estimator_list, estimator_names,<EOL>n_simulate=<NUM_LIT:100>, **kwargs): | thread_pvalue = kwargs.pop('<STR_LIT>', False)<EOL>bs_stat_dist = kwargs.pop('<STR_LIT>', False)<EOL>parallel = kwargs.pop('<STR_LIT>', True)<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>assert len(estimator_list) == len(estimator_names), (<EOL>'<STR_LIT>'<EOL>.format(len(estimator_list), len(estimator_names)))<EOL>df = estimator_values_df(run_list, estimator_list, parallel=parallel,<EOL>estimator_names=estimator_names)<EOL>df.index = df.index.map(str)<EOL>df['<STR_LIT>'] = '<STR_LIT>'<EOL>df.set_index('<STR_LIT>', drop=True, append=True, inplace=True)<EOL>df = df.reorder_levels(['<STR_LIT>', '<STR_LIT>'])<EOL>bs_vals_df = bs_values_df(run_list, estimator_list, estimator_names,<EOL>n_simulate, parallel=parallel)<EOL>bs_std_df = bs_vals_df.applymap(lambda x: np.std(x, ddof=<NUM_LIT:1>))<EOL>bs_std_df.index.name = '<STR_LIT>'<EOL>bs_std_df['<STR_LIT>'] = '<STR_LIT>'<EOL>bs_std_df.set_index('<STR_LIT>', drop=True, append=True,<EOL>inplace=True)<EOL>bs_std_df = bs_std_df.reorder_levels(['<STR_LIT>', '<STR_LIT>'])<EOL>df = pd.concat([df, bs_std_df])<EOL>if thread_pvalue:<EOL><INDENT>t_vals_df = thread_values_df(<EOL>run_list, estimator_list, estimator_names, parallel=parallel)<EOL>t_d_df = pairwise_dists_on_cols(t_vals_df, earth_mover_dist=False,<EOL>energy_dist=False)<EOL>t_d_df = t_d_df.xs('<STR_LIT>', level='<STR_LIT>',<EOL>drop_level=False)<EOL>t_d_df.index.set_levels(['<STR_LIT>'], level='<STR_LIT>',<EOL>inplace=True)<EOL>df = pd.concat([df, t_d_df])<EOL><DEDENT>if bs_stat_dist:<EOL><INDENT>b_d_df = pairwise_dists_on_cols(bs_vals_df)<EOL>dists = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>b_d_df = b_d_df.loc[pd.IndexSlice[dists, :], :]<EOL>new_ind = ['<STR_LIT>' +<EOL>b_d_df.index.get_level_values('<STR_LIT>'),<EOL>b_d_df.index.get_level_values('<STR_LIT>')]<EOL>b_d_df.set_index(new_ind, inplace=True)<EOL>df = pd.concat([df, b_d_df])<EOL><DEDENT>return df<EOL> | Gets a data frame with calculation values and error diagnostics for each
run in the input run list.
NB when parallelised the results will not be produced in order (so the
results for a given run number will not necessarily correspond to that
run in run_list).
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs
Name of each func in estimator_list.
n_simulate: int, optional
Number of bootstrap replications to use on each run.
thread_pvalue: bool, optional
Whether or not to compute the KS test diagnostic for correlations between
threads within a run.
bs_stat_dist: bool, optional
Whether or not to compute the statistical distance between bootstrap
error distributions diagnostic.
parallel: bool, optional
Whether or not to parallelise - see parallel_utils.parallel_apply.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
df: pandas DataFrame
Results table showing calculation values and diagnostics. Rows
show different runs (or pairs of runs for pairwise comparisons).
Columns have titles given by estimator_names and show results for the
different functions in estimator_list. | f15347:m0 |
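A hedged usage sketch: `run_list` is assumed to already hold nested sampling run dicts (for example loaded with batch_process_data further down this table), the estimator functions follow the estimator rows below, and the module names are assumptions based on the nestcheck references in these docstrings.

```python
import functools
import nestcheck.estimators as e
import nestcheck.diagnostics_tables as dt  # module name assumed

# run_list: assumed list of nested sampling run dicts.
estimator_list = [e.logz, functools.partial(e.param_mean, param_ind=0)]
estimator_names = ['logz', 'theta1_mean']
error_df = dt.run_list_error_values(run_list, estimator_list, estimator_names,
                                    n_simulate=100, bs_stat_dist=True)
# Rows hold per-run values and bootstrap stds, plus pairwise bootstrap
# distance diagnostics; columns follow estimator_names.
```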
@nestcheck.io_utils.save_load_result<EOL>def estimator_values_df(run_list, estimator_list, **kwargs): | estimator_names = kwargs.pop(<EOL>'<STR_LIT>',<EOL>['<STR_LIT>' + str(i) for i in range(len(estimator_list))])<EOL>parallel = kwargs.pop('<STR_LIT>', True)<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>values_list = pu.parallel_apply(<EOL>nestcheck.ns_run_utils.run_estimators, run_list,<EOL>func_args=(estimator_list,), parallel=parallel)<EOL>df = pd.DataFrame(np.stack(values_list, axis=<NUM_LIT:0>))<EOL>df.columns = estimator_names<EOL>df.index.name = '<STR_LIT>'<EOL>return df<EOL> | Get a dataframe of estimator values.
NB when parallelised the results will not be produced in order (so the
results for a given run number will not necessarily correspond to that
run in run_list).
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs, optional
Name of each func in estimator_list.
parallel: bool, optional
Whether or not to parallelise - see parallel_utils.parallel_apply.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
df: pandas DataFrame
Results table of calculation values. Rows show different runs.
Columns have titles given by estimator_names and show results for the
different functions in estimator_list. | f15347:m1 |
def error_values_summary(error_values, **summary_df_kwargs): | df = pf.summary_df_from_multi(error_values, **summary_df_kwargs)<EOL>imp_std, imp_std_unc, imp_frac, imp_frac_unc =nestcheck.error_analysis.implementation_std(<EOL>df.loc[('<STR_LIT>', '<STR_LIT:value>')],<EOL>df.loc[('<STR_LIT>', '<STR_LIT>')],<EOL>df.loc[('<STR_LIT>', '<STR_LIT:value>')],<EOL>df.loc[('<STR_LIT>', '<STR_LIT>')])<EOL>df.loc[('<STR_LIT>', '<STR_LIT:value>'), df.columns] = imp_std<EOL>df.loc[('<STR_LIT>', '<STR_LIT>'), df.columns] = imp_std_unc<EOL>df.loc[('<STR_LIT>', '<STR_LIT:value>'), :] = imp_frac<EOL>df.loc[('<STR_LIT>', '<STR_LIT>'), :] = imp_frac_unc<EOL>if '<STR_LIT>' in set(df.index.get_level_values('<STR_LIT>')):<EOL><INDENT>imp_rmse, imp_rmse_unc, imp_frac, imp_frac_unc =nestcheck.error_analysis.implementation_std(<EOL>df.loc[('<STR_LIT>', '<STR_LIT:value>')],<EOL>df.loc[('<STR_LIT>', '<STR_LIT>')],<EOL>df.loc[('<STR_LIT>', '<STR_LIT:value>')],<EOL>df.loc[('<STR_LIT>', '<STR_LIT>')])<EOL>df.loc[('<STR_LIT>', '<STR_LIT:value>'), df.columns] = imp_rmse<EOL>df.loc[('<STR_LIT>', '<STR_LIT>'), df.columns] =imp_rmse_unc<EOL>df.loc[('<STR_LIT>', '<STR_LIT:value>'), :] = imp_frac<EOL>df.loc[('<STR_LIT>', '<STR_LIT>'), :] = imp_frac_unc<EOL><DEDENT>calcs_to_keep = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>']<EOL>df = pd.concat([df.xs(calc, level='<STR_LIT>', drop_level=False) for<EOL>calc in calcs_to_keep if calc in<EOL>df.index.get_level_values('<STR_LIT>')])<EOL>return df<EOL> | Get summary statistics about calculation errors, including estimated
implementation errors.
Parameters
----------
error_values: pandas DataFrame
Of the format output by run_list_error_values (see its docstring for
more details).
summary_df_kwargs: dict, optional
See pandas_functions.summary_df docstring for more details.
Returns
-------
df: pandas DataFrame
Table showing means and standard deviations of results and diagnostics
for the different runs. Also contains estimated numerical uncertainties
on results. | f15347:m2 |
def run_list_error_summary(run_list, estimator_list, estimator_names,<EOL>n_simulate, **kwargs): | true_values = kwargs.pop('<STR_LIT>', None)<EOL>include_true_values = kwargs.pop('<STR_LIT>', False)<EOL>include_rmse = kwargs.pop('<STR_LIT>', False)<EOL>error_values = run_list_error_values(run_list, estimator_list,<EOL>estimator_names, n_simulate, **kwargs)<EOL>return error_values_summary(error_values, true_values=true_values,<EOL>include_true_values=include_true_values,<EOL>include_rmse=include_rmse)<EOL> | Wrapper which runs run_list_error_values then applies error_values
summary to the resulting DataFrame. See the docstrings of those two
functions for more details and for descriptions of parameters and output. | f15347:m3 |
def bs_values_df(run_list, estimator_list, estimator_names, n_simulate,<EOL>**kwargs): | tqdm_kwargs = kwargs.pop('<STR_LIT>', {'<STR_LIT>': '<STR_LIT>'})<EOL>assert len(estimator_list) == len(estimator_names), (<EOL>'<STR_LIT>'<EOL>.format(len(estimator_list), len(estimator_names)))<EOL>bs_values_list = pu.parallel_apply(<EOL>nestcheck.error_analysis.run_bootstrap_values, run_list,<EOL>func_args=(estimator_list,), func_kwargs={'<STR_LIT>': n_simulate},<EOL>tqdm_kwargs=tqdm_kwargs, **kwargs)<EOL>df = pd.DataFrame()<EOL>for i, name in enumerate(estimator_names):<EOL><INDENT>df[name] = [arr[i, :] for arr in bs_values_list]<EOL><DEDENT>for vals_shape in df.loc[<NUM_LIT:0>].apply(lambda x: x.shape).values:<EOL><INDENT>assert vals_shape == (n_simulate,), (<EOL>'<STR_LIT>' + str(n_simulate) + '<STR_LIT>' +<EOL>'<STR_LIT>' +<EOL>str(vals_shape))<EOL><DEDENT>return df<EOL> | Computes a data frame of bootstrap resampled values.
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs
Name of each func in estimator_list.
n_simulate: int
Number of bootstrap replications to use on each run.
kwargs:
Kwargs to pass to parallel_apply.
Returns
-------
bs_values_df: pandas data frame
Columns represent estimators and rows represent runs.
Each cell contains a 1d array of bootstrap resampled values for the run
and estimator. | f15347:m4 |
def thread_values_df(run_list, estimator_list, estimator_names, **kwargs): | tqdm_kwargs = kwargs.pop('<STR_LIT>', {'<STR_LIT>': '<STR_LIT>'})<EOL>assert len(estimator_list) == len(estimator_names), (<EOL>'<STR_LIT>'<EOL>.format(len(estimator_list), len(estimator_names)))<EOL>thread_vals_arrays = pu.parallel_apply(<EOL>nestcheck.error_analysis.run_thread_values, run_list,<EOL>func_args=(estimator_list,), tqdm_kwargs=tqdm_kwargs, **kwargs)<EOL>df = pd.DataFrame()<EOL>for i, name in enumerate(estimator_names):<EOL><INDENT>df[name] = [arr[i, :] for arr in thread_vals_arrays]<EOL><DEDENT>for vals_shape in df.loc[<NUM_LIT:0>].apply(lambda x: x.shape).values:<EOL><INDENT>assert vals_shape == (run_list[<NUM_LIT:0>]['<STR_LIT>'].shape[<NUM_LIT:0>],),('<STR_LIT>' + str(run_list[<NUM_LIT:0>]['<STR_LIT>'].shape[<NUM_LIT:0>]) +<EOL>'<STR_LIT>' +<EOL>str(vals_shape))<EOL><DEDENT>return df<EOL> | Calculates estimator values for the constituent threads of the input
runs.
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs
Name of each func in estimator_list.
kwargs:
Kwargs to pass to parallel_apply.
Returns
-------
df: pandas data frame
Columns represent estimators and rows represent runs.
Each cell contains a 1d numpy array with length equal to the number
of threads in the run, containing the results from evaluating the
estimator on each thread. | f15347:m5 |
def pairwise_dists_on_cols(df_in, earth_mover_dist=True, energy_dist=True): | df = pd.DataFrame()<EOL>for col in df_in.columns:<EOL><INDENT>df[col] = nestcheck.error_analysis.pairwise_distances(<EOL>df_in[col].values, earth_mover_dist=earth_mover_dist,<EOL>energy_dist=energy_dist)<EOL><DEDENT>return df<EOL> | Computes pairwise statistical distance measures.
Parameters
----------
df_in: pandas data frame
Columns represent estimators and rows represent runs.
Each data frane element is an array of values which are used as samples
in the distance measures.
earth_mover_dist: bool, optional
Passed to error_analysis.pairwise_distances.
energy_dist: bool, optional
Passed to error_analysis.pairwise_distances.
Returns
-------
df: pandas data frame with distance values for each pair. | f15347:m6 |
def write_run_output(run, **kwargs): | write_dead = kwargs.pop('<STR_LIT>', True)<EOL>write_stats = kwargs.pop('<STR_LIT>', True)<EOL>posteriors = kwargs.pop('<STR_LIT>', False)<EOL>equals = kwargs.pop('<STR_LIT>', False)<EOL>stats_means_errs = kwargs.pop('<STR_LIT>', True)<EOL>fmt = kwargs.pop('<STR_LIT>', '<STR_LIT>')<EOL>n_simulate = kwargs.pop('<STR_LIT>', <NUM_LIT:100>)<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>mandatory_keys = ['<STR_LIT>', '<STR_LIT>']<EOL>for key in mandatory_keys:<EOL><INDENT>assert key in run['<STR_LIT>'], key + '<STR_LIT>'<EOL><DEDENT>root = os.path.join(run['<STR_LIT>']['<STR_LIT>'], run['<STR_LIT>']['<STR_LIT>'])<EOL>if write_dead:<EOL><INDENT>samples = run_dead_birth_array(run)<EOL>np.savetxt(root + '<STR_LIT>', samples, fmt=fmt)<EOL>np.savetxt(root + '<STR_LIT>', samples[:, :-<NUM_LIT:1>], fmt=fmt)<EOL><DEDENT>if equals or posteriors:<EOL><INDENT>w_rel = nestcheck.ns_run_utils.get_w_rel(run)<EOL>post_arr = np.zeros((run['<STR_LIT>'].shape[<NUM_LIT:0>], run['<STR_LIT>'].shape[<NUM_LIT:1>] + <NUM_LIT:2>))<EOL>post_arr[:, <NUM_LIT:0>] = w_rel<EOL>post_arr[:, <NUM_LIT:1>] = -<NUM_LIT:2> * run['<STR_LIT>']<EOL>post_arr[:, <NUM_LIT:2>:] = run['<STR_LIT>']<EOL><DEDENT>if posteriors:<EOL><INDENT>np.savetxt(root + '<STR_LIT>', post_arr, fmt=fmt)<EOL>run['<STR_LIT>']['<STR_LIT>'] = post_arr.shape[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>run['<STR_LIT>']['<STR_LIT>'] = <NUM_LIT:0><EOL><DEDENT>if equals:<EOL><INDENT>inds = np.where(w_rel > np.random.random(w_rel.shape[<NUM_LIT:0>]))[<NUM_LIT:0>]<EOL>np.savetxt(root + '<STR_LIT>', post_arr[inds, <NUM_LIT:1>:],<EOL>fmt=fmt)<EOL>run['<STR_LIT>']['<STR_LIT>'] = inds.shape[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>run['<STR_LIT>']['<STR_LIT>'] = <NUM_LIT:0><EOL><DEDENT>if write_stats:<EOL><INDENT>run['<STR_LIT>']['<STR_LIT>'] = run['<STR_LIT>'].shape[<NUM_LIT:0>]<EOL>if stats_means_errs:<EOL><INDENT>estimators = [e.logz]<EOL>for i in range(run['<STR_LIT>'].shape[<NUM_LIT:1>]):<EOL><INDENT>estimators.append(functools.partial(e.param_mean, param_ind=i))<EOL><DEDENT>values = nestcheck.ns_run_utils.run_estimators(run, estimators)<EOL>stds = nestcheck.error_analysis.run_std_bootstrap(<EOL>run, estimators, n_simulate=n_simulate)<EOL>run['<STR_LIT>']['<STR_LIT>'] = values[<NUM_LIT:0>]<EOL>run['<STR_LIT>']['<STR_LIT>'] = stds[<NUM_LIT:0>]<EOL>run['<STR_LIT>']['<STR_LIT>'] = list(values[<NUM_LIT:1>:])<EOL>run['<STR_LIT>']['<STR_LIT>'] = list(stds[<NUM_LIT:1>:])<EOL><DEDENT>write_stats_file(run['<STR_LIT>'])<EOL><DEDENT> | Writes PolyChord output files corresponding to the input nested sampling
run. The file root is
.. code-block:: python
root = os.path.join(run['output']['base_dir'],
run['output']['file_root'])
Output files which can be made with this function (see the PolyChord
documentation for more information about what each contains):
* [root].stats
* [root].txt
* [root]_equal_weights.txt
* [root]_dead-birth.txt
* [root]_dead.txt
Files produced by PolyChord which are not made by this function:
* [root].resume: for resuming runs part way through (not relevant for a
completed run).
* [root]_phys_live.txt and [root]_phys_live-birth.txt: for checking runtime
progress (not relevant for a completed run).
* [root].paramnames: for use with getdist (not needed when calling getdist
from within python).
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
write_dead: bool, optional
Whether or not to write [root]_dead.txt and [root]_dead-birth.txt.
write_stats: bool, optional
Whether or not to write [root].stats.
posteriors: bool, optional
Whether or not to write [root].txt.
equals: bool, optional
Whether or not to write [root]_equal_weights.txt.
stats_means_errs: bool, optional
Whether or not to calculate mean values of :math:`\log \mathcal{Z}` and
each parameter, and their uncertainties.
fmt: str, optional
Formatting for numbers written by np.savetxt. Default value is set to
make output files look like the ones produced by PolyChord.
n_simulate: int, optional
Number of bootstrap replications to use when estimating uncertainty on
evidence and parameter means. | f15348:m0 |
def run_dead_birth_array(run, **kwargs): | nestcheck.ns_run_utils.check_ns_run(run, **kwargs)<EOL>threads = nestcheck.ns_run_utils.get_run_threads(run)<EOL>samp_arrays = []<EOL>ndim = run['<STR_LIT>'].shape[<NUM_LIT:1>]<EOL>for th in threads:<EOL><INDENT>samp_arr = np.zeros((th['<STR_LIT>'].shape[<NUM_LIT:0>], ndim + <NUM_LIT:2>))<EOL>samp_arr[:, :ndim] = th['<STR_LIT>']<EOL>samp_arr[:, ndim] = th['<STR_LIT>']<EOL>samp_arr[<NUM_LIT:1>:, ndim + <NUM_LIT:1>] = th['<STR_LIT>'][:-<NUM_LIT:1>]<EOL>if th['<STR_LIT>'][<NUM_LIT:0>, <NUM_LIT:0>] == -np.inf:<EOL><INDENT>samp_arr[<NUM_LIT:0>, ndim + <NUM_LIT:1>] = -<NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>samp_arr[<NUM_LIT:0>, ndim + <NUM_LIT:1>] = th['<STR_LIT>'][<NUM_LIT:0>, <NUM_LIT:0>]<EOL><DEDENT>samp_arrays.append(samp_arr)<EOL><DEDENT>samples = np.vstack(samp_arrays)<EOL>samples = samples[np.argsort(samples[:, ndim]), :]<EOL>return samples<EOL> | Converts input run into an array of the format of a PolyChord
<root>_dead-birth.txt file. Note that this in fact includes live points
remaining at termination as well as dead points.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
kwargs: dict, optional
Options for check_ns_run.
Returns
-------
samples: 2d numpy array
Array of dead points and any remaining live points at termination.
Has #parameters + 2 columns:
param_1, param_2, ... , logl, birth_logl | f15348:m1 |
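A hedged sketch of reading such a file back; the path is invented for illustration. For a 2-parameter run each row is [param_1, param_2, logl, birth_logl], sorted by logl.

```python
import numpy as np

samples = np.loadtxt('chains/gaussian_run_1_dead-birth.txt')  # invented path
theta = samples[:, :-2]                     # parameter columns
logl, birth_logl = samples[:, -2], samples[:, -1]
assert np.all(np.diff(logl) >= 0)           # rows are sorted by logl
```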
def write_stats_file(run_output_dict): | mandatory_keys = ['<STR_LIT>', '<STR_LIT>']<EOL>for key in mandatory_keys:<EOL><INDENT>assert key in run_output_dict, key + '<STR_LIT>'<EOL><DEDENT>default_output = {'<STR_LIT>': <NUM_LIT:0.0>,<EOL>'<STR_LIT>': <NUM_LIT:0.0>,<EOL>'<STR_LIT>': [<NUM_LIT:0.0>],<EOL>'<STR_LIT>': [<NUM_LIT:0.0>],<EOL>'<STR_LIT>': <NUM_LIT:1>,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': <NUM_LIT:0.0>,<EOL>'<STR_LIT>': <NUM_LIT:0.0>,<EOL>'<STR_LIT>': [<NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT:0.0>],<EOL>'<STR_LIT>': [<NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT:0.0>]}<EOL>allowed_keys = set(mandatory_keys) | set(default_output.keys())<EOL>assert set(run_output_dict.keys()).issubset(allowed_keys), (<EOL>'<STR_LIT>'.format(<EOL>set(run_output_dict.keys()) - allowed_keys))<EOL>output = copy.deepcopy(run_output_dict)<EOL>for key, value in default_output.items():<EOL><INDENT>if key not in output:<EOL><INDENT>output[key] = value<EOL><DEDENT><DEDENT>file_lines = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>('<STR_LIT>'<EOL>'<STR_LIT>'),<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'.format(<EOL>output['<STR_LIT>'], output['<STR_LIT>']),<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>']<EOL>for i, (lz, lzerr) in enumerate(zip(output['<STR_LIT>'], output['<STR_LIT>'])):<EOL><INDENT>file_lines.append('<STR_LIT>'.format(<EOL>str(i + <NUM_LIT:1>).rjust(<NUM_LIT:2>), lz, lzerr))<EOL><DEDENT>file_lines += [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'.format(output['<STR_LIT>']),<EOL>'<STR_LIT>'.format(output['<STR_LIT>']),<EOL>'<STR_LIT>'.format(output['<STR_LIT>']),<EOL>'<STR_LIT>'.format(output['<STR_LIT>']),<EOL>'<STR_LIT>'.format(output['<STR_LIT>']),<EOL>'<STR_LIT>'.format(<EOL>output['<STR_LIT>'], output['<STR_LIT>']),<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>']<EOL>for i, (mean, meanerr) in enumerate(zip(output['<STR_LIT>'],<EOL>output['<STR_LIT>'])):<EOL><INDENT>file_lines.append('<STR_LIT>'.format(<EOL>str(i + <NUM_LIT:1>).ljust(<NUM_LIT:3>), mean, meanerr))<EOL><DEDENT>file_path = os.path.join(output['<STR_LIT>'],<EOL>output['<STR_LIT>'] + '<STR_LIT>')<EOL>with open(file_path, '<STR_LIT:w>') as stats_file:<EOL><INDENT>stats_file.writelines('<STR_LIT>'.format(line) for line in file_lines)<EOL><DEDENT>return output<EOL> | Writes a dummy PolyChord format .stats file for tests functions for
processing stats files. This is written to:
base_dir/file_root.stats
Also returns the data in the file as a dict for comparison.
Parameters
----------
run_output_dict: dict
Output information to write to .stats file. Must contain file_root and
base_dir. If other settings are not specified, default values are used.
Returns
-------
output: dict
The expected output of
nestcheck.process_polychord_stats(file_root, base_dir) | f15348:m2 |
def timing_decorator(func): | @functools.wraps(func)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>"""<STR_LIT>"""<EOL>print_time = kwargs.pop('<STR_LIT>', False)<EOL>if not print_time:<EOL><INDENT>return func(*args, **kwargs)<EOL><DEDENT>else:<EOL><INDENT>start_time = time.time()<EOL>result = func(*args, **kwargs)<EOL>end_time = time.time()<EOL>print(func.__name__ + '<STR_LIT>' %<EOL>(end_time - start_time))<EOL>return result<EOL><DEDENT><DEDENT>return wrapper<EOL> | Prints the time func takes to execute. | f15349:m0 |
def save_load_result(func): | @functools.wraps(func)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>"""<STR_LIT>"""<EOL>save_name = kwargs.pop('<STR_LIT>', None)<EOL>save = kwargs.pop('<STR_LIT>', save_name is not None)<EOL>load = kwargs.pop('<STR_LIT>', save_name is not None)<EOL>overwrite_existing = kwargs.pop('<STR_LIT>', True)<EOL>warn_if_error = kwargs.pop('<STR_LIT>', False)<EOL>if load:<EOL><INDENT>if save_name is None:<EOL><INDENT>warnings.warn(<EOL>('<STR_LIT>'<EOL>'<STR_LIT>'.format(func.__name__)),<EOL>UserWarning)<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>return pickle_load(save_name)<EOL><DEDENT>except (OSError, IOError) as err:<EOL><INDENT>if warn_if_error:<EOL><INDENT>msg = ('<STR_LIT>'.format(<EOL>func.__name__, type(err).__name__, save_name))<EOL>msg = '<STR_LIT>'<EOL>warnings.warn(msg, UserWarning)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>result = func(*args, **kwargs)<EOL>if save:<EOL><INDENT>if save_name is None:<EOL><INDENT>warnings.warn((func.__name__ + '<STR_LIT>' +<EOL>'<STR_LIT>'), UserWarning)<EOL><DEDENT>else:<EOL><INDENT>pickle_save(result, save_name,<EOL>overwrite_existing=overwrite_existing)<EOL><DEDENT><DEDENT>return result<EOL><DEDENT>return wrapper<EOL> | Saves and/or loads func output (must be picklable). | f15349:m1 |
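A sketch of how the decorator is used; the cache path is invented for illustration. The 'save_name', 'save' and 'load' kwargs are consumed by the wrapper rather than passed through to the decorated function.

```python
import nestcheck.io_utils

@nestcheck.io_utils.save_load_result
def expensive_analysis(data):
    return sum(data)

# The first call computes the result and pickles it under save_name; the
# second call loads the cached pickle instead of recomputing.
out = expensive_analysis([1, 2, 3], save_name='cache/demo')
out = expensive_analysis([1, 2, 3], save_name='cache/demo')
```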
@timing_decorator<EOL>def pickle_save(data, name, **kwargs): | extension = kwargs.pop('<STR_LIT>', '<STR_LIT>')<EOL>overwrite_existing = kwargs.pop('<STR_LIT>', True)<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>filename = name + extension<EOL>dirname = os.path.dirname(filename)<EOL>if not os.path.exists(dirname) and dirname != '<STR_LIT>':<EOL><INDENT>os.makedirs(dirname)<EOL><DEDENT>if os.path.isfile(filename) and not overwrite_existing:<EOL><INDENT>print(filename + '<STR_LIT>')<EOL>filename = name + '<STR_LIT:_>' + time.asctime().replace('<STR_LIT:U+0020>', '<STR_LIT:_>')<EOL>filename += extension<EOL><DEDENT>try:<EOL><INDENT>PermissionError<EOL><DEDENT>except NameError:<EOL><INDENT>PermissionError = IOError<EOL><DEDENT>try:<EOL><INDENT>outfile = open(filename, '<STR_LIT:wb>')<EOL>pickle.dump(data, outfile)<EOL>outfile.close()<EOL><DEDENT>except (MemoryError, PermissionError) as err:<EOL><INDENT>warnings.warn((type(err).__name__ + '<STR_LIT>'<EOL>'<STR_LIT>'), UserWarning)<EOL><DEDENT> | Saves object with pickle.
Parameters
----------
data: anything picklable
Object to save.
name: str
Path to save to (includes dir, excludes extension).
extension: str, optional
File extension.
overwrite_existing: bool, optional
When the save path already contains a file: if True, the file will be
overwritten; if False, the data will be saved with the system time
appended to the file name. | f15349:m2 |
@timing_decorator<EOL>def pickle_load(name, extension='<STR_LIT>'): | filename = name + extension<EOL>infile = open(filename, '<STR_LIT:rb>')<EOL>data = pickle.load(infile)<EOL>infile.close()<EOL>return data<EOL> | Load data with pickle.
Parameters
----------
name: str
Path of the file to load (includes dir, excludes extension).
extension: str, optional
File extension.
Returns
-------
Contents of file path. | f15349:m3 |
def count_samples(ns_run, **kwargs): | kwargs.pop('<STR_LIT>', None)<EOL>kwargs.pop('<STR_LIT>', None)<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>return ns_run['<STR_LIT>'].shape[<NUM_LIT:0>]<EOL> | r"""Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int | f15350:m0 |
def logz(ns_run, logw=None, simulate=False): | if logw is None:<EOL><INDENT>logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)<EOL><DEDENT>return scipy.special.logsumexp(logw)<EOL> | r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float | f15350:m1 |
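The estimator reduces to a logsumexp over the sample log weights. A toy standalone check in which 100 samples each carry posterior mass 0.01, so Z = 1 and log Z = 0:

```python
import numpy as np
import scipy.special

logw = np.log(np.full(100, 0.01))
print(scipy.special.logsumexp(logw))  # 0.0, up to floating point error
```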
def evidence(ns_run, logw=None, simulate=False): | if logw is None:<EOL><INDENT>logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)<EOL><DEDENT>return np.exp(scipy.special.logsumexp(logw))<EOL> | r"""Bayesian evidence :math:`\mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float | f15350:m2 |
def param_mean(ns_run, logw=None, simulate=False, param_ind=<NUM_LIT:0>,<EOL>handle_indexerror=False): | if logw is None:<EOL><INDENT>logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)<EOL><DEDENT>w_relative = np.exp(logw - logw.max())<EOL>try:<EOL><INDENT>return (np.sum(w_relative * ns_run['<STR_LIT>'][:, param_ind])<EOL>/ np.sum(w_relative))<EOL><DEDENT>except IndexError:<EOL><INDENT>if handle_indexerror:<EOL><INDENT>return np.nan<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT> | Mean of a single parameter (single component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the mean should be calculated. This
corresponds to the column of ns_run['theta'] which contains the
parameter.
handle_indexerror: bool, optional
Make the function return nan rather than raising an
IndexError if param_ind >= ndim. This is useful when applying
the same list of estimators to data sets of different dimensions.
Returns
-------
float | f15350:m3 |
def param_cred(ns_run, logw=None, simulate=False, probability=<NUM_LIT:0.5>,<EOL>param_ind=<NUM_LIT:0>): | if logw is None:<EOL><INDENT>logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)<EOL><DEDENT>w_relative = np.exp(logw - logw.max()) <EOL>return weighted_quantile(probability, ns_run['<STR_LIT>'][:, param_ind],<EOL>w_relative)<EOL> | One-tailed credible interval on the value of a single parameter
(component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
param_ind: int, optional
Index of parameter for which the credible interval should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float | f15350:m4 |
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=<NUM_LIT:0>): | if logw is None:<EOL><INDENT>logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)<EOL><DEDENT>w_relative = np.exp(logw - logw.max()) <EOL>w_relative /= np.sum(w_relative)<EOL>return np.sum(w_relative * (ns_run['<STR_LIT>'][:, param_ind] ** <NUM_LIT:2>))<EOL> | Mean of the square of single parameter (second moment of its
posterior distribution).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the second moment should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float | f15350:m5 |
def r_mean(ns_run, logw=None, simulate=False): | if logw is None:<EOL><INDENT>logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)<EOL><DEDENT>w_relative = np.exp(logw - logw.max())<EOL>r = np.sqrt(np.sum(ns_run['<STR_LIT>'] ** <NUM_LIT:2>, axis=<NUM_LIT:1>))<EOL>return np.sum(w_relative * r) / np.sum(w_relative)<EOL> | Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float | f15350:m6 |
def r_cred(ns_run, logw=None, simulate=False, probability=<NUM_LIT:0.5>): | if logw is None:<EOL><INDENT>logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)<EOL><DEDENT>w_relative = np.exp(logw - logw.max()) <EOL>r = np.sqrt(np.sum(ns_run['<STR_LIT>'] ** <NUM_LIT:2>, axis=<NUM_LIT:1>))<EOL>return weighted_quantile(probability, r, w_relative)<EOL> | One-tailed credible interval on the value of the radial coordinate
(magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
Returns
-------
float | f15350:m7 |
def get_latex_name(func_in, **kwargs): | if isinstance(func_in, functools.partial):<EOL><INDENT>func = func_in.func<EOL>assert not set(func_in.keywords) & set(kwargs), (<EOL>'<STR_LIT>'<EOL>.format(kwargs, func_in.keywords))<EOL>kwargs.update(func_in.keywords)<EOL><DEDENT>else:<EOL><INDENT>func = func_in<EOL><DEDENT>param_ind = kwargs.pop('<STR_LIT>', <NUM_LIT:0>)<EOL>probability = kwargs.pop('<STR_LIT>', <NUM_LIT:0.5>)<EOL>kwargs.pop('<STR_LIT>', None)<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>ind_str = r'<STR_LIT>' + str(param_ind + <NUM_LIT:1>) + '<STR_LIT>'<EOL>latex_name_dict = {<EOL>'<STR_LIT>': r'<STR_LIT>',<EOL>'<STR_LIT>': r'<STR_LIT>',<EOL>'<STR_LIT>': r'<STR_LIT>',<EOL>'<STR_LIT>': r'<STR_LIT>',<EOL>'<STR_LIT>': r'<STR_LIT>' + ind_str + '<STR_LIT>',<EOL>'<STR_LIT>': r'<STR_LIT>' + ind_str + '<STR_LIT>'}<EOL>if probability == <NUM_LIT:0.5>:<EOL><INDENT>cred_str = r'<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>percent_str = ('<STR_LIT>' % (probability * <NUM_LIT:100>)).rstrip('<STR_LIT:0>').rstrip('<STR_LIT:.>')<EOL>cred_str = r'<STR_LIT>' + percent_str + r'<STR_LIT>'<EOL><DEDENT>latex_name_dict['<STR_LIT>'] = cred_str + r'<STR_LIT>' + ind_str + '<STR_LIT>'<EOL>latex_name_dict['<STR_LIT>'] = cred_str + r'<STR_LIT>'<EOL>try:<EOL><INDENT>return latex_name_dict[func.__name__]<EOL><DEDENT>except KeyError as err:<EOL><INDENT>err.args = err.args + ('<STR_LIT>' +<EOL>func.__name__,)<EOL>raise<EOL><DEDENT> | Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function. | f15350:m8 |
def weighted_quantile(probability, values, weights): | assert <NUM_LIT:1> > probability > <NUM_LIT:0>, (<EOL>'<STR_LIT>' + str(probability) + '<STR_LIT>')<EOL>assert values.shape == weights.shape<EOL>assert values.ndim == <NUM_LIT:1><EOL>assert weights.ndim == <NUM_LIT:1><EOL>sorted_inds = np.argsort(values)<EOL>quantiles = np.cumsum(weights[sorted_inds]) - (<NUM_LIT:0.5> * weights[sorted_inds])<EOL>quantiles /= np.sum(weights)<EOL>return np.interp(probability, quantiles, values[sorted_inds])<EOL> | Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d numpy array
Sample values.
weights: 1d numpy array
Corresponding sample weights (same shape as values).
Returns
-------
quantile: float | f15350:m9 |
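A quick standalone check of the interpolation scheme above: with equal weights the 0.5 quantile recovers the sample median.

```python
import numpy as np

values = np.array([3.0, 1.0, 5.0, 2.0, 4.0])
weights = np.ones_like(values)

sorted_inds = np.argsort(values)
quantiles = np.cumsum(weights[sorted_inds]) - 0.5 * weights[sorted_inds]
quantiles /= np.sum(weights)
print(np.interp(0.5, quantiles, values[sorted_inds]))  # 3.0
```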
def parallel_map(func, *arg_iterable, **kwargs): | chunksize = kwargs.pop('<STR_LIT>', <NUM_LIT:1>)<EOL>func_pre_args = kwargs.pop('<STR_LIT>', ())<EOL>func_kwargs = kwargs.pop('<STR_LIT>', {})<EOL>max_workers = kwargs.pop('<STR_LIT>', None)<EOL>parallel = kwargs.pop('<STR_LIT>', True)<EOL>parallel_warning = kwargs.pop('<STR_LIT>', True)<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>func_to_map = functools.partial(func, *func_pre_args, **func_kwargs)<EOL>if parallel:<EOL><INDENT>pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers)<EOL>return list(pool.map(func_to_map, *arg_iterable, chunksize=chunksize))<EOL><DEDENT>else:<EOL><INDENT>if parallel_warning:<EOL><INDENT>warnings.warn(('<STR_LIT>'<EOL>'<STR_LIT>'),<EOL>UserWarning)<EOL><DEDENT>return list(map(func_to_map, *arg_iterable))<EOL><DEDENT> | Apply function to iterable with parallel map, and hence returns
results in order. functools.partial is used to freeze func_pre_args and
func_kwargs, meaning that the iterable argument must be the last positional
argument.
Roughly equivalent to
>>> [func(*func_pre_args, x, **func_kwargs) for x in arg_iterable]
Parameters
----------
func: function
Function to apply to list of args.
arg_iterable: iterable
argument to iterate over.
chunksize: int, optional
Perform the function in batches of this size.
func_pre_args: tuple, optional
Positional arguments to place before the iterable argument in func.
func_kwargs: dict, optional
Additional keyword arguments for func.
parallel: bool, optional
To turn off parallelisation if needed.
parallel_warning: bool, optional
To turn off warning for no parallelisation if needed.
max_workers: int or None, optional
Number of processes.
If max_workers is None then concurrent.futures.ProcessPoolExecutor
defaults to using the number of processors of the machine.
N.B. If max_workers=None and running on supercomputer clusters with
multiple nodes, this may default to the number of processors on a
single node.
Returns
-------
results_list: list of function outputs | f15351:m0 |
def parallel_apply(func, arg_iterable, **kwargs): | max_workers = kwargs.pop('<STR_LIT>', None)<EOL>parallel = kwargs.pop('<STR_LIT>', True)<EOL>parallel_warning = kwargs.pop('<STR_LIT>', True)<EOL>func_args = kwargs.pop('<STR_LIT>', ())<EOL>func_pre_args = kwargs.pop('<STR_LIT>', ())<EOL>func_kwargs = kwargs.pop('<STR_LIT>', {})<EOL>tqdm_kwargs = kwargs.pop('<STR_LIT>', {})<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>if '<STR_LIT>' not in tqdm_kwargs: <EOL><INDENT>tqdm_kwargs['<STR_LIT>'] = False<EOL><DEDENT>assert isinstance(func_args, tuple), (<EOL>str(func_args) + '<STR_LIT>' + str(type(func_args)))<EOL>assert isinstance(func_pre_args, tuple), (<EOL>str(func_pre_args) + '<STR_LIT>' + str(type(func_pre_args)))<EOL>progress = select_tqdm()<EOL>if not parallel:<EOL><INDENT>if parallel_warning:<EOL><INDENT>warnings.warn(('<STR_LIT>'<EOL>'<STR_LIT>'),<EOL>UserWarning)<EOL><DEDENT>return [func(*(func_pre_args + (x,) + func_args), **func_kwargs) for<EOL>x in progress(arg_iterable, **tqdm_kwargs)]<EOL><DEDENT>else:<EOL><INDENT>pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers)<EOL>futures = []<EOL>for element in arg_iterable:<EOL><INDENT>futures.append(pool.submit(<EOL>func, *(func_pre_args + (element,) + func_args),<EOL>**func_kwargs))<EOL><DEDENT>results = []<EOL>for fut in progress(concurrent.futures.as_completed(futures),<EOL>total=len(arg_iterable), **tqdm_kwargs):<EOL><INDENT>results.append(fut.result())<EOL><DEDENT>return results<EOL><DEDENT> | Apply function to iterable with parallelisation and a tqdm progress bar.
Roughly equivalent to
>>> [func(*func_pre_args, x, *func_args, **func_kwargs) for x in arg_iterable]
but will **not** necessarily return results in input order.
Parameters
----------
func: function
Function to apply to list of args.
arg_iterable: iterable
argument to iterate over.
func_args: tuple, optional
Additional positional arguments for func.
func_pre_args: tuple, optional
Positional arguments to place before the iterable argument in func.
func_kwargs: dict, optional
Additional keyword arguments for func.
parallel: bool, optional
To turn off parallelisation if needed.
parallel_warning: bool, optional
To turn off warning for no parallelisation if needed.
max_workers: int or None, optional
Number of processes.
If max_workers is None then concurrent.futures.ProcessPoolExecutor
defaults to using the number of processors of the machine.
N.B. If max_workers=None and running on supercomputer clusters with
multiple nodes, this may default to the number of processors on a
single node.
Returns
-------
results_list: list of function outputs | f15351:m1 |
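A toy usage sketch; the worker function is invented for illustration. Unlike parallel_map above, results are collected as the futures complete and so are not ordered. The __main__ guard is needed because process pools must be able to import the worker.

```python
import nestcheck.parallel_utils as pu

def affine(x, factor, offset=0):
    return factor * x + offset

if __name__ == '__main__':
    results = pu.parallel_apply(affine, range(10), func_args=(2,),
                                func_kwargs={'offset': 1}, max_workers=2)
    print(sorted(results))  # [1, 3, 5, ..., 19] once reordered
```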
def select_tqdm(): | try:<EOL><INDENT>progress = tqdm.tqdm_notebook<EOL>assert get_ipython().has_trait('<STR_LIT>')<EOL><DEDENT>except (NameError, AssertionError):<EOL><INDENT>progress = tqdm.tqdm<EOL><DEDENT>return progress<EOL> | If running in a jupyter notebook, then returns tqdm_notebook.
Otherwise returns a regular tqdm progress bar.
Returns
-------
progress: function | f15351:m2 |
@nestcheck.io_utils.save_load_result<EOL>def batch_process_data(file_roots, **kwargs): | base_dir = kwargs.pop('<STR_LIT>', '<STR_LIT>')<EOL>process_func = kwargs.pop('<STR_LIT>', process_polychord_run)<EOL>func_kwargs = kwargs.pop('<STR_LIT>', {})<EOL>func_kwargs['<STR_LIT>'] = kwargs.pop('<STR_LIT>', ())<EOL>data = nestcheck.parallel_utils.parallel_apply(<EOL>process_error_helper, file_roots, func_args=(base_dir, process_func),<EOL>func_kwargs=func_kwargs, **kwargs)<EOL>data = sorted(data,<EOL>key=lambda x: file_roots.index(x['<STR_LIT>']['<STR_LIT>']))<EOL>errors = {}<EOL>for i, run in enumerate(data):<EOL><INDENT>if '<STR_LIT:error>' in run:<EOL><INDENT>try:<EOL><INDENT>errors[run['<STR_LIT:error>']].append(i)<EOL><DEDENT>except KeyError:<EOL><INDENT>errors[run['<STR_LIT:error>']] = [i]<EOL><DEDENT><DEDENT><DEDENT>for error_name, index_list in errors.items():<EOL><INDENT>message = (error_name + '<STR_LIT>' + str(len(index_list)) + '<STR_LIT>'<EOL>+ str(len(file_roots)) + '<STR_LIT>')<EOL>if len(index_list) != len(file_roots):<EOL><INDENT>message += ('<STR_LIT>'<EOL>+ str(index_list))<EOL><DEDENT>print(message)<EOL><DEDENT>return [run for run in data if '<STR_LIT:error>' not in run]<EOL> | Process output from many nested sampling runs in parallel with optional
error handling and caching.
The result can be cached using the 'save_name', 'save' and 'load' kwargs
(by default this is not done). See save_load_result docstring for more
details.
Remaining kwargs passed to parallel_utils.parallel_apply (see its
docstring for more details).
Parameters
----------
file_roots: list of strs
File roots for the runs to load.
base_dir: str, optional
Path to the directory containing the files.
process_func: function, optional
Function used to process the data.
func_kwargs: dict, optional
Additional keyword arguments for process_func.
errors_to_handle: error or tuple of errors, optional
Which errors to catch when they occur in processing rather than
raising.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
list of ns_run dicts
List of nested sampling runs in dict format (see the module
docstring for more details). | f15352:m0 |
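A hedged usage sketch; the file roots, directory and cache name are invented for illustration, and the module name is an assumption. Handling OSError lets runs with missing files be skipped (with a printed summary) rather than aborting the whole batch.

```python
import nestcheck.data_processing as dp  # module name assumed

file_roots = ['gaussian_run_{}'.format(i) for i in range(1, 11)]
run_list = dp.batch_process_data(file_roots, base_dir='chains',
                                 process_func=dp.process_polychord_run,
                                 errors_to_handle=(OSError,),
                                 save_name='cache/run_list')
```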
def process_error_helper(root, base_dir, process_func, errors_to_handle=(),<EOL>**func_kwargs): | try:<EOL><INDENT>return process_func(root, base_dir, **func_kwargs)<EOL><DEDENT>except errors_to_handle as err:<EOL><INDENT>run = {'<STR_LIT:error>': type(err).__name__,<EOL>'<STR_LIT>': {'<STR_LIT>': root}}<EOL>return run<EOL><DEDENT> | Wrapper which applies process_func and handles some common errors so one
bad run does not spoil the whole batch.
Useful errors to handle include:
OSError: if you are not sure whether all the files exist.
AssertionError: if some of the many assertions fail for known reasons;
for example, if there are occasional problems decomposing runs into
threads due to limited numerical precision in logls.
Parameters
----------
root: str
File root.
base_dir: str
Directory containing file.
process_func: func
Function for processing file.
errors_to_handle: error type or tuple of error types
Errors to catch without throwing an exception.
func_kwargs: dict
Kwargs to pass to process_func.
Returns
-------
run: dict
Nested sampling run dict (see the module docstring for more
details) or, if an error occurred, a dict containing its type
and the file root. | f15352:m1 |
def process_polychord_run(file_root, base_dir, process_stats_file=True,<EOL>**kwargs): | <EOL>samples = np.loadtxt(os.path.join(base_dir, file_root) + '<STR_LIT>')<EOL>ns_run = process_samples_array(samples, **kwargs)<EOL>ns_run['<STR_LIT>'] = {'<STR_LIT>': base_dir, '<STR_LIT>': file_root}<EOL>if process_stats_file:<EOL><INDENT>try:<EOL><INDENT>ns_run['<STR_LIT>'] = process_polychord_stats(file_root, base_dir)<EOL><DEDENT>except (OSError, IOError, ValueError) as err:<EOL><INDENT>warnings.warn(<EOL>('<STR_LIT>'<EOL>'<STR_LIT>').format(<EOL>type(err).__name__, os.path.join(base_dir, file_root)),<EOL>UserWarning)<EOL><DEDENT><DEDENT>return ns_run<EOL> | Loads data from a PolyChord run into the nestcheck dictionary format for
analysis.
N.B. producing the required output file containing information about the
iso-likelihood contours within which points were sampled (where they were
"born") requires PolyChord version v1.13 or later and the setting
write_dead=True.
Parameters
----------
file_root: str
Root for run output file names (PolyChord file_root setting).
base_dir: str
Directory containing data (PolyChord base_dir setting).
process_stats_file: bool, optional
Should PolyChord's <root>.stats file be processed? Set to False if you
don't have the <root>.stats file (such as if PolyChord was run with
write_stats=False).
kwargs: dict, optional
Options passed to ns_run_utils.check_ns_run.
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details). | f15352:m2 |
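A hedged single-run sketch; the file root and directory are invented, and the module name is an assumption. In the released nestcheck the returned dict carries keys such as 'logl', 'theta', 'thread_labels', 'thread_min_max', 'nlive_array' and 'output' (several of these names appear in docstrings elsewhere in this table).

```python
import nestcheck.data_processing as dp  # module name assumed

ns_run = dp.process_polychord_run('gaussian_run_1', 'chains')
print(sorted(ns_run.keys()))
print(ns_run['theta'].shape)  # (number of samples, number of parameters)
```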
def process_multinest_run(file_root, base_dir, **kwargs): | <EOL>dead = np.loadtxt(os.path.join(base_dir, file_root) + '<STR_LIT>')<EOL>live = np.loadtxt(os.path.join(base_dir, file_root)<EOL>+ '<STR_LIT>')<EOL>dead = dead[:, :-<NUM_LIT:2>]<EOL>live = live[:, :-<NUM_LIT:1>]<EOL>assert dead[:, -<NUM_LIT:2>].max() < live[:, -<NUM_LIT:2>].min(), (<EOL>'<STR_LIT>',<EOL>dead, live)<EOL>ns_run = process_samples_array(np.vstack((dead, live)), **kwargs)<EOL>assert np.all(ns_run['<STR_LIT>'][:, <NUM_LIT:0>] == -np.inf), (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>ns_run['<STR_LIT>'] = {}<EOL>ns_run['<STR_LIT>']['<STR_LIT>'] = file_root<EOL>ns_run['<STR_LIT>']['<STR_LIT>'] = base_dir<EOL>return ns_run<EOL> | Loads data from a MultiNest run into the nestcheck dictionary format for
analysis.
N.B. producing the required output file containing information about the
iso-likelihood contours within which points were sampled (where they were
"born") requires MultiNest version 3.11 or later.
Parameters
----------
file_root: str
Root name for output files. When running MultiNest, this is determined
by the nest_root parameter.
base_dir: str
Directory containing output files. When running MultiNest, this is
determined by the nest_root parameter.
kwargs: dict, optional
Passed to ns_run_utils.check_ns_run (via process_samples_array)
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details). | f15352:m3 |
def process_dynesty_run(results): | samples = np.zeros((results.samples.shape[<NUM_LIT:0>],<EOL>results.samples.shape[<NUM_LIT:1>] + <NUM_LIT:3>))<EOL>samples[:, <NUM_LIT:0>] = results.logl<EOL>samples[:, <NUM_LIT:1>] = results.samples_id<EOL>samples[:, <NUM_LIT:3>:] = results.samples<EOL>unique_th, first_inds = np.unique(results.samples_id, return_index=True)<EOL>assert np.array_equal(unique_th, np.asarray(range(unique_th.shape[<NUM_LIT:0>])))<EOL>thread_min_max = np.full((unique_th.shape[<NUM_LIT:0>], <NUM_LIT:2>), np.nan)<EOL>try:<EOL><INDENT>assert unique_th.shape[<NUM_LIT:0>] == results.nlive<EOL>assert np.array_equal(<EOL>np.unique(results.samples_id[-results.nlive:]),<EOL>np.asarray(range(results.nlive))), (<EOL>'<STR_LIT>')<EOL>thread_min_max[:, <NUM_LIT:0>] = -np.inf<EOL><DEDENT>except AttributeError:<EOL><INDENT>assert unique_th.shape[<NUM_LIT:0>] == sum(results.batch_nlive)<EOL>for th_lab, ind in zip(unique_th, first_inds):<EOL><INDENT>thread_min_max[th_lab, <NUM_LIT:0>] = (<EOL>results.batch_bounds[results.samples_batch[ind], <NUM_LIT:0>])<EOL><DEDENT><DEDENT>for th_lab in unique_th:<EOL><INDENT>final_ind = np.where(results.samples_id == th_lab)[<NUM_LIT:0>][-<NUM_LIT:1>]<EOL>thread_min_max[th_lab, <NUM_LIT:1>] = results.logl[final_ind]<EOL>samples[final_ind, <NUM_LIT:2>] = -<NUM_LIT:1><EOL><DEDENT>assert np.all(~np.isnan(thread_min_max))<EOL>run = nestcheck.ns_run_utils.dict_given_run_array(samples, thread_min_max)<EOL>nestcheck.ns_run_utils.check_ns_run(run)<EOL>return run<EOL> | Transforms results from a dynesty run into the nestcheck dictionary
format for analysis. This function has been tested with dynesty v9.2.0.
Note that the nestcheck point weights and evidence will not be exactly
the same as the dynesty ones as nestcheck calculates logX volumes more
precisely (using the trapezium rule).
This function does not require the birth_inds_given_contours and
threads_given_birth_inds functions as dynesty results objects
already include thread labels via their samples_id property. If the
dynesty run is dynamic, the batch_bounds property is needed to determine
the threads' starting birth contours.
Parameters
----------
results: dynesty results object
N.B. the remaining live points at termination must be included in the
results (dynesty samplers' run_nested method does this if
add_live_points=True - its default value).
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details). | f15352:m4 |
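An illustrative end-to-end sketch for process_dynesty_run, assuming dynesty's standard NestedSampler API; the toy likelihood and prior transform are invented for the example:

```python
import numpy as np
import dynesty
import nestcheck.data_processing

def loglike(theta):
    # Toy 2d spherical Gaussian log-likelihood.
    return -0.5 * np.sum(theta ** 2)

def prior_transform(cube):
    # Map the unit cube to a uniform prior on [-5, 5]^2.
    return 10.0 * cube - 5.0

sampler = dynesty.NestedSampler(loglike, prior_transform, ndim=2)
sampler.run_nested()  # remaining live points are included by default
run = nestcheck.data_processing.process_dynesty_run(sampler.results)
print(run['nlive_array'][0])  # equals the sampler's nlive setting
```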
def process_polychord_stats(file_root, base_dir): | filename = os.path.join(base_dir, file_root) + '<STR_LIT>'<EOL>output = {'<STR_LIT>': base_dir,<EOL>'<STR_LIT>': file_root}<EOL>with open(filename, '<STR_LIT:r>') as stats_file:<EOL><INDENT>lines = stats_file.readlines()<EOL><DEDENT>output['<STR_LIT>'] = float(lines[<NUM_LIT:8>].split()[<NUM_LIT:2>])<EOL>output['<STR_LIT>'] = float(lines[<NUM_LIT:8>].split()[<NUM_LIT:4>])<EOL>output['<STR_LIT>'] = []<EOL>output['<STR_LIT>'] = []<EOL>for line in lines[<NUM_LIT>:]:<EOL><INDENT>if line[:<NUM_LIT:5>] != '<STR_LIT>':<EOL><INDENT>break<EOL><DEDENT>output['<STR_LIT>'].append(float(<EOL>re.findall(r'<STR_LIT>', line)[<NUM_LIT:0>].split()[<NUM_LIT:0>]))<EOL>output['<STR_LIT>'].append(float(<EOL>re.findall(r'<STR_LIT>', line)[<NUM_LIT:0>].split()[<NUM_LIT:2>]))<EOL><DEDENT>nclust = len(output['<STR_LIT>'])<EOL>output['<STR_LIT>'] = nclust<EOL>output['<STR_LIT>'] = int(lines[<NUM_LIT:20> + nclust].split()[<NUM_LIT:1>])<EOL>output['<STR_LIT>'] = int(lines[<NUM_LIT> + nclust].split()[<NUM_LIT:1>])<EOL>output['<STR_LIT>'] = int(lines[<NUM_LIT> + nclust].split()[<NUM_LIT:1>])<EOL>output['<STR_LIT>'] = int(lines[<NUM_LIT> + nclust].split()[<NUM_LIT:1>])<EOL>try:<EOL><INDENT>output['<STR_LIT>'] = int(lines[<NUM_LIT> + nclust].split()[<NUM_LIT:1>])<EOL><DEDENT>except ValueError:<EOL><INDENT>output['<STR_LIT>'] = np.nan<EOL><DEDENT>output['<STR_LIT>'] = float(lines[<NUM_LIT> + nclust].split()[<NUM_LIT:1>])<EOL>output['<STR_LIT>'] = float(lines[<NUM_LIT> + nclust].split()[<NUM_LIT:3>])<EOL>if len(lines) > <NUM_LIT> + nclust:<EOL><INDENT>output['<STR_LIT>'] = []<EOL>output['<STR_LIT>'] = []<EOL>for line in lines[<NUM_LIT> + nclust:]:<EOL><INDENT>output['<STR_LIT>'].append(float(line.split()[<NUM_LIT:1>]))<EOL>output['<STR_LIT>'].append(float(line.split()[<NUM_LIT:3>]))<EOL><DEDENT><DEDENT>return output<EOL> | Reads a PolyChord <root>.stats output file and returns the information
contained in a dictionary.
Parameters
----------
file_root: str
Root for run output file names (PolyChord file_root setting).
base_dir: str
Directory containing data (PolyChord base_dir setting).
Returns
-------
output: dict
See PolyChord documentation for more details. | f15352:m5 |
def process_samples_array(samples, **kwargs): | samples = samples[np.argsort(samples[:, -<NUM_LIT:2>])]<EOL>ns_run = {}<EOL>ns_run['<STR_LIT>'] = samples[:, -<NUM_LIT:2>]<EOL>ns_run['<STR_LIT>'] = samples[:, :-<NUM_LIT:2>]<EOL>birth_contours = samples[:, -<NUM_LIT:1>]<EOL>birth_inds = birth_inds_given_contours(<EOL>birth_contours, ns_run['<STR_LIT>'], **kwargs)<EOL>ns_run['<STR_LIT>'] = threads_given_birth_inds(birth_inds)<EOL>unique_threads = np.unique(ns_run['<STR_LIT>'])<EOL>assert np.array_equal(unique_threads,<EOL>np.asarray(range(unique_threads.shape[<NUM_LIT:0>])))<EOL>thread_min_max = np.zeros((unique_threads.shape[<NUM_LIT:0>], <NUM_LIT:2>))<EOL>delta_nlive = np.zeros(samples.shape[<NUM_LIT:0>] + <NUM_LIT:1>)<EOL>for label in unique_threads:<EOL><INDENT>thread_inds = np.where(ns_run['<STR_LIT>'] == label)[<NUM_LIT:0>]<EOL>thread_min_max[label, <NUM_LIT:1>] = ns_run['<STR_LIT>'][thread_inds[-<NUM_LIT:1>]]<EOL>thread_start_birth_ind = birth_inds[thread_inds[<NUM_LIT:0>]]<EOL>delta_nlive[thread_inds[-<NUM_LIT:1>] + <NUM_LIT:1>] -= <NUM_LIT:1><EOL>if thread_start_birth_ind == birth_inds[<NUM_LIT:0>]:<EOL><INDENT>thread_min_max[label, <NUM_LIT:0>] = -np.inf<EOL>delta_nlive[<NUM_LIT:0>] += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>assert thread_start_birth_ind >= <NUM_LIT:0><EOL>thread_min_max[label, <NUM_LIT:0>] = ns_run['<STR_LIT>'][thread_start_birth_ind]<EOL>delta_nlive[thread_start_birth_ind + <NUM_LIT:1>] += <NUM_LIT:1><EOL><DEDENT><DEDENT>ns_run['<STR_LIT>'] = thread_min_max<EOL>ns_run['<STR_LIT>'] = np.cumsum(delta_nlive)[:-<NUM_LIT:1>]<EOL>return ns_run<EOL> | Convert an array of nested sampling dead and live points of the type
produced by PolyChord and MultiNest into a nestcheck nested sampling run
dictionary.
Parameters
----------
samples: 2d numpy array
Array of dead points and any remaining live points at termination.
Has #parameters + 2 columns:
param_1, param_2, ... , logl, birth_logl
kwargs: dict, optional
Options passed to birth_inds_given_contours
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more
details). Only contains information in samples (not additional
optional output key). | f15352:m6 |
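To make the column convention concrete, here is a tiny hand-built array processed into a run dict; the three points form a single thread in which each point is born on the previous death contour (the values are purely illustrative, and the key names follow the run dict format used throughout these functions):

```python
import numpy as np

# Columns: theta_1, logl, birth_logl. The first point's birth contour
# (-1e30, PolyChord-style) marks sampling from the whole prior.
samples = np.array([[0.0, 0.1, -1e30],
                    [0.5, 0.2, 0.1],
                    [1.0, 0.3, 0.2]])
run = process_samples_array(samples)
print(run['nlive_array'])  # [1. 1. 1.] - a single live point throughout
```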
def birth_inds_given_contours(birth_logl_arr, logl_arr, **kwargs): | dup_assert = kwargs.pop('<STR_LIT>', False)<EOL>dup_warn = kwargs.pop('<STR_LIT>', False)<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>assert logl_arr.ndim == <NUM_LIT:1>, logl_arr.ndim<EOL>assert birth_logl_arr.ndim == <NUM_LIT:1>, birth_logl_arr.ndim<EOL>nestcheck.ns_run_utils.check_ns_run_logls(<EOL>{'<STR_LIT>': logl_arr}, dup_assert=dup_assert, dup_warn=dup_warn)<EOL>state = np.random.get_state() <EOL>np.random.seed(<NUM_LIT:0>)<EOL>init_birth = birth_logl_arr[<NUM_LIT:0>]<EOL>assert np.all(birth_logl_arr <= logl_arr), (<EOL>logl_arr[birth_logl_arr > logl_arr])<EOL>birth_inds = np.full(birth_logl_arr.shape, np.nan)<EOL>birth_inds[birth_logl_arr == init_birth] = -<NUM_LIT:1><EOL>for i, birth_logl in enumerate(birth_logl_arr):<EOL><INDENT>if not np.isnan(birth_inds[i]):<EOL><INDENT>continue<EOL><DEDENT>dup_deaths = np.where(logl_arr == birth_logl)[<NUM_LIT:0>]<EOL>if dup_deaths.shape == (<NUM_LIT:1>,):<EOL><INDENT>birth_inds[i] = dup_deaths[<NUM_LIT:0>]<EOL>continue<EOL><DEDENT>dup_births = np.where(birth_logl_arr == birth_logl)[<NUM_LIT:0>]<EOL>assert dup_deaths.shape[<NUM_LIT:0>] > <NUM_LIT:1>, dup_deaths<EOL>if np.all(birth_logl_arr[dup_deaths] != birth_logl):<EOL><INDENT>np.random.shuffle(dup_deaths)<EOL>inds_to_use = dup_deaths<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>inds_to_use = sample_less_than_condition(<EOL>dup_deaths, dup_births)<EOL><DEDENT>except ValueError:<EOL><INDENT>raise ValueError((<EOL>'<STR_LIT>'<EOL>'<STR_LIT>').format(<EOL>dup_deaths, dup_births))<EOL><DEDENT><DEDENT>try:<EOL><INDENT>birth_inds[dup_births] = inds_to_use[:dup_births.shape[<NUM_LIT:0>]]<EOL><DEDENT>except ValueError:<EOL><INDENT>warnings.warn((<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>').format(<EOL>birth_logl, dup_births, inds_to_use), UserWarning)<EOL>extra_inds = np.random.choice(<EOL>inds_to_use, size=dup_births.shape[<NUM_LIT:0>] - inds_to_use.shape[<NUM_LIT:0>])<EOL>inds_to_use = np.concatenate((inds_to_use, extra_inds))<EOL>np.random.shuffle(inds_to_use)<EOL>birth_inds[dup_births] = inds_to_use[:dup_births.shape[<NUM_LIT:0>]]<EOL><DEDENT><DEDENT>assert np.all(~np.isnan(birth_inds)), np.isnan(birth_inds).sum()<EOL>np.random.set_state(state) <EOL>return birth_inds.astype(int)<EOL> | Maps the iso-likelihood contours on which points were born to the
index of the dead point on this contour.
MultiNest and PolyChord use different values to identify the initial live
points which were sampled from the whole prior (PolyChord uses -1e+30
and MultiNest -0.179769313486231571E+309). However in each case the first
dead point must have been sampled from the whole prior, so for either
package we can use
init_birth = birth_logl_arr[0]
If there are many points with the same logl value and dup_assert is False,
these points are randomly assigned an order (to ensure results are
consistent, random seeding is used).
Parameters
----------
birth_logl_arr: 1d numpy array
Birth contours - i.e. logl values of the iso-likelihood contours from
within which each point was sampled (on which it was born).
logl_arr: 1d numpy array
logl values of each point.
dup_assert: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
dup_warn: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
Returns
-------
birth_inds: 1d numpy array of ints
Step at which each element of logl_arr was sampled. Points sampled from
the whole prior are assigned value -1. | f15352:m7 |
def sample_less_than_condition(choices_in, condition): | output = np.zeros(min(condition.shape[<NUM_LIT:0>], choices_in.shape[<NUM_LIT:0>]))<EOL>choices = copy.deepcopy(choices_in)<EOL>for i, _ in enumerate(output):<EOL><INDENT>avail_inds = np.where(choices < condition[i])[<NUM_LIT:0>]<EOL>selected_ind = np.random.choice(avail_inds)<EOL>output[i] = choices[selected_ind]<EOL>choices = np.delete(choices, selected_ind)<EOL><DEDENT>return output<EOL> | Creates a random sample from choices without replacement, subject to the
condition that each element of the output is less than the corresponding
element of the condition array.
condition should be in ascending order. | f15352:m8 |
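A small worked example of sample_less_than_condition; with these inputs the only valid assignment is deterministic, which makes the constraint easy to see:

```python
import numpy as np

np.random.seed(0)
choices = np.array([0.1, 0.5, 0.9])
condition = np.array([0.3, 0.7, 1.0])  # must be in ascending order
out = sample_less_than_condition(choices, condition)
print(out)  # [0.1 0.5 0.9]: each out[i] < condition[i], no replacement
```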
def threads_given_birth_inds(birth_inds): | unique, counts = np.unique(birth_inds, return_counts=True)<EOL>thread_start_inds = np.concatenate((<EOL>unique[:<NUM_LIT:1>], unique[<NUM_LIT:1>:][counts[<NUM_LIT:1>:] > <NUM_LIT:1>]))<EOL>thread_start_counts = np.concatenate((<EOL>counts[:<NUM_LIT:1>], counts[<NUM_LIT:1>:][counts[<NUM_LIT:1>:] > <NUM_LIT:1>] - <NUM_LIT:1>))<EOL>thread_labels = np.full(birth_inds.shape, np.nan)<EOL>thread_num = <NUM_LIT:0><EOL>for nmulti, multi in enumerate(thread_start_inds):<EOL><INDENT>for i, start_ind in enumerate(np.where(birth_inds == multi)[<NUM_LIT:0>]):<EOL><INDENT>if i != <NUM_LIT:0> or nmulti == <NUM_LIT:0>:<EOL><INDENT>assert np.isnan(thread_labels[start_ind])<EOL>thread_labels[start_ind] = thread_num<EOL>next_ind = np.where(birth_inds == start_ind)[<NUM_LIT:0>]<EOL>while next_ind.shape != (<NUM_LIT:0>,):<EOL><INDENT>assert np.isnan(thread_labels[next_ind[<NUM_LIT:0>]])<EOL>thread_labels[next_ind[<NUM_LIT:0>]] = thread_num<EOL>next_ind = np.where(birth_inds == next_ind[<NUM_LIT:0>])[<NUM_LIT:0>]<EOL><DEDENT>thread_num += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>if not np.all(~np.isnan(thread_labels)):<EOL><INDENT>warnings.warn((<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>').format(<EOL>(np.isnan(thread_labels)).sum(), birth_inds.shape[<NUM_LIT:0>],<EOL>np.where(np.isnan(thread_labels))[<NUM_LIT:0>],<EOL>thread_start_inds, thread_start_counts))<EOL>inds = np.where(np.isnan(thread_labels))[<NUM_LIT:0>]<EOL>state = np.random.get_state() <EOL>np.random.seed(<NUM_LIT:0>) <EOL>for ind in inds:<EOL><INDENT>labels_to_choose = np.intersect1d( <EOL>thread_labels[:ind], thread_labels[ind + <NUM_LIT:1>:])<EOL>if labels_to_choose.shape[<NUM_LIT:0>] == <NUM_LIT:0>:<EOL><INDENT>labels_to_choose = np.unique(<EOL>thread_labels[~np.isnan(thread_labels)])<EOL><DEDENT>thread_labels[ind] = np.random.choice(labels_to_choose)<EOL><DEDENT>np.random.set_state(state) <EOL><DEDENT>assert np.all(~np.isnan(thread_labels)), (<EOL>'<STR_LIT>'.format(<EOL>(np.isnan(thread_labels)).sum()))<EOL>assert np.array_equal(thread_labels, thread_labels.astype(int)), (<EOL>'<STR_LIT>')<EOL>thread_labels = thread_labels.astype(int)<EOL>assert np.array_equal(<EOL>np.unique(thread_labels),<EOL>np.asarray(range(sum(thread_start_counts)))), (<EOL>str(np.unique(thread_labels)) + '<STR_LIT>'<EOL>+ str(sum(thread_start_counts)) + '<STR_LIT:)>')<EOL>return thread_labels<EOL> | Divides a nested sampling run into threads, using info on the indexes
at which points were sampled. See "Sampling errors in nested sampling
parameter estimation" (Higson et al. 2018) for more information.
Parameters
----------
birth_inds: 1d numpy array
Indexes of the iso-likelihood contours from within which each point was
sampled ("born").
Returns
-------
thread_labels: 1d numpy array of ints
labels of the thread each point belongs to. | f15352:m9 |
def get_dummy_thread(nsamples, **kwargs): | seed = kwargs.pop('<STR_LIT>', False)<EOL>ndim = kwargs.pop('<STR_LIT>', <NUM_LIT:2>)<EOL>logl_start = kwargs.pop('<STR_LIT>', -np.inf)<EOL>logl_range = kwargs.pop('<STR_LIT>', <NUM_LIT:1>)<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>if seed is not False:<EOL><INDENT>np.random.seed(seed)<EOL><DEDENT>thread = {'<STR_LIT>': np.sort(np.random.random(nsamples)) * logl_range,<EOL>'<STR_LIT>': np.full(nsamples, <NUM_LIT:1.>),<EOL>'<STR_LIT>': np.random.random((nsamples, ndim)),<EOL>'<STR_LIT>': np.zeros(nsamples).astype(int)}<EOL>if logl_start != -np.inf:<EOL><INDENT>thread['<STR_LIT>'] += logl_start<EOL><DEDENT>thread['<STR_LIT>'] = np.asarray([[logl_start, thread['<STR_LIT>'][-<NUM_LIT:1>]]])<EOL>return thread<EOL> | Generate dummy data for a single nested sampling thread.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nsamples: int
Number of samples in thread.
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values. | f15354:m0 |
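An illustrative call (the printed keys follow the run dict format described in the surrounding docstrings):

```python
thread = get_dummy_thread(5, ndim=2, seed=0, logl_range=10)
print(thread['logl'])            # 5 sorted values in (0, 10)
print(thread['theta'].shape)     # (5, 2)
print(thread['thread_min_max'])  # [[-inf, <final logl>]]
```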
def get_dummy_run(nthread, nsamples, **kwargs): | seed = kwargs.pop('<STR_LIT>', False)<EOL>ndim = kwargs.pop('<STR_LIT>', <NUM_LIT:2>)<EOL>logl_start = kwargs.pop('<STR_LIT>', -np.inf)<EOL>logl_range = kwargs.pop('<STR_LIT>', <NUM_LIT:1>)<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>threads = []<EOL>if seed is not False:<EOL><INDENT>np.random.seed(seed)<EOL><DEDENT>threads = []<EOL>for _ in range(nthread):<EOL><INDENT>threads.append(get_dummy_thread(<EOL>nsamples, ndim=ndim, seed=False, logl_start=logl_start,<EOL>logl_range=logl_range))<EOL><DEDENT>threads = sorted(threads, key=lambda th: th['<STR_LIT>'][<NUM_LIT:0>])<EOL>for i, _ in enumerate(threads):<EOL><INDENT>threads[i]['<STR_LIT>'] = np.full(nsamples, i)<EOL><DEDENT>return nestcheck.ns_run_utils.combine_threads(threads)<EOL> | Generate dummy data for a nested sampling run.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nthread: int
Number of threads in the run.
nsamples: int
Number of samples in each thread.
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values. | f15354:m1 |
def get_dummy_dynamic_run(nsamples, **kwargs): | seed = kwargs.pop('<STR_LIT>', False)<EOL>ndim = kwargs.pop('<STR_LIT>', <NUM_LIT:2>)<EOL>nthread_init = kwargs.pop('<STR_LIT>', <NUM_LIT:2>)<EOL>nthread_dyn = kwargs.pop('<STR_LIT>', <NUM_LIT:3>)<EOL>logl_range = kwargs.pop('<STR_LIT>', <NUM_LIT:1>)<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>init = get_dummy_run(nthread_init, nsamples, ndim=ndim, seed=seed,<EOL>logl_start=-np.inf, logl_range=logl_range)<EOL>dyn_starts = list(np.random.choice(<EOL>init['<STR_LIT>'], nthread_dyn, replace=True))<EOL>threads = nestcheck.ns_run_utils.get_run_threads(init)<EOL>threads += [get_dummy_thread(<EOL>nsamples, ndim=ndim, seed=False, logl_start=start,<EOL>logl_range=logl_range) for start in dyn_starts]<EOL>for i, _ in enumerate(threads):<EOL><INDENT>threads[i]['<STR_LIT>'] = np.full(nsamples, i)<EOL><DEDENT>run = nestcheck.ns_run_utils.combine_threads(threads)<EOL>samples = nestcheck.write_polychord_output.run_dead_birth_array(run)<EOL>return nestcheck.data_processing.process_samples_array(samples)<EOL> | Generate dummy data for a dynamic nested sampling run.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nsamples: int
Number of samples in each thread.
nthread_init: int
Number of threads in the initial run (starting at logl=-np.inf).
nthread_dyn: int
Number of dynamically added threads (each starting at a randomly chosen
point in the initial run).
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_range: float, optional
Scale factor applied to logl values. | f15354:m2 |
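A quick sketch generating a dummy dynamic run and inspecting its live point allocation:

```python
run = get_dummy_dynamic_run(10, nthread_init=2, nthread_dyn=3, seed=0)
# nlive starts at nthread_init; the dynamic threads, which begin part-way
# through the run, raise it where they are active.
print(run['nlive_array'])
```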
def plot_run_nlive(method_names, run_dict, **kwargs): | logx_given_logl = kwargs.pop('<STR_LIT>', None)<EOL>logl_given_logx = kwargs.pop('<STR_LIT>', None)<EOL>logx_min = kwargs.pop('<STR_LIT>', None)<EOL>ymax = kwargs.pop('<STR_LIT>', None)<EOL>npoints = kwargs.pop('<STR_LIT>', <NUM_LIT:100>)<EOL>figsize = kwargs.pop('<STR_LIT>', (<NUM_LIT>, <NUM_LIT:2>))<EOL>post_mass_norm = kwargs.pop('<STR_LIT>', None)<EOL>cum_post_mass_norm = kwargs.pop('<STR_LIT>', None)<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>assert set(method_names) == set(run_dict.keys()), (<EOL>'<STR_LIT>' + str(method_names) + '<STR_LIT>'<EOL>'<STR_LIT>' + str(run_dict.keys()))<EOL>fig = plt.figure(figsize=figsize)<EOL>ax = plt.gca()<EOL>colors = plt.rcParams['<STR_LIT>'].by_key()['<STR_LIT>']<EOL>linecolor_dict = {'<STR_LIT>': colors[<NUM_LIT:2>],<EOL>'<STR_LIT>': colors[<NUM_LIT:8>],<EOL>'<STR_LIT>': colors[<NUM_LIT:9>]}<EOL>ax.set_prop_cycle('<STR_LIT>', [colors[i] for i in [<NUM_LIT:4>, <NUM_LIT:1>, <NUM_LIT:6>, <NUM_LIT:0>, <NUM_LIT:3>, <NUM_LIT:5>, <NUM_LIT:7>]])<EOL>integrals_dict = {}<EOL>logx_min_list = []<EOL>for method_name in method_names:<EOL><INDENT>integrals = np.zeros(len(run_dict[method_name]))<EOL>for nr, run in enumerate(run_dict[method_name]):<EOL><INDENT>if '<STR_LIT>' in run:<EOL><INDENT>logx = run['<STR_LIT>']<EOL><DEDENT>elif logx_given_logl is not None:<EOL><INDENT>logx = logx_given_logl(run['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>logx = nestcheck.ns_run_utils.get_logx(<EOL>run['<STR_LIT>'], simulate=False)<EOL><DEDENT>logx_min_list.append(logx[-<NUM_LIT:1>])<EOL>logx[<NUM_LIT:0>] = <NUM_LIT:0> <EOL>if nr == <NUM_LIT:0>:<EOL><INDENT>try:<EOL><INDENT>line, = ax.plot(logx, run['<STR_LIT>'], linewidth=<NUM_LIT:1>,<EOL>label=method_name,<EOL>color=linecolor_dict[method_name])<EOL><DEDENT>except KeyError:<EOL><INDENT>line, = ax.plot(logx, run['<STR_LIT>'], linewidth=<NUM_LIT:1>,<EOL>label=method_name)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>ax.plot(logx, run['<STR_LIT>'], linewidth=<NUM_LIT:1>,<EOL>color=line.get_color())<EOL><DEDENT>integrals[nr] = -np.trapz(run['<STR_LIT>'], x=logx)<EOL><DEDENT>integrals_dict[method_name] = integrals[np.isfinite(integrals)]<EOL><DEDENT>if logx_min is None:<EOL><INDENT>logx_min = np.asarray(logx_min_list).min()<EOL><DEDENT>if logl_given_logx is not None:<EOL><INDENT>logx_plot = np.linspace(logx_min, <NUM_LIT:0>, npoints)<EOL>logl = logl_given_logx(logx_plot)<EOL>logx_plot = logx_plot[np.where(~np.isnan(logl))[<NUM_LIT:0>]]<EOL>logl = logl[np.where(~np.isnan(logl))[<NUM_LIT:0>]]<EOL>w_an = rel_posterior_mass(logx_plot, logl)<EOL>w_an *= average_by_key(integrals_dict, post_mass_norm)<EOL>ax.plot(logx_plot, w_an,<EOL>linewidth=<NUM_LIT:2>, label='<STR_LIT>',<EOL>linestyle='<STR_LIT::>', color='<STR_LIT:k>')<EOL>w_an_c = np.cumsum(w_an)<EOL>w_an_c /= np.trapz(w_an_c, x=logx_plot)<EOL>w_an_c *= average_by_key(integrals_dict, cum_post_mass_norm)<EOL>ax.plot(logx_plot, w_an_c, linewidth=<NUM_LIT:2>, linestyle='<STR_LIT>', dashes=(<NUM_LIT:2>, <NUM_LIT:3>),<EOL>label='<STR_LIT>', color='<STR_LIT>')<EOL><DEDENT>ax.set_ylabel('<STR_LIT>')<EOL>ax.set_xlabel(r'<STR_LIT>')<EOL>if ymax is not None:<EOL><INDENT>ax.set_ylim([<NUM_LIT:0>, ymax])<EOL><DEDENT>else:<EOL><INDENT>ax.set_ylim(bottom=<NUM_LIT:0>)<EOL><DEDENT>ax.set_xlim([logx_min, <NUM_LIT:0>])<EOL>ax.legend()<EOL>return fig<EOL> | Plot the allocations of live points as a function of logX for the input
sets of nested sampling runs of the type used in the dynamic nested
sampling paper (Higson et al. 2019).
Plots also include analytically calculated distributions of relative
posterior mass and relative posterior mass remaining.
Parameters
----------
method_names: list of strs
run_dict: dict of lists of nested sampling runs.
Keys of run_dict must be method_names.
logx_given_logl: function, optional
For mapping points' logl values to logx values.
If not specified the logx coordinates for each run are estimated using
its numbers of live points.
logl_given_logx: function, optional
For calculating the relative posterior mass and posterior mass
remaining at each logx coordinate.
logx_min: float, optional
Lower limit of logx axis. If not specified this is set to the lowest
logx reached by any of the runs.
ymax: float, optional
Maximum value for plot's nlive axis (yaxis).
npoints: int, optional
Number of points to have in the fgivenx plot grids.
figsize: tuple, optional
Size of figure in inches.
post_mass_norm: str or None, optional
Specify the method_name of the runs to use for normalising the analytic
posterior mass curve. If None, all runs are used.
cum_post_mass_norm: str or None, optional
Specify the method_name of the runs to use for normalising the analytic
cumulative posterior mass remaining curve. If None, all runs are used.
Returns
-------
fig: matplotlib figure | f15355:m0 |
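An illustrative call using dummy runs; the method name 'dummy' and the output filename are arbitrary:

```python
import nestcheck.dummy_data

run_dict = {'dummy': [nestcheck.dummy_data.get_dummy_run(5, 100, seed=i)
                      for i in range(3)]}
fig = plot_run_nlive(['dummy'], run_dict)
fig.savefig('nlive_diagram.png')  # hypothetical output file
```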
def kde_plot_df(df, xlims=None, **kwargs): | assert xlims is None or isinstance(xlims, dict)<EOL>figsize = kwargs.pop('<STR_LIT>', (<NUM_LIT>, <NUM_LIT>))<EOL>num_xticks = kwargs.pop('<STR_LIT>', None)<EOL>nrows = kwargs.pop('<STR_LIT>', <NUM_LIT:1>)<EOL>ncols = kwargs.pop('<STR_LIT>', int(np.ceil(len(df.columns) / nrows)))<EOL>normalize = kwargs.pop('<STR_LIT>', True)<EOL>legend = kwargs.pop('<STR_LIT>', False)<EOL>legend_kwargs = kwargs.pop('<STR_LIT>', {})<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)<EOL>for nax, col in enumerate(df):<EOL><INDENT>if nrows == <NUM_LIT:1>:<EOL><INDENT>ax = axes[nax]<EOL><DEDENT>else:<EOL><INDENT>ax = axes[nax // ncols, nax % ncols]<EOL><DEDENT>supmin = df[col].apply(np.min).min()<EOL>supmax = df[col].apply(np.max).max()<EOL>support = np.linspace(supmin - <NUM_LIT:0.1> * (supmax - supmin),<EOL>supmax + <NUM_LIT:0.1> * (supmax - supmin), <NUM_LIT:200>)<EOL>handles = []<EOL>labels = []<EOL>for name, samps in df[col].iteritems():<EOL><INDENT>pdf = scipy.stats.gaussian_kde(samps)(support)<EOL>if not normalize:<EOL><INDENT>pdf /= pdf.max()<EOL><DEDENT>handles.append(ax.plot(support, pdf, label=name)[<NUM_LIT:0>])<EOL>labels.append(name)<EOL><DEDENT>ax.set_ylim(bottom=<NUM_LIT:0>)<EOL>ax.set_yticks([])<EOL>if xlims is not None:<EOL><INDENT>try:<EOL><INDENT>ax.set_xlim(xlims[col])<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>ax.set_xlabel(col)<EOL>if num_xticks is not None:<EOL><INDENT>ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(<EOL>nbins=num_xticks))<EOL><DEDENT><DEDENT>if legend:<EOL><INDENT>fig.legend(handles, labels, **legend_kwargs)<EOL><DEDENT>return fig<EOL> | Plots kde estimates of distributions of samples in each cell of the
input pandas DataFrame.
There is one subplot for each dataframe column, and on each subplot there
is one kde line for each dataframe row.
Parameters
----------
df: pandas data frame
Each cell must contain a 1d numpy array of samples.
xlims: dict, optional
Dictionary of xlimits - keys are column names and values are lists of
length 2.
num_xticks: int, optional
Number of xticks on each subplot.
figsize: tuple, optional
Size of figure in inches.
nrows: int, optional
Number of rows of subplots.
ncols: int, optional
Number of columns of subplots.
normalize: bool, optional
If True, kde plots are normalized to have the same area under their
curves. If False, their maximum values are set to 1.
legend: bool, optional
Should a legend be added?
legend_kwargs: dict, optional
Additional kwargs for legend.
Returns
-------
fig: matplotlib figure | f15355:m1 |
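A sketch of the expected input format: each cell holds a 1d array of samples, each column becomes a subplot, and each row contributes one kde line (the data here are randomly generated for illustration):

```python
import numpy as np
import pandas as pd

np.random.seed(0)
df = pd.DataFrame(
    {'x1': [np.random.randn(100), np.random.randn(100) + 1],
     'x2': [np.random.randn(100), 0.5 * np.random.randn(100)]},
    index=['run A', 'run B'])
fig = kde_plot_df(df, legend=True)
```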
def bs_param_dists(run_list, **kwargs): | fthetas = kwargs.pop('<STR_LIT>', [lambda theta: theta[:, <NUM_LIT:0>],<EOL>lambda theta: theta[:, <NUM_LIT:1>]])<EOL>labels = kwargs.pop('<STR_LIT>', [r'<STR_LIT>' + str(i + <NUM_LIT:1>) + '<STR_LIT:$>' for i in<EOL>range(len(fthetas))])<EOL>ftheta_lims = kwargs.pop('<STR_LIT>', [[-<NUM_LIT:1>, <NUM_LIT:1>]] * len(fthetas))<EOL>n_simulate = kwargs.pop('<STR_LIT>', <NUM_LIT:100>)<EOL>random_seed = kwargs.pop('<STR_LIT>', <NUM_LIT:0>)<EOL>figsize = kwargs.pop('<STR_LIT>', (<NUM_LIT>, <NUM_LIT:2>))<EOL>nx = kwargs.pop('<STR_LIT>', <NUM_LIT:100>)<EOL>ny = kwargs.pop('<STR_LIT>', nx)<EOL>cache_in = kwargs.pop('<STR_LIT>', None)<EOL>parallel = kwargs.pop('<STR_LIT>', True)<EOL>rasterize_contours = kwargs.pop('<STR_LIT>', True)<EOL>tqdm_kwargs = kwargs.pop('<STR_LIT>', {'<STR_LIT>': True})<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>state = np.random.get_state() <EOL>np.random.seed(random_seed)<EOL>if not isinstance(run_list, list):<EOL><INDENT>run_list = [run_list]<EOL><DEDENT>assert len(labels) == len(fthetas), (<EOL>'<STR_LIT>')<EOL>width_ratios = [<NUM_LIT>] * len(fthetas) + [<NUM_LIT:1>] * len(run_list)<EOL>fig, axes = plt.subplots(nrows=<NUM_LIT:1>, ncols=len(run_list) + len(fthetas),<EOL>gridspec_kw={'<STR_LIT>': <NUM_LIT:0.1>,<EOL>'<STR_LIT>': width_ratios},<EOL>figsize=figsize)<EOL>colormaps = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>mean_colors = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>']<EOL>for nrun, run in reversed(list(enumerate(run_list))):<EOL><INDENT>try:<EOL><INDENT>cache = cache_in + '<STR_LIT:_>' + str(nrun)<EOL><DEDENT>except TypeError:<EOL><INDENT>cache = None<EOL><DEDENT>cbar = plot_bs_dists(run, fthetas, axes[:len(fthetas)],<EOL>parallel=parallel,<EOL>ftheta_lims=ftheta_lims, cache=cache,<EOL>n_simulate=n_simulate, nx=nx, ny=ny,<EOL>rasterize_contours=rasterize_contours,<EOL>mean_color=mean_colors[nrun],<EOL>colormap=colormaps[nrun],<EOL>tqdm_kwargs=tqdm_kwargs)<EOL>colorbar_plot = plt.colorbar(cbar, cax=axes[len(fthetas) + nrun],<EOL>ticks=[<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:3>])<EOL>colorbar_plot.solids.set_edgecolor('<STR_LIT>')<EOL>colorbar_plot.ax.set_yticklabels([])<EOL>if nrun == len(run_list) - <NUM_LIT:1>:<EOL><INDENT>colorbar_plot.ax.set_yticklabels(<EOL>[r'<STR_LIT>', r'<STR_LIT>', r'<STR_LIT>'])<EOL><DEDENT><DEDENT>for nax, ax in enumerate(axes[:len(fthetas)]):<EOL><INDENT>ax.set_yticks([])<EOL>ax.set_xlabel(labels[nax])<EOL>if ax.is_first_col():<EOL><INDENT>ax.set_ylabel('<STR_LIT>')<EOL><DEDENT>prune = '<STR_LIT>' if nax != len(fthetas) - <NUM_LIT:1> else None<EOL>ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(<EOL>nbins=<NUM_LIT:5>, prune=prune))<EOL><DEDENT>np.random.set_state(state) <EOL>return fig<EOL> | Creates posterior distributions and their bootstrap error functions for
input runs and estimators.
For a more detailed description and some example use cases, see 'nestcheck:
diagnostic tests for nested sampling calculations' (Higson et al. 2019).
Parameters
----------
run_list: dict or list of dicts
Nested sampling run(s) to plot.
fthetas: list of functions, optional
Quantities to plot. Each must map a 2d theta array to 1d ftheta array -
i.e. map every sample's theta vector (every row) to a scalar quantity.
E.g. use lambda x: x[:, 0] to plot the first parameter.
labels: list of strs, optional
Labels for each ftheta.
ftheta_lims: list, optional
Plot limits for each ftheta.
n_simulate: int, optional
Number of bootstrap replications to be used for the fgivenx
distributions.
random_seed: int, optional
Seed to make sure results are consistent and fgivenx caching can be
used.
figsize: tuple, optional
Matplotlib figsize (in inches).
nx: int, optional
Size of x-axis grid for fgivenx plots.
ny: int, optional
Size of y-axis grid for fgivenx plots.
cache: str or None
Root for fgivenx caching (no caching if None).
parallel: bool, optional
fgivenx parallel option.
rasterize_contours: bool, optional
fgivenx rasterize_contours option.
tqdm_kwargs: dict, optional
Keyword arguments to pass to the tqdm progress bar when it is used in
fgivenx while plotting contours.
Returns
-------
fig: matplotlib figure | f15355:m2 |
def param_logx_diagram(run_list, **kwargs): | fthetas = kwargs.pop('<STR_LIT>', [lambda theta: theta[:, <NUM_LIT:0>],<EOL>lambda theta: theta[:, <NUM_LIT:1>]])<EOL>labels = kwargs.pop('<STR_LIT>', [r'<STR_LIT>' + str(i + <NUM_LIT:1>) + '<STR_LIT:$>' for i in<EOL>range(len(fthetas))])<EOL>ftheta_lims = kwargs.pop('<STR_LIT>', [[-<NUM_LIT:1>, <NUM_LIT:1>]] * len(fthetas))<EOL>threads_to_plot = kwargs.pop('<STR_LIT>', [<NUM_LIT:0>])<EOL>plot_means = kwargs.pop('<STR_LIT>', True)<EOL>n_simulate = kwargs.pop('<STR_LIT>', <NUM_LIT:100>)<EOL>random_seed = kwargs.pop('<STR_LIT>', <NUM_LIT:0>)<EOL>logx_min = kwargs.pop('<STR_LIT>', None)<EOL>figsize = kwargs.pop('<STR_LIT>', (<NUM_LIT>, <NUM_LIT:2> * (<NUM_LIT:1> + len(fthetas))))<EOL>colors = kwargs.pop('<STR_LIT>', ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>colormaps = kwargs.pop('<STR_LIT>', ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>'])<EOL>cache_in = kwargs.pop('<STR_LIT>', None)<EOL>parallel = kwargs.pop('<STR_LIT>', True)<EOL>rasterize_contours = kwargs.pop('<STR_LIT>', True)<EOL>point_size = kwargs.pop('<STR_LIT>', <NUM_LIT>)<EOL>thin = kwargs.pop('<STR_LIT>', <NUM_LIT:1>)<EOL>npoints = kwargs.pop('<STR_LIT>', <NUM_LIT:100>)<EOL>tqdm_kwargs = kwargs.pop('<STR_LIT>', {'<STR_LIT>': True})<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>if not isinstance(run_list, list):<EOL><INDENT>run_list = [run_list]<EOL><DEDENT>state = np.random.get_state() <EOL>np.random.seed(random_seed)<EOL>if not plot_means:<EOL><INDENT>mean_colors = [None] * len(colors)<EOL><DEDENT>else:<EOL><INDENT>mean_colors = ['<STR_LIT>' + col for col in colors]<EOL><DEDENT>nlogx = npoints<EOL>ny_posterior = npoints<EOL>assert len(fthetas) == len(labels)<EOL>assert len(fthetas) == len(ftheta_lims)<EOL>thread_linestyles = ['<STR_LIT:->', '<STR_LIT>', '<STR_LIT::>']<EOL>fig, axes = plt.subplots(nrows=<NUM_LIT:1> + len(fthetas), ncols=<NUM_LIT:2>, figsize=figsize,<EOL>gridspec_kw={'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': [<NUM_LIT:15>, <NUM_LIT>]})<EOL>axes[<NUM_LIT:0>, <NUM_LIT:0>].set_visible(False)<EOL>divider = mpl_toolkits.axes_grid1.make_axes_locatable(axes[<NUM_LIT:0>, <NUM_LIT:0>])<EOL>colorbar_ax_list = []<EOL>for i in range(len(run_list)):<EOL><INDENT>colorbar_ax_list.append(divider.append_axes("<STR_LIT:left>", size=<NUM_LIT>,<EOL>pad=<NUM_LIT>))<EOL><DEDENT>colorbar_ax_list = list(reversed(colorbar_ax_list))<EOL>for nrun, run in reversed(list(enumerate(run_list))):<EOL><INDENT>ax_weight = axes[<NUM_LIT:0>, <NUM_LIT:1>]<EOL>ax_weight.set_ylabel('<STR_LIT>')<EOL>samples = np.zeros((n_simulate, run['<STR_LIT>'].shape[<NUM_LIT:0>] * <NUM_LIT:2>))<EOL>for i in range(n_simulate):<EOL><INDENT>logx_temp = nestcheck.ns_run_utils.get_logx(<EOL>run['<STR_LIT>'], simulate=True)[::-<NUM_LIT:1>]<EOL>logw_rel = logx_temp + run['<STR_LIT>'][::-<NUM_LIT:1>]<EOL>w_rel = np.exp(logw_rel - logw_rel.max())<EOL>w_rel /= np.trapz(w_rel, x=logx_temp)<EOL>samples[i, ::<NUM_LIT:2>] = logx_temp<EOL>samples[i, <NUM_LIT:1>::<NUM_LIT:2>] = w_rel<EOL><DEDENT>if logx_min is None:<EOL><INDENT>logx_min = samples[:, <NUM_LIT:0>].min()<EOL><DEDENT>logx_sup = np.linspace(logx_min, <NUM_LIT:0>, nlogx)<EOL>try:<EOL><INDENT>cache = cache_in + '<STR_LIT:_>' + str(nrun) + '<STR_LIT>'<EOL><DEDENT>except TypeError:<EOL><INDENT>cache = None<EOL><DEDENT>interp_alt = functools.partial(alternate_helper, func=np.interp)<EOL>y, pmf = fgivenx.drivers.compute_pmf(<EOL>interp_alt, logx_sup, samples, 
cache=cache, ny=npoints,<EOL>parallel=parallel, tqdm_kwargs=tqdm_kwargs)<EOL>cbar = fgivenx.plot.plot(<EOL>logx_sup, y, pmf, ax_weight, rasterize_contours=rasterize_contours,<EOL>colors=plt.get_cmap(colormaps[nrun]))<EOL>ax_weight.set_xlim([logx_min, <NUM_LIT:0>])<EOL>ax_weight.set_ylim(bottom=<NUM_LIT:0>)<EOL>ax_weight.set_yticks([])<EOL>ax_weight.set_xticklabels([])<EOL>colorbar_plot = plt.colorbar(cbar, cax=colorbar_ax_list[nrun],<EOL>ticks=[<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:3>])<EOL>colorbar_ax_list[nrun].yaxis.set_ticks_position('<STR_LIT:left>')<EOL>colorbar_plot.solids.set_edgecolor('<STR_LIT>')<EOL>colorbar_plot.ax.set_yticklabels([])<EOL>if nrun == <NUM_LIT:0>:<EOL><INDENT>colorbar_plot.ax.set_yticklabels(<EOL>[r'<STR_LIT>', r'<STR_LIT>', r'<STR_LIT>'])<EOL><DEDENT>logx = nestcheck.ns_run_utils.get_logx(run['<STR_LIT>'],<EOL>simulate=False)<EOL>scatter_x = logx<EOL>scatter_theta = run['<STR_LIT>']<EOL>if thin != <NUM_LIT:1>:<EOL><INDENT>assert <NUM_LIT:0> < thin <= <NUM_LIT:1>, (<EOL>'<STR_LIT>'<EOL>.format(thin))<EOL>state = np.random.get_state() <EOL>np.random.seed(random_seed)<EOL>inds = np.where(np.random.random(logx.shape) <= thin)[<NUM_LIT:0>]<EOL>np.random.set_state(state) <EOL>scatter_x = logx[inds]<EOL>scatter_theta = run['<STR_LIT>'][inds, :]<EOL><DEDENT>for nf, ftheta in enumerate(fthetas):<EOL><INDENT>ax_samples = axes[<NUM_LIT:1> + nf, <NUM_LIT:1>]<EOL>ax_samples.scatter(scatter_x, ftheta(scatter_theta),<EOL>s=point_size, color=colors[nrun])<EOL>if threads_to_plot is not None:<EOL><INDENT>for i in threads_to_plot:<EOL><INDENT>thread_inds = np.where(run['<STR_LIT>'] == i)[<NUM_LIT:0>]<EOL>ax_samples.plot(logx[thread_inds],<EOL>ftheta(run['<STR_LIT>'][thread_inds]),<EOL>linestyle=thread_linestyles[nrun],<EOL>color='<STR_LIT>', lw=<NUM_LIT:1>)<EOL><DEDENT><DEDENT>ax_samples.set_xlim([logx_min, <NUM_LIT:0>])<EOL>ax_samples.set_ylim(ftheta_lims[nf])<EOL><DEDENT>posterior_axes = [axes[i + <NUM_LIT:1>, <NUM_LIT:0>] for i in range(len(fthetas))]<EOL>_ = plot_bs_dists(run, fthetas, posterior_axes,<EOL>ftheta_lims=ftheta_lims,<EOL>flip_axes=True, n_simulate=n_simulate,<EOL>rasterize_contours=rasterize_contours,<EOL>cache=cache_in, nx=npoints, ny=ny_posterior,<EOL>colormap=colormaps[nrun],<EOL>mean_color=mean_colors[nrun],<EOL>parallel=parallel, tqdm_kwargs=tqdm_kwargs)<EOL>if plot_means:<EOL><INDENT>w_rel = nestcheck.ns_run_utils.get_w_rel(run, simulate=False)<EOL>w_rel /= np.sum(w_rel)<EOL>means = [np.sum(w_rel * f(run['<STR_LIT>'])) for f in fthetas]<EOL>for nf, mean in enumerate(means):<EOL><INDENT>axes[nf + <NUM_LIT:1>, <NUM_LIT:1>].axhline(y=mean, lw=<NUM_LIT:1>, linestyle='<STR_LIT>',<EOL>color=mean_colors[nrun])<EOL><DEDENT><DEDENT><DEDENT>for nf, ax in enumerate(posterior_axes):<EOL><INDENT>ax.set_ylim(ftheta_lims[nf])<EOL>ax.invert_xaxis() <EOL><DEDENT>axes[-<NUM_LIT:1>, <NUM_LIT:1>].set_xlabel(r'<STR_LIT>')<EOL>for i, label in enumerate(labels):<EOL><INDENT>axes[i + <NUM_LIT:1>, <NUM_LIT:0>].set_ylabel(label)<EOL>prune = '<STR_LIT>' if i != <NUM_LIT:0> else None<EOL>axes[i + <NUM_LIT:1>, <NUM_LIT:0>].yaxis.set_major_locator(<EOL>matplotlib.ticker.MaxNLocator(nbins=<NUM_LIT:3>, prune=prune))<EOL><DEDENT>for _, ax in np.ndenumerate(axes):<EOL><INDENT>if not ax.is_first_col():<EOL><INDENT>ax.set_yticklabels([])<EOL><DEDENT>if not (ax.is_last_row() and ax.is_last_col()):<EOL><INDENT>ax.set_xticks([])<EOL><DEDENT><DEDENT>np.random.set_state(state) <EOL>return fig<EOL> | Creates diagrams of a nested sampling run's evolution as it iterates
towards higher likelihoods, expressed as a function of log X, where X(L) is
the fraction of the prior volume with likelihood greater than some value L.
For a more detailed description and some example use cases, see 'nestcheck:
diagnostic tests for nested sampling calculations' (Higson et al. 2019).
Parameters
----------
run_list: dict or list of dicts
Nested sampling run(s) to plot.
fthetas: list of functions, optional
Quantities to plot. Each must map a 2d theta array to 1d ftheta array -
i.e. map every sample's theta vector (every row) to a scalar quantity.
E.g. use lambda x: x[:, 0] to plot the first parameter.
labels: list of strs, optional
Labels for each ftheta.
ftheta_lims: list, optional
Plot limits for each ftheta.
plot_means: bool, optional
Should the mean value of each ftheta be plotted?
n_simulate: int, optional
Number of bootstrap replications to use for the fgivenx distributions.
random_seed: int, optional
Seed to make sure results are consistent and fgivenx caching can be
used.
logx_min: float, optional
Lower limit of logx axis.
figsize: tuple, optional
Matplotlib figure size (in inches).
colors: list of strs, optional
Colors to plot run scatter plots with.
colormaps: list of strs, optional
Colormaps to plot run fgivenx plots with.
npoints: int, optional
How many points to have in the logx array used to calculate and plot
analytical weights.
cache: str or None
Root for fgivenx caching (no caching if None).
parallel: bool, optional
fgivenx parallel option.
point_size: float, optional
Size of markers on the scatter plot (in pts).
thin: float, optional
Factor by which to reduce the number of samples before plotting the
scatter plot. Must be in the half-closed interval (0, 1].
rasterize_contours: bool, optional
fgivenx rasterize_contours option.
tqdm_kwargs: dict, optional
Keyword arguments to pass to the tqdm progress bar when it is used in
fgivenx while plotting contours.
Returns
-------
fig: matplotlib figure | f15355:m3 |
def plot_bs_dists(run, fthetas, axes, **kwargs): | ftheta_lims = kwargs.pop('<STR_LIT>', [[-<NUM_LIT:1>, <NUM_LIT:1>]] * len(fthetas))<EOL>n_simulate = kwargs.pop('<STR_LIT>', <NUM_LIT:100>)<EOL>colormap = kwargs.pop('<STR_LIT>', plt.get_cmap('<STR_LIT>'))<EOL>mean_color = kwargs.pop('<STR_LIT>', None)<EOL>nx = kwargs.pop('<STR_LIT>', <NUM_LIT:100>)<EOL>ny = kwargs.pop('<STR_LIT>', nx)<EOL>cache_in = kwargs.pop('<STR_LIT>', None)<EOL>parallel = kwargs.pop('<STR_LIT>', True)<EOL>rasterize_contours = kwargs.pop('<STR_LIT>', True)<EOL>smooth = kwargs.pop('<STR_LIT>', False)<EOL>flip_axes = kwargs.pop('<STR_LIT>', False)<EOL>tqdm_kwargs = kwargs.pop('<STR_LIT>', {'<STR_LIT>': False})<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>assert len(fthetas) == len(axes),'<STR_LIT>'<EOL>assert len(fthetas) == len(ftheta_lims),'<STR_LIT>'<EOL>threads = nestcheck.ns_run_utils.get_run_threads(run)<EOL>bs_samps = []<EOL>for i in range(n_simulate):<EOL><INDENT>run_temp = nestcheck.error_analysis.bootstrap_resample_run(<EOL>run, threads=threads)<EOL>w_temp = nestcheck.ns_run_utils.get_w_rel(run_temp, simulate=False)<EOL>bs_samps.append((run_temp['<STR_LIT>'], w_temp))<EOL><DEDENT>for nf, ftheta in enumerate(fthetas):<EOL><INDENT>max_samps = <NUM_LIT:2> * max([bs_samp[<NUM_LIT:0>].shape[<NUM_LIT:0>] for bs_samp in bs_samps])<EOL>samples_array = np.full((n_simulate, max_samps), np.nan)<EOL>for i, (theta, weights) in enumerate(bs_samps):<EOL><INDENT>nsamp = <NUM_LIT:2> * theta.shape[<NUM_LIT:0>]<EOL>samples_array[i, :nsamp:<NUM_LIT:2>] = ftheta(theta)<EOL>samples_array[i, <NUM_LIT:1>:nsamp:<NUM_LIT:2>] = weights<EOL><DEDENT>ftheta_vals = np.linspace(ftheta_lims[nf][<NUM_LIT:0>], ftheta_lims[nf][<NUM_LIT:1>], nx)<EOL>try:<EOL><INDENT>cache = cache_in + '<STR_LIT:_>' + str(nf)<EOL><DEDENT>except TypeError:<EOL><INDENT>cache = None<EOL><DEDENT>samp_kde = functools.partial(alternate_helper,<EOL>func=weighted_1d_gaussian_kde)<EOL>y, pmf = fgivenx.drivers.compute_pmf(<EOL>samp_kde, ftheta_vals, samples_array, ny=ny, cache=cache,<EOL>parallel=parallel, tqdm_kwargs=tqdm_kwargs)<EOL>if flip_axes:<EOL><INDENT>cbar = fgivenx.plot.plot(<EOL>y, ftheta_vals, np.swapaxes(pmf, <NUM_LIT:0>, <NUM_LIT:1>), axes[nf],<EOL>colors=colormap, rasterize_contours=rasterize_contours,<EOL>smooth=smooth)<EOL><DEDENT>else:<EOL><INDENT>cbar = fgivenx.plot.plot(<EOL>ftheta_vals, y, pmf, axes[nf], colors=colormap,<EOL>rasterize_contours=rasterize_contours, smooth=smooth)<EOL><DEDENT><DEDENT>if mean_color is not None:<EOL><INDENT>w_rel = nestcheck.ns_run_utils.get_w_rel(run, simulate=False)<EOL>w_rel /= np.sum(w_rel)<EOL>means = [np.sum(w_rel * f(run['<STR_LIT>'])) for f in fthetas]<EOL>for nf, mean in enumerate(means):<EOL><INDENT>if flip_axes:<EOL><INDENT>axes[nf].axhline(y=mean, lw=<NUM_LIT:1>, linestyle='<STR_LIT>',<EOL>color=mean_color)<EOL><DEDENT>else:<EOL><INDENT>axes[nf].axvline(x=mean, lw=<NUM_LIT:1>, linestyle='<STR_LIT>',<EOL>color=mean_color)<EOL><DEDENT><DEDENT><DEDENT>return cbar<EOL> | Helper function for plotting uncertainties on posterior distributions
using bootstrap resamples and the fgivenx module. Used by bs_param_dists
and param_logx_diagram.
Parameters
----------
run: dict
Nested sampling run to plot.
fthetas: list of functions
Quantities to plot. Each must map a 2d theta array to 1d ftheta array -
i.e. map every sample's theta vector (every row) to a scalar quantity.
E.g. use lambda x: x[:, 0] to plot the first parameter.
axes: list of matplotlib axis objects
ftheta_lims: list, optional
Plot limits for each ftheta.
n_simulate: int, optional
Number of bootstrap replications to use for the fgivenx
distributions.
colormap: matplotlib colormap
Colors to plot fgivenx distribution.
mean_color: matplotlib color as str
Color to plot mean of each parameter. If None (default) means are not
plotted.
nx: int, optional
Size of x-axis grid for fgivenx plots.
ny: int, optional
Size of y-axis grid for fgivenx plots.
cache: str or None
Root for fgivenx caching (no caching if None).
parallel: bool, optional
fgivenx parallel option.
rasterize_contours: bool, optional
fgivenx rasterize_contours option.
smooth: bool, optional
fgivenx smooth option.
flip_axes: bool, optional
Whether or not the plot should be rotated 90 degrees anticlockwise onto its
side.
tqdm_kwargs: dict, optional
Keyword arguments to pass to the tqdm progress bar when it is used in
fgivenx while plotting contours.
Returns
-------
cbar: matplotlib colorbar
For use in higher order functions. | f15355:m4 |
def alternate_helper(x, alt_samps, func=None): | alt_samps = alt_samps[~np.isnan(alt_samps)]<EOL>arg1 = alt_samps[::<NUM_LIT:2>]<EOL>arg2 = alt_samps[<NUM_LIT:1>::<NUM_LIT:2>]<EOL>return func(x, arg1, arg2)<EOL> | Helper function for making fgivenx plots of functions with 2 array
arguments of variable lengths. | f15355:m5 |
def weighted_1d_gaussian_kde(x, samples, weights): | assert x.ndim == <NUM_LIT:1><EOL>assert samples.ndim == <NUM_LIT:1><EOL>assert samples.shape == weights.shape<EOL>weights /= np.sum(weights)<EOL>nz_weights = weights[np.nonzero(weights)]<EOL>nsamp_eff = np.exp(-<NUM_LIT:1.> * np.sum(nz_weights * np.log(nz_weights)))<EOL>mu = np.sum(weights * samples)<EOL>var = np.sum(weights * ((samples - mu) ** <NUM_LIT:2>))<EOL>var *= nsamp_eff / (nsamp_eff - <NUM_LIT:1>) <EOL>scott_factor = np.power(nsamp_eff, -<NUM_LIT:1.> / (<NUM_LIT:5>)) <EOL>sig = np.sqrt(var) * scott_factor<EOL>xx, ss = np.meshgrid(x, samples)<EOL>chisquared = ((xx - ss) / sig) ** <NUM_LIT:2><EOL>energy = np.exp(-<NUM_LIT:0.5> * chisquared) / np.sqrt(<NUM_LIT:2> * np.pi * (sig ** <NUM_LIT:2>))<EOL>result = np.sum(energy * weights[:, np.newaxis], axis=<NUM_LIT:0>)<EOL>return result<EOL> | Gaussian kde with weighted samples (1d only). Uses Scott bandwidth
factor.
When all the sample weights are equal, this is equivalent to
kde = scipy.stats.gaussian_kde(samples)
return kde(x)
When the weights are not all equal, we compute the effective number
of samples as the information content (Shannon entropy)
nsamp_eff = exp(- sum_i (w_i log(w_i)))
Alternative ways to estimate nsamp_eff include Kish's formula
nsamp_eff = (sum_i w_i) ** 2 / (sum_i w_i ** 2)
See https://en.wikipedia.org/wiki/Effective_sample_size and "Effective
sample size for importance sampling based on discrepancy measures"
(Martino et al. 2017) for more information.
Parameters
----------
x: 1d numpy array
Coordinates at which to evaluate the kde.
samples: 1d numpy array
Samples from which to calculate kde.
weights: 1d numpy array of same shape as samples
Weights of each point. Need not be normalised as this is done inside
the function.
Returns
-------
result: 1d numpy array of same shape as x
Kde evaluated at x values. | f15355:m6 |
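A quick numerical check of the equivalence claimed above: with equal weights the function reproduces scipy.stats.gaussian_kde, since both use Scott's factor and a ddof=1 variance:

```python
import numpy as np
import scipy.stats

np.random.seed(0)
samples = np.random.randn(500)
x = np.linspace(-3, 3, 7)
ours = weighted_1d_gaussian_kde(x, samples, np.ones_like(samples))
scipys = scipy.stats.gaussian_kde(samples)(x)
print(np.allclose(ours, scipys))  # True
```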
def rel_posterior_mass(logx, logl): | logw = logx + logl<EOL>w_rel = np.exp(logw - logw.max())<EOL>w_rel /= np.abs(np.trapz(w_rel, x=logx))<EOL>return w_rel<EOL> | Calculate the relative posterior mass for some array of logx values
given the log-likelihood values corresponding to each logx value.
The posterior mass at each logX value is proportional to L(X)X, where L(X)
is the likelihood.
The weight is returned normalized so that the integral of the weight with
respect to logX is 1.
Parameters
----------
logx: 1d numpy array
Logx values at which to calculate posterior mass.
logl: 1d numpy array
Logl values corresponding to each logx (same shape as logx).
Returns
-------
w_rel: 1d numpy array
Relative posterior mass at each input logx value. | f15355:m7 |
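For instance, the relative posterior mass for perfect nested sampling of a spherical Gaussian likelihood; the dimension (5) and width (0.1) below are illustrative assumptions:

```python
import numpy as np

logx = np.linspace(-30, 0, 500)
# Toy 5d spherical Gaussian: logl(X) = -X**(2/d) / (2 * sigma**2).
logl = -0.5 * np.exp(2.0 * logx / 5) / 0.1 ** 2
w_rel = rel_posterior_mass(logx, logl)
print(np.trapz(w_rel, x=logx))  # 1.0 (normalised by construction)
```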
def average_by_key(dict_in, key): | if key is None:<EOL><INDENT>return np.mean(np.concatenate(list(dict_in.values())))<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>return np.mean(dict_in[key])<EOL><DEDENT>except KeyError:<EOL><INDENT>print('<STR_LIT>' + key + '<STR_LIT>' +<EOL>'<STR_LIT>' +<EOL>'<STR_LIT>')<EOL>return np.mean(np.concatenate(list(dict_in.values())))<EOL><DEDENT><DEDENT> | Helper function for plot_run_nlive.
Try returning the average of dict_in[key] and, if this does not work or if
key is None, return the average over the whole dict.
Parameters
----------
dict_in: dict
Values should be arrays.
key: str
Returns
-------
average: float | f15355:m8 |
def run_estimators(ns_run, estimator_list, simulate=False): | logw = get_logw(ns_run, simulate=simulate)<EOL>output = np.zeros(len(estimator_list))<EOL>for i, est in enumerate(estimator_list):<EOL><INDENT>output[i] = est(ns_run, logw=logw)<EOL><DEDENT>return output<EOL> | Calculates values of list of quantities (such as the Bayesian evidence
or mean of parameters) for a single nested sampling run.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
estimator_list: list of functions for estimating quantities from nested
sampling runs. Example functions can be found in estimators.py. Each
should have arguments: func(ns_run, logw=None).
simulate: bool, optional
See get_logw docstring.
Returns
-------
output: 1d numpy array
Calculation result for each estimator in estimator_list. | f15356:m0 |
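A sketch of run_estimators with hand-written estimators obeying the func(ns_run, logw=None) convention; logz_estimator and mean_theta1 are illustrative names, and run is assumed to be a nested sampling run dict (e.g. from the dummy data functions above):

```python
import numpy as np
import scipy.special

def logz_estimator(ns_run, logw=None):
    # Log-evidence: logsumexp of the log posterior masses.
    if logw is None:
        logw = get_logw(ns_run)
    return scipy.special.logsumexp(logw)

def mean_theta1(ns_run, logw=None):
    # Posterior mean of the first parameter.
    if logw is None:
        logw = get_logw(ns_run)
    w = np.exp(logw - logw.max())
    return np.sum(w * ns_run['theta'][:, 0]) / np.sum(w)

results = run_estimators(run, [logz_estimator, mean_theta1])
```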
def array_given_run(ns_run): | samples = np.zeros((ns_run['<STR_LIT>'].shape[<NUM_LIT:0>], <NUM_LIT:3> + ns_run['<STR_LIT>'].shape[<NUM_LIT:1>]))<EOL>samples[:, <NUM_LIT:0>] = ns_run['<STR_LIT>']<EOL>samples[:, <NUM_LIT:1>] = ns_run['<STR_LIT>']<EOL>samples[:-<NUM_LIT:1>, <NUM_LIT:2>] = np.diff(ns_run['<STR_LIT>'])<EOL>samples[-<NUM_LIT:1>, <NUM_LIT:2>] = -<NUM_LIT:1> <EOL>samples[:, <NUM_LIT:3>:] = ns_run['<STR_LIT>']<EOL>return samples<EOL> | Converts information on samples in a nested sampling run dictionary into
a numpy array representation. This allows fast addition of more samples and
recalculation of nlive.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
Returns
-------
samples: 2d numpy array
Array containing columns
[logl, thread label, change in nlive at sample, (thetas)]
with each row representing a single sample. | f15356:m1 |
def dict_given_run_array(samples, thread_min_max): | ns_run = {'<STR_LIT>': samples[:, <NUM_LIT:0>],<EOL>'<STR_LIT>': samples[:, <NUM_LIT:1>],<EOL>'<STR_LIT>': thread_min_max,<EOL>'<STR_LIT>': samples[:, <NUM_LIT:3>:]}<EOL>if np.all(~np.isnan(ns_run['<STR_LIT>'])):<EOL><INDENT>ns_run['<STR_LIT>'] = ns_run['<STR_LIT>'].astype(int)<EOL>assert np.array_equal(samples[:, <NUM_LIT:1>], ns_run['<STR_LIT>']), ((<EOL>'<STR_LIT>'<EOL>'<STR_LIT>').format(<EOL>samples[:, <NUM_LIT:1>], ns_run['<STR_LIT>']))<EOL><DEDENT>nlive_0 = (thread_min_max[:, <NUM_LIT:0>] <= ns_run['<STR_LIT>'].min()).sum()<EOL>assert nlive_0 > <NUM_LIT:0>, '<STR_LIT>'.format(nlive_0)<EOL>nlive_array = np.zeros(samples.shape[<NUM_LIT:0>]) + nlive_0<EOL>nlive_array[<NUM_LIT:1>:] += np.cumsum(samples[:-<NUM_LIT:1>, <NUM_LIT:2>])<EOL>dup_th_starts = (thread_min_max[:, <NUM_LIT:0>] == ns_run['<STR_LIT>'].min()).sum()<EOL>if dup_th_starts > <NUM_LIT:1>:<EOL><INDENT>nlive_array += (<NUM_LIT:1> - nlive_array[-<NUM_LIT:1>])<EOL>n_logl_min = (ns_run['<STR_LIT>'] == ns_run['<STR_LIT>'].min()).sum()<EOL>nlive_array[:n_logl_min] = nlive_0<EOL>warnings.warn((<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>').format(<EOL>dup_th_starts, ns_run['<STR_LIT>'].min(), n_logl_min), UserWarning)<EOL><DEDENT>assert nlive_array.min() > <NUM_LIT:0>, ((<EOL>'<STR_LIT>'<EOL>'<STR_LIT>').format(<EOL>nlive_0, nlive_array, thread_min_max))<EOL>assert nlive_array[-<NUM_LIT:1>] == <NUM_LIT:1>, (<EOL>'<STR_LIT>' + str(nlive_array))<EOL>ns_run['<STR_LIT>'] = nlive_array<EOL>return ns_run<EOL> | Converts an array of information about samples back into a nested sampling
run dictionary (see data_processing module docstring for more details).
N.B. the output dict only contains the following keys: 'logl',
'thread_labels', 'thread_min_max', 'nlive_array', 'theta'. Any other keys
giving additional information about the run output cannot be reproduced
from the function arguments, and are therefore omitted.
Parameters
----------
samples: numpy array
Numpy array containing columns
[logl, thread label, change in nlive at sample, (thetas)]
with each row representing a single sample.
thread_min_max: numpy array
2d array with a row for each thread containing the likelihoods at which
it begins and ends.
Needed to calculate nlive_array.
Returns
-------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details). | f15356:m2 |
def get_run_threads(ns_run): | samples = array_given_run(ns_run)<EOL>unique_threads = np.unique(ns_run['<STR_LIT>'])<EOL>assert ns_run['<STR_LIT>'].shape[<NUM_LIT:0>] == unique_threads.shape[<NUM_LIT:0>], (<EOL>'<STR_LIT>'.format(<EOL>unique_threads.shape[<NUM_LIT:0>], ns_run['<STR_LIT>'].shape[<NUM_LIT:0>]))<EOL>threads = []<EOL>for i, th_lab in enumerate(unique_threads):<EOL><INDENT>thread_array = samples[np.where(samples[:, <NUM_LIT:1>] == th_lab)]<EOL>thread_array[:, <NUM_LIT:2>] = <NUM_LIT:0><EOL>thread_array[-<NUM_LIT:1>, <NUM_LIT:2>] = -<NUM_LIT:1><EOL>min_max = np.reshape(ns_run['<STR_LIT>'][i, :], (<NUM_LIT:1>, <NUM_LIT:2>))<EOL>assert min_max[<NUM_LIT:0>, <NUM_LIT:1>] == thread_array[-<NUM_LIT:1>, <NUM_LIT:0>], (<EOL>'<STR_LIT>')<EOL>threads.append(dict_given_run_array(thread_array, min_max))<EOL><DEDENT>return threads<EOL> | Get the individual threads from a nested sampling run.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
Returns
-------
threads: list of dicts
Each thread (list element) is a nested sampling run dict containing only
the samples in that thread. | f15356:m3
def combine_ns_runs(run_list_in, **kwargs): | run_list = copy.deepcopy(run_list_in)<EOL>if len(run_list) == <NUM_LIT:1>:<EOL><INDENT>run = run_list[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>nthread_tot = <NUM_LIT:0><EOL>for i, _ in enumerate(run_list):<EOL><INDENT>check_ns_run(run_list[i], **kwargs)<EOL>run_list[i]['<STR_LIT>'] += nthread_tot<EOL>nthread_tot += run_list[i]['<STR_LIT>'].shape[<NUM_LIT:0>]<EOL><DEDENT>thread_min_max = np.vstack([run['<STR_LIT>'] for run in run_list])<EOL>samples_temp = np.vstack([array_given_run(run) for run in run_list])<EOL>samples_temp = samples_temp[np.argsort(samples_temp[:, <NUM_LIT:0>])]<EOL>run = dict_given_run_array(samples_temp, thread_min_max)<EOL>run['<STR_LIT>'] = {}<EOL>for key in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>try:<EOL><INDENT>run['<STR_LIT>'][key] = sum([temp['<STR_LIT>'][key] for temp in<EOL>run_list_in])<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>check_ns_run(run, **kwargs)<EOL>return run<EOL> | Combine a list of complete nested sampling run dictionaries into a single
ns run.
Input runs must not contain any repeated threads.
Parameters
----------
run_list_in: list of dicts
List of nested sampling runs in dict format (see data_processing module
docstring for more details).
kwargs: dict, optional
Options for check_ns_run.
Returns
-------
run: dict
Nested sampling run dict (see data_processing module docstring for more
details). | f15356:m4 |
def combine_threads(threads, assert_birth_point=False): | thread_min_max = np.vstack([td['<STR_LIT>'] for td in threads])<EOL>assert len(threads) == thread_min_max.shape[<NUM_LIT:0>]<EOL>samples_temp = np.vstack([array_given_run(thread) for thread in threads])<EOL>samples_temp = samples_temp[np.argsort(samples_temp[:, <NUM_LIT:0>])]<EOL>logl_starts = thread_min_max[:, <NUM_LIT:0>]<EOL>state = np.random.get_state() <EOL>np.random.seed(<NUM_LIT:0>) <EOL>for logl_start in logl_starts[logl_starts != -np.inf]:<EOL><INDENT>ind = np.where(samples_temp[:, <NUM_LIT:0>] == logl_start)[<NUM_LIT:0>]<EOL>if assert_birth_point:<EOL><INDENT>assert ind.shape == (<NUM_LIT:1>,),'<STR_LIT>' + str(ind.shape)<EOL><DEDENT>if ind.shape == (<NUM_LIT:1>,):<EOL><INDENT>samples_temp[ind[<NUM_LIT:0>], <NUM_LIT:2>] += <NUM_LIT:1><EOL><DEDENT>elif ind.shape == (<NUM_LIT:0>,):<EOL><INDENT>ind_closest = np.argmin(np.abs(samples_temp[:, <NUM_LIT:0>] - logl_start))<EOL>samples_temp[ind_closest, <NUM_LIT:2>] += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>samples_temp[np.random.choice(ind), <NUM_LIT:2>] += <NUM_LIT:1><EOL><DEDENT><DEDENT>np.random.set_state(state)<EOL>ns_run = dict_given_run_array(samples_temp, thread_min_max)<EOL>try:<EOL><INDENT>check_ns_run_threads(ns_run)<EOL><DEDENT>except AssertionError:<EOL><INDENT>ns_run['<STR_LIT>'] = None<EOL>ns_run['<STR_LIT>'] = None<EOL><DEDENT>return ns_run<EOL> | Combine list of threads into a single ns run.
This is different to combining runs as repeated threads are allowed, and as
some threads can start from log-likelihood contours on which no dead
point in the run is present.
Note that if the thread labels are not all unique and in ascending order,
the output will fail check_ns_run. However, provided the thread labels are
not used, it will work fine for calculations based on nlive, logl and theta.
Parameters
----------
threads: list of dicts
List of nested sampling run dicts, each representing a single thread.
assert_birth_point: bool, optional
Whether or not to assert there is exactly one point present in the run
with the log-likelihood at which each point was born. This is not true
for bootstrap resamples of runs, where birth points may be repeated or
not present at all.
Returns
-------
run: dict
Nested sampling run dict (see data_processing module docstring for more
details). | f15356:m5 |
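A round-trip sketch: get_run_threads splits a run into single-thread runs and combine_threads merges them back (dummy data used for illustration):

```python
import numpy as np
import nestcheck.dummy_data

run = nestcheck.dummy_data.get_dummy_run(3, 10, seed=0)
threads = get_run_threads(run)         # split into 3 single-thread runs
recombined = combine_threads(threads)  # merge them back together
print(np.array_equal(recombined['logl'], run['logl']))  # True
```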
def get_logw(ns_run, simulate=False): | try:<EOL><INDENT>logx = get_logx(ns_run['<STR_LIT>'], simulate=simulate)<EOL>logw = np.zeros(ns_run['<STR_LIT>'].shape[<NUM_LIT:0>])<EOL>logw[<NUM_LIT:1>:-<NUM_LIT:1>] = log_subtract(logx[:-<NUM_LIT:2>], logx[<NUM_LIT:2>:]) - np.log(<NUM_LIT:2>)<EOL>logw[<NUM_LIT:0>] = log_subtract(<NUM_LIT:0>, scipy.special.logsumexp([logx[<NUM_LIT:0>], logx[<NUM_LIT:1>]]) -<EOL>np.log(<NUM_LIT:2>))<EOL>logw[-<NUM_LIT:1>] = scipy.special.logsumexp([logx[-<NUM_LIT:2>], logx[-<NUM_LIT:1>]]) - np.log(<NUM_LIT:2>)<EOL>logw += ns_run['<STR_LIT>']<EOL>return logw<EOL><DEDENT>except IndexError:<EOL><INDENT>if ns_run['<STR_LIT>'].shape[<NUM_LIT:0>] == <NUM_LIT:1>:<EOL><INDENT>return copy.deepcopy(ns_run['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT> | r"""Calculates the log posterior weights of the samples (using logarithms
to avoid overflow errors with very large or small values).
Uses the trapezium rule such that the weight of point i is
.. math:: w_i = \mathcal{L}_i (X_{i-1} - X_{i+1}) / 2
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
simulate: bool, optional
Should log prior volumes logx be simulated from their distribution (if
False, their expected values are used).
Returns
-------
logw: 1d numpy array
Log posterior masses of points. | f15356:m6 |
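For illustration, a common use of these log weights is evidence estimation via a logsumexp; a sketch assuming `run` is a nestcheck run dict:

import scipy.special
from nestcheck.ns_run_utils import get_logw

logw = get_logw(run)                    # log posterior mass of each sample
logz = scipy.special.logsumexp(logw)    # log evidence estimate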
def get_w_rel(ns_run, simulate=False): | logw = get_logw(ns_run, simulate=simulate)<EOL>return np.exp(logw - logw.max())<EOL> | Get the relative posterior weights of the samples, normalised so
the maximum sample weight is 1. This is calculated from get_logw with
protection against numerical overflows.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
simulate: bool, optional
See the get_logw docstring for more details.
Returns
-------
w_rel: 1d numpy array
Relative posterior masses of points. | f15356:m7 |
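A quick check of the normalisation described above (again assuming `run` is a nestcheck run dict):

import numpy as np
from nestcheck.ns_run_utils import get_w_rel

w_rel = get_w_rel(run)
# By construction w_rel = exp(logw - logw.max()), so the largest weight is 1
assert np.isclose(w_rel.max(), 1.0)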
def get_logx(nlive, simulate=False): | assert nlive.min() > <NUM_LIT:0>, (<EOL>'<STR_LIT>' + str(nlive))<EOL>if simulate:<EOL><INDENT>logx_steps = np.log(np.random.random(nlive.shape)) / nlive<EOL><DEDENT>else:<EOL><INDENT>logx_steps = -<NUM_LIT:1> * (nlive.astype(float) ** -<NUM_LIT:1>)<EOL><DEDENT>return np.cumsum(logx_steps)<EOL> | r"""Returns a logx vector showing the expected or simulated logx positions
of points.
The shrinkage factor between two points
.. math:: t_i = X_{i} / X_{i-1}
is distributed as the largest of :math:`n_i` uniform random variables
between 0 and 1, where :math:`n_i` is the local number of live points.
We are interested in
.. math:: \log(t_i) = \log X_{i} - \log X_{i-1}
which has expected value :math:`-1/n_i`.
Parameters
----------
nlive: 1d numpy array
Ordered local number of live points present at each point's
iso-likelihood contour.
simulate: bool, optional
Should log prior volumes logx be simulated from their distribution (if
False their expected values are used).
Returns
-------
logx: 1d numpy array
log X values for points. | f15356:m8 |
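A worked example of the expected values: with a constant 20 live points, each step shrinks log X by 1/20, so the cumulative sum is -1/20, -2/20, and so on (only numpy and the function above are assumed):

import numpy as np
from nestcheck.ns_run_utils import get_logx

nlive = np.full(100, 20)    # 100 samples with 20 live points throughout
logx = get_logx(nlive)      # expected log X values
assert np.allclose(logx, -np.arange(1, 101) / 20)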
def log_subtract(loga, logb): | return loga + np.log(<NUM_LIT:1> - np.exp(logb - loga))<EOL> | r"""Numerically stable method for avoiding overflow errors when calculating
:math:`\log (a-b)`, given :math:`\log (a)`, :math:`\log (b)` and that
:math:`a > b`.
See https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/
for more details.
Parameters
----------
loga: float
logb: float
Must be less than loga.
Returns
-------
log(a - b): float | f15356:m9 |
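A numerical illustration of why this identity matters; the values are arbitrary, chosen so that np.exp would overflow on either argument alone:

import numpy as np

loga, logb = 1000.0, 999.0
# log(a - b) = loga + log(1 - exp(logb - loga)); only the difference of the
# log arguments is exponentiated, so no overflow occurs
log_diff = loga + np.log(1 - np.exp(logb - loga))   # approximately 999.5413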
def check_ns_run(run, dup_assert=False, dup_warn=False): | assert isinstance(run, dict)<EOL>check_ns_run_members(run)<EOL>check_ns_run_logls(run, dup_assert=dup_assert, dup_warn=dup_warn)<EOL>check_ns_run_threads(run)<EOL> | Checks a nestcheck format nested sampling run dictionary has the
expected properties (see the data_processing module docstring for more
details).
Parameters
----------
run: dict
nested sampling run to check.
dup_assert: bool, optional
See check_ns_run_logls docstring.
dup_warn: bool, optional
See check_ns_run_logls docstring.
Raises
------
AssertionError
if run does not have expected properties. | f15356:m10 |
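Typical usage sketch (assumes `run` is a run dict and that check_ns_run lives in the same ns_run_utils module as the functions above):

from nestcheck.ns_run_utils import check_ns_run

check_ns_run(run, dup_warn=True)   # warn, rather than raise, on repeated logls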
def check_ns_run_members(run): | run_keys = list(run.keys())<EOL>for key in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>']:<EOL><INDENT>assert key in run_keys<EOL>run_keys.remove(key)<EOL><DEDENT>for key in ['<STR_LIT>']:<EOL><INDENT>try:<EOL><INDENT>run_keys.remove(key)<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>assert not run_keys, '<STR_LIT>' + str(run_keys)<EOL>for key in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>']:<EOL><INDENT>assert isinstance(run[key], np.ndarray), (<EOL>key + '<STR_LIT>' + type(run[key]).__name__)<EOL><DEDENT>assert run['<STR_LIT>'].ndim == <NUM_LIT:1><EOL>assert run['<STR_LIT>'].shape == run['<STR_LIT>'].shape<EOL>assert run['<STR_LIT>'].shape == run['<STR_LIT>'].shape<EOL>assert run['<STR_LIT>'].ndim == <NUM_LIT:2><EOL>assert run['<STR_LIT>'].shape[<NUM_LIT:0>] == run['<STR_LIT>'].shape[<NUM_LIT:0>]<EOL> | Check nested sampling run member keys and values.
Parameters
----------
run: dict
nested sampling run to check.
Raises
------
AssertionError
if run does not have expected properties. | f15356:m11 |
def check_ns_run_logls(run, dup_assert=False, dup_warn=False): | assert np.array_equal(run['<STR_LIT>'], run['<STR_LIT>'][np.argsort(run['<STR_LIT>'])])<EOL>if dup_assert or dup_warn:<EOL><INDENT>unique_logls, counts = np.unique(run['<STR_LIT>'], return_counts=True)<EOL>repeat_logls = run['<STR_LIT>'].shape[<NUM_LIT:0>] - unique_logls.shape[<NUM_LIT:0>]<EOL>msg = ('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>').format(<EOL>repeat_logls, run['<STR_LIT>'].shape[<NUM_LIT:0>],<EOL>unique_logls[counts != <NUM_LIT:1>], counts[counts != <NUM_LIT:1>],<EOL>unique_logls.shape[<NUM_LIT:0>], np.where(counts != <NUM_LIT:1>)[<NUM_LIT:0>])<EOL>if dup_assert:<EOL><INDENT>assert repeat_logls == <NUM_LIT:0>, msg<EOL><DEDENT>elif dup_warn:<EOL><INDENT>if repeat_logls != <NUM_LIT:0>:<EOL><INDENT>warnings.warn(msg, UserWarning)<EOL><DEDENT><DEDENT><DEDENT> | Check run logls are unique and in the correct order.
Parameters
----------
run: dict
nested sampling run to check.
dup_assert: bool, optional
Whether to raise an AssertionError if there are duplicate logl values.
dup_warn: bool, optional
Whether to give a UserWarning if there are duplicate logl values (only
used if dup_assert is False).
Raises
------
AssertionError
if run does not have expected properties. | f15356:m12 |
def check_ns_run_threads(run): | assert run['<STR_LIT>'].dtype == int<EOL>uniq_th = np.unique(run['<STR_LIT>'])<EOL>assert np.array_equal(<EOL>np.asarray(range(run['<STR_LIT>'].shape[<NUM_LIT:0>])), uniq_th),str(uniq_th)<EOL>assert np.any(run['<STR_LIT>'][:, <NUM_LIT:0>] == -np.inf), (<EOL>'<STR_LIT>' +<EOL>'<STR_LIT>')<EOL>for th_lab in uniq_th:<EOL><INDENT>inds = np.where(run['<STR_LIT>'] == th_lab)[<NUM_LIT:0>]<EOL>th_info = '<STR_LIT>'.format(<EOL>th_lab, run['<STR_LIT>'][inds[<NUM_LIT:0>]], run['<STR_LIT>'][th_lab, :])<EOL>assert run['<STR_LIT>'][th_lab, <NUM_LIT:0>] <= run['<STR_LIT>'][inds[<NUM_LIT:0>]], (<EOL>'<STR_LIT>' +<EOL>th_info + '<STR_LIT>'.format(<EOL>run['<STR_LIT>'][inds[<NUM_LIT:0>]] - run['<STR_LIT>'][th_lab, <NUM_LIT:0>]))<EOL>assert run['<STR_LIT>'][th_lab, <NUM_LIT:1>] == run['<STR_LIT>'][inds[-<NUM_LIT:1>]], (<EOL>'<STR_LIT>' + th_info)<EOL><DEDENT> | Check thread labels and thread_min_max have expected properties.
Parameters
----------
run: dict
Nested sampling run to check.
Raises
------
AssertionError
If run does not have expected properties. | f15356:m13 |
def bootstrap_resample_run(ns_run, threads=None, ninit_sep=False,<EOL>random_seed=False): | if random_seed is not False:<EOL><INDENT>state = np.random.get_state()<EOL>np.random.seed(random_seed)<EOL><DEDENT>if threads is None:<EOL><INDENT>threads = nestcheck.ns_run_utils.get_run_threads(ns_run)<EOL><DEDENT>n_threads = len(threads)<EOL>if ninit_sep:<EOL><INDENT>try:<EOL><INDENT>ninit = ns_run['<STR_LIT>']['<STR_LIT>']<EOL>assert np.all(ns_run['<STR_LIT>'][:ninit, <NUM_LIT:0>] == -np.inf), (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>inds = np.random.randint(<NUM_LIT:0>, ninit, ninit)<EOL>inds = np.append(inds, np.random.randint(ninit, n_threads,<EOL>n_threads - ninit))<EOL><DEDENT>except KeyError:<EOL><INDENT>warnings.warn((<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'), UserWarning)<EOL>ninit_sep = False<EOL><DEDENT><DEDENT>if not ninit_sep:<EOL><INDENT>inds = np.random.randint(<NUM_LIT:0>, n_threads, n_threads)<EOL><DEDENT>threads_temp = [threads[i] for i in inds]<EOL>resampled_run = nestcheck.ns_run_utils.combine_threads(threads_temp)<EOL>try:<EOL><INDENT>resampled_run['<STR_LIT>'] = ns_run['<STR_LIT>']<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>if random_seed is not False:<EOL><INDENT>np.random.set_state(state)<EOL><DEDENT>return resampled_run<EOL> | Bootstrap resamples threads of nested sampling run, returning a new
(resampled) nested sampling run.
Parameters
----------
ns_run: dict
Nested sampling run dictionary.
threads: None or list of numpy arrays, optional
Run threads to resample. If None (default), the threads are extracted
from ns_run.
ninit_sep: bool
For dynamic runs: resample initial threads and dynamically added
threads separately. Useful when there are only a few threads which
start by sampling the whole prior, as errors occur if none of these are
included in the bootstrap resample.
random_seed: None, bool or int, optional
Set the numpy random seed. Default is False, which leaves the seed
unchanged. Can be set to an integer for reproducible resampling, or to
None so a seed is chosen from the computer's internal state (useful for
reliable results when multiprocessing).
Returns
-------
resampled_run: dict
Bootstrap resampled nested sampling run dictionary. | f15357:m0
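Usage sketch (the nestcheck.error_analysis module path is assumed from the surrounding code; `run` is an existing run dict):

from nestcheck.error_analysis import bootstrap_resample_run

resampled = bootstrap_resample_run(run, random_seed=0)   # reproducible resample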
def run_std_bootstrap(ns_run, estimator_list, **kwargs): | bs_values = run_bootstrap_values(ns_run, estimator_list, **kwargs)<EOL>stds = np.zeros(bs_values.shape[<NUM_LIT:0>])<EOL>for j, _ in enumerate(stds):<EOL><INDENT>stds[j] = np.std(bs_values[j, :], ddof=<NUM_LIT:1>)<EOL><DEDENT>return stds<EOL> | Uses bootstrap resampling to calculate an estimate of the
standard deviation of the distribution of sampling errors (the
uncertainty on the calculation) for a single nested sampling run.
For more details about bootstrap resampling for estimating sampling
errors see 'Sampling errors in nested sampling parameter estimation'
(Higson et al. 2018).
Parameters
----------
ns_run: dict
Nested sampling run dictionary.
estimator_list: list of functions for estimating quantities (such as the
Bayesian evidence or mean of parameters) from nested sampling runs.
Example functions can be found in estimators.py. Each should have
arguments: func(ns_run, logw=None)
kwargs: dict
kwargs for run_bootstrap_values
Returns
-------
output: 1d numpy array
Sampling error on calculation result for each estimator in
estimator_list. | f15357:m1 |
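Sketch of estimating error bars this way, using two estimators from nestcheck.estimators (logz and param_mean are real nestcheck estimator functions; `run` is assumed):

from nestcheck.error_analysis import run_std_bootstrap
from nestcheck.estimators import logz, param_mean

# One bootstrap standard deviation per estimator
stds = run_std_bootstrap(run, [logz, param_mean], n_simulate=100)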
def run_bootstrap_values(ns_run, estimator_list, **kwargs): | ninit_sep = kwargs.pop('<STR_LIT>', False)<EOL>flip_skew = kwargs.pop('<STR_LIT>', True)<EOL>n_simulate = kwargs.pop('<STR_LIT>') <EOL>random_seeds = kwargs.pop('<STR_LIT>', range(n_simulate))<EOL>assert len(random_seeds) == n_simulate<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>threads = nestcheck.ns_run_utils.get_run_threads(ns_run)<EOL>bs_values = np.zeros((len(estimator_list), n_simulate))<EOL>for i, random_seed in enumerate(random_seeds):<EOL><INDENT>ns_run_temp = bootstrap_resample_run(<EOL>ns_run, threads=threads, ninit_sep=ninit_sep,<EOL>random_seed=random_seed)<EOL>bs_values[:, i] = nestcheck.ns_run_utils.run_estimators(<EOL>ns_run_temp, estimator_list)<EOL>del ns_run_temp<EOL><DEDENT>if flip_skew:<EOL><INDENT>estimator_means = np.mean(bs_values, axis=<NUM_LIT:1>)<EOL>for i, mu in enumerate(estimator_means):<EOL><INDENT>bs_values[i, :] = (<NUM_LIT:2> * mu) - bs_values[i, :]<EOL><DEDENT><DEDENT>return bs_values<EOL> | Uses bootstrap resampling to calculate an estimate of the
standard deviation of the distribution of sampling errors (the
uncertainty on the calculation) for a single nested sampling run.
For more details about bootstrap resampling for estimating sampling
errors see 'Sampling errors in nested sampling parameter estimation'
(Higson et al. 2018).
Parameters
----------
ns_run: dict
Nested sampling run dictionary.
estimator_list: list of functions for estimating quantities (such as the
Bayesian evidence or mean of parameters) from nested sampling runs.
Example functions can be found in estimators.py. Each should have
arguments: func(ns_run, logw=None)
n_simulate: int
ninit_sep: bool, optional
For dynamic runs: resample initial threads and dynamically added
threads separately. Useful when there are only a few threads which
start by sampling the whole prior, as errors occur if none of these are
included in the bootstrap resample.
flip_skew: bool, optional
Determine if distribution of bootstrap values should be flipped about
its mean to better represent our probability distribution on the true
value - see "Bayesian astrostatistics: a backward look to the future"
(Loredo, 2012 Figure 2) for an explanation.
If True, the samples :math:`X` are mapped to :math:`2 \mu - X`, where
:math:`\mu` is the mean sample value.
This leaves the mean and standard deviation unchanged.
random_seeds: list, optional
list of random_seed arguments for bootstrap_resample_run.
Defaults to range(n_simulate) in order to give reproducible results.
Returns
-------
bs_values: 2d numpy array
Bootstrap calculation values for each estimator in estimator_list; has
shape (len(estimator_list), n_simulate). | f15357:m2
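The returned array holds one row of bootstrap values per estimator, so its sample standard deviation reproduces run_std_bootstrap; a sketch assuming `run`:

import numpy as np
from nestcheck.error_analysis import run_bootstrap_values
from nestcheck.estimators import logz

bs_values = run_bootstrap_values(run, [logz], n_simulate=100)
assert bs_values.shape == (1, 100)
std = np.std(bs_values[0, :], ddof=1)   # matches run_std_bootstrap's output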
def run_ci_bootstrap(ns_run, estimator_list, **kwargs): | cred_int = kwargs.pop('<STR_LIT>') <EOL>bs_values = run_bootstrap_values(ns_run, estimator_list, **kwargs)<EOL>expected_estimators = nestcheck.ns_run_utils.run_estimators(<EOL>ns_run, estimator_list)<EOL>cdf = ((np.asarray(range(bs_values.shape[<NUM_LIT:1>])) + <NUM_LIT:0.5>) /<EOL>bs_values.shape[<NUM_LIT:1>])<EOL>ci_output = expected_estimators * <NUM_LIT:2><EOL>for i, _ in enumerate(ci_output):<EOL><INDENT>ci_output[i] -= np.interp(<EOL><NUM_LIT:1.> - cred_int, cdf, np.sort(bs_values[i, :]))<EOL><DEDENT>return ci_output<EOL> | Uses bootstrap resampling to calculate credible intervals on the
distribution of sampling errors (the uncertainty on the calculation)
for a single nested sampling run.
For more details about bootstrap resampling for estimating sampling
errors see 'Sampling errors in nested sampling parameter estimation'
(Higson et al. 2018).
Parameters
----------
ns_run: dict
Nested sampling run dictionary.
estimator_list: list of functions for estimating quantities (such as the
Bayesian evidence or mean of parameters) from nested sampling runs.
Example functions can be found in estimators.py. Each should have
arguments: func(ns_run, logw=None)
cred_int: float
n_simulate: int
ninit_sep: bool, optional
Returns
-------
output: 1d numpy array
Credible interval on sampling error on calculation result for each
estimator in estimator_list. | f15357:m3 |
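Usage sketch (cred_int is required and passed by keyword; the value 0.84 here is an arbitrary illustration, roughly an upper 1-sigma bound):

from nestcheck.error_analysis import run_ci_bootstrap
from nestcheck.estimators import logz

ci = run_ci_bootstrap(run, [logz], cred_int=0.84, n_simulate=100)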
def run_std_simulate(ns_run, estimator_list, n_simulate=None): | assert n_simulate is not None, '<STR_LIT>'<EOL>all_values = np.zeros((len(estimator_list), n_simulate))<EOL>for i in range(n_simulate):<EOL><INDENT>all_values[:, i] = nestcheck.ns_run_utils.run_estimators(<EOL>ns_run, estimator_list, simulate=True)<EOL><DEDENT>stds = np.zeros(all_values.shape[<NUM_LIT:0>])<EOL>for i, _ in enumerate(stds):<EOL><INDENT>stds[i] = np.std(all_values[i, :], ddof=<NUM_LIT:1>)<EOL><DEDENT>return stds<EOL> | Uses the 'simulated weights' method to calculate an estimate of the
standard deviation of the distribution of sampling errors (the
uncertainty on the calculation) for a single nested sampling run.
Note that the simulated weights method is not accurate for parameter
estimation calculations.
For more details about the simulated weights method for estimating sampling
errors see 'Sampling errors in nested sampling parameter estimation'
(Higson et al. 2018).
Parameters
----------
ns_run: dict
Nested sampling run dictionary.
estimator_list: list of functions for estimating quantities (such as the
Bayesian evidence or mean of parameters) from nested sampling runs.
Example functions can be found in estimators.py. Each should have
arguments: func(ns_run, logw=None)
n_simulate: int
Returns
-------
output: 1d numpy array
Sampling error on calculation result for each estimator in
estimator_list. | f15357:m4 |
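Usage sketch (same assumptions as the bootstrap examples above; per the note in the docstring, this method is best suited to evidence-like quantities):

from nestcheck.error_analysis import run_std_simulate
from nestcheck.estimators import logz

stds = run_std_simulate(run, [logz], n_simulate=100)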
def implementation_std(vals_std, vals_std_u, bs_std, bs_std_u, **kwargs): | nsim = kwargs.pop('<STR_LIT>', <NUM_LIT>)<EOL>random_seed = kwargs.pop('<STR_LIT>', <NUM_LIT:0>)<EOL>if kwargs:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(kwargs))<EOL><DEDENT>imp_var = (vals_std ** <NUM_LIT:2>) - (bs_std ** <NUM_LIT:2>)<EOL>imp_std = np.sqrt(np.abs(imp_var)) * np.sign(imp_var)<EOL>ind = np.where(imp_std <= <NUM_LIT:0>)[<NUM_LIT:0>]<EOL>imp_std[ind] = <NUM_LIT:0><EOL>imp_std_u = np.zeros(imp_std.shape)<EOL>imp_frac = imp_std / vals_std<EOL>imp_frac_u = np.zeros(imp_frac.shape)<EOL>for i, _ in enumerate(imp_std_u):<EOL><INDENT>state = np.random.get_state()<EOL>np.random.seed(random_seed)<EOL>sim_vals_std = np.random.normal(vals_std[i], vals_std_u[i], size=nsim)<EOL>sim_bs_std = np.random.normal(bs_std[i], bs_std_u[i], size=nsim)<EOL>sim_imp_var = (sim_vals_std ** <NUM_LIT:2>) - (sim_bs_std ** <NUM_LIT:2>)<EOL>sim_imp_std = np.sqrt(np.abs(sim_imp_var)) * np.sign(sim_imp_var)<EOL>imp_std_u[i] = np.std(sim_imp_std, ddof=<NUM_LIT:1>)<EOL>imp_frac_u[i] = np.std((sim_imp_std / sim_vals_std), ddof=<NUM_LIT:1>)<EOL>np.random.set_state(state)<EOL><DEDENT>return imp_std, imp_std_u, imp_frac, imp_frac_u<EOL> | r"""Estimates variation of results due to implementation-specific
effects. See 'nestcheck: diagnostic tests for nested sampling calculations'
(Higson et al. 2019) for more details.
Uncertainties on the output are calculated numerically using the fact
that (from central limit theorem) our uncertainties on vals_std and
bs_std are (approximately) normally distributed. This is needed as
results from standard error propagation techniques are not valid when
the uncertainties are not small compared to the result.
Parameters
----------
vals_std: numpy array
Standard deviations of results from repeated calculations.
vals_std_u: numpy array
:math:`1\sigma` uncertainties on vals_std.
bs_std: numpy array
Bootstrap error estimates. Each element should correspond to the same
element in vals_std.
bs_std_u: numpy array
:math:`1\sigma` uncertainties on bs_std.
nsim: int, optional
Number of simulations to use to numerically calculate the uncertainties
on the estimated implementation-specific effects.
random_seed: int or None, optional
Numpy random seed. Use to get reproducible uncertainties on the output.
Returns
-------
imp_std: numpy array
Estimated standard deviation of results due to implementation-specific
effects.
imp_std_u: numpy array
:math:`1\sigma` uncertainties on imp_std.
imp_frac: numpy array
imp_std as a fraction of vals_std.
imp_frac_u:
:math:`1\sigma` uncertainties on imp_frac. | f15357:m5 |
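A worked example of the central subtraction in quadrature: with an observed scatter of 0.5 and a mean bootstrap error of 0.3, the implementation-specific part is sqrt(0.5**2 - 0.3**2) = 0.4 (the input values here are invented for illustration):

import numpy as np
from nestcheck.error_analysis import implementation_std

vals_std, vals_std_u = np.array([0.5]), np.array([0.05])
bs_std, bs_std_u = np.array([0.3]), np.array([0.03])
imp_std, imp_std_u, imp_frac, imp_frac_u = implementation_std(
    vals_std, vals_std_u, bs_std, bs_std_u)
# imp_std[0] == 0.4, so imp_frac[0] == 0.8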
def run_thread_values(run, estimator_list): | threads = nestcheck.ns_run_utils.get_run_threads(run)<EOL>vals_list = [nestcheck.ns_run_utils.run_estimators(th, estimator_list)<EOL>for th in threads]<EOL>vals_array = np.stack(vals_list, axis=<NUM_LIT:1>)<EOL>assert vals_array.shape == (len(estimator_list), len(threads))<EOL>return vals_array<EOL> | Helper function for parallelising thread_values_df.
Parameters
----------
run: dict
Nested sampling run dictionary.
estimator_list: list of functions
Returns
-------
vals_array: numpy array
Array of estimator values for each thread.
Has shape (len(estimator_list), len(threads)). | f15357:m6
def pairwise_distances(dist_list, earth_mover_dist=True, energy_dist=True): | out = []<EOL>index = []<EOL>for i, samp_i in enumerate(dist_list):<EOL><INDENT>for j, samp_j in enumerate(dist_list):<EOL><INDENT>if j < i:<EOL><INDENT>index.append(str((i, j)))<EOL>out.append(statistical_distances(<EOL>samp_i, samp_j, earth_mover_dist=earth_mover_dist,<EOL>energy_dist=energy_dist))<EOL><DEDENT><DEDENT><DEDENT>columns = ['<STR_LIT>', '<STR_LIT>']<EOL>if earth_mover_dist:<EOL><INDENT>columns.append('<STR_LIT>')<EOL><DEDENT>if energy_dist:<EOL><INDENT>columns.append('<STR_LIT>')<EOL><DEDENT>ser = pd.DataFrame(out, index=index, columns=columns).unstack()<EOL>ser.index.names = ['<STR_LIT>', '<STR_LIT>']<EOL>return ser<EOL> | Applies statistical_distances to each unique pair of distribution
samples in dist_list.
Parameters
----------
dist_list: list of 1d arrays
earth_mover_dist: bool, optional
Passed to statistical_distances.
energy_dist: bool, optional
Passed to statistical_distances.
Returns
-------
ser: pandas Series object
Values are statistical distances. Index levels are:
calculation type: name of statistical distance.
run: tuple containing the index in dist_list of the pair of samples
arrays from which the statistical distance was computed. | f15357:m7 |
def statistical_distances(samples1, samples2, earth_mover_dist=True,<EOL>energy_dist=True): | out = []<EOL>temp = scipy.stats.ks_2samp(samples1, samples2)<EOL>out.append(temp.pvalue)<EOL>out.append(temp.statistic)<EOL>if earth_mover_dist:<EOL><INDENT>out.append(scipy.stats.wasserstein_distance(samples1, samples2))<EOL><DEDENT>if energy_dist:<EOL><INDENT>out.append(scipy.stats.energy_distance(samples1, samples2))<EOL><DEDENT>return np.asarray(out)<EOL> | Compute measures of the statistical distance between samples.
Parameters
----------
samples1: 1d array
samples2: 1d array
earth_mover_dist: bool, optional
Whether or not to compute the Earth mover's distance between the
samples.
energy_dist: bool, optional
Whether or not to compute the energy distance between the samples.
Returns
-------
1d array | f15357:m8 |
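A self-contained sketch with synthetic samples (the two normal distributions are arbitrary; the comment gives the output order implied by the code above):

import numpy as np
from nestcheck.error_analysis import statistical_distances

rng = np.random.RandomState(0)
samples1 = rng.normal(0.0, 1.0, 1000)
samples2 = rng.normal(0.5, 1.0, 1000)
# Order: [KS p-value, KS statistic, earth mover's distance, energy distance]
dists = statistical_distances(samples1, samples2)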
def update_results(self, results): | if not results:<EOL><INDENT>raise PickerResultException('<STR_LIT>')<EOL><DEDENT>if results['<STR_LIT>'] != self.sequence or results['<STR_LIT>'] != self.season:<EOL><INDENT>raise PickerResultException('<STR_LIT>')<EOL><DEDENT>completed = {g['<STR_LIT>']: g for g in results['<STR_LIT>'] if g['<STR_LIT:status>'].startswith('<STR_LIT:F>')}<EOL>if not completed:<EOL><INDENT>return (<NUM_LIT:0>, None)<EOL><DEDENT>count = <NUM_LIT:0><EOL>for game in self.games.incomplete(home__abbr__in=completed.keys()):<EOL><INDENT>result = completed.get(game.home.abbr, None)<EOL>if result:<EOL><INDENT>winner = result['<STR_LIT>']<EOL>game.winner = (<EOL>game.home if game.home.abbr == winner<EOL>else game.away if game.away.abbr == winner else None<EOL>)<EOL>count += <NUM_LIT:1><EOL><DEDENT><DEDENT>last_game = self.last_game<EOL>if not self.points and last_game.winner:<EOL><INDENT>now = datetime_now()<EOL>if now > last_game.end_time:<EOL><INDENT>result = completed.get(last_game.home.abbr, None)<EOL>if result:<EOL><INDENT>self.points = result['<STR_LIT>'] + result['<STR_LIT>']<EOL>self.save()<EOL><DEDENT><DEDENT><DEDENT>if count:<EOL><INDENT>self.update_pick_status()<EOL><DEDENT>return (count, self.points)<EOL> | results schema: {'sequence': 1, 'season': 2018, 'games': [{
"home": "HOME",
"away": "AWAY",
"home_score": 15,
"away_score": 10,
"status": "Final",
"winner": "HOME",
}]} | f15389:c9:m13 |
def update_picks(self, games=None, points=None): | if games:<EOL><INDENT>game_dict = {g.id: g for g in self.gameset.games.filter(id__in=games)}<EOL>game_picks = {pick.game.id: pick for pick in self.gamepicks.filter(game__id__in=games)}<EOL>for key, winner in games.items():<EOL><INDENT>game = game_dict[key]<EOL>if not game.has_started:<EOL><INDENT>pick = game_picks[key]<EOL>pick.winner_id = winner<EOL>pick.save()<EOL><DEDENT><DEDENT><DEDENT>if points is not None:<EOL><INDENT>self.points = points<EOL>self.save()<EOL><DEDENT>if games or points:<EOL><INDENT>self.updated_signal.send(sender=self.__class__, pickset=self, auto_pick=False)<EOL><DEDENT> | games can be dict of {game.id: winner_id} for all picked games to update | f15389:c11:m7 |
@task<EOL>def clean(ctx): | rmdb(ctx)<EOL>ctx.run('<STR_LIT>')<EOL> | Remove build artifacts | f15406:m1 |
@task<EOL>def install(ctx): | ctx.run('<STR_LIT>', pty=True)<EOL> | Install base requirements | f15406:m2 |
@task<EOL>def demo(ctx, reset=False): | if reset:<EOL><INDENT>rmdb(ctx)<EOL><DEDENT>ctx.run('<STR_LIT>', pty=True)<EOL>ctx.run('<STR_LIT>', pty=True)<EOL>ctx.run('<STR_LIT>', pty=True)<EOL> | Set up the demo environment and run the server | f15406:m4 |
@task<EOL>def check(ctx): | ctx.run('<STR_LIT>')<EOL> | Run PEP8 checks | f15406:m5 |
def __init__(<EOL>self, name=None, bases=None, _dict=None, update=True,<EOL>*args, **kwargs<EOL>): | super(Transform, self).__init__(*args, **kwargs)<EOL>self.name = name<EOL>self.bases = (Annotation,) if bases is None else bases<EOL>self.dict = {} if _dict is None else _dict<EOL>self.update = update<EOL> | According to type constructor parameters, you can specify name,
bases and dict.
:param str name: new class name.
:param tuple bases: new class bases.
:param dict _dict: class __dict__.
:param bool update: if True, update new properties on the new class.
Otherwise replace old ones. | f15409:c0:m0 |
@staticmethod<EOL><INDENT>def mixin_class(target, cls):<DEDENT> | for name, field in getmembers(cls):<EOL><INDENT>Mixin.mixin(target, field, name)<EOL><DEDENT> | Mix cls content in target. | f15409:c1:m2 |
@staticmethod<EOL><INDENT>def mixin_function_or_method(target, routine, name=None, isbound=False):<DEDENT> | function = None<EOL>if isfunction(routine):<EOL><INDENT>function = routine<EOL><DEDENT>elif ismethod(routine):<EOL><INDENT>function = get_method_function(routine)<EOL><DEDENT>else:<EOL><INDENT>raise Mixin.MixInError(<EOL>"<STR_LIT>".format(routine))<EOL><DEDENT>if name is None:<EOL><INDENT>name = routine.__name__<EOL><DEDENT>if not isclass(target) or isbound:<EOL><INDENT>_type = type(target)<EOL>method_args = [function, target]<EOL>if PY2:<EOL><INDENT>method_args.append(_type)<EOL><DEDENT>result = MethodType(*method_args)<EOL><DEDENT>else:<EOL><INDENT>if PY2:<EOL><INDENT>result = MethodType(function, None, target)<EOL><DEDENT>else:<EOL><INDENT>result = function<EOL><DEDENT><DEDENT>Mixin.set_mixin(target, result, name)<EOL>return result<EOL> | Mixin a routine into the target.
:param routine: routine to mix in target.
:param str name: mixin name. Routine name by default.
:param bool isbound: If True (False by default), the mixin result is a
bound method to target. | f15409:c1:m3 |
@staticmethod<EOL><INDENT>def mixin(target, resource, name=None):<DEDENT> | result = None<EOL>if ismethod(resource) or isfunction(resource):<EOL><INDENT>result = Mixin.mixin_function_or_method(target, resource, name)<EOL><DEDENT>elif isclass(resource):<EOL><INDENT>result = list()<EOL>for name, content in getmembers(resource):<EOL><INDENT>if isclass(content):<EOL><INDENT>mixin = Mixin.set_mixin(target, content, name)<EOL><DEDENT>else:<EOL><INDENT>mixin = Mixin.mixin(target, content, name)<EOL><DEDENT>result.append(mixin)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>result = Mixin.set_mixin(target, resource, name)<EOL><DEDENT>return result<EOL> | Do the correct mixin depending on the type of input resource.
- Method or Function: mixin_function_or_method.
- class: mixin_class.
- other: set_mixin.
And returns the result of the chosen method (one or a list of mixins). | f15409:c1:m4
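A hypothetical usage sketch for the dispatch described above (Target and greet are invented names; a plain function routes to mixin_function_or_method):

class Target(object):
    # Hypothetical class to receive a mixin
    pass

def greet(self):
    return 'hello'

Mixin.mixin(Target, greet)   # function -> mixin_function_or_method
print(Target().greet())      # prints 'hello'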
@staticmethod<EOL><INDENT>def get_mixedins_by_name(target):<DEDENT> | result = getattr(target, Mixin.__MIXEDIN_KEY__, None)<EOL>if result is None:<EOL><INDENT>result = dict()<EOL>setattr(target, Mixin.__MIXEDIN_KEY__, result)<EOL><DEDENT>return result<EOL> | Get a dict of (name, field) couples of target mixedins. | f15409:c1:m5
@staticmethod<EOL><INDENT>def set_mixin(target, resource, name=None, override=True):<DEDENT> | if name is None and not hasattr(resource, '<STR_LIT>'):<EOL><INDENT>raise Mixin.MixInError(<EOL>"<STR_LIT>"<EOL>.format(resource))<EOL><DEDENT>result = None<EOL>name = resource.__name__ if name is None else name<EOL>mixedins_by_name = Mixin.get_mixedins_by_name(target)<EOL>result = getattr(target, name, Mixin.__NEW_CONTENT_KEY__)<EOL>if override or result == Mixin.__NEW_CONTENT_KEY__:<EOL><INDENT>if name not in mixedins_by_name:<EOL><INDENT>mixedins_by_name[name] = (result,)<EOL><DEDENT>else:<EOL><INDENT>mixedins_by_name[name] += (result,)<EOL><DEDENT>try:<EOL><INDENT>setattr(target, name, resource)<EOL><DEDENT>except (AttributeError, TypeError):<EOL><INDENT>if len(mixedins_by_name[name]) == <NUM_LIT:1>:<EOL><INDENT>del mixedins_by_name[name]<EOL>if len(mixedins_by_name) == <NUM_LIT:0>:<EOL><INDENT>delattr(target, Mixin.__MIXEDIN_KEY__)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>mixedins_by_name[name] = mixedins_by_name[name][:-<NUM_LIT:2>]<EOL><DEDENT>result = None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>result = None<EOL><DEDENT>return result<EOL> | Set a resource and returns the mixed one in target content or
Mixin.__NEW_CONTENT_KEY__ if resource name didn't exist in target.
:param str name: target content item to mix with the resource.
:param bool override: If True (default), allows an old resource to be
replaced by the new one. | f15409:c1:m6
@staticmethod<EOL><INDENT>def remove_mixin(target, name, mixedin=None, replace=True):<DEDENT> | try:<EOL><INDENT>result = getattr(target, name)<EOL><DEDENT>except AttributeError:<EOL><INDENT>raise Mixin.MixInError(<EOL>"<STR_LIT>".format(name, target)<EOL>)<EOL><DEDENT>mixedins_by_name = Mixin.get_mixedins_by_name(target)<EOL>mixedins = mixedins_by_name.get(name)<EOL>if mixedins:<EOL><INDENT>if mixedin is None:<EOL><INDENT>mixedin = mixedins[-<NUM_LIT:1>]<EOL>mixedins = mixedins[:-<NUM_LIT:2>]<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>index = mixedins.index(mixedin)<EOL><DEDENT>except ValueError:<EOL><INDENT>raise Mixin.MixInError(<EOL>"<STR_LIT>"<EOL>.format(mixedin, name, target)<EOL>)<EOL><DEDENT>mixedins = mixedins[<NUM_LIT:0>:index] + mixedins[index + <NUM_LIT:1>:]<EOL><DEDENT>if len(mixedins) == <NUM_LIT:0>:<EOL><INDENT>if mixedin != Mixin.__NEW_CONTENT_KEY__:<EOL><INDENT>setattr(target, name, mixedin)<EOL><DEDENT>else:<EOL><INDENT>delattr(target, name)<EOL><DEDENT>del mixedins_by_name[name]<EOL><DEDENT>else:<EOL><INDENT>if replace:<EOL><INDENT>setattr(target, name, mixedin)<EOL><DEDENT>mixedins_by_name[name] = mixedins<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise Mixin.MixInError(<EOL>"<STR_LIT>".format(name, target))<EOL><DEDENT>if len(mixedins_by_name) == <NUM_LIT:0>:<EOL><INDENT>delattr(target, Mixin.__MIXEDIN_KEY__)<EOL><DEDENT>return result<EOL> | Remove a mixin with name (and reference) from target and returns the
replaced one or None.
:param mixedin: a mixedin value, or the last defined mixedin if it is None
(by default).
:param bool replace: If True (default), the removed mixedin replaces
the current mixin. | f15409:c1:m7 |
@staticmethod<EOL><INDENT>def remove_mixins(target):<DEDENT> | mixedins_by_name = Mixin.get_mixedins_by_name(target).copy()<EOL>for _name in mixedins_by_name: <EOL><INDENT>while True: <EOL><INDENT>try:<EOL><INDENT>Mixin.remove_mixin(target, _name)<EOL><DEDENT>except Mixin.MixInError:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT> | Tries to restore target to a consistent state with no mixins. | f15409:c1:m8
def setUp(self): | self.annotation = Annotation()<EOL> | Create a new annotation. | f15412:c1:m0 |
def tearDown(self): | self.annotation.__del__()<EOL>del self.annotation<EOL> | Delete self.annotation. | f15412:c1:m1 |
def tearDown(self): | self.interceptor.__del__()<EOL>del self.interceptor<EOL> | Delete self.interceptor. | f15413:c0:m4
def call_target(self): | self.target()<EOL> | Call target. | f15413:c1:m2 |
def _threaded(self, *args, **kwargs): | for target in self.targets:<EOL><INDENT>result = target(*args, **kwargs)<EOL>self.queue.put(result)<EOL><DEDENT> | Call each target and put the results in the Queue. | f15415:c2:m1
def start(self, *args, **kwargs): | self.queue = Queue()<EOL>thread = Thread(target=self._threaded, args=args, kwargs=kwargs)<EOL>thread.start()<EOL>return Asynchronous.Result(self.queue, thread)<EOL> | Start execution of the function. | f15415:c2:m3 |
def _handle_timeout(self, frame=None, **_): | raise TimeOut.TimeOutError(self, frame)<EOL> | SIGALRM timeout handler. | f15415:c3:m1
def register_observer(self, observer): | self.observers.add(observer)<EOL> | Register an observer. | f15415:c5:m1 |