source_code stringlengths 45 7.24k | function_name stringlengths 1 111 | messages list |
|---|---|---|
def x(data, var_names=None, quartiles=True, credible_interval=0.94, shade=0.35,
      bw=4.5, sharey=True, figsize=None, textsize=None, ax=None,
      kwargs_shade=None):
    """Plot one violin (or categorical histogram) per posterior variable.

    Parameters
    ----------
    data : obj
        Any object convertible to a posterior dataset via ``convert_to_dataset``.
    var_names : list of str, optional
        Variables to plot; defaults to all posterior variables.
    quartiles : bool
        If True, overlay the interquartile range as a thick black bar.
    credible_interval : float
        Width of the HPD interval drawn as a thin black bar.
    shade : float
        Alpha used for the shaded violin/histogram body.
    bw : float
        Bandwidth passed to the KDE used for continuous variables.
    sharey : bool
        Share the y axis between panels (and collapse horizontal spacing).
    figsize, textsize : optional
        Figure size / text scaling forwarded to ``_scale_fig_size``.
    ax : matplotlib axes, optional
        Existing axes to draw on; a new figure is created when None.
    kwargs_shade : dict, optional
        Extra keyword arguments forwarded to the shading artist.

    Returns
    -------
    np.ndarray of matplotlib axes, one per plotted variable.
    """
    data = convert_to_dataset(data, group='posterior')
    var_names = _var_names(var_names, data)
    plotters = list(xarray_var_iter(data, var_names=var_names, combined=True))
    if kwargs_shade is None:
        kwargs_shade = {}
    figsize, ax_labelsize, _, xt_labelsize, linewidth, _ = _scale_fig_size(
        figsize, textsize, 1, len(plotters))
    ax_labelsize *= 2
    if ax is None:
        fig, ax = plt.subplots(1, len(plotters), figsize=figsize,
                               sharey=sharey, constrained_layout=True)
    else:
        fig = ax.figure
        ax = np.atleast_1d(ax)
    for axind, (var_name, selection, x) in enumerate(plotters):
        val = x.flatten()
        # integer dtype -> categorical histogram; otherwise KDE-based violin
        if val[0].dtype.kind == 'i':
            cat_hist(val, shade, ax[axind], **kwargs_shade)
        else:
            _violinplot(val, shade, bw, ax[axind], **kwargs_shade)
        # order is [q1, q3, median] so per[:2] is the IQR and per[-1] the median
        per = np.percentile(val, [25, 75, 50])
        hpd_intervals = hpd(val, credible_interval)
        if quartiles:
            ax[axind].plot([0, 0], per[:2], lw=linewidth * 3, color='k',
                           solid_capstyle='round')
        ax[axind].plot([0, 0], hpd_intervals, lw=linewidth, color='k',
                       solid_capstyle='round')
        # white dot marks the median
        ax[axind].plot(0, per[-1], 'wo', ms=linewidth * 1.5)
        ax[axind].set_xlabel(make_label(var_name, selection),
                             fontsize=ax_labelsize)
        ax[axind].set_xticks([])
        ax[axind].tick_params(labelsize=xt_labelsize)
        ax[axind].grid(None, axis='x')
    if sharey:
        fig.subplots_adjust(wspace=0)
    else:
        fig.tight_layout()
    return ax
| plot_violin | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(data, var_names=None, quartiles=True, credible_interval=0.94, shade=\n 0.35, bw=4.5, sharey=True, figsize=None, textsize=None, ax=None,\n kwargs_shade=No... |
def x(val, shade, bw, ax, **kwargs_shade):
    """Draw a symmetric (mirrored) KDE violin for *val* on *ax*.

    The density is reflected around x=0 so the filled region reads as a
    classic violin shape.
    """
    kde, lower, upper = _fast_kde(val, bw=bw)
    grid = np.linspace(lower, upper, len(kde))
    mirrored_grid = np.concatenate([grid, grid[::-1]])
    mirrored_kde = np.concatenate([-kde, kde[::-1]])
    ax.fill_betweenx(mirrored_grid, mirrored_kde, alpha=shade, lw=0,
                     **kwargs_shade)
| violinplot | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(val, shade, bw, ax, **kwargs_shade):\n density, low_b, up_b = _fast_kde(val, bw=bw)\n x = np.linspace(low_b, up_b, len(density))\n x = np.concatenate(... |
def x(val, shade, ax, **kwargs_shade):
    """Draw a horizontal, zero-centered histogram for discrete values.

    Used as the categorical counterpart of a violin plot: bars are centered
    on x=0 so they mirror the violin layout.
    """
    bins = get_bins(val)
    # `density=True` replaces `normed=True`, which was removed in NumPy 1.24.
    # Use the edges returned by np.histogram directly instead of re-deriving
    # them with linspace — they are identical for uniform bins and correct
    # for non-uniform ones.
    binned_d, bin_edges = np.histogram(val, bins=bins, density=True)
    centers = 0.5 * (bin_edges + np.roll(bin_edges, 1))[:-1]
    heights = np.diff(bin_edges)
    # shift bars left by half their width so they are centered on x=0
    lefts = -0.5 * binned_d
    ax.barh(centers, binned_d, height=heights, left=lefts, alpha=shade,
            **kwargs_shade)
| cat_hist | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(val, shade, ax, **kwargs_shade):\n bins = get_bins(val)\n binned_d, _ = np.histogram(val, bins=bins, normed=True)\n bin_edges = np.linspace(np.min(val... |
def x(data, var_names=None, coords=None, figsize=None, textsize=None,
      kind='scatter', gridsize='auto', contour=True, fill_last=True,
      joint_kwargs=None, marginal_kwargs=None):
    """Plot the joint distribution of exactly two variables with marginals.

    Parameters
    ----------
    data : obj
        Any object convertible to a posterior dataset.
    var_names : list of str, optional
        Exactly two variables must result after selection.
    coords : dict, optional
        Coordinate subsetting applied before plotting.
    kind : {'scatter', 'kde', 'hexbin'}
        Style of the central joint plot.
    gridsize : int or 'auto'
        Hexbin grid size; 'auto' scales with the sample count.
    contour, fill_last : bool
        Forwarded to ``plot_kde`` when kind == 'kde'.
    joint_kwargs, marginal_kwargs : dict, optional
        Extra keyword arguments for the joint / marginal plots.

    Returns
    -------
    (axjoin, ax_hist_x, ax_hist_y) : the three matplotlib axes.

    Raises
    ------
    ValueError if *kind* is unknown; Exception if not exactly two variables.
    """
    valid_kinds = ['scatter', 'kde', 'hexbin']
    if kind not in valid_kinds:
        # message fix: a space was missing after the period
        raise ValueError('Plot type {} not recognized. Plot type must be in {}'
                         .format(kind, valid_kinds))
    data = convert_to_dataset(data, group='posterior')
    if coords is None:
        coords = {}
    var_names = _var_names(var_names, data)
    plotters = list(xarray_var_iter(get_coords(data, coords),
                                    var_names=var_names, combined=True))
    if len(plotters) != 2:
        # message fix: was "must 2" (missing verb)
        raise Exception(
            'Number of variables to be plotted must be 2 (you supplied {})'
            .format(len(plotters)))
    figsize, ax_labelsize, _, xt_labelsize, linewidth, _ = _scale_fig_size(
        figsize, textsize)
    if joint_kwargs is None:
        joint_kwargs = {}
    if marginal_kwargs is None:
        marginal_kwargs = {}
    marginal_kwargs.setdefault('plot_kwargs', {})
    marginal_kwargs['plot_kwargs']['linewidth'] = linewidth
    # 0x0 subplots: creates the bare figure; axes are placed via GridSpec
    fig, _ = plt.subplots(0, 0, figsize=figsize, constrained_layout=True)
    grid = plt.GridSpec(4, 4, hspace=0.1, wspace=0.1, figure=fig)
    axjoin = fig.add_subplot(grid[1:, :-1])
    ax_hist_x = fig.add_subplot(grid[0, :-1], sharex=axjoin)
    ax_hist_x.tick_params(labelleft=False, labelbottom=False)
    ax_hist_y = fig.add_subplot(grid[1:, -1], sharey=axjoin)
    ax_hist_y.tick_params(labelleft=False, labelbottom=False)
    x_var_name = make_label(plotters[0][0], plotters[0][1])
    y_var_name = make_label(plotters[1][0], plotters[1][1])
    axjoin.set_xlabel(x_var_name, fontsize=ax_labelsize)
    axjoin.set_ylabel(y_var_name, fontsize=ax_labelsize)
    axjoin.tick_params(labelsize=xt_labelsize)
    x = plotters[0][2].flatten()
    y = plotters[1][2].flatten()
    if kind == 'scatter':
        axjoin.scatter(x, y, **joint_kwargs)
    elif kind == 'kde':
        plot_kde(x, y, contour=contour, fill_last=fill_last, ax=axjoin,
                 **joint_kwargs)
    else:
        if gridsize == 'auto':
            gridsize = int(len(x) ** 0.35)
        axjoin.hexbin(x, y, mincnt=1, gridsize=gridsize, **joint_kwargs)
        axjoin.grid(False)
    for val, ax, rotate in ((x, ax_hist_x, False), (y, ax_hist_y, True)):
        plot_dist(val, textsize=xt_labelsize, rotated=rotate, ax=ax,
                  **marginal_kwargs)
    # align marginal axes with the joint axes limits
    ax_hist_x.set_xlim(axjoin.get_xlim())
    ax_hist_y.set_ylim(axjoin.get_ylim())
    return axjoin, ax_hist_x, ax_hist_y
| plot_joint | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(data, var_names=None, coords=None, figsize=None, textsize=None, kind=\n 'scatter', gridsize='auto', contour=True, fill_last=True, joint_kwargs=\n None, m... |
def x(x, y, credible_interval=0.94, color='C1', circular=False, smooth=True,
      smooth_kwargs=None, fill_kwargs=None, plot_kwargs=None, ax=None):
    """Plot the HPD interval of *y* as a shaded band over *x*.

    The band edges are drawn with ``plot`` (alpha 0 by default, so only the
    fill is visible) and the interior with ``fill_between``. When *smooth*
    is True the interval is interpolated onto a 200-point grid and smoothed
    with a Savitzky-Golay filter.
    """
    plot_kwargs = {} if plot_kwargs is None else plot_kwargs
    plot_kwargs.setdefault('color', color)
    plot_kwargs.setdefault('alpha', 0)
    fill_kwargs = {} if fill_kwargs is None else fill_kwargs
    fill_kwargs.setdefault('color', color)
    fill_kwargs.setdefault('alpha', 0.5)
    if ax is None:
        ax = gca()
    hpd_ = hpd(y, credible_interval=credible_interval, circular=circular)
    if smooth:
        smooth_kwargs = {} if smooth_kwargs is None else smooth_kwargs
        smooth_kwargs.setdefault('window_length', 55)
        smooth_kwargs.setdefault('polyorder', 2)
        x_data = np.linspace(x.min(), x.max(), 200)
        hpd_interp = griddata(x, hpd_, x_data)
        y_data = savgol_filter(hpd_interp, axis=0, **smooth_kwargs)
    else:
        order = np.argsort(x)
        x_data = x[order]
        y_data = hpd_[order]
    ax.plot(x_data, y_data, **plot_kwargs)
    ax.fill_between(x_data, y_data[:, 0], y_data[:, 1], **fill_kwargs)
    return ax
| plot_hpd | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(x, y, credible_interval=0.94, color='C1', circular=False, smooth=True,\n smooth_kwargs=None, fill_kwargs=None, plot_kwargs=None, ax=None):\n if plot_kwar... |
def x(data, *, var_names=None):
    """Compute the effective sample size for each posterior variable.

    A raw 2-D ndarray of draws is handled directly; anything else is
    converted to a posterior dataset and reduced over (chain, draw).
    """
    if isinstance(data, np.ndarray):
        return _get_ess(data)
    dataset = convert_to_dataset(data, group='posterior')
    var_names = _var_names(var_names, dataset)
    if var_names is not None:
        dataset = dataset[var_names]
    return xr.apply_ufunc(_ess_ufunc, dataset,
                          input_core_dims=(('chain', 'draw'),))
| effective_sample_size | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(data, *, var_names=None):\n if isinstance(data, np.ndarray):\n return _get_ess(data)\n dataset = convert_to_dataset(data, group='posterior')\n ... |
def x(ary):
    """Apply ``_get_ess`` over every (chain, draw) matrix in *ary*.

    The last two axes are consumed; the result has the remaining shape.
    """
    out = np.empty(ary.shape[:-2])
    for pos in np.ndindex(out.shape):
        out[pos] = _get_ess(ary[pos])
    return out
| ess_ufunc | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ary):\n target = np.empty(ary.shape[:-2])\n for idx in np.ndindex(target.shape):\n target[idx] = _get_ess(ary[idx])\n return target\n",
"ro... |
def x(sample_array):
    """Estimate the effective sample size (ESS) of MCMC draws.

    Combines per-chain autocovariances using Geyer's initial positive /
    monotone sequence criteria (the approach used by Stan).

    Parameters
    ----------
    sample_array : np.ndarray
        2-D array of shape (n_chain, n_draws); requires at least 2 chains.

    Returns
    -------
    int
        Estimated effective sample size, or ``np.nan`` if the
        autocorrelation estimates contain NaN.

    Raises
    ------
    TypeError
        If the input is not 2-D or has only one chain.
    """
    shape = sample_array.shape
    if len(shape) != 2:
        raise TypeError(
            'Effective sample size calculation requires 2 dimensional arrays.')
    n_chain, n_draws = shape
    if n_chain <= 1:
        raise TypeError(
            'Effective sample size calculation requires multiple chains.')
    # acov[c, t] is the lag-t autocovariance of chain c
    acov = np.asarray([_autocov(sample_array[chain]) for chain in range(
        n_chain)])
    chain_mean = sample_array.mean(axis=1)
    # rescale lag-0 autocovariance to the unbiased within-chain variance
    chain_var = acov[:, 0] * n_draws / (n_draws - 1.0)
    acov_t = acov[:, 1] * n_draws / (n_draws - 1.0)
    mean_var = np.mean(chain_var)
    # var_plus: marginal posterior variance estimate = within + between chains
    var_plus = mean_var * (n_draws - 1.0) / n_draws
    var_plus += np.var(chain_mean, ddof=1)
    rho_hat_t = np.zeros(n_draws)
    rho_hat_even = 1.0
    rho_hat_t[0] = rho_hat_even
    rho_hat_odd = 1.0 - (mean_var - np.mean(acov_t)) / var_plus
    rho_hat_t[1] = rho_hat_odd
    # Geyer's initial positive sequence: accumulate lag pairs while the sum
    # of consecutive autocorrelations stays non-negative
    max_t = 1
    t = 1
    while t < n_draws - 2 and rho_hat_even + rho_hat_odd >= 0.0:
        rho_hat_even = 1.0 - (mean_var - np.mean(acov[:, t + 1])) / var_plus
        rho_hat_odd = 1.0 - (mean_var - np.mean(acov[:, t + 2])) / var_plus
        if rho_hat_even + rho_hat_odd >= 0:
            rho_hat_t[t + 1] = rho_hat_even
            rho_hat_t[t + 2] = rho_hat_odd
        max_t = t + 2
        t += 2
    # Geyer's initial monotone sequence: pair sums must be non-increasing
    t = 3
    while t <= max_t - 2:
        if rho_hat_t[t + 1] + rho_hat_t[t + 2] > rho_hat_t[t - 1] + rho_hat_t[t]:
            rho_hat_t[t + 1] = (rho_hat_t[t - 1] + rho_hat_t[t]) / 2.0
            rho_hat_t[t + 2] = rho_hat_t[t + 1]
        t += 2
    # ESS = m * n / (1 + 2 * sum of autocorrelations)
    ess = int(n_chain * n_draws / (-1.0 + 2.0 * np.sum(rho_hat_t))
              ) if not np.any(np.isnan(rho_hat_t)) else np.nan
    return ess
| get_ess | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(sample_array):\n shape = sample_array.shape\n if len(shape) != 2:\n raise TypeError(\n 'Effective sample size calculation requires 2 di... |
def x(data, var_names=None):
    """Compute the split R-hat statistic for each posterior variable.

    A raw 2-D ndarray is handled directly; anything else is converted to a
    posterior dataset and reduced over (chain, draw).
    """
    if isinstance(data, np.ndarray):
        return _get_split_rhat(data)
    dataset = convert_to_dataset(data, group='posterior')
    var_names = _var_names(var_names, dataset)
    if var_names is not None:
        dataset = dataset[var_names]
    return xr.apply_ufunc(_rhat_ufunc, dataset,
                          input_core_dims=(('chain', 'draw'),))
| rhat | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(data, var_names=None):\n if isinstance(data, np.ndarray):\n return _get_split_rhat(data)\n dataset = convert_to_dataset(data, group='posterior')\n... |
def x(ary):
    """Apply ``_get_split_rhat`` over every (chain, draw) matrix in *ary*.

    The last two axes are consumed; the result has the remaining shape.
    """
    out = np.empty(ary.shape[:-2])
    for pos in np.ndindex(out.shape):
        out[pos] = _get_split_rhat(ary[pos])
    return out
| rhat_ufunc | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ary):\n target = np.empty(ary.shape[:-2])\n for idx in np.ndindex(target.shape):\n target[idx] = _get_split_rhat(ary[idx])\n return target\n",
... |
def x(values, round_to=2):
    """Compute the split R-hat (potential scale reduction) statistic.

    Each chain is split in half; the between- and within-half variances are
    compared. Values near 1 indicate convergence.

    Parameters
    ----------
    values : np.ndarray
        2-D array of shape (n_chain, n_draws).
    round_to : int
        Decimal places of the returned statistic.

    Raises
    ------
    TypeError
        If *values* is not 2-dimensional.
    """
    shape = values.shape
    if len(shape) != 2:
        # message fix: previously said "Effective sample size calculation",
        # copy-pasted from the ESS function
        raise TypeError(
            'Split R-hat calculation requires 2 dimensional arrays.')
    _, num_samples = shape
    num_split = num_samples // 2
    # split every chain in half and stack the halves as separate "chains"
    split_chain_mean1 = np.mean(values[:, :num_split], axis=1)
    split_chain_mean2 = np.mean(values[:, num_split:], axis=1)
    split_chain_mean = np.concatenate((split_chain_mean1, split_chain_mean2))
    split_chain_var1 = np.var(values[:, :num_split], axis=1, ddof=1)
    split_chain_var2 = np.var(values[:, num_split:], axis=1, ddof=1)
    split_chain_var = np.concatenate((split_chain_var1, split_chain_var2))
    between_chain_variance = num_samples / 2 * np.var(split_chain_mean, ddof=1)
    within_chain_variance = np.mean(split_chain_var)
    split_rhat = np.sqrt((between_chain_variance / within_chain_variance +
                          num_samples / 2 - 1) / (num_samples / 2))
    return round(split_rhat, round_to)
| get_split_rhat | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(values, round_to=2):\n shape = values.shape\n if len(shape) != 2:\n raise TypeError(\n 'Effective sample size calculation requires 2 di... |
def x(values, first=0.1, last=0.5, intervals=20):
    """Compute Geweke convergence z-scores for a 1-D chain of samples.

    For a sequence of start points, compares the mean of an early slice
    (fraction *first* of the remaining chain) against the mean of the final
    *last* fraction, normalized by the pooled standard deviation.

    Returns an array of shape (intervals, 2) with columns (start, z-score).
    """
    for fraction in (first, last):
        if not 0 < fraction < 1:
            raise ValueError(
                'Invalid intervals for Geweke convergence analysis',
                (first, last))
    if first + last >= 1:
        raise ValueError('Invalid intervals for Geweke convergence analysis',
                         (first, last))
    end = len(values) - 1
    # the last start point leaves exactly the final `last` fraction untouched
    starts = np.linspace(0, (1 - last) * end, num=intervals, endpoint=True,
                         dtype=int)
    zscores = []
    for start in starts:
        head = values[start:start + int(first * (end - start))]
        tail = values[int(end - last * (end - start)):]
        score = (head.mean() - tail.mean()) / np.sqrt(head.var() + tail.var())
        zscores.append([start, score])
    return np.array(zscores)
| geweke | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(values, first=0.1, last=0.5, intervals=20):\n for interval in (first, last):\n if interval <= 0 or interval >= 1:\n raise ValueError(\n ... |
def x(pareto_tail_indices):
    """Summarize Pareto-k diagnostic values into a quality table.

    Bins the tail indices into the standard (good / ok / bad / very bad)
    ranges and warns when all estimates fall in the acceptable ranges.

    Parameters
    ----------
    pareto_tail_indices : array-like
        Pareto k estimates (e.g. from PSIS-LOO).

    Returns
    -------
    pd.DataFrame with counts and percentages per quality bin.
    """
    # np.inf: the capitalized np.Inf alias was removed in NumPy 2.0
    kcounts, _ = np.histogram(pareto_tail_indices,
                              bins=[-np.inf, 0.5, 0.7, 1, np.inf])
    kprop = kcounts / len(pareto_tail_indices) * 100
    df_k = pd.DataFrame(dict(_=['(good)', '(ok)', '(bad)', '(very bad)'],
                             Count=kcounts, Pct=kprop)).rename(
        index={(0): '(-Inf, 0.5]', (1): ' (0.5, 0.7]', (2): ' (0.7, 1]',
               (3): ' (1, Inf)'})
    if np.sum(kcounts[1:]) == 0:
        warnings.warn('All Pareto k estimates are good (k < 0.5)')
    elif np.sum(kcounts[2:]) == 0:
        warnings.warn('All Pareto k estimates are ok (k < 0.7)')
    return df_k
| ks_summary | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(pareto_tail_indices):\n kcounts, _ = np.histogram(pareto_tail_indices, bins=[-np.Inf, 0.5, 0.7,\n 1, np.Inf])\n kprop = kcounts / len(pareto_tail_... |
def x(data, var_names=None, coords=None, divergences='bottom', figsize=None,
      textsize=None, lines=None, combined=False, plot_kwargs=None,
      fill_kwargs=None, rug_kwargs=None, hist_kwargs=None, trace_kwargs=None):
    """Plot distribution (left column) and sampling trace (right column)
    for each posterior variable.

    Parameters
    ----------
    data : obj
        Any object convertible to a posterior dataset.
    var_names : list of str, optional
        Variables to plot; defaults to all.
    coords : dict, optional
        Coordinate subsetting applied before plotting.
    divergences : {'bottom', 'top', False}
        Where to draw divergence tick marks; disabled automatically when
        the input has no sample_stats.diverging data.
    lines : iterable of (var_name, selection, values)
        Vertical/horizontal reference lines per variable.
    combined : bool
        Flatten all chains into a single series per variable.
    plot_kwargs, fill_kwargs, rug_kwargs, hist_kwargs, trace_kwargs : dict
        Forwarded to the distribution / trace artists.

    Returns
    -------
    2-D np.ndarray of matplotlib axes (one row per variable).
    """
    if divergences:
        try:
            divergence_data = convert_to_dataset(data, group='sample_stats'
                ).diverging
        except (ValueError, AttributeError):
            # no sample_stats (or no diverging field): silently disable
            divergences = False
    data = convert_to_dataset(data, group='posterior')
    var_names = _var_names(var_names, data)
    if coords is None:
        coords = {}
    if lines is None:
        lines = ()
    plotters = list(xarray_var_iter(get_coords(data, coords),
        var_names=var_names, combined=True))
    if figsize is None:
        figsize = 12, len(plotters) * 2
    if trace_kwargs is None:
        trace_kwargs = {}
    trace_kwargs.setdefault('alpha', 0.35)
    if hist_kwargs is None:
        hist_kwargs = {}
    if plot_kwargs is None:
        plot_kwargs = {}
    if fill_kwargs is None:
        fill_kwargs = {}
    if rug_kwargs is None:
        rug_kwargs = {}
    hist_kwargs.setdefault('alpha', 0.35)
    figsize, _, titlesize, xt_labelsize, linewidth, _ = _scale_fig_size(
        figsize, textsize, rows=len(plotters), cols=2)
    trace_kwargs.setdefault('linewidth', linewidth)
    plot_kwargs.setdefault('linewidth', linewidth)
    _, axes = plt.subplots(len(plotters), 2, squeeze=False, figsize=figsize,
        constrained_layout=True)
    colors = {}
    for idx, (var_name, selection, value) in enumerate(plotters):
        colors[idx] = []
        if combined:
            value = value.flatten()
        value = np.atleast_2d(value)
        for row in value:
            # right column: trace; remember its color so the matching
            # distribution on the left uses the same one
            axes[idx, 1].plot(np.arange(len(row)), row, **trace_kwargs)
            colors[idx].append(axes[idx, 1].get_lines()[-1].get_color())
            plot_kwargs['color'] = colors[idx][-1]
            plot_dist(row, textsize=xt_labelsize, ax=axes[idx, 0],
                hist_kwargs=hist_kwargs, plot_kwargs=plot_kwargs,
                fill_kwargs=fill_kwargs, rug_kwargs=rug_kwargs)
        if value[0].dtype.kind == 'i':
            # discrete variable: put ticks on the bin positions
            xticks = get_bins(value)
            axes[idx, 0].set_xticks(xticks[:-1])
        axes[idx, 0].set_yticks([])
        for col in (0, 1):
            axes[idx, col].set_title(make_label(var_name, selection),
                fontsize=titlesize, wrap=True)
            axes[idx, col].tick_params(labelsize=xt_labelsize)
        xlims = [ax.get_xlim() for ax in axes[idx, :]]
        ylims = [ax.get_ylim() for ax in axes[idx, :]]
        if divergences:
            div_selection = {k: v for k, v in selection.items() if k in
                divergence_data.dims}
            divs = divergence_data.sel(**div_selection).values
            if combined:
                divs = divs.flatten()
            divs = np.atleast_2d(divs)
            for chain, chain_divs in enumerate(divs):
                div_idxs = np.arange(len(chain_divs))[chain_divs]
                if div_idxs.size > 0:
                    if divergences == 'top':
                        ylocs = [ylim[1] for ylim in ylims]
                    else:
                        ylocs = [ylim[0] for ylim in ylims]
                    values = value[chain, div_idxs]
                    # NOTE(review): the trace markers use hist_kwargs['alpha']
                    # and the distribution markers use trace_kwargs['alpha'];
                    # these look swapped, but both default to 0.35 — confirm
                    axes[idx, 1].plot(div_idxs, np.zeros_like(div_idxs) +
                        ylocs[1], marker='|', color='black',
                        markeredgewidth=1.5, markersize=30, linestyle=
                        'None', alpha=hist_kwargs['alpha'], zorder=-5)
                    axes[idx, 1].set_ylim(*ylims[1])
                    axes[idx, 0].plot(values, np.zeros_like(values) + ylocs
                        [0], marker='|', color='black', markeredgewidth=1.5,
                        markersize=30, linestyle='None', alpha=trace_kwargs
                        ['alpha'], zorder=-5)
                    axes[idx, 0].set_ylim(*ylims[0])
        # user-supplied reference lines for this exact variable/selection
        for _, _, vlines in (j for j in lines if j[0] == var_name and j[1] ==
            selection):
            if isinstance(vlines, (float, int)):
                line_values = [vlines]
            else:
                line_values = np.atleast_1d(vlines).ravel()
            axes[idx, 0].vlines(line_values, *ylims[0], colors=colors[idx][
                0], linewidth=1.5, alpha=0.75)
            axes[idx, 1].hlines(line_values, *xlims[1], colors=colors[idx][
                0], linewidth=1.5, alpha=trace_kwargs['alpha'])
        axes[idx, 0].set_ylim(bottom=0, top=ylims[0][1])
        axes[idx, 1].set_xlim(left=0, right=data.draw.max())
        axes[idx, 1].set_ylim(*ylims[1])
    return axes
| plot_trace | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(data, var_names=None, coords=None, divergences='bottom', figsize=None,\n textsize=None, lines=None, combined=False, plot_kwargs=None,\n fill_kwargs=None,... |
def x(ax, data, **kwargs):
    """Draw a left-aligned, density-normalized histogram of *data* on *ax*."""
    ax.hist(data, bins=get_bins(data), align='left', density=True, **kwargs)
    return ax
| histplot_op | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ax, data, **kwargs):\n bins = get_bins(data)\n ax.hist(data, bins=bins, align='left', density=True, **kwargs)\n return ax\n",
"role": "user"
},
... |
def x(*args, copy=True, inplace=False):
    """Concatenate InferenceData objects by merging their (disjoint) groups.

    Parameters
    ----------
    *args : InferenceData or a single sequence of InferenceData
    copy : bool
        Deep-copy group datasets into the result.
    inplace : bool
        Merge everything into the first argument and return None.

    Returns
    -------
    InferenceData, or None when ``inplace=True``.

    Raises
    ------
    TypeError
        If any argument is not an InferenceData object.
    NotImplementedError
        If two inputs share a group name.
    """
    if len(args) == 0:
        return InferenceData()
    if len(args) == 1 and isinstance(args[0], Sequence):
        # a single sequence of InferenceData objects was passed
        args = args[0]
    elif len(args) == 1:
        # single object: concatenation is a (possibly copying) no-op
        if isinstance(args[0], InferenceData):
            if inplace:
                return None
            elif copy:
                return deepcopy(args[0])
            else:
                return args[0]
    for i, arg in enumerate(args):
        if not isinstance(arg, InferenceData):
            # message fix: was 'onlybetween' (missing space)
            raise TypeError(
                'Concatenating is supported only between InferenceData objects. Input arg {} is {}'
                .format(i, type(arg)))
    first_arg = args[0]
    first_arg_groups = ccopy(first_arg._groups)
    args_groups = dict()
    # collect groups from every input after the first, rejecting overlaps
    for arg in args[1:]:
        for group in arg._groups:
            if group in args_groups or group in first_arg_groups:
                raise NotImplementedError(
                    'Concatenating with overlapping groups is not supported.')
            group_data = getattr(arg, group)
            args_groups[group] = deepcopy(group_data) if copy else group_data
    if not inplace:
        # building a new object: the first argument's groups go in too
        for group in first_arg_groups:
            group_data = getattr(first_arg, group)
            args_groups[group] = deepcopy(group_data) if copy else group_data
    # canonical group ordering; unknown groups are appended after
    basic_order = ['posterior', 'posterior_predictive', 'sample_stats',
        'prior', 'prior_predictive', 'sample_stats_prior', 'observed_data']
    other_groups = [group for group in args_groups if group not in basic_order]
    if not inplace:
        inference_data_dict = OrderedDict()
    for group in (basic_order + other_groups):
        if group not in args_groups:
            continue
        if inplace:
            first_arg._groups.append(group)
            setattr(first_arg, group, args_groups[group])
        else:
            inference_data_dict[group] = args_groups[group]
    if inplace:
        # re-sort the first argument's group list into canonical order
        other_groups = [group for group in first_arg_groups if group not in
            basic_order] + other_groups
        sorted_groups = [group for group in basic_order + other_groups if
            group in first_arg._groups]
        setattr(first_arg, '_groups', sorted_groups)
        return None
    return InferenceData(**inference_data_dict)
| concat | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(*args, copy=True, inplace=False):\n if len(args) == 0:\n return InferenceData()\n if len(args) == 1 and isinstance(args[0], Sequence):\n ar... |
def x(filename):
    """Load an InferenceData object from a netCDF file.

    Parameters
    ----------
    filename : str
        Path to a netCDF file whose groups are xarray datasets.

    Returns
    -------
    InferenceData
    """
    groups = {}
    with nc.Dataset(filename, mode='r') as data:
        data_groups = list(data.groups)
        for group in data_groups:
            with xr.open_dataset(filename, group=group) as data:
                # load() pulls all values into memory; without it the lazy
                # variables would reference a file handle that the context
                # manager closes on exit
                groups[group] = data.load()
    return InferenceData(**groups)
| from_netcdf | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(filename):\n groups = {}\n with nc.Dataset(filename, mode='r') as data:\n data_groups = list(data.groups)\n for group in data_groups:\n ... |
def x(self, filename, compress=True):
    """Write this InferenceData to *filename* as a netCDF file.

    Each group becomes a netCDF group; with *compress* True every variable
    is zlib-compressed. An object with no groups still produces a valid
    (empty) NETCDF4 file. Returns *filename*.
    """
    if not self._groups:
        # nothing to write: still create a valid empty file
        empty_netcdf_file = nc.Dataset(filename, mode='w', format='NETCDF4')
        empty_netcdf_file.close()
        return filename
    mode = 'w'  # first group creates the file, the rest append
    for group in self._groups:
        dataset = getattr(self, group)
        write_kwargs = {}
        if compress:
            write_kwargs['encoding'] = {
                var_name: {'zlib': True} for var_name in dataset.variables}
        dataset.to_netcdf(filename, mode=mode, group=group, **write_kwargs)
        dataset.close()
        mode = 'a'
    return filename
| to_netcdf | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, filename, compress=True):\n mode = 'w'\n if self._groups:\n for group in self._groups:\n data = getattr(self, group)\n ... |
def x(posterior=None, *, posterior_predictive=None, sample_stats=None,
      prior=None, prior_predictive=None, sample_stats_prior=None,
      observed_data=None, coords=None, dims=None):
    """Build an InferenceData object from plain dictionaries of samples.

    Thin convenience wrapper: all arguments are forwarded to DictConverter
    and the converted object is returned.
    """
    converter = DictConverter(
        posterior=posterior,
        posterior_predictive=posterior_predictive,
        sample_stats=sample_stats,
        prior=prior,
        prior_predictive=prior_predictive,
        sample_stats_prior=sample_stats_prior,
        observed_data=observed_data,
        coords=coords,
        dims=dims,
    )
    return converter.to_inference_data()
| from_dict | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(posterior=None, *, posterior_predictive=None, sample_stats=None,\n prior=None, prior_predictive=None, sample_stats_prior=None,\n observed_data=None, coor... |
def x(self):
    """Convert the posterior sample dictionary to an xarray Dataset.

    Raises TypeError when the stored posterior is not a dict. Warns when a
    'log_likelihood' key is present, since stats functions expect it in
    sample_stats instead.
    """
    data = self.posterior
    if not isinstance(data, dict):
        raise TypeError('DictConverter.posterior is not a dictionary')
    if 'log_likelihood' in data:
        # category fix: this is a data-layout warning, not a Python syntax
        # issue, so UserWarning is the appropriate category (was SyntaxWarning)
        warnings.warn(
            'log_likelihood found in posterior. For stats functions log_likelihood needs to be in sample_stats.'
            , UserWarning)
    return dict_to_dataset(data, library=None, coords=self.coords, dims=
        self.dims)
| posterior_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n data = self.posterior\n if not isinstance(data, dict):\n raise TypeError('DictConverter.posterior is not a dictionary')\n if 'log_like... |
def x(self):
    """Convert the sample_stats dictionary to an xarray Dataset."""
    stats = self.sample_stats
    if not isinstance(stats, dict):
        raise TypeError('DictConverter.sample_stats is not a dictionary')
    return dict_to_dataset(stats, library=None, coords=self.coords,
                           dims=self.dims)
| sample_stats_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n data = self.sample_stats\n if not isinstance(data, dict):\n raise TypeError('DictConverter.sample_stats is not a dictionary')\n return... |
def x(self):
    """Convert the posterior_predictive dictionary to an xarray Dataset."""
    samples = self.posterior_predictive
    if not isinstance(samples, dict):
        raise TypeError(
            'DictConverter.posterior_predictive is not a dictionary')
    return dict_to_dataset(samples, library=None, coords=self.coords,
                           dims=self.dims)
| posterior_predictive_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n data = self.posterior_predictive\n if not isinstance(data, dict):\n raise TypeError(\n 'DictConverter.posterior_predictive is ... |
def x(self):
    """Convert the prior sample dictionary to an xarray Dataset."""
    samples = self.prior
    if not isinstance(samples, dict):
        raise TypeError('DictConverter.prior is not a dictionary')
    return dict_to_dataset(samples, library=None, coords=self.coords,
                           dims=self.dims)
| prior_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n data = self.prior\n if not isinstance(data, dict):\n raise TypeError('DictConverter.prior is not a dictionary')\n return dict_to_datas... |
def x(self):
    """Convert the sample_stats_prior dictionary to an xarray Dataset."""
    stats = self.sample_stats_prior
    if not isinstance(stats, dict):
        raise TypeError('DictConverter.sample_stats_prior is not a dictionary')
    return dict_to_dataset(stats, library=None, coords=self.coords,
                           dims=self.dims)
| sample_stats_prior_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n data = self.sample_stats_prior\n if not isinstance(data, dict):\n raise TypeError('DictConverter.sample_stats_prior is not a dictionary')... |
def x(self):
    """Convert the prior_predictive dictionary to an xarray Dataset."""
    samples = self.prior_predictive
    if not isinstance(samples, dict):
        raise TypeError('DictConverter.prior_predictive is not a dictionary')
    return dict_to_dataset(samples, library=None, coords=self.coords,
                           dims=self.dims)
| prior_predictive_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n data = self.prior_predictive\n if not isinstance(data, dict):\n raise TypeError('DictConverter.prior_predictive is not a dictionary')\n ... |
def x(self):
    """Convert the observed_data dictionary to an xarray Dataset.

    Each value is promoted to at least 1-D and wrapped in a DataArray whose
    dims/coords come from ``generate_dims_coords``.
    """
    data = self.observed_data
    if not isinstance(data, dict):
        raise TypeError('DictConverter.observed_data is not a dictionary')
    dims = {} if self.dims is None else self.dims
    data_vars = dict()
    for name, values in data.items():
        values = np.atleast_1d(values)
        val_dims, coords = generate_dims_coords(
            values.shape, name, dims=dims.get(name), coords=self.coords)
        data_vars[name] = xr.DataArray(values, dims=val_dims, coords=coords)
    return xr.Dataset(data_vars=data_vars, attrs=make_attrs(library=None))
| observed_data_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n data = self.observed_data\n if not isinstance(data, dict):\n raise TypeError('DictConverter.observed_data is not a dictionary')\n if s... |
def x(comp_df, insample_dev=True, plot_standard_error=True, plot_ic_diff=True,
      figsize=None, textsize=None, plot_kwargs=None, ax=None):
    """Summary plot for model comparison (WAIC / LOO table from ``compare``).

    Parameters
    ----------
    comp_df : pd.DataFrame
        Comparison table; must contain a 'waic' or 'loo' column.
    insample_dev : bool
        Also plot the in-sample deviance (IC - 2 * effective parameters).
    plot_standard_error : bool
        Draw error bars for the IC standard error.
    plot_ic_diff : bool
        Draw the IC differences (dse) relative to the best model.
    plot_kwargs : dict, optional
        Per-artist color/marker overrides (keys like 'color_ic', 'marker_dse').
    ax : matplotlib axes, optional

    Returns
    -------
    matplotlib axes.

    Raises
    ------
    ValueError if neither 'waic' nor 'loo' is present in comp_df.
    """
    if figsize is None:
        figsize = 6, len(comp_df)
    figsize, ax_labelsize, _, xt_labelsize, linewidth, _ = _scale_fig_size(
        figsize, textsize, 1, 1)
    if ax is None:
        _, ax = plt.subplots(figsize=figsize, constrained_layout=True)
    if plot_kwargs is None:
        plot_kwargs = {}
    # even positions hold the models, odd positions the IC differences
    yticks_pos, step = np.linspace(0, -1, comp_df.shape[0] * 2 - 1,
                                   retstep=True)
    yticks_pos[1::2] = yticks_pos[1::2] + step / 2
    yticks_labels = [''] * len(yticks_pos)
    _information_criterion = ['waic', 'loo']
    for information_criterion in _information_criterion:
        if information_criterion in comp_df.columns:
            break
    else:
        raise ValueError(
            'comp_df must contain one of the following information criterion: {}'
            .format(_information_criterion))
    if plot_ic_diff:
        yticks_labels[0] = comp_df.index[0]
        yticks_labels[2::2] = comp_df.index[1:]
        ax.set_yticks(yticks_pos)
        # difference (dse) error bars for all models except the best one
        ax.errorbar(x=comp_df[information_criterion].iloc[1:], y=yticks_pos
            [1::2], xerr=comp_df.dse[1:], color=plot_kwargs.get('color_dse',
            'grey'), fmt=plot_kwargs.get('marker_dse', '^'), mew=linewidth,
            elinewidth=linewidth)
    else:
        yticks_labels = comp_df.index
        ax.set_yticks(yticks_pos[::2])
    if plot_standard_error:
        ax.errorbar(x=comp_df[information_criterion], y=yticks_pos[::2],
            xerr=comp_df.se, color=plot_kwargs.get('color_ic', 'k'), fmt=
            plot_kwargs.get('marker_ic', 'o'), mfc='None', mew=linewidth,
            lw=linewidth)
    else:
        ax.plot(comp_df[information_criterion], yticks_pos[::2], color=
            plot_kwargs.get('color_ic', 'k'), marker=plot_kwargs.get(
            'marker_ic', 'o'), mfc='None', mew=linewidth, lw=0)
    if insample_dev:
        # IC minus twice the effective number of parameters
        ax.plot(comp_df[information_criterion] - 2 * comp_df['p_' +
            information_criterion], yticks_pos[::2], color=plot_kwargs.get(
            'color_insample_dev', 'k'), marker=plot_kwargs.get(
            'marker_insample_dev', 'o'), mew=linewidth, lw=0)
    # vertical reference line at the best (first) model's IC value
    ax.axvline(comp_df[information_criterion].iloc[0], ls=plot_kwargs.get(
        'ls_min_ic', '--'), color=plot_kwargs.get('color_ls_min_ic', 'grey'
        ), lw=linewidth)
    scale_col = information_criterion + '_scale'
    if scale_col in comp_df:
        scale = comp_df[scale_col].iloc[0].capitalize()
    else:
        scale = 'Deviance'
    ax.set_xlabel(scale, fontsize=ax_labelsize)
    ax.set_yticklabels(yticks_labels)
    ax.set_ylim(-1 + step, 0 - step)
    ax.tick_params(labelsize=xt_labelsize)
    return ax
| plot_compare | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(comp_df, insample_dev=True, plot_standard_error=True, plot_ic_diff=\n True, figsize=None, textsize=None, plot_kwargs=None, ax=None):\n if figsize is None... |
def x(data, var_names=None, coords=None, figsize=None, textsize=None,
      legend=True, colornd='k', colord='C1', shadend=0.025, ax=None,
      norm_method=None):
    """Parallel-coordinates plot of posterior draws, highlighting divergences.

    Parameters
    ----------
    data : obj
        Any object convertible to posterior + sample_stats datasets.
    var_names : list of str, optional
        At least two variables must remain after selection.
    coords : dict, optional
        Coordinate subsetting applied before plotting.
    colornd, colord : matplotlib colors
        Colors for non-divergent and divergent draws respectively.
    shadend : float
        Alpha for the (many) non-divergent lines.
    norm_method : {None, 'normal', 'minmax', 'rank'}
        Optional per-variable normalization of the posterior values.

    Returns
    -------
    matplotlib axes.

    Raises
    ------
    ValueError if fewer than two variables or an unknown norm_method.
    """
    if coords is None:
        coords = {}
    # divergence mask comes from sample_stats.diverging
    divergent_data = convert_to_dataset(data, group='sample_stats')
    _, diverging_mask = xarray_to_ndarray(divergent_data, var_names=(
        'diverging',), combined=True)
    diverging_mask = np.squeeze(diverging_mask)
    posterior_data = convert_to_dataset(data, group='posterior')
    var_names = _var_names(var_names, posterior_data)
    var_names, _posterior = xarray_to_ndarray(get_coords(posterior_data,
        coords), var_names=var_names, combined=True)
    if len(var_names) < 2:
        raise ValueError('This plot needs at least two variables')
    if norm_method is not None:
        if norm_method == 'normal':
            # standardize each variable to zero mean, unit variance
            mean = np.mean(_posterior, axis=1)
            standard_deviation = np.std(_posterior, axis=1)
            for i in range(0, np.shape(mean)[0]):
                _posterior[i, :] = (_posterior[i, :] - mean[i]
                    ) / standard_deviation[i]
        elif norm_method == 'minmax':
            # rescale each variable to [0, 1]
            min_elem = np.min(_posterior, axis=1)
            max_elem = np.max(_posterior, axis=1)
            for i in range(0, np.shape(min_elem)[0]):
                _posterior[i, :] = (_posterior[i, :] - min_elem[i]) / (max_elem
                    [i] - min_elem[i])
        elif norm_method == 'rank':
            _posterior = rankdata(_posterior, axis=1)
        else:
            raise ValueError('{} is not supported. Use normal, minmax or rank.'
                .format(norm_method))
    figsize, _, _, xt_labelsize, _, _ = _scale_fig_size(figsize, textsize, 1, 1
        )
    if ax is None:
        _, ax = plt.subplots(figsize=figsize, constrained_layout=True)
    # non-divergent draws as a faint bundle, divergent ones on top
    ax.plot(_posterior[:, ~diverging_mask], color=colornd, alpha=shadend)
    if np.any(diverging_mask):
        ax.plot(_posterior[:, diverging_mask], color=colord, lw=1)
    ax.tick_params(labelsize=textsize)
    ax.set_xticks(range(len(var_names)))
    ax.set_xticklabels(var_names)
    if legend:
        # empty plots create legend handles without drawing anything
        ax.plot([], color=colornd, label='non-divergent')
        if np.any(diverging_mask):
            ax.plot([], color=colord, label='divergent')
        ax.legend(fontsize=xt_labelsize)
    return ax
| plot_parallel | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(data, var_names=None, coords=None, figsize=None, textsize=None,\n legend=True, colornd='k', colord='C1', shadend=0.025, ax=None,\n norm_method=None):\n ... |
def x(comments):
    """Parse Stan CSV header comments into a sampler configuration dict.

    Parameters
    ----------
    comments : iterable of str
        Comment lines (with or without the leading '#') such as
        ``# num_samples = 1000 (Default)``.

    Returns
    -------
    dict with keys 'num_samples', 'num_warmup', 'save_warmup', 'thin';
    each value is None when the corresponding comment is absent.
    """
    num_samples = None
    num_warmup = None
    save_warmup = None
    # bug fix: 'thin' was previously left uninitialized, raising
    # UnboundLocalError when no 'thin' comment was present
    thin = None
    for comment in comments:
        comment = comment.strip('#').strip()
        # NOTE: str.strip with a multi-char argument strips a *character
        # set*, not a prefix; it works here because the values are numeric
        if comment.startswith('num_samples'):
            num_samples = int(comment.strip('num_samples = ').strip(
                '(Default)'))
        elif comment.startswith('num_warmup'):
            num_warmup = int(comment.strip('num_warmup = ').strip('(Default)'))
        elif comment.startswith('save_warmup'):
            save_warmup = bool(int(comment.strip('save_warmup = ').strip(
                '(Default)')))
        elif comment.startswith('thin'):
            thin = int(comment.strip('thin = ').strip('(Default)'))
    return {'num_samples': num_samples, 'num_warmup': num_warmup,
        'save_warmup': save_warmup, 'thin': thin}
| process_configuration | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(comments):\n num_samples = None\n num_warmup = None\n save_warmup = None\n for comment in comments:\n comment = comment.strip('#').strip()\n... |
def x(path):
chains = []
configuration_info = []
adaptation_info = []
timing_info = []
i = 0
with open(path, 'r') as f_obj:
column_names = False
for i, line in enumerate(f_obj):
line = line.strip()
if line.startswith('#'):
if column_names:
adaptation_info.append(line.strip())
else:
configuration_info.append(line.strip())
elif not column_names:
column_names = True
pconf = _process_configuration(configuration_info)
if pconf['save_warmup']:
warmup_range = range(pconf['num_warmup'] // pconf['thin'])
for _, _ in zip(warmup_range, f_obj):
continue
else:
break
with open(path, 'r') as f_obj:
df = pd.read_csv(f_obj, comment='#')
if df.iloc[:, 0].dtype.kind == 'O':
first_col = df.columns[0]
col_locations = first_col == df.loc[:, first_col]
col_locations = list(col_locations.loc[col_locations].index)
dfs = []
for idx, last_idx in zip(col_locations, [-1] + list(col_locations[:-1])
):
df_ = deepcopy(df.loc[last_idx + 1:idx - 1, :])
for col in df_.columns:
df_.loc[:, col] = pd.to_numeric(df_.loc[:, col])
if len(df_):
dfs.append(df_.reset_index(drop=True))
df = df.loc[idx + 1:, :]
for col in df.columns:
df.loc[:, col] = pd.to_numeric(df.loc[:, col])
dfs.append(df)
else:
dfs = [df]
for j, df in enumerate(dfs):
if j == 0:
line_num = i + df.shape[0] + 1
for k in range(5):
line = linecache.getline(path, line_num + k).strip()
if len(line):
timing_info.append(line)
configuration_info_len = len(configuration_info)
adaptation_info_len = len(adaptation_info)
timing_info_len = len(timing_info)
num_of_samples = df.shape[0]
header_count = 1
last_line_num = (configuration_info_len + adaptation_info_len +
timing_info_len + num_of_samples + header_count)
else:
configuration_info = []
adaptation_info = []
timing_info = []
line_num = last_line_num + 1
config_start = line_num
config_end = config_start + configuration_info_len
for reading_line in range(config_start, config_end):
line = linecache.getline(path, reading_line)
if line.startswith('#'):
configuration_info.append(line)
else:
msg = (
'Invalid input file. Header information missing from combined csv. Configuration: {}'
.format(path))
raise ValueError(msg)
pconf = _process_configuration(configuration_info)
warmup_rows = pconf['save_warmup'] * pconf['num_warmup'] // pconf[
'thin']
adaption_start = config_end + 1 + warmup_rows
adaption_end = adaption_start + adaptation_info_len
for reading_line in range(adaption_start, adaption_end):
line = linecache.getline(path, reading_line)
if line.startswith('#'):
adaptation_info.append(line)
else:
msg = (
'Invalid input file. Header information missing from combined csv. Adaptation: {}'
.format(path))
raise ValueError(msg)
timing_start = adaption_end + len(df) - warmup_rows
timing_end = timing_start + timing_info_len
raise_timing_error = False
for reading_line in range(timing_start, timing_end):
line = linecache.getline(path, reading_line)
if line.startswith('#'):
timing_info.append(line)
else:
raise_timing_error = True
break
no_elapsed_time = not any('elapsed time' in row.lower() for row in
timing_info)
if raise_timing_error or no_elapsed_time:
msg = (
'Invalid input file. Header information missing from combined csv. Timing: {}'
.format(path))
raise ValueError(msg)
last_line_num = reading_line
if pconf['save_warmup']:
saved_samples = pconf['num_samples'] // pconf['thin']
df = df.iloc[-saved_samples:, :]
sample_stats_columns = [col for col in df.columns if col.endswith('__')
]
sample_columns = [col for col in df.columns if col not in
sample_stats_columns]
sample_stats = df.loc[:, sample_stats_columns]
sample_df = df.loc[:, sample_columns]
chains.append((sample_df, sample_stats, configuration_info,
adaptation_info, timing_info))
return chains
| read_output | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(path):\n chains = []\n configuration_info = []\n adaptation_info = []\n timing_info = []\n i = 0\n with open(path, 'r') as f_obj:\n co... |
def x(string):
key, var = string.split('<-')
if 'structure' in var:
var, dim = var.replace('structure(', '').replace(',', '').split('.Dim')
dtype = float
var = var.replace('c(', '').replace(')', '').strip().split()
dim = dim.replace('=', '').replace('c(', '').replace(')', '').strip(
).split()
dim = tuple(map(int, dim))
var = np.fromiter(map(dtype, var), dtype).reshape(dim, order='F')
elif 'c(' in var:
dtype = float
var = var.replace('c(', '').replace(')', '').split(',')
var = np.fromiter(map(dtype, var), dtype)
else:
dtype = float
var = dtype(var)
return key.strip(), var
| process_data_var | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(string):\n key, var = string.split('<-')\n if 'structure' in var:\n var, dim = var.replace('structure(', '').replace(',', '').split('.Dim')\n ... |
def x(path):
data = {}
with open(path, 'r') as f_obj:
var = ''
for line in f_obj:
if '<-' in line:
if len(var):
key, var = _process_data_var(var)
data[key] = var
var = ''
var += ' ' + line.strip()
if len(var):
key, var = _process_data_var(var)
data[key] = var
return data
| read_data | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(path):\n data = {}\n with open(path, 'r') as f_obj:\n var = ''\n for line in f_obj:\n if '<-' in line:\n if len(v... |
def x(dfs):
col_groups = defaultdict(list)
columns = dfs[0].columns
for col in columns:
key, *loc = col.split('.')
loc = tuple(int(i) - 1 for i in loc)
col_groups[key].append((col, loc))
chains = len(dfs)
draws = len(dfs[0])
sample = {}
for key, cols_locs in col_groups.items():
ndim = np.array([loc for _, loc in cols_locs]).max(0) + 1
sample[key] = np.full((chains, draws, *ndim), np.nan)
for col, loc in cols_locs:
for chain_id, df in enumerate(dfs):
draw = df[col].values
if loc == ():
sample[key][chain_id, :] = draw
else:
axis1_all = range(sample[key].shape[1])
slicer = chain_id, axis1_all, *loc
sample[key][slicer] = draw
return sample
| unpack_dataframes | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(dfs):\n col_groups = defaultdict(list)\n columns = dfs[0].columns\n for col in columns:\n key, *loc = col.split('.')\n loc = tuple(int(i... |
def x(posterior=None, *, posterior_predictive=None, prior=None,
prior_predictive=None, observed_data=None, observed_data_var=None,
log_likelihood=None, coords=None, dims=None):
return CmdStanConverter(posterior=posterior, posterior_predictive=
posterior_predictive, prior=prior, prior_predictive=
prior_predictive, observed_data=observed_data, observed_data_var=
observed_data_var, log_likelihood=log_likelihood, coords=coords,
dims=dims).to_inference_data()
| from_cmdstan | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(posterior=None, *, posterior_predictive=None, prior=None,\n prior_predictive=None, observed_data=None, observed_data_var=None,\n log_likelihood=None, coo... |
def x(self):
paths = self.posterior_
if isinstance(paths, str):
paths = [paths]
chain_data = []
for path in paths:
parsed_output = _read_output(path)
for sample, sample_stats, config, adaptation, timing in parsed_output:
chain_data.append({'sample': sample, 'sample_stats':
sample_stats, 'configuration_info': config,
'adaptation_info': adaptation, 'timing_info': timing})
self.posterior = [item['sample'] for item in chain_data]
self.sample_stats = [item['sample_stats'] for item in chain_data]
| parse_posterior | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n paths = self.posterior_\n if isinstance(paths, str):\n paths = [paths]\n chain_data = []\n for path in paths:\n parsed_outpu... |
def x(self):
paths = self.prior_
if isinstance(paths, str):
paths = [paths]
chain_data = []
for path in paths:
parsed_output = _read_output(path)
for sample, sample_stats, config, adaptation, timing in parsed_output:
chain_data.append({'sample': sample, 'sample_stats':
sample_stats, 'configuration_info': config,
'adaptation_info': adaptation, 'timing_info': timing})
self.prior = [item['sample'] for item in chain_data]
self.sample_stats_prior = [item['sample_stats'] for item in chain_data]
| parse_prior | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n paths = self.prior_\n if isinstance(paths, str):\n paths = [paths]\n chain_data = []\n for path in paths:\n parsed_output = ... |
def x(self):
columns = self.posterior[0].columns
posterior_predictive = self.posterior_predictive
if posterior_predictive is None or isinstance(posterior_predictive, str
) and posterior_predictive.lower().endswith('.csv'):
posterior_predictive = []
elif isinstance(posterior_predictive, str):
posterior_predictive = [col for col in columns if
posterior_predictive == col.split('.')[0]]
else:
posterior_predictive = [col for col in columns if any(item == col.
split('.')[0] for item in posterior_predictive)]
log_likelihood = self.log_likelihood
if log_likelihood is None:
log_likelihood = []
else:
log_likelihood = [col for col in columns if log_likelihood == col.
split('.')[0]]
invalid_cols = posterior_predictive + log_likelihood
valid_cols = [col for col in columns if col not in invalid_cols]
data = _unpack_dataframes([item[valid_cols] for item in self.posterior])
return dict_to_dataset(data, coords=self.coords, dims=self.dims)
| posterior_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n columns = self.posterior[0].columns\n posterior_predictive = self.posterior_predictive\n if posterior_predictive is None or isinstance(poster... |
def x(self):
dtypes = {'divergent__': bool, 'n_leapfrog__': np.int64, 'treedepth__':
np.int64}
dims = deepcopy(self.dims) if self.dims is not None else {}
coords = deepcopy(self.coords) if self.coords is not None else {}
sampler_params = self.sample_stats
log_likelihood = self.log_likelihood
if isinstance(log_likelihood, str):
log_likelihood_cols = [col for col in self.posterior[0].columns if
log_likelihood == col.split('.')[0]]
log_likelihood_vals = [item[log_likelihood_cols] for item in self.
posterior]
for i, _ in enumerate(sampler_params):
for col in log_likelihood_cols:
col_ll = col.replace(log_likelihood, 'log_likelihood')
sampler_params[i][col_ll] = log_likelihood_vals[i][col]
if log_likelihood in dims:
dims['log_likelihood'] = dims.pop(log_likelihood)
log_likelihood_dims = np.array([list(map(int, col.split('.')[1:])) for
col in log_likelihood_cols])
max_dims = log_likelihood_dims.max(0)
max_dims = max_dims if hasattr(max_dims, '__iter__') else (max_dims,)
default_dim_names, _ = generate_dims_coords(shape=max_dims,
var_name=log_likelihood)
log_likelihood_dim_names, _ = generate_dims_coords(shape=max_dims,
var_name='log_likelihood')
for default_dim_name, log_likelihood_dim_name in zip(default_dim_names,
log_likelihood_dim_names):
if default_dim_name in coords:
coords[log_likelihood_dim_name] = coords.pop(default_dim_name)
for j, s_params in enumerate(sampler_params):
rename_dict = {}
for key in s_params:
key_, *end = key.split('.')
name = re.sub('__$', '', key_)
name = 'diverging' if name == 'divergent' else name
rename_dict[key] = '.'.join((name, *end))
sampler_params[j][key] = s_params[key].astype(dtypes.get(key))
sampler_params[j] = sampler_params[j].rename(columns=rename_dict)
data = _unpack_dataframes(sampler_params)
return dict_to_dataset(data, coords=coords, dims=dims)
| sample_stats_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n dtypes = {'divergent__': bool, 'n_leapfrog__': np.int64, 'treedepth__':\n np.int64}\n dims = deepcopy(self.dims) if self.dims is not None... |
def x(self):
posterior_predictive = self.posterior_predictive
columns = self.posterior[0].columns
if isinstance(posterior_predictive, (tuple, list)
) and posterior_predictive[0].endswith('.csv') or isinstance(
posterior_predictive, str) and posterior_predictive.endswith('.csv'):
if isinstance(posterior_predictive, str):
posterior_predictive = [posterior_predictive]
chain_data = []
for path in posterior_predictive:
parsed_output = _read_output(path)
for sample, *_ in parsed_output:
chain_data.append(sample)
data = _unpack_dataframes(chain_data)
else:
if isinstance(posterior_predictive, str):
posterior_predictive = [posterior_predictive]
posterior_predictive_cols = [col for col in columns if any(item ==
col.split('.')[0] for item in posterior_predictive)]
data = _unpack_dataframes([item[posterior_predictive_cols] for item in
self.posterior])
return dict_to_dataset(data, coords=self.coords, dims=self.dims)
| posterior_predictive_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n posterior_predictive = self.posterior_predictive\n columns = self.posterior[0].columns\n if isinstance(posterior_predictive, (tuple, list)\n ... |
def x(self):
prior_predictive = self.prior_predictive
columns = self.prior[0].columns
if prior_predictive is None or isinstance(prior_predictive, str
) and prior_predictive.lower().endswith('.csv'):
prior_predictive = []
elif isinstance(prior_predictive, str):
prior_predictive = [col for col in columns if prior_predictive ==
col.split('.')[0]]
else:
prior_predictive = [col for col in columns if any(item == col.split
('.')[0] for item in prior_predictive)]
invalid_cols = prior_predictive
valid_cols = [col for col in columns if col not in invalid_cols]
data = _unpack_dataframes([item[valid_cols] for item in self.prior])
return dict_to_dataset(data, coords=self.coords, dims=self.dims)
| prior_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n prior_predictive = self.prior_predictive\n columns = self.prior[0].columns\n if prior_predictive is None or isinstance(prior_predictive, str\... |
def x(self):
dtypes = {'divergent__': bool, 'n_leapfrog__': np.int64, 'treedepth__':
np.int64}
dims = deepcopy(self.dims) if self.dims is not None else {}
coords = deepcopy(self.coords) if self.coords is not None else {}
sampler_params = self.sample_stats_prior
for j, s_params in enumerate(sampler_params):
rename_dict = {}
for key in s_params:
key_, *end = key.split('.')
name = re.sub('__$', '', key_)
name = 'diverging' if name == 'divergent' else name
rename_dict[key] = '.'.join((name, *end))
sampler_params[j][key] = s_params[key].astype(dtypes.get(key))
sampler_params[j] = sampler_params[j].rename(columns=rename_dict)
data = _unpack_dataframes(sampler_params)
return dict_to_dataset(data, coords=coords, dims=dims)
| sample_stats_prior_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n dtypes = {'divergent__': bool, 'n_leapfrog__': np.int64, 'treedepth__':\n np.int64}\n dims = deepcopy(self.dims) if self.dims is not None... |
def x(self):
prior_predictive = self.prior_predictive
if isinstance(prior_predictive, (tuple, list)) and prior_predictive[0
].endswith('.csv') or isinstance(prior_predictive, str
) and prior_predictive.endswith('.csv'):
if isinstance(prior_predictive, str):
prior_predictive = [prior_predictive]
chain_data = []
for path in prior_predictive:
parsed_output = _read_output(path)
for sample, *_ in parsed_output:
chain_data.append(sample)
data = _unpack_dataframes(chain_data)
else:
if isinstance(prior_predictive, str):
prior_predictive = [prior_predictive]
prior_predictive_cols = [col for col in self.prior[0].columns if
any(item == col.split('.')[0] for item in prior_predictive)]
data = _unpack_dataframes([item[prior_predictive_cols] for item in
self.prior])
return dict_to_dataset(data, coords=self.coords, dims=self.dims)
| prior_predictive_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n prior_predictive = self.prior_predictive\n if isinstance(prior_predictive, (tuple, list)) and prior_predictive[0\n ].endswith('.csv') or ... |
def x(self):
observed_data_raw = _read_data(self.observed_data)
variables = self.observed_data_var
if isinstance(variables, str):
variables = [variables]
observed_data = {}
for key, vals in observed_data_raw.items():
if variables is not None and key not in variables:
continue
vals = np.atleast_1d(vals)
val_dims = self.dims.get(key)
val_dims, coords = generate_dims_coords(vals.shape, key, dims=
val_dims, coords=self.coords)
observed_data[key] = xr.DataArray(vals, dims=val_dims, coords=coords)
return xr.Dataset(data_vars=observed_data)
| observed_data_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n observed_data_raw = _read_data(self.observed_data)\n variables = self.observed_data_var\n if isinstance(variables, str):\n variables =... |
def x(fit, variables=None, ignore=None):
if ignore is None:
ignore = []
if fit.mode == 1:
msg = "Model in mode 'test_grad'. Sampling is not conducted."
raise AttributeError(msg)
if fit.mode == 2 or fit.sim.get('samples') is None:
msg = "Fit doesn't contain samples."
raise AttributeError(msg)
dtypes = infer_dtypes(fit)
if variables is None:
variables = fit.sim['pars_oi']
elif isinstance(variables, str):
variables = [variables]
variables = list(variables)
for var, dim in zip(fit.sim['pars_oi'], fit.sim['dims_oi']):
if var in variables and np.prod(dim) == 0:
del variables[variables.index(var)]
ndraws = [(s - w) for s, w in zip(fit.sim['n_save'], fit.sim['warmup2'])]
nchain = len(fit.sim['samples'])
shift = 1
if any(fit.sim['dims_oi']):
par_idx = min((dim, i) for i, dim in enumerate(fit.sim['dims_oi']) if
dim)[1]
offset = int(sum(map(np.product, fit.sim['dims_oi'][:par_idx])))
par_offset = int(np.product(fit.sim['dims_oi'][par_idx]))
par_keys = fit.sim['fnames_oi'][offset:offset + par_offset]
shift = len(par_keys)
for item in par_keys:
_, shape = item.replace(']', '').split('[')
shape_idx_min = min(int(shape_value) for shape_value in shape.
split(','))
if shape_idx_min < shift:
shift = shape_idx_min
shift = int(min(shift, 1))
var_keys = OrderedDict((var, []) for var in fit.sim['pars_oi'])
for key in fit.sim['fnames_oi']:
var, *tails = key.split('[')
loc = [Ellipsis]
for tail in tails:
loc = []
for i in tail[:-1].split(','):
loc.append(int(i) - shift)
var_keys[var].append((key, loc))
shapes = dict(zip(fit.sim['pars_oi'], fit.sim['dims_oi']))
variables = [var for var in variables if var not in ignore]
data = OrderedDict()
for var in variables:
if var in data:
continue
keys_locs = var_keys.get(var, [(var, [Ellipsis])])
shape = shapes.get(var, [])
dtype = dtypes.get(var)
ndraw = max(ndraws)
ary_shape = [nchain, ndraw] + shape
ary = np.empty(ary_shape, dtype=dtype, order='F')
for chain, (pyholder, ndraw) in enumerate(zip(fit.sim['samples'],
ndraws)):
axes = [chain, slice(None)]
for key, loc in keys_locs:
ary_slice = tuple(axes + loc)
ary[ary_slice] = pyholder.chains[key][-ndraw:]
data[var] = ary
return data
| get_draws | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(fit, variables=None, ignore=None):\n if ignore is None:\n ignore = []\n if fit.mode == 1:\n msg = \"Model in mode 'test_grad'. Sampling is ... |
def x(fit, log_likelihood=None):
dtypes = {'divergent__': bool, 'n_leapfrog__': np.int64, 'treedepth__':
np.int64}
ndraws = [(s - w) for s, w in zip(fit.sim['n_save'], fit.sim['warmup2'])]
extraction = OrderedDict()
for chain, (pyholder, ndraws) in enumerate(zip(fit.sim['samples'], ndraws)
):
if chain == 0:
for key in pyholder['sampler_param_names']:
extraction[key] = []
for key, values in zip(pyholder['sampler_param_names'], pyholder[
'sampler_params']):
extraction[key].append(values[-ndraws:])
data = OrderedDict()
for key, values in extraction.items():
values = np.stack(values, axis=0)
dtype = dtypes.get(key)
values = values.astype(dtype)
name = re.sub('__$', '', key)
name = 'diverging' if name == 'divergent' else name
data[name] = values
if log_likelihood is not None:
log_likelihood_data = get_draws(fit, variables=log_likelihood)
data['log_likelihood'] = log_likelihood_data[log_likelihood]
stat_lp = get_draws(fit, variables='lp__')
data['lp'] = stat_lp['lp__']
return data
| get_sample_stats | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(fit, log_likelihood=None):\n dtypes = {'divergent__': bool, 'n_leapfrog__': np.int64, 'treedepth__':\n np.int64}\n ndraws = [(s - w) for s, w in z... |
def x(fit, model=None, variables=None, ignore=None):
if ignore is None:
ignore = []
dtypes = {}
if model is not None:
dtypes = infer_dtypes(fit, model)
if variables is None:
variables = fit.param_names
elif isinstance(variables, str):
variables = [variables]
variables = list(variables)
data = OrderedDict()
for var in variables:
if var in data:
continue
dtype = dtypes.get(var)
new_shape = *fit.dims[fit.param_names.index(var)], -1, fit.num_chains
values = fit._draws[fit._parameter_indexes(var), :]
values = values.reshape(new_shape, order='F')
values = np.moveaxis(values, [-2, -1], [1, 0])
values = values.astype(dtype)
data[var] = values
return data
| get_draws_stan3 | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(fit, model=None, variables=None, ignore=None):\n if ignore is None:\n ignore = []\n dtypes = {}\n if model is not None:\n dtypes = infer... |
def x(fit, model=None, log_likelihood=None):
dtypes = {'divergent__': bool, 'n_leapfrog__': np.int64, 'treedepth__':
np.int64}
data = OrderedDict()
for key in fit.sample_and_sampler_param_names:
new_shape = -1, fit.num_chains
values = fit._draws[fit._parameter_indexes(key)]
values = values.reshape(new_shape, order='F')
values = np.moveaxis(values, [-2, -1], [1, 0])
dtype = dtypes.get(key)
values = values.astype(dtype)
name = re.sub('__$', '', key)
name = 'diverging' if name == 'divergent' else name
data[name] = values
if log_likelihood is not None:
log_likelihood_data = get_draws_stan3(fit, model=model, variables=
log_likelihood)
data['log_likelihood'] = log_likelihood_data[log_likelihood]
return data
| get_sample_stats_stan3 | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(fit, model=None, log_likelihood=None):\n dtypes = {'divergent__': bool, 'n_leapfrog__': np.int64, 'treedepth__':\n np.int64}\n data = OrderedDict(... |
def x(fit, model=None):
pattern_remove_comments = re.compile(
'//.*?$|/\\*.*?\\*/|\\\'(?:\\\\.|[^\\\\\\\'])*\\\'|"(?:\\\\.|[^\\\\"])*"'
, re.DOTALL | re.MULTILINE)
stan_integer = 'int'
stan_limits = '(?:\\<[^\\>]+\\>)*'
stan_param = '([^;=\\s\\[]+)'
stan_ws = '\\s*'
pattern_int = re.compile(''.join((stan_integer, stan_ws, stan_limits,
stan_ws, stan_param)), re.IGNORECASE)
if model is None:
stan_code = fit.get_stancode()
model_pars = fit.model_pars
else:
stan_code = model.program_code
model_pars = fit.param_names
stan_code = '\n'.join(line if '#' not in line else line[:line.find('#')
] for line in stan_code.splitlines())
stan_code = re.sub(pattern_remove_comments, '', stan_code)
stan_code = stan_code.split('generated quantities')[-1]
dtypes = re.findall(pattern_int, stan_code)
dtypes = {item.strip(): 'int' for item in dtypes if item.strip() in
model_pars}
return dtypes
| infer_dtypes | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(fit, model=None):\n pattern_remove_comments = re.compile(\n '//.*?$|/\\\\*.*?\\\\*/|\\\\\\'(?:\\\\\\\\.|[^\\\\\\\\\\\\\\'])*\\\\\\'|\"(?:\\\\\\\\.|[^... |
def x(posterior=None, *, posterior_predictive=None, prior=None,
prior_predictive=None, observed_data=None, log_likelihood=None, coords=
None, dims=None, posterior_model=None, prior_model=None):
check_posterior = posterior is not None and type(posterior
).__module__ == 'stan.fit'
check_prior = prior is not None and type(prior).__module__ == 'stan.fit'
if check_posterior or check_prior:
return PyStan3Converter(posterior=posterior, posterior_model=
posterior_model, posterior_predictive=posterior_predictive,
prior=prior, prior_model=prior_model, prior_predictive=
prior_predictive, observed_data=observed_data, log_likelihood=
log_likelihood, coords=coords, dims=dims).to_inference_data()
else:
return PyStanConverter(posterior=posterior, posterior_predictive=
posterior_predictive, prior=prior, prior_predictive=
prior_predictive, observed_data=observed_data, log_likelihood=
log_likelihood, coords=coords, dims=dims).to_inference_data()
| from_pystan | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(posterior=None, *, posterior_predictive=None, prior=None,\n prior_predictive=None, observed_data=None, log_likelihood=None, coords=\n None, dims=None, po... |
def x(self):
posterior = self.posterior
posterior_predictive = self.posterior_predictive
if posterior_predictive is None:
posterior_predictive = []
elif isinstance(posterior_predictive, str):
posterior_predictive = [posterior_predictive]
log_likelihood = self.log_likelihood
if not isinstance(log_likelihood, str):
log_likelihood = []
else:
log_likelihood = [log_likelihood]
ignore = posterior_predictive + log_likelihood + ['lp__']
data = get_draws(posterior, ignore=ignore)
return dict_to_dataset(data, library=self.pystan, coords=self.coords,
dims=self.dims)
| posterior_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n posterior = self.posterior\n posterior_predictive = self.posterior_predictive\n if posterior_predictive is None:\n posterior_predictiv... |
def x(self):
posterior = self.posterior
dims = deepcopy(self.dims) if self.dims is not None else {}
coords = deepcopy(self.coords) if self.coords is not None else {}
log_likelihood = self.log_likelihood
if log_likelihood is not None:
if isinstance(log_likelihood, str) and log_likelihood in dims:
dims['log_likelihood'] = dims.pop(log_likelihood)
data = get_sample_stats(posterior, log_likelihood)
return dict_to_dataset(data, library=self.pystan, coords=coords, dims=dims)
| sample_stats_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n posterior = self.posterior\n dims = deepcopy(self.dims) if self.dims is not None else {}\n coords = deepcopy(self.coords) if self.coords is n... |
def x(self):
posterior = self.posterior
posterior_predictive = self.posterior_predictive
data = get_draws(posterior, variables=posterior_predictive)
return dict_to_dataset(data, library=self.pystan, coords=self.coords,
dims=self.dims)
| posterior_predictive_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n posterior = self.posterior\n posterior_predictive = self.posterior_predictive\n data = get_draws(posterior, variables=posterior_predictive)\n... |
def x(self):
prior = self.prior
prior_predictive = self.prior_predictive
if prior_predictive is None:
prior_predictive = []
elif isinstance(prior_predictive, str):
prior_predictive = [prior_predictive]
ignore = prior_predictive + ['lp__']
data = get_draws(prior, ignore=ignore)
return dict_to_dataset(data, library=self.pystan, coords=self.coords,
dims=self.dims)
| prior_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n prior = self.prior\n prior_predictive = self.prior_predictive\n if prior_predictive is None:\n prior_predictive = []\n elif isinsta... |
def x(self):
prior = self.prior
data = get_sample_stats(prior)
return dict_to_dataset(data, library=self.pystan, coords=self.coords,
dims=self.dims)
| sample_stats_prior_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n prior = self.prior\n data = get_sample_stats(prior)\n return dict_to_dataset(data, library=self.pystan, coords=self.coords,\n dims=sel... |
def x(self):
prior = self.prior
prior_predictive = self.prior_predictive
data = get_draws(prior, variables=prior_predictive)
return dict_to_dataset(data, library=self.pystan, coords=self.coords,
dims=self.dims)
| prior_predictive_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n prior = self.prior\n prior_predictive = self.prior_predictive\n data = get_draws(prior, variables=prior_predictive)\n return dict_to_datas... |
def x(self):
posterior = self.posterior
posterior_model = self.posterior_model
posterior_predictive = self.posterior_predictive
if posterior_predictive is None:
posterior_predictive = []
elif isinstance(posterior_predictive, str):
posterior_predictive = [posterior_predictive]
log_likelihood = self.log_likelihood
if not isinstance(log_likelihood, str):
log_likelihood = []
else:
log_likelihood = [log_likelihood]
ignore = posterior_predictive + log_likelihood
data = get_draws_stan3(posterior, model=posterior_model, ignore=ignore)
return dict_to_dataset(data, library=self.stan, coords=self.coords,
dims=self.dims)
| posterior_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n posterior = self.posterior\n posterior_model = self.posterior_model\n posterior_predictive = self.posterior_predictive\n if posterior_pred... |
def x(self):
posterior = self.posterior
posterior_model = self.posterior_model
dims = deepcopy(self.dims) if self.dims is not None else {}
coords = deepcopy(self.coords) if self.coords is not None else {}
log_likelihood = self.log_likelihood
if log_likelihood is not None:
if isinstance(log_likelihood, str) and log_likelihood in dims:
dims['log_likelihood'] = dims.pop(log_likelihood)
data = get_sample_stats_stan3(posterior, model=posterior_model,
log_likelihood=log_likelihood)
return dict_to_dataset(data, library=self.stan, coords=coords, dims=dims)
| sample_stats_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n posterior = self.posterior\n posterior_model = self.posterior_model\n dims = deepcopy(self.dims) if self.dims is not None else {}\n coords... |
def x(self):
posterior = self.posterior
posterior_model = self.posterior_model
posterior_predictive = self.posterior_predictive
data = get_draws_stan3(posterior, model=posterior_model, variables=
posterior_predictive)
return dict_to_dataset(data, library=self.stan, coords=self.coords,
dims=self.dims)
| posterior_predictive_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n posterior = self.posterior\n posterior_model = self.posterior_model\n posterior_predictive = self.posterior_predictive\n data = get_draws_... |
def x(self):
prior = self.prior
prior_model = self.prior_model
prior_predictive = self.prior_predictive
if prior_predictive is None:
prior_predictive = []
elif isinstance(prior_predictive, str):
prior_predictive = [prior_predictive]
ignore = prior_predictive
data = get_draws_stan3(prior, model=prior_model, ignore=ignore)
return dict_to_dataset(data, library=self.stan, coords=self.coords,
dims=self.dims)
| prior_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n prior = self.prior\n prior_model = self.prior_model\n prior_predictive = self.prior_predictive\n if prior_predictive is None:\n pri... |
def x(self):
prior = self.prior
prior_model = self.prior_model
data = get_sample_stats_stan3(prior, model=prior_model)
return dict_to_dataset(data, library=self.stan, coords=self.coords,
dims=self.dims)
| sample_stats_prior_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n prior = self.prior\n prior_model = self.prior_model\n data = get_sample_stats_stan3(prior, model=prior_model)\n return dict_to_dataset(dat... |
def x(self):
prior = self.prior
prior_model = self.prior_model
prior_predictive = self.prior_predictive
data = get_draws_stan3(prior, model=prior_model, variables=prior_predictive
)
return dict_to_dataset(data, library=self.stan, coords=self.coords,
dims=self.dims)
| prior_predictive_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n prior = self.prior\n prior_model = self.prior_model\n prior_predictive = self.prior_predictive\n data = get_draws_stan3(prior, model=prior... |
def x(self):
posterior_model = self.posterior_model
if self.dims is None:
dims = {}
else:
dims = self.dims
observed_names = self.observed_data
if isinstance(observed_names, str):
observed_names = [observed_names]
observed_data = OrderedDict()
for key in observed_names:
vals = np.atleast_1d(posterior_model.data[key])
val_dims = dims.get(key)
val_dims, coords = generate_dims_coords(vals.shape, key, dims=
val_dims, coords=self.coords)
observed_data[key] = xr.DataArray(vals, dims=val_dims, coords=coords)
return xr.Dataset(data_vars=observed_data, attrs=make_attrs(library=
self.stan))
| observed_data_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n posterior_model = self.posterior_model\n if self.dims is None:\n dims = {}\n else:\n dims = self.dims\n observed_names = sel... |
def x(posterior):
sample_point = posterior.exec_traces[0]
nodes = [node for node in sample_point.nodes.values() if node['type'] ==
'sample']
observed = [node['name'] for node in nodes if node['is_observed']]
latent = [node['name'] for node in nodes if not node['is_observed']]
return observed, latent
| get_var_names | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(posterior):\n sample_point = posterior.exec_traces[0]\n nodes = [node for node in sample_point.nodes.values() if node['type'] ==\n 'sample']\n ... |
def x(posterior=None, *, coords=None, dims=None):
return PyroConverter(posterior=posterior, coords=coords, dims=dims
).to_inference_data()
| from_pyro | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(posterior=None, *, coords=None, dims=None):\n return PyroConverter(posterior=posterior, coords=coords, dims=dims\n ).to_inference_data()\n",
"rol... |
def x(self):
from pyro.infer import EmpiricalMarginal
try:
data = {name: (np.expand_dims(samples.enumerate_support().squeeze(),
0) if self.posterior.num_chains == 1 else samples.
enumerate_support().squeeze()) for name, samples in self.
posterior.marginal(sites=self.latent_vars).empirical.items()}
except AttributeError:
data = {}
for var_name in self.latent_vars:
samples = EmpiricalMarginal(self.posterior, sites=var_name
).get_samples_and_weights()[0]
data[var_name] = np.expand_dims(samples.numpy().squeeze(), 0)
return dict_to_dataset(data, library=self.pyro, coords=self.coords,
dims=self.dims)
| posterior_to_xarray | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n from pyro.infer import EmpiricalMarginal\n try:\n data = {name: (np.expand_dims(samples.enumerate_support().squeeze(),\n 0) if... |
def x(data, var_names=None, max_lag=100, combined=False, figsize=None,
textsize=None):
data = convert_to_dataset(data, group='posterior')
var_names = _var_names(var_names, data)
plotters = list(xarray_var_iter(data, var_names, combined))
length_plotters = len(plotters)
rows, cols = default_grid(length_plotters)
figsize, _, titlesize, xt_labelsize, linewidth, _ = _scale_fig_size(figsize
, textsize, rows, cols)
_, axes = _create_axes_grid(length_plotters, rows, cols, figsize=
figsize, squeeze=False, sharex=True, sharey=True)
axes = np.atleast_2d(axes)
for (var_name, selection, x), ax in zip(plotters, axes.flatten()):
x_prime = x
if combined:
x_prime = x.flatten()
y = autocorr(x_prime)
ax.vlines(x=np.arange(0, max_lag), ymin=0, ymax=y[0:max_lag], lw=
linewidth)
ax.hlines(0, 0, max_lag, 'steelblue')
ax.set_title(make_label(var_name, selection), fontsize=titlesize,
wrap=True)
ax.tick_params(labelsize=xt_labelsize)
if axes.size > 0:
axes[0, 0].set_xlim(0, max_lag)
axes[0, 0].set_ylim(-1, 1)
return axes
| plot_autocorr | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(data, var_names=None, max_lag=100, combined=False, figsize=None,\n textsize=None):\n data = convert_to_dataset(data, group='posterior')\n var_names = ... |
def x(data, kind='kde', bfmi=True, figsize=None, legend=True, fill_alpha=(1,
0.75), fill_color=('C0', 'C5'), bw=4.5, textsize=None, fill_kwargs=None,
plot_kwargs=None, ax=None):
energy = convert_to_dataset(data, group='sample_stats').energy.values
if ax is None:
_, ax = plt.subplots(figsize=figsize, constrained_layout=True)
if fill_kwargs is None:
fill_kwargs = {}
if plot_kwargs is None:
plot_kwargs = {}
figsize, _, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize,
textsize, 1, 1)
series = zip(fill_alpha, fill_color, ('Marginal Energy',
'Energy transition'), (energy - energy.mean(), np.diff(energy)))
if kind == 'kde':
for alpha, color, label, value in series:
fill_kwargs['alpha'] = alpha
fill_kwargs['color'] = color
plot_kwargs.setdefault('color', color)
plot_kwargs.setdefault('alpha', 0)
plot_kwargs.setdefault('linewidth', linewidth)
plot_kde(value, bw=bw, label=label, textsize=xt_labelsize,
plot_kwargs=plot_kwargs, fill_kwargs=fill_kwargs, ax=ax)
elif kind == 'hist':
for alpha, color, label, value in series:
ax.hist(value.flatten(), bins='auto', density=True, alpha=alpha,
label=label, color=color, **plot_kwargs)
else:
raise ValueError('Plot type {} not recognized.'.format(kind))
if bfmi:
for idx, val in enumerate(e_bfmi(energy)):
ax.plot([], label='chain {:>2} BFMI = {:.2f}'.format(idx, val),
alpha=0)
ax.set_xticks([])
ax.set_yticks([])
if legend:
ax.legend()
return ax
| plot_energy | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(data, kind='kde', bfmi=True, figsize=None, legend=True, fill_alpha=(1,\n 0.75), fill_color=('C0', 'C5'), bw=4.5, textsize=None, fill_kwargs=None,\n plot_... |
def x(iterable):
first, second = tee(iterable)
next(second, None)
return zip(first, second)
| pairwise | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(iterable):\n first, second = tee(iterable)\n next(second, None)\n return zip(first, second)\n",
"role": "user"
},
{
"content": "pairwise",... |
def x(data, kind='forestplot', model_names=None, var_names=None, combined=
False, credible_interval=0.94, rope=None, quartiles=True, ess=False,
r_hat=False, colors='cycle', textsize=None, linewidth=None, markersize=
None, ridgeplot_alpha=None, ridgeplot_overlap=2, figsize=None):
if not isinstance(data, (list, tuple)):
data = [data]
datasets = [convert_to_dataset(datum) for datum in reversed(data)]
var_names = _var_names(var_names, datasets)
ncols, width_ratios = 1, [3]
if ess:
ncols += 1
width_ratios.append(1)
if r_hat:
ncols += 1
width_ratios.append(1)
plot_handler = PlotHandler(datasets, var_names=var_names, model_names=
model_names, combined=combined, colors=colors)
if figsize is None:
figsize = min(12, sum(width_ratios) * 2), plot_handler.fig_height()
(figsize, _, titlesize, xt_labelsize, auto_linewidth, auto_markersize
) = _scale_fig_size(figsize, textsize, 1.1, 1)
if linewidth is None:
linewidth = auto_linewidth
if markersize is None:
markersize = auto_markersize
fig, axes = plt.subplots(nrows=1, ncols=ncols, figsize=figsize,
gridspec_kw={'width_ratios': width_ratios}, sharey=True,
constrained_layout=True)
axes = np.atleast_1d(axes)
if kind == 'forestplot':
plot_handler.forestplot(credible_interval, quartiles, xt_labelsize,
titlesize, linewidth, markersize, axes[0], rope)
elif kind == 'ridgeplot':
plot_handler.ridgeplot(ridgeplot_overlap, linewidth,
ridgeplot_alpha, axes[0])
else:
raise TypeError(
"Argument 'kind' must be one of 'forestplot' or 'ridgeplot' (you provided {})"
.format(kind))
idx = 1
if ess:
plot_handler.plot_neff(axes[idx], xt_labelsize, titlesize, markersize)
idx += 1
if r_hat:
plot_handler.plot_rhat(axes[idx], xt_labelsize, titlesize, markersize)
idx += 1
for ax in axes:
ax.grid(False)
for ticks in ax.yaxis.get_major_ticks():
ticks.tick1On = False
ticks.tick2On = False
for loc, spine in ax.spines.items():
if loc in ['left', 'right']:
spine.set_visible(False)
if len(plot_handler.data) > 1:
plot_handler.make_bands(ax)
labels, ticks = plot_handler.labels_and_ticks()
axes[0].set_yticks(ticks)
axes[0].set_yticklabels(labels)
all_plotters = list(plot_handler.plotters.values())
y_max = plot_handler.y_max() - all_plotters[-1].group_offset
if kind == 'ridgeplot':
y_max += ridgeplot_overlap
axes[0].set_ylim(-all_plotters[0].group_offset, y_max)
return fig, axes
| plot_forest | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(data, kind='forestplot', model_names=None, var_names=None, combined=\n False, credible_interval=0.94, rope=None, quartiles=True, ess=False,\n r_hat=False... |
def x(self):
plotters, y = {}, 0
for var_name in self.var_names:
plotters[var_name] = VarHandler(var_name, self.data, y, model_names
=self.model_names, combined=self.combined, colors=self.colors)
y = plotters[var_name].y_max()
return plotters
| make_plotters | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n plotters, y = {}, 0\n for var_name in self.var_names:\n plotters[var_name] = VarHandler(var_name, self.data, y, model_names\n ... |
def x(self):
labels, idxs = [], []
for plotter in self.plotters.values():
sub_labels, sub_idxs, _, _ = plotter.labels_ticks_and_vals()
labels.append(sub_labels)
idxs.append(sub_idxs)
return np.concatenate(labels), np.concatenate(idxs)
| labels_and_ticks | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n labels, idxs = [], []\n for plotter in self.plotters.values():\n sub_labels, sub_idxs, _, _ = plotter.labels_ticks_and_vals()\n la... |
def x(self, rope, ax, y, linewidth, rope_var):
vals = dict(rope[rope_var][0])['rope']
ax.plot(vals, (y + 0.05, y + 0.05), lw=linewidth * 2, color='C2',
solid_capstyle='round', zorder=0, alpha=0.7)
return ax
| display_multiple_ropes | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, rope, ax, y, linewidth, rope_var):\n vals = dict(rope[rope_var][0])['rope']\n ax.plot(vals, (y + 0.05, y + 0.05), lw=linewidth * 2, color='C2',\n ... |
def x(self, mult, linewidth, alpha, ax):
if alpha is None:
alpha = 1.0
zorder = 0
for plotter in self.plotters.values():
for x, y_min, y_max, color in plotter.ridgeplot(mult):
if alpha == 0:
border = color
else:
border = 'k'
ax.plot(x, y_max, '-', linewidth=linewidth, color=border,
zorder=zorder)
ax.plot(x, y_min, '-', linewidth=linewidth, color=border,
zorder=zorder)
ax.fill_between(x, y_min, y_max, alpha=alpha, color=color,
zorder=zorder)
zorder -= 1
return ax
| ridgeplot | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, mult, linewidth, alpha, ax):\n if alpha is None:\n alpha = 1.0\n zorder = 0\n for plotter in self.plotters.values():\n for x, y_mi... |
def x(self, credible_interval, quartiles, xt_labelsize, titlesize,
linewidth, markersize, ax, rope):
endpoint = 100 * (1 - credible_interval) / 2
if quartiles:
qlist = [endpoint, 25, 50, 75, 100 - endpoint]
else:
qlist = [endpoint, 50, 100 - endpoint]
for plotter in self.plotters.values():
for y, rope_var, values, color in plotter.treeplot(qlist,
credible_interval):
if isinstance(rope, dict):
self.display_multiple_ropes(rope, ax, y, linewidth, rope_var)
mid = len(values) // 2
param_iter = zip(np.linspace(2 * linewidth, linewidth, mid,
endpoint=True)[-1::-1], range(mid))
for width, j in param_iter:
ax.hlines(y, values[j], values[-(j + 1)], linewidth=width,
color=color)
ax.plot(values[mid], y, 'o', mfc=ax.get_facecolor(), markersize
=markersize * 0.75, color=color)
ax.tick_params(labelsize=xt_labelsize)
ax.set_title('{:.1%} Credible Interval'.format(credible_interval),
fontsize=titlesize, wrap=True)
if rope is None or isinstance(rope, dict):
return
elif len(rope) == 2:
ax.axvspan(rope[0], rope[1], 0, self.y_max(), color='C2', alpha=0.5)
else:
raise ValueError(
'Argument `rope` must be None, a dictionary like{"var_name": {"rope": (lo, hi)}}, or an iterable of length 2'
)
return ax
| forestplot | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, credible_interval, quartiles, xt_labelsize, titlesize,\n linewidth, markersize, ax, rope):\n endpoint = 100 * (1 - credible_interval) / 2\n if q... |
def x(self, ax, xt_labelsize, titlesize, markersize):
for plotter in self.plotters.values():
for y, ess, color in plotter.ess():
if ess is not None:
ax.plot(ess, y, 'o', color=color, clip_on=False, markersize
=markersize, markeredgecolor='k')
ax.set_xlim(left=0)
ax.set_title('ess', fontsize=titlesize, wrap=True)
ax.tick_params(labelsize=xt_labelsize)
return ax
| plot_neff | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, ax, xt_labelsize, titlesize, markersize):\n for plotter in self.plotters.values():\n for y, ess, color in plotter.ess():\n if ess is... |
def x(self, ax, xt_labelsize, titlesize, markersize):
for plotter in self.plotters.values():
for y, r_hat, color in plotter.r_hat():
if r_hat is not None:
ax.plot(r_hat, y, 'o', color=color, markersize=markersize,
markeredgecolor='k')
ax.set_xlim(left=0.9, right=2.1)
ax.set_xticks([1, 2])
ax.tick_params(labelsize=xt_labelsize)
ax.set_title('r_hat', fontsize=titlesize, wrap=True)
return ax
| plot_rhat | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, ax, xt_labelsize, titlesize, markersize):\n for plotter in self.plotters.values():\n for y, r_hat, color in plotter.r_hat():\n if r_... |
def x(self, ax):
y_vals, y_prev, is_zero = [0], None, False
prev_color_index = 0
for plotter in self.plotters.values():
for y, *_, color in plotter.iterator():
if self.colors.index(color) < prev_color_index:
if not is_zero and y_prev is not None:
y_vals.append((y + y_prev) * 0.5)
is_zero = True
else:
is_zero = False
prev_color_index = self.colors.index(color)
y_prev = y
offset = plotter.group_offset
y_vals.append(y_prev + offset)
for idx, (y_start, y_stop) in enumerate(pairwise(y_vals)):
ax.axhspan(y_start, y_stop, color='k', alpha=0.1 * (idx % 2))
return ax
| make_bands | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, ax):\n y_vals, y_prev, is_zero = [0], None, False\n prev_color_index = 0\n for plotter in self.plotters.values():\n for y, *_, color in p... |
def x(self):
return 4 + len(self.data) * len(self.var_names) - 1 + 0.1 * sum(1 for j in
self.plotters.values() for _ in j.iterator())
| fig_height | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n return 4 + len(self.data) * len(self.var_names) - 1 + 0.1 * sum(1 for j in\n self.plotters.values() for _ in j.iterator())\n",
"role": "... |
def x(self):
if self.combined:
grouped_data = [[(0, datum)] for datum in self.data]
skip_dims = {'chain'}
else:
grouped_data = [datum.groupby('chain') for datum in self.data]
skip_dims = set()
label_dict = OrderedDict()
for name, grouped_datum in zip(self.model_names, grouped_data):
for _, sub_data in grouped_datum:
datum_iter = xarray_var_iter(sub_data, var_names=[self.var_name
], skip_dims=skip_dims, reverse_selections=True)
for _, selection, values in datum_iter:
label = make_label(self.var_name, selection, position='beside')
if label not in label_dict:
label_dict[label] = OrderedDict()
if name not in label_dict[label]:
label_dict[label][name] = []
label_dict[label][name].append(values)
y = self.y_start
for label, model_data in label_dict.items():
for model_name, value_list in model_data.items():
if model_name:
row_label = '{}: {}'.format(model_name, label)
else:
row_label = label
for values in value_list:
yield y, row_label, label, values, self.model_color[model_name]
y += self.chain_offset
y += self.var_offset
y += self.group_offset
| iterator | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if self.combined:\n grouped_data = [[(0, datum)] for datum in self.data]\n skip_dims = {'chain'}\n else:\n grouped_data = [... |
def x(self):
y_ticks = defaultdict(list)
for y, label, _, vals, color in self.iterator():
y_ticks[label].append((y, vals, color))
labels, ticks, vals, colors = [], [], [], []
for label, data in y_ticks.items():
labels.append(label)
ticks.append(np.mean([j[0] for j in data]))
vals.append(np.vstack([j[1] for j in data]))
colors.append(data[0][2])
return labels, ticks, vals, colors
| labels_ticks_and_vals | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n y_ticks = defaultdict(list)\n for y, label, _, vals, color in self.iterator():\n y_ticks[label].append((y, vals, color))\n labels, tic... |
def x(self, qlist, credible_interval):
for y, _, label, values, color in self.iterator():
ntiles = np.percentile(values.flatten(), qlist)
ntiles[0], ntiles[-1] = hpd(values.flatten(), credible_interval)
yield y, label, ntiles, color
| treeplot | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, qlist, credible_interval):\n for y, _, label, values, color in self.iterator():\n ntiles = np.percentile(values.flatten(), qlist)\n ntil... |
def x(self, mult):
xvals, yvals, pdfs, colors = [], [], [], []
for y, *_, values, color in self.iterator():
yvals.append(y)
colors.append(color)
values = values.flatten()
density, lower, upper = _fast_kde(values)
xvals.append(np.linspace(lower, upper, len(density)))
pdfs.append(density)
scaling = max(j.max() for j in pdfs)
for y, x, pdf, color in zip(yvals, xvals, pdfs, colors):
y = y * np.ones_like(x)
yield x, y, mult * pdf / scaling + y, color
| ridgeplot | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, mult):\n xvals, yvals, pdfs, colors = [], [], [], []\n for y, *_, values, color in self.iterator():\n yvals.append(y)\n colors.append... |
def x(self):
_, y_vals, values, colors = self.labels_ticks_and_vals()
for y, value, color in zip(y_vals, values, colors):
if value.ndim != 2 or value.shape[0] < 2:
yield y, None, color
else:
yield y, _get_ess(value), color
| ess | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n _, y_vals, values, colors = self.labels_ticks_and_vals()\n for y, value, color in zip(y_vals, values, colors):\n if value.ndim != 2 or va... |
def x(self):
_, y_vals, values, colors = self.labels_ticks_and_vals()
for y, value, color in zip(y_vals, values, colors):
if value.ndim != 2 or value.shape[0] < 2:
yield y, None, color
else:
yield y, _get_split_rhat(value), color
| r_hat | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n _, y_vals, values, colors = self.labels_ticks_and_vals()\n for y, value, color in zip(y_vals, values, colors):\n if value.ndim != 2 or va... |
def x(self):
end_y = max(y for y, *_ in self.iterator())
if self.combined:
end_y += self.group_offset
return end_y + 2 * self.group_offset
| y_max | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n end_y = max(y for y, *_ in self.iterator())\n if self.combined:\n end_y += self.group_offset\n return end_y + 2 * self.group_offset\n"... |
def x(khats, figsize=None, textsize=None, markersize=None, ax=None,
hlines_kwargs=None, **kwargs):
if hlines_kwargs is None:
hlines_kwargs = {}
(figsize, ax_labelsize, _, xt_labelsize, linewidth, scaled_markersize
) = _scale_fig_size(figsize, textsize)
if markersize is None:
markersize = scaled_markersize
if ax is None:
_, ax = plt.subplots(1, 1, figsize=figsize, constrained_layout=True)
ax.hlines([0, 0.5, 0.7, 1], xmin=-1, xmax=len(khats) + 1, alpha=0.25,
linewidth=linewidth, **hlines_kwargs)
alphas = 0.5 + 0.5 * (khats > 0.5)
rgba_c = np.zeros((len(khats), 4))
rgba_c[:, 2] = 0.8
rgba_c[:, 3] = alphas
ax.scatter(np.arange(len(khats)), khats, c=rgba_c, marker='+', s=
markersize, **kwargs)
ax.set_xlabel('Data point', fontsize=ax_labelsize)
ax.set_ylabel('Shape parameter κ', fontsize=ax_labelsize)
ax.tick_params(labelsize=xt_labelsize)
return ax
| plot_khat | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(khats, figsize=None, textsize=None, markersize=None, ax=None,\n hlines_kwargs=None, **kwargs):\n if hlines_kwargs is None:\n hlines_kwargs = {}\n ... |
def x(data, group='posterior', data_labels=None, var_names=None,
credible_interval=0.94, point_estimate='mean', colors='cycle', outline=
True, hpd_markers='', shade=0.0, bw=4.5, figsize=None, textsize=None):
if not isinstance(data, (list, tuple)):
datasets = [convert_to_dataset(data, group=group)]
else:
datasets = [convert_to_dataset(datum, group=group) for datum in data]
var_names = _var_names(var_names, datasets)
if point_estimate not in ('mean', 'median', None):
raise ValueError(
"Point estimate should be 'mean',median' or None, not {}".
format(point_estimate))
n_data = len(datasets)
if data_labels is None:
if n_data > 1:
data_labels = ['{}'.format(idx) for idx in range(n_data)]
else:
data_labels = ['']
elif len(data_labels) != n_data:
raise ValueError(
'The number of names for the models ({}) does not match the number of models ({})'
.format(len(data_labels), n_data))
if colors == 'cycle':
colors = ['C{}'.format(idx % 10) for idx in range(n_data)]
elif isinstance(colors, str):
colors = [colors for _ in range(n_data)]
if not 1 >= credible_interval > 0:
raise ValueError(
'The value of credible_interval should be in the interval (0, 1]')
to_plot = [list(xarray_var_iter(data, var_names, combined=True)) for
data in datasets]
all_labels = []
length_plotters = []
for plotters in to_plot:
length_plotters.append(len(plotters))
for var_name, selection, _ in plotters:
label = make_label(var_name, selection)
if label not in all_labels:
all_labels.append(label)
length_plotters = max(length_plotters)
rows, cols = default_grid(length_plotters, max_cols=3)
figsize, _, titlesize, xt_labelsize, linewidth, markersize = (
_scale_fig_size(figsize, textsize, rows, cols))
_, ax = _create_axes_grid(length_plotters, rows, cols, figsize=figsize,
squeeze=False)
axis_map = {label: ax_ for label, ax_ in zip(all_labels, ax.flatten())}
for m_idx, plotters in enumerate(to_plot):
for var_name, selection, values in plotters:
label = make_label(var_name, selection)
_d_helper(values.flatten(), label, colors[m_idx], bw, titlesize,
xt_labelsize, linewidth, markersize, credible_interval,
point_estimate, hpd_markers, outline, shade, axis_map[label])
if n_data > 1:
for m_idx, label in enumerate(data_labels):
ax[0].plot([], label=label, c=colors[m_idx], markersize=markersize)
ax[0].legend(fontsize=xt_labelsize)
return ax
| plot_density | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(data, group='posterior', data_labels=None, var_names=None,\n credible_interval=0.94, point_estimate='mean', colors='cycle', outline=\n True, hpd_markers=... |
def x(vec, vname, color, bw, titlesize, xt_labelsize, linewidth, markersize,
credible_interval, point_estimate, hpd_markers, outline, shade, ax):
if vec.dtype.kind == 'f':
if credible_interval != 1:
hpd_ = hpd(vec, credible_interval)
new_vec = vec[(vec >= hpd_[0]) & (vec <= hpd_[1])]
else:
new_vec = vec
density, xmin, xmax = _fast_kde(new_vec, bw=bw)
density *= credible_interval
x = np.linspace(xmin, xmax, len(density))
ymin = density[0]
ymax = density[-1]
if outline:
ax.plot(x, density, color=color, lw=linewidth)
ax.plot([xmin, xmin], [-ymin / 100, ymin], color=color, ls='-',
lw=linewidth)
ax.plot([xmax, xmax], [-ymax / 100, ymax], color=color, ls='-',
lw=linewidth)
if shade:
ax.fill_between(x, density, color=color, alpha=shade)
else:
xmin, xmax = hpd(vec, credible_interval)
bins = range(xmin, xmax + 2)
if outline:
ax.hist(vec, bins=bins, color=color, histtype='step', align='left')
if shade:
ax.hist(vec, bins=bins, color=color, alpha=shade)
if hpd_markers:
ax.plot(xmin, 0, hpd_markers, color=color, markeredgecolor='k',
markersize=markersize)
ax.plot(xmax, 0, hpd_markers, color=color, markeredgecolor='k',
markersize=markersize)
if point_estimate is not None:
if point_estimate == 'mean':
est = np.mean(vec)
elif point_estimate == 'median':
est = np.median(vec)
ax.plot(est, 0, 'o', color=color, markeredgecolor='k', markersize=
markersize)
ax.set_yticks([])
ax.set_title(vname, fontsize=titlesize, wrap=True)
for pos in ['left', 'right', 'top']:
ax.spines[pos].set_visible(False)
ax.tick_params(labelsize=xt_labelsize)
| d_helper | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(vec, vname, color, bw, titlesize, xt_labelsize, linewidth, markersize,\n credible_interval, point_estimate, hpd_markers, outline, shade, ax):\n if vec.dt... |
def x(values, values2=None, cumulative=False, rug=False, label=None, bw=4.5,
quantiles=None, rotated=False, contour=True, fill_last=True, textsize=
None, plot_kwargs=None, fill_kwargs=None, rug_kwargs=None,
contour_kwargs=None, contourf_kwargs=None, pcolormesh_kwargs=None, ax=
None, legend=True):
if ax is None:
ax = plt.gca()
figsize = ax.get_figure().get_size_inches()
figsize, *_, xt_labelsize, linewidth, markersize = _scale_fig_size(figsize,
textsize, 1, 1)
if isinstance(values, xr.Dataset):
raise ValueError(
'Xarray dataset object detected.Use plot_posterior, plot_density, plot_jointor plot_pair instead of plot_kde'
)
if isinstance(values, InferenceData):
raise ValueError(
' Inference Data object detected. Use plot_posterior instead of plot_kde'
)
if values2 is None:
if plot_kwargs is None:
plot_kwargs = {}
plot_kwargs.setdefault('color', 'C0')
default_color = plot_kwargs.get('color')
if fill_kwargs is None:
fill_kwargs = {}
fill_kwargs.setdefault('color', default_color)
if rug_kwargs is None:
rug_kwargs = {}
rug_kwargs.setdefault('marker', '_' if rotated else '|')
rug_kwargs.setdefault('linestyle', 'None')
rug_kwargs.setdefault('color', default_color)
rug_kwargs.setdefault('space', 0.2)
plot_kwargs.setdefault('linewidth', linewidth)
rug_kwargs.setdefault('markersize', 2 * markersize)
density, lower, upper = _fast_kde(values, cumulative, bw)
rug_space = max(density) * rug_kwargs.pop('space')
x = np.linspace(lower, upper, len(density))
if cumulative:
density_q = density
else:
density_q = density.cumsum() / density.sum()
fill_func = ax.fill_between
fill_x, fill_y = x, density
if rotated:
x, density = density, x
fill_func = ax.fill_betweenx
ax.tick_params(labelsize=xt_labelsize)
if rotated:
ax.set_xlim(0, auto=True)
rug_x, rug_y = np.zeros_like(values) - rug_space, values
else:
ax.set_ylim(0, auto=True)
rug_x, rug_y = values, np.zeros_like(values) - rug_space
if rug:
ax.plot(rug_x, rug_y, **rug_kwargs)
if quantiles is not None:
fill_kwargs.setdefault('alpha', 0.75)
idx = [np.sum(density_q < quant) for quant in quantiles]
fill_func(fill_x, fill_y, where=np.isin(fill_x, fill_x[idx],
invert=True, assume_unique=True), **fill_kwargs)
else:
fill_kwargs.setdefault('alpha', 0)
ax.plot(x, density, label=label, **plot_kwargs)
fill_func(fill_x, fill_y, **fill_kwargs)
if legend and label:
legend_element = [Patch(edgecolor=default_color, label=label)]
ax.legend(handles=legend_element)
else:
if contour_kwargs is None:
contour_kwargs = {}
contour_kwargs.setdefault('colors', '0.5')
if contourf_kwargs is None:
contourf_kwargs = {}
if pcolormesh_kwargs is None:
pcolormesh_kwargs = {}
gridsize = (128, 128) if contour else (256, 256)
density, xmin, xmax, ymin, ymax = _fast_kde_2d(values, values2,
gridsize=gridsize)
g_s = complex(gridsize[0])
x_x, y_y = np.mgrid[xmin:xmax:g_s, ymin:ymax:g_s]
ax.grid(False)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
if contour:
qcfs = ax.contourf(x_x, y_y, density, antialiased=True, **
contourf_kwargs)
qcs = ax.contour(x_x, y_y, density, **contour_kwargs)
if not fill_last:
qcfs.collections[0].set_alpha(0)
qcs.collections[0].set_alpha(0)
else:
ax.pcolormesh(x_x, y_y, density, **pcolormesh_kwargs)
return ax
| plot_kde | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(values, values2=None, cumulative=False, rug=False, label=None, bw=4.5,\n quantiles=None, rotated=False, contour=True, fill_last=True, textsize=\n None, p... |
def x(x, cumulative=False, bw=4.5, xmin=None, xmax=None):
    """Fast 1-D kernel density estimate on a regular grid.

    Histogram the finite samples, then smooth with a Gaussian kernel whose
    width follows Scott's rule scaled by ``bw``.

    Returns ``(density, xmin, xmax)``; on unusable data a length-1 NaN array
    with NaN bounds is returned after a warning.
    """
    samples = np.asarray(x, dtype=float)
    samples = samples[np.isfinite(samples)]
    if samples.size == 0:
        warnings.warn('kde plot failed, you may want to check your data')
        return np.array([np.nan]), np.nan, np.nan
    n_samples = len(samples)
    # A denser grid cap is allowed when the caller pinned either boundary.
    n_points = 200 if (xmin or xmax) is None else 500
    if xmin is None:
        xmin = np.min(samples)
    if xmax is None:
        xmax = np.max(samples)
    assert np.min(samples) >= xmin
    assert np.max(samples) <= xmax
    bw_log = np.log(n_samples) * bw
    n_bins = min(int(n_samples ** (1 / 3) * bw_log * 2), n_points)
    if n_bins < 2:
        warnings.warn('kde plot failed, you may want to check your data')
        return np.array([np.nan]), np.nan, np.nan
    d_x = (xmax - xmin) / (n_bins - 1)
    hist = _histogram(samples, n_bins, range_hist=(xmin, xmax))
    scotts_factor = n_samples ** -0.2
    kern_nx = int(scotts_factor * 2 * np.pi * bw_log)
    kernel = gaussian(kern_nx, scotts_factor * bw_log)
    # Reflect the histogram at both ends to soften boundary effects.
    npad = min(n_bins, 2 * kern_nx)
    padded = np.concatenate([hist[npad:0:-1], hist,
        hist[n_bins:n_bins - npad:-1]])
    density = convolve(padded, kernel, mode='same', method='direct')[npad:
        npad + n_bins]
    density /= n_samples * d_x * (2 * np.pi * bw_log ** 2 * scotts_factor **
        2) ** 0.5
    if cumulative:
        density = density.cumsum() / density.sum()
    return density, xmin, xmax
| fast_kde | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(x, cumulative=False, bw=4.5, xmin=None, xmax=None):\n x = np.asarray(x, dtype=float)\n x = x[np.isfinite(x)]\n if x.size == 0:\n warnings.warn(... |
def x(x, y, gridsize=(128, 128), circular=False):
    """Fast 2-D kernel density estimate on a regular grid.

    Bins the (finite) points onto a ``gridsize`` lattice, then smooths with
    a Gaussian kernel sized by Scott's rule.

    Returns ``(grid, xmin, xmax, ymin, ymax)``.
    """
    x = np.asarray(x, dtype=float)
    x = x[np.isfinite(x)]
    y = np.asarray(y, dtype=float)
    y = y[np.isfinite(y)]
    xmin, xmax = x.min(), x.max()
    ymin, ymax = y.min(), y.max()
    n_samples = len(x)
    weights = np.ones(n_samples)
    n_x, n_y = gridsize
    d_x = (xmax - xmin) / (n_x - 1)
    d_y = (ymax - ymin) / (n_y - 1)
    # Map every sample onto fractional grid coordinates, then floor in place.
    coords = np.vstack((x, y)).T
    coords -= [xmin, ymin]
    coords /= [d_x, d_y]
    coords = np.floor(coords, coords).T
    scotts_factor = n_samples ** (-1 / 6)
    cov = np.cov(coords)
    std_devs = np.diag(cov ** 0.5)
    kern_nx, kern_ny = np.round(scotts_factor * 2 * np.pi * std_devs)
    inv_cov = np.linalg.inv(cov * scotts_factor ** 2)
    # Build the Gaussian kernel exp(-q/2) with q the quadratic form of the
    # grid offsets under the (scaled) inverse covariance.
    off_x = np.arange(kern_nx) - kern_nx / 2
    off_y = np.arange(kern_ny) - kern_ny / 2
    mesh_x, mesh_y = np.meshgrid(off_x, off_y)
    offsets = np.vstack((mesh_x.flatten(), mesh_y.flatten()))
    quad_form = (np.dot(inv_cov, offsets) * offsets).sum(axis=0)
    kernel = np.exp(-quad_form / 2).reshape((int(kern_ny), int(kern_nx)))
    boundary = 'wrap' if circular else 'symm'
    grid = coo_matrix((weights, coords), shape=(n_x, n_y)).toarray()
    grid = convolve2d(grid, kernel, mode='same', boundary=boundary)
    # Normalize so the grid integrates (approximately) to one.
    norm_factor = np.linalg.det(2 * np.pi * cov * scotts_factor ** 2)
    grid /= n_samples * d_x * d_y * norm_factor ** 0.5
    return grid, xmin, xmax, ymin, ymax
| fast_kde_2d | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(x, y, gridsize=(128, 128), circular=False):\n x = np.asarray(x, dtype=float)\n x = x[np.isfinite(x)]\n y = np.asarray(y, dtype=float)\n y = y[np.is... |
def x(var_names, data):
    """Normalize a variable-name selection against one or more datasets.

    A plain string is wrapped in a list.  Names prefixed with ``'~'`` (and
    not literally present in the data) are treated as exclusions: the result
    then becomes every variable except the excluded ones.  ``None`` is
    passed through unchanged, meaning "all variables".
    """
    if var_names is None:
        return None
    if isinstance(var_names, str):
        var_names = [var_names]
    # Collect the union of variable names, preserving first-seen order.
    if isinstance(data, (list, tuple)):
        all_vars = []
        for dataset in data:
            for var in dataset.data_vars:
                if var not in all_vars:
                    all_vars.append(var)
    else:
        all_vars = list(data.data_vars)
    excluded = [name[1:] for name in var_names if name.startswith('~') and
        name not in all_vars]
    tilde_vars = [name for name in all_vars if name.startswith('~')]
    if tilde_vars:
        warnings.warn(
            """ArviZ treats '~' as a negation character for variable selection.
Your model has variables names starting with '~', {0}. Please double check
your results to ensure all variables are included"""
            .format(', '.join(tilde_vars)))
    if excluded:
        var_names = [name for name in all_vars if name not in excluded]
    return var_names
| var_names | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(var_names, data):\n if var_names is not None:\n if isinstance(var_names, str):\n var_names = [var_names]\n if isinstance(data, (lis... |
def x(function=None, **kwargs):
    """Apply ``numba.jit`` to *function* when numba is installed.

    Usable both as a bare decorator (``@x``) and parameterized
    (``@x(nopython=True)``).  When numba cannot be imported the function is
    returned unchanged, so decorated code still runs (just uncompiled).

    Bug fixed: the inner helper was defined under one name but referenced
    as ``wrapper``, raising NameError on every call; it is now named
    consistently.

    Parameters
    ----------
    function : callable, optional
        The function to decorate; ``None`` when used with jit arguments.
    **kwargs
        Forwarded verbatim to ``numba.jit``.

    Returns
    -------
    callable
        The jitted function, the original function, or the decorator itself.
    """

    def wrapper(function):
        try:
            numba = importlib.import_module('numba')
            return numba.jit(**kwargs)(function)
        except ImportError:
            # numba is an optional dependency: silently fall back.
            return function
    if function:
        return wrapper(function)
    return wrapper
| conditional_jit | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(function=None, **kwargs):\n\n def x(function):\n try:\n numba = importlib.import_module('numba')\n return numba.jit(**kwargs)(f... |
def x(self):
    """Extract the module-level docstring of ``self.filename``.

    Side effects: sets ``self.docstring`` (docstring with any ``_thumb``
    directive line removed), ``self.short_desc`` (first paragraph),
    ``self.end_line`` (first line after the docstring in the original
    file), and ``self.thumbloc`` when a ``_thumb: X, Y`` directive is found.
    """
    lines = open(self.filename).readlines()
    start_row = 0
    # Drop a shebang line so tokenize sees valid Python from line one;
    # remember the offset to map token rows back to file rows.
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    line_iter = lines.__iter__()
    tokens = tokenize.generate_tokens(lambda : next(line_iter))
    # Scan tokens until the first "interesting" one; only a STRING here can
    # be the module docstring.
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            # NOTE(review): eval turns the raw string token (with quotes)
            # into its value — only safe because these are trusted example
            # files from the project itself.
            docstring = eval(tok_content)
            # Normalize trailing whitespace, then split into paragraphs on
            # blank lines; the first paragraph is the short description.
            paragraphs = '\n'.join(line.rstrip() for line in docstring.
                split('\n')).split('\n\n')
            if len(paragraphs) > 0:
                first_par = paragraphs[0]
        break
    # Look for an optional "_thumb: .X, .Y" directive inside the docstring.
    thumbloc = None
    for i, line in enumerate(docstring.split('\n')):
        m = re.match('^_thumb: (\\.\\d+),\\s*(\\.\\d+)', line)
        if m:
            thumbloc = float(m.group(1)), float(m.group(2))
            break
    if thumbloc is not None:
        self.thumbloc = thumbloc
        # Strip the directive line so it never shows up in rendered docs.
        docstring = '\n'.join([l for l in docstring.split('\n') if not l.
            startswith('_thumb')])
    self.docstring = docstring
    self.short_desc = first_par
    # erow is the docstring token's end row in the tokenized text; add the
    # shebang offset to recover the original file's line numbering.
    self.end_line = erow + 1 + start_row
| extract_docstring | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n doc... |
def x(values, values2=None, color='C0', kind='auto', cumulative=False,
    label=None, rotated=False, rug=False, bw=4.5, quantiles=None, contour=
    True, fill_last=True, textsize=None, plot_kwargs=None, fill_kwargs=None,
    rug_kwargs=None, contour_kwargs=None, hist_kwargs=None, ax=None):
    """Plot a distribution, dispatching on data kind.

    ``kind='auto'`` picks a histogram for integer data and a KDE for
    everything else.  Returns the matplotlib axes used.
    """
    if ax is None:
        ax = plt.gca()
    hist_kwargs = {} if hist_kwargs is None else hist_kwargs
    # Fill histogram defaults without clobbering caller-supplied values.
    hist_defaults = {
        'bins': None,
        'cumulative': cumulative,
        'color': color,
        'label': label,
        'rwidth': 0.9,
        'align': 'left',
        'density': True,
        'orientation': 'horizontal' if rotated else 'vertical',
    }
    for key, default in hist_defaults.items():
        hist_kwargs.setdefault(key, default)
    plot_kwargs = {} if plot_kwargs is None else plot_kwargs
    if kind == 'auto':
        kind = 'hist' if values.dtype.kind == 'i' else 'density'
    if kind == 'hist':
        _histplot_op(values=values, values2=values2, rotated=rotated, ax=ax,
            hist_kwargs=hist_kwargs)
    elif kind == 'density':
        plot_kwargs.setdefault('color', color)
        plot_kde(values, values2, cumulative=cumulative, rug=rug,
            label=label, bw=bw, quantiles=quantiles, rotated=rotated,
            contour=contour, legend=label is not None, fill_last=fill_last,
            textsize=textsize, plot_kwargs=plot_kwargs,
            fill_kwargs=fill_kwargs, rug_kwargs=rug_kwargs,
            contour_kwargs=contour_kwargs, ax=ax)
    return ax
| plot_dist | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(values, values2=None, color='C0', kind='auto', cumulative=False,\n label=None, rotated=False, rug=False, bw=4.5, quantiles=None, contour=\n True, fill_la... |
def x(values, values2, rotated, ax, hist_kwargs):
    """Draw a histogram of ``values`` on ``ax``.

    The 2-D case (``values2`` given) is not implemented yet.  Returns the
    axes for chaining.
    """
    if values2 is not None:
        raise NotImplementedError('Insert hexbin plot here')
    bins = hist_kwargs.pop('bins')
    if bins is None:
        bins = get_bins(values)
    ax.hist(values, bins=bins, **hist_kwargs)
    # Put a tick on every bin edge except the last, along the data axis.
    tick_setter = ax.set_yticks if rotated else ax.set_xticks
    tick_setter(bins[:-1])
    if hist_kwargs['label'] is not None:
        ax.legend()
    return ax
| histplot_op | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(values, values2, rotated, ax, hist_kwargs):\n if values2 is not None:\n raise NotImplementedError('Insert hexbin plot here')\n bins = hist_kwargs.... |
def x(data, filename, *, group='posterior', coords=None, dims=None):
    """Convert *data* to InferenceData and save it as a netCDF file.

    Returns the filename reported by ``InferenceData.to_netcdf``.
    """
    idata = convert_to_inference_data(data, group=group, coords=coords,
        dims=dims)
    return idata.to_netcdf(filename)
| to_netcdf | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(data, filename, *, group='posterior', coords=None, dims=None):\n inference_data = convert_to_inference_data(data, group=group, coords=\n coords, dims... |
def x(data, filename, *, group='posterior', coords=None, dims=None):
    """Deprecated alias for ``to_netcdf`` (kept for backward compatibility).

    Emits a DeprecationWarning, then delegates unchanged.
    """
    warnings.warn(
        "The 'save_data' function is deprecated as of 0.3.2, use 'to_netcdf' instead"
        , DeprecationWarning)
    return to_netcdf(data=data, filename=filename, group=group, coords=
        coords, dims=dims)
| save_data | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(data, filename, *, group='posterior', coords=None, dims=None):\n warnings.warn(\n \"The 'save_data' function is deprecated as of 0.3.2, use 'to_netcd... |
def x(ary):
    """Reshape *ary* to 2-D, keeping the leading axis and collapsing the
    rest into one column-major (Fortran-order) axis."""
    leading = np.atleast_1d(ary).shape[0]
    # Reshape the *original* ary (matching the original behavior), using
    # only the leading length taken from the atleast_1d view.
    return ary.reshape(leading, -1, order='F')
| make_2d | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ary):\n dim_0, *_ = np.atleast_1d(ary).shape\n return ary.reshape(dim_0, -1, order='F')\n",
"role": "user"
},
{
"content": "make_2d",
"ro... |
def x(figsize, textsize, rows=1, cols=1):
    """Scale figure size and text/line sizes consistently from rcParams.

    Returns ``((width, height), ax_labelsize, titlesize, xt_labelsize,
    linewidth, markersize)``.
    """
    rc = mpl.rcParams
    base_w, base_h = tuple(rc['figure.figsize'])
    label_sz = rc['axes.labelsize']
    title_sz = rc['axes.titlesize']
    tick_sz = rc['xtick.labelsize']
    line_w = rc['lines.linewidth']
    marker_sz = rc['lines.markersize']
    # rcParams may hold named sizes such as 'large'; substitute numbers.
    if isinstance(label_sz, str):
        label_sz = 15
    if isinstance(title_sz, str):
        title_sz = 16
    if isinstance(tick_sz, str):
        tick_sz = 14
    if figsize is None:
        # Grow multi-panel figures slightly beyond a straight tiling.
        sff = 1 if rows == cols == 1 else 1.15
        width = base_w * cols * sff
        height = base_h * rows * sff
    else:
        width, height = figsize
    if textsize is not None:
        scale = textsize / tick_sz
    elif rows == cols == 1:
        scale = (width * height / (base_w * base_h)) ** 0.5
    else:
        scale = 1
    return ((width, height), label_sz * scale, title_sz * scale, tick_sz *
        scale, line_w * scale, marker_sz * scale)
| scale_fig_size | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(figsize, textsize, rows=1, cols=1):\n params = mpl.rcParams\n rc_width, rc_height = tuple(params['figure.figsize'])\n rc_ax_labelsize = params['axes.l... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.