code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
margin = compress_pruned( self._slice.margin( axis=None, weighted=False, include_transforms_for_dims=self._hs_dims, prune=self._prune, ) ) mask = margin < self._size if margin.shape == self._shape: return mask if self._slice.dim_types[0] == DT.MR: # If the margin is a column vector - broadcast it's mask to the array shape return np.logical_or(np.zeros(self._shape, dtype=bool), mask[:, None]) return np.logical_or(np.zeros(self._shape, dtype=bool), mask)
def table_mask(self)
ndarray, True where table margin <= min_base_size, same shape as slice.
8.673863
7.705242
1.125709
# TODO: Figure out how to intersperse pairwise objects for columns # that represent H&S return [ _ColumnPairwiseSignificance( self._slice, col_idx, self._axis, self._weighted, self._alpha, self._only_larger, self._hs_dims, ) for col_idx in range(self._slice.get_shape(hs_dims=self._hs_dims)[1]) ]
def values(self)
list of _ColumnPairwiseSignificance tests. Result has as many elements as there are columns in the slice. Each significance test contains `p_vals` and `t_stats` significance tests.
9.957018
6.923354
1.438178
return np.array([sig.pairwise_indices for sig in self.values]).T
def pairwise_indices(self)
ndarray containing tuples of pairwise indices.
11.346245
9.804601
1.157237
summary_pairwise_indices = np.empty( self.values[0].t_stats.shape[1], dtype=object ) summary_pairwise_indices[:] = [ sig.summary_pairwise_indices for sig in self.values ] return summary_pairwise_indices
def summary_pairwise_indices(self)
ndarray containing tuples of pairwise indices for the column summary.
4.139592
3.572109
1.158865
if self._heuristic_score is None: matches = self.heuristic() self._heuristic_score = float(sum(matches)) / float(len(matches)) return self._heuristic_score
def score(self)
Calculate and return a heuristic score for this Parser against the provided script source and path. This is used to order the ArgumentParsers as "most likely to work" against a given script/source file. Each parser has a calculate_score() function that returns a list of booleans representing the matches against conditions. This is converted into a % match and used to sort parse engines. :return: float
4.083982
3.599731
1.134524
simulation = self.survey_scenario.simulation holder = simulation.get_holder(self.weight_name) holder.array = numpy.array(self.initial_weight, dtype = holder.variable.dtype)
def reset(self)
Reset the calibration to its initial state
11.571556
11.134177
1.039283
self.survey_scenario = survey_scenario # TODO deal with baseline if reform is present if survey_scenario.simulation is None: survey_scenario.simulation = survey_scenario.new_simulation() period = self.period self.filter_by = filter_by = survey_scenario.calculate_variable( variable = self.filter_by_name, period = period) # TODO: shoud not be france specific self.weight_name = weight_name = self.survey_scenario.weight_column_name_by_entity['menage'] self.initial_weight_name = weight_name + "_ini" self.initial_weight = initial_weight = survey_scenario.calculate_variable( variable = weight_name, period = period) self.initial_total_population = sum(initial_weight * filter_by) self.weight = survey_scenario.calculate_variable(variable = weight_name, period = period)
def _set_survey_scenario(self, survey_scenario)
Set survey scenario :param survey_scenario: the survey scenario
5.153721
5.561922
0.926608
if parameter == 'lo': self.parameters['lo'] = 1 / value else: self.parameters[parameter] = value
def set_parameters(self, parameter, value)
Set parameters value :param parameter: the parameter to be set :param value: the value used to set the parameter
5.316044
7.371407
0.721171
# Select only filtered entities assert self.initial_weight_name is not None data = pd.DataFrame() data[self.initial_weight_name] = self.initial_weight * self.filter_by for variable in self.margins_by_variable: if variable == 'total_population': continue assert variable in self.survey_scenario.tax_benefit_system.variables period = self.period data[variable] = self.survey_scenario.calculate_variable(variable = variable, period = period) return data
def _build_calmar_data(self)
Builds the data dictionary used as calmar input argument
7.78891
7.760944
1.003603
data = self._build_calmar_data() assert self.initial_weight_name is not None parameters['initial_weight'] = self.initial_weight_name val_pondfin, lambdasol, updated_margins = calmar( data, margins, **parameters) # Updating only afetr filtering weights self.weight = val_pondfin * self.filter_by + self.weight * (logical_not(self.filter_by)) return updated_margins
def _update_weights(self, margins, parameters = {})
Run calmar, stores new weights and returns adjusted margins
12.062605
10.272705
1.174238
period = self.period survey_scenario = self.survey_scenario assert survey_scenario.simulation is not None for simulation in [survey_scenario.simulation, survey_scenario.baseline_simulation]: if simulation is None: continue simulation.set_input(self.weight_name, period, self.weight)
def set_calibrated_weights(self)
Modify the weights to use the calibrated weights
6.500571
6.698923
0.97039
actions = set() if isinstance(action, argparse._AppendAction): actions.add(SPECIFY_EVERY_PARAM) return actions
def get_parameter_action(action)
To foster a general schema that can accommodate multiple parsers, the general behavior here is described rather than the specific language of a given parser. For instance, the 'append' action of an argument is collapsing each argument given to a single argument. It also returns a set of actions as well, since presumably some actions can impact multiple parameter options
12.392835
8.221109
1.507441
import feather assert receiver is not None and donor is not None assert matching_variables is not None temporary_directory_path = os.path.join(config_files_directory, 'tmp') assert os.path.exists(temporary_directory_path) receiver_path = os.path.join(temporary_directory_path, 'receiver.feather') donor_path = os.path.join(temporary_directory_path, 'donor.feather') feather.write_dataframe(receiver, receiver_path) feather.write_dataframe(donor, donor_path) if isinstance(matching_variables, str): match_vars = '"{}"'.format(matching_variables) elif len(matching_variables) == 1: match_vars = '"{}"'.format(matching_variables[0]) else: match_vars = '"{}"'.format('todo') r_script = .format( receiver_path = receiver_path, donor_path = donor_path, match_vars = match_vars, z_variables = z_variables, ) print(r_script)
def nnd_hotdeck_using_feather(receiver = None, donor = None, matching_variables = None, z_variables = None)
Not working
2.116979
2.100488
1.007851
return np.array( [Pfaffian(self, val).value for i, val in np.ndenumerate(self._chisq)] ).reshape(self._chisq.shape)
def wishart_pfaffian(self)
ndarray of wishart pfaffian CDF, before normalization
6.218621
5.868048
1.059743
return np.full(self.n_min, self.size - 1, dtype=np.int)
def other_ind(self)
last row or column of square A
9.968516
8.435347
1.181755
K1 = np.float_power(pi, 0.5 * self.n_min * self.n_min) K1 /= ( np.float_power(2, 0.5 * self.n_min * self._n_max) * self._mgamma(0.5 * self._n_max, self.n_min) * self._mgamma(0.5 * self.n_min, self.n_min) ) K2 = np.float_power( 2, self.alpha * self.size + 0.5 * self.size * (self.size + 1) ) for i in xrange(self.size): K2 *= gamma(self.alpha + i + 1) return K1 * K2
def K(self)
Normalizing constant for wishart CDF.
3.263257
3.062282
1.065629
wishart = self._wishart_cdf # Prepare variables for integration algorithm A = self.A p = self._gammainc_a g = gamma(wishart.alpha_vec) q_ind = np.arange(2 * wishart.n_min - 2) q_vec = 2 * wishart.alpha + q_ind + 2 q = np.float_power(0.5, q_vec) * gamma(q_vec) * gammainc(q_vec, self._chisq_val) # Perform integration (i.e. calculate Pfaffian CDF) for i in xrange(wishart.n_min): # TODO consider index tricks instead of iteration here b = 0.5 * p[i] * p[i] for j in xrange(i, wishart.n_min - 1): b -= q[i + j] / (g[i] * g[j + 1]) A[j + 1, i] = p[i] * p[j + 1] - 2 * b A[i, j + 1] = -A[j + 1, i] if np.any(np.isnan(A)): return 0 return np.sqrt(det(A))
def value(self)
return float Cumulative Distribution Function. The return value represents a floating point number of the CDF of the largest eigenvalue of a Wishart(n, p) evaluated at chisq_val.
5.528644
5.292988
1.044522
wishart = self._wishart_cdf base = np.zeros([wishart.size, wishart.size]) if wishart.n_min % 2: # If matrix has odd number of elements, we need to append a # row and a col, in order for the pfaffian algorithm to work base = self._make_size_even(base) return base
def A(self)
ndarray - a skew-symmetric matrix for integrating the target distribution
11.976111
10.918336
1.096881
return cls()._data(cube, weighted, prune)
def data(cls, cube, weighted, prune)
Return ndarray representing table index by margin.
7.710364
6.602339
1.167823
result = [] for slice_ in cube.slices: if cube.has_mr: return self._mr_index(cube, weighted, prune) num = slice_.margin(axis=0, weighted=weighted, prune=prune) den = slice_.margin(weighted=weighted, prune=prune) margin = num / den proportions = slice_.proportions(axis=1, weighted=weighted, prune=prune) result.append(proportions / margin) if len(result) == 1 and cube.ndim < 3: result = result[0] else: if prune: mask = np.array([slice_.mask for slice_ in result]) result = np.ma.masked_array(result, mask) else: result = np.array(result) return result
def _data(self, cube, weighted, prune)
ndarray representing table index by margin.
3.331643
3.218545
1.03514
if weights is None: weights = ones(len(values)) df = pd.DataFrame({'x': values, 'w': weights}) df = df.sort_values(by='x') x = df['x'] w = df['w'] wx = w * x cdf = cumsum(wx) - 0.5 * wx numerator = (w * cdf).sum() denominator = ((wx).sum()) * (w.sum()) gini = 1 - 2 * (numerator / denominator) return gini
def gini(values, weights = None, bin_size = None)
Gini coefficient (normalized to 1) Using fastgini formula : i=N j=i SUM W_i*(SUM W_j*X_j - W_i*X_i/2) i=1 j=1 G = 1 - 2* ---------------------------------- i=N i=N SUM W_i*X_i * SUM W_i i=1 i=1 where observations are sorted in ascending order of X. From http://fmwww.bc.edu/RePec/bocode/f/fastgini.html
2.757661
2.810901
0.981059
from scipy.integrate import simps if weights is None: weights = ones(len(values)) # sign = -1 # if tax == True: # sign = -1 # else: # sign = 1 PLCx, PLCy = pseudo_lorenz(values, ineq_axis, weights) LCx, LCy = lorenz(ineq_axis, weights) del PLCx return simps((LCy - PLCy), LCx)
def kakwani(values, ineq_axis, weights = None)
Computes the Kakwani index
5.169709
5.025382
1.02872
if weights is None: weights = ones(len(values)) df = pd.DataFrame({'v': values, 'w': weights}) df = df.sort_values(by = 'v') x = cumsum(df['w']) x = x / float(x[-1:]) y = cumsum(df['v'] * df['w']) y = y / float(y[-1:]) return x, y
def lorenz(values, weights = None)
Computes Lorenz Curve coordinates
2.52523
2.544411
0.992461
return cls._factory(slice_, axis, weighted).pvals
def pvals(cls, slice_, axis=0, weighted=True)
Wishart CDF values for slice columns as square ndarray. Wishart CDF (Cumulative Distribution Function) is calculated to determine statistical significance of slice columns, in relation to all other columns. These values represent the answer to the question "How much is a particular column different from each other column in the slice".
12.054926
23.365568
0.515927
n = self._element_count chi_squared = np.zeros([n, n]) for i in xrange(1, n): for j in xrange(0, n - 1): denominator = 1 / margin[i] + 1 / margin[j] chi_squared[i, j] = chi_squared[j, i] = ( np.sum(np.square(proportions[:, i] - proportions[:, j]) / observed) / denominator ) return chi_squared
def _chi_squared(self, proportions, margin, observed)
return ndarray of chi-squared measures for proportions' columns. *proportions* (ndarray): The basis of chi-squared calculations *margin* (ndarray): Column margin for proportions (See `def _margin`) *observed* (ndarray): Row margin proportions (See `def _observed`)
2.795166
2.754245
1.014857
return self._intersperse_insertion_rows_and_columns( 1.0 - WishartCDF(pairwise_chisq, self._n_min, self._n_max).values )
def _pvals_from_chi_squared(self, pairwise_chisq)
return statistical significance for props' columns. *pairwise_chisq* (ndarray) Matrix of chi-squared values (bases for Wishart CDF)
11.940903
11.903934
1.003106
if slice_.dim_types[0] == DT.MR_SUBVAR: return _MrXCatPairwiseSignificance(slice_, axis, weighted) return _CatXCatPairwiseSignificance(slice_, axis, weighted)
def _factory(slice_, axis, weighted)
return subclass for PairwiseSignificance, based on slice dimension types.
11.37481
6.447767
1.764147
for i in self._insertion_indices: pairwise_pvals = np.insert(pairwise_pvals, i, np.nan, axis=0) pairwise_pvals = np.insert(pairwise_pvals, i, np.nan, axis=1) return pairwise_pvals
def _intersperse_insertion_rows_and_columns(self, pairwise_pvals)
Return pvals matrix with inserted NaN rows and columns, as numpy.ndarray. Each insertion (a header or a subtotal) creates an offset in the calculated pvals. These need to be taken into account when converting each pval to a corresponding column letter. For this reason, we need to insert an all-NaN row and a column at the right indices. These are the inserted indices of each insertion, along respective dimensions.
1.834433
1.713485
1.070586
off_axis = 1 - self._axis return self._slice.margin(axis=off_axis, include_mr_cat=self._include_mr_cat)
def _opposite_axis_margin(self)
ndarray representing margin along the axis opposite of self._axis In the process of calculating p-values for the column significance testing we need both the margin along the primary axis and the percentage margin along the opposite axis.
12.920722
12.050739
1.072193
return self._slice.proportions( axis=self._axis, include_mr_cat=self._include_mr_cat )
def _proportions(self)
ndarray representing slice proportions along correct axis.
11.724313
7.745131
1.513766
return self._chi_squared(self._proportions, self._margin, self._observed)
def _pairwise_chisq(self)
Pairwise comparisons (Chi-Square) along axis, as numpy.ndarray. Returns a square, symmetric matrix of test statistics for the null hypothesis that each vector along *axis* is equal to each other.
15.101513
23.991819
0.629444
return [ self._chi_squared( mr_subvar_proportions, self._margin[idx], self._opposite_axis_margin[idx] / np.sum(self._opposite_axis_margin[idx]), ) for (idx, mr_subvar_proportions) in enumerate(self._proportions) ]
def _pairwise_chisq(self)
Pairwise comparisons (Chi-Square) along axis, as numpy.ndarray. Returns a list of square and symmetric matrices of test statistics for the null hypothesis that each vector along *axis* is equal to each other.
5.932135
6.499285
0.912737
try: # Parse with --help to enforce exit usage_sections = docopt.docopt(self.parser, ['--help']) except SystemExit as e: parser = inspect.trace()[-2][0].f_locals ''' docopt represents all values as strings and doesn't automatically cast, we probably want to do some testing to see if we can convert the default value (Option.value) to a particular type. ''' def guess_type(s): try: v = float(s) v = int(s) v = s except ValueError: pass return type(v) self.script_groups = ['Arguments'] self.nodes = OrderedDict() self.containers = OrderedDict() self.containers['default'] = [] for option in parser['options']: if option.long in ['--help', '--version']: continue option.type = guess_type(option.value) option_name = option.long.strip('-') node = DocOptNode(option_name, option=option) self.nodes[option_name] = node self.containers['default'].append(option_name) self.class_name = os.path.splitext(os.path.basename(self.script_path))[0] self.script_path = self.script_path self.script_description = self.parser
def process_parser(self)
We can't use the exception catch trick for docopt because the module prevents access to its innards __all__ = ['docopt']. Instead call with --help enforced, catch sys.exit and work up to the calling docopt function to pull out the elements. This is horrible. :return:
5.236472
4.970145
1.053585
unique_val_list = unique(data) output = {} for val in unique_val_list: output[val] = (data == val) return output
def build_dummies_dict(data)
Return a dict with unique values as keys and vectors as values
3.714393
3.450897
1.076356
try: ca_ind = self.dim_types.index(DT.CA_SUBVAR) return 1 - ca_ind except ValueError: return None
def ca_main_axis(self)
For univariate CA, the main axis is the categorical axis
10.567509
7.699182
1.37255
if self.ndim != 2: return False return all(dt in DT.ALLOWED_PAIRWISE_TYPES for dt in self.dim_types)
def can_compare_pairwise(self)
Return bool indicating if slice can compute pairwise comparisons. Currently, only the CAT x CAT slice can compute pairwise comparisons. This also includes the categorical array categories dimension (CA_CAT).
10.032195
9.217634
1.08837
if not prune: return self.as_array(include_transforms_for_dims=hs_dims).shape shape = compress_pruned( self.as_array(prune=True, include_transforms_for_dims=hs_dims) ).shape # Eliminate dimensions that get reduced to 1 # (e.g. single element categoricals) return tuple(n for n in shape if n > 1)
def get_shape(self, prune=False, hs_dims=None)
Tuple of array dimensions' lengths. It returns a tuple of ints, each representing the length of a cube dimension, in the order those dimensions appear in the cube. Pruning is supported. Dimensions that get reduced to a single element (e.g. due to pruning) are removed from the returning shape, thus allowing for the differentiation between true 2D cubes (over which statistical testing can be performed) and essentially 1D cubes (over which it can't). Usage: >>> shape = get_shape() >>> pruned_shape = get_shape(prune=True)
6.389539
7.390684
0.86454
proportions = self.proportions(axis=axis) baseline = ( baseline if baseline is not None else self._prepare_index_baseline(axis) ) # Fix the shape to enable correct broadcasting if ( axis == 0 and len(baseline.shape) <= 1 and self.ndim == len(self.get_shape()) ): baseline = baseline[:, None] indexes = proportions / baseline * 100 return self._apply_pruning_mask(indexes) if prune else indexes
def index_table(self, axis=None, baseline=None, prune=False)
Return index percentages for a given axis and baseline. The index values represent the difference of the percentages to the corresponding baseline values. The baseline values are the univariate percentages of the corresponding variable.
5.128181
4.882817
1.050251
if self.ca_as_0th: labels = self._cube.labels(include_transforms_for_dims=hs_dims)[1:] else: labels = self._cube.labels(include_transforms_for_dims=hs_dims)[-2:] if not prune: return labels def prune_dimension_labels(labels, prune_indices): labels = [label for label, prune in zip(labels, prune_indices) if not prune] return labels labels = [ prune_dimension_labels(dim_labels, dim_prune_inds) for dim_labels, dim_prune_inds in zip(labels, self._prune_indices(hs_dims)) ] return labels
def labels(self, hs_dims=None, prune=False)
Get labels for the cube slice, and perform pruning by slice.
3.823307
3.579165
1.068212
axis = self._calculate_correct_axis_for_cube(axis) hs_dims = self._hs_dims_for_cube(include_transforms_for_dims) margin = self._cube.margin( axis=axis, weighted=weighted, include_missing=include_missing, include_transforms_for_dims=hs_dims, prune=prune, include_mr_cat=include_mr_cat, ) return self._extract_slice_result_from_cube(margin)
def margin( self, axis=None, weighted=True, include_missing=False, include_transforms_for_dims=None, prune=False, include_mr_cat=False, )
Return ndarray representing slice margin across selected axis. A margin (or basis) can be calculated for a contingency table, provided that the dimensions of the desired directions are marginable. The dimensions are marginable if they represent mutually exclusive data, such as true categorical data. For array types the items dimensions are not marginable. Requesting a margin across these dimensions (e.g. slice.margin(axis=0) for a categorical array cube slice) will produce an error. For multiple response slices, the implicit convention is that the provided direction scales to the selections dimension of the slice. These cases produce meaningful data, but of a slightly different shape (e.g. slice.margin(0) for a MR x CAT slice will produce 2D ndarray (variable dimensions are never collapsed!)). :param axis: Axis across which to sum. Can be 0 (columns margin), 1 (rows margin) and None (table margin). If requested across variables dimension (e.g. requesting 0 margin for CA array) it will produce an error. :param weighted: Weighted or unweighted counts. :param include_missing: Include missing categories or not. :param include_transforms_for_dims: Indices of dimensions for which to include transformations :param prune: Perform pruning based on unweighted counts. :returns: (weighted or unweighted counts) summed across provided axis. For multiple response types, items dimensions are not collapsed.
3.269297
3.208903
1.018821
return MinBaseSizeMask(self, size, hs_dims=hs_dims, prune=prune)
def min_base_size_mask(self, size, hs_dims=None, prune=False)
Returns MinBaseSizeMask object with correct row, col and table masks. The returned object stores the necessary information about the base size, as well as about the base values. It can create corresponding masks in the row, column, and table directions, based on the corresponding base values (the values of the unweighted margins). Usage: >>> cube_slice = CrunchCube(response).slices[0] # obtain a valid cube slice >>> cube_slice.min_base_size_mask(30).row_mask >>> cube_slice.min_base_size_mask(50).column_mask >>> cube_slice.min_base_size_mask(22).table_mask
3.507439
4.753248
0.737904
mr_dim_ind = self._cube.mr_dim_ind if self._cube.ndim == 3: if isinstance(mr_dim_ind, int): if mr_dim_ind == 0: # If only the 0th dimension of a 3D is an MR, the sliced # don't actuall have the MR... Thus return None. return None return mr_dim_ind - 1 elif isinstance(mr_dim_ind, tuple): # If MR dimension index is a tuple, that means that the cube # (only a 3D one if it reached this path) has 2 MR dimensions. # If any of those is 0 ind dimension we don't need to include # in the slice dimension (because the slice doesn't see the tab # that it's on). If it's 1st and 2nd dimension, then subtract 1 # from those, and present them as 0th and 1st dimension of the # slice. This can happend e.g. in a CAT x MR x MR cube (which # renders MR x MR slices). mr_dim_ind = tuple(i - 1 for i in mr_dim_ind if i) return mr_dim_ind if len(mr_dim_ind) > 1 else mr_dim_ind[0] return mr_dim_ind
def mr_dim_ind(self)
Get the correct index of the MR dimension in the cube slice.
6.432319
6.103087
1.053945
scale_means = self._cube.scale_means(hs_dims, prune) if self.ca_as_0th: # If slice is used as 0th CA, then we need to observe the 1st dimension, # because the 0th dimension is CA items, which is only used for slicing # (and thus doesn't have numerical values, and also doesn't constitute any # dimension of the actual crosstabs that will be created in this case). scale_means = scale_means[0][-1] if scale_means is None: return [None] return [scale_means[self._index]] return scale_means[self._index]
def scale_means(self, hs_dims=None, prune=False)
Return list of column and row scaled means for this slice. If a row/col doesn't have numerical values, return None for the corresponding dimension. If a slice only has 1D, return only the column scaled mean (as numpy array). If both row and col scaled means are present, return them as two numpy arrays inside of a list.
9.282194
8.370766
1.108882
if self._cube.ndim < 3 and not self.ca_as_0th: return None title = self._cube.name table_name = self._cube.labels()[0][self._index] return "%s: %s" % (title, table_name)
def table_name(self)
Get slice name. In case of 2D return cube name. In case of 3D, return the combination of the cube name with the label of the corresponding slice (nth label of the 0th dimension).
9.114209
7.363985
1.237674
if axis != 0: raise NotImplementedError("Pairwise comparison only implemented for colums") return WishartPairwiseSignificance.pvals(self, axis=axis)
def wishart_pairwise_pvals(self, axis=0)
Return square symmetric matrix of pairwise column-comparison p-values. Square, symmetric matrix along *axis* of pairwise p-values for the null hypothesis that col[i] = col[j] for each pair of columns. *axis* (int): axis along which to perform comparison. Only columns (0) are implemented currently.
6.116395
7.04345
0.868381
stats = self.zscore(weighted=weighted, prune=prune, hs_dims=hs_dims) pvals = 2 * (1 - norm.cdf(np.abs(stats))) return self._apply_pruning_mask(pvals, hs_dims) if prune else pvals
def pvals(self, weighted=True, prune=False, hs_dims=None)
Return 2D ndarray with calculated P values This function calculates statistically significant cells for categorical contingency tables under the null hypothesis that the row and column variables are independent (uncorrelated). The values are calculated for 2D tables only. :param weighted: Use weighted counts for zscores :param prune: Prune based on unweighted counts :param hs_dims: Include headers and subtotals (as NaN values) :returns: 2 or 3 Dimensional ndarray, representing the p-values for each cell of the table-like representation of the crunch cube.
3.649156
4.649496
0.78485
counts = self.as_array(weighted=weighted) total = self.margin(weighted=weighted) colsum = self.margin(axis=0, weighted=weighted) rowsum = self.margin(axis=1, weighted=weighted) zscore = self._calculate_std_res(counts, total, colsum, rowsum) if hs_dims: zscore = intersperse_hs_in_std_res(self, hs_dims, zscore) if prune: return self._apply_pruning_mask(zscore, hs_dims) return zscore
def zscore(self, weighted=True, prune=False, hs_dims=None)
Return ndarray with slices's standardized residuals (Z-scores). (Only applicable to a 2D contingency tables.) The Z-score or standardized residual is the difference between observed and expected cell counts if row and column variables were independent divided by the residual cell variance. They are assumed to come from a N(0,1) or standard Normal distribution, and can show which cells deviate from the null hypothesis that the row and column variables are uncorrelated. See also *pairwise_chisq*, *pairwise_pvals* for a pairwise column- or row-based test of statistical significance. :param weighted: Use weighted counts for zscores :param prune: Prune based on unweighted counts :param hs_dims: Include headers and subtotals (as NaN values) :returns zscore: ndarray representing cell standardized residuals (Z)
4.273972
4.278886
0.998852
return PairwiseSignificance( self, alpha=alpha, only_larger=only_larger, hs_dims=hs_dims ).pairwise_indices
def pairwise_indices(self, alpha=0.05, only_larger=True, hs_dims=None)
Indices of columns where p < alpha for column-comparison t-tests Returns an array of tuples of columns that are significant at p<alpha, from a series of pairwise t-tests. Argument both_pairs returns indices strictly on the test statistic. If False, however, only the index of values *significantly smaller* than each cell are indicated.
3.107242
3.941356
0.788369
if self.mr_dim_ind == 0: # --This is a special case where broadcasting cannot be # --automatically done. We need to "inflate" the single dimensional # --ndarrays, to be able to treat them as "columns" (essentially a # --Nx1 ndarray). This is needed for subsequent multiplication # --that needs to happen column wise (rowsum * colsum) / total. total = total[:, np.newaxis] rowsum = rowsum[:, np.newaxis] expected_counts = rowsum * colsum / total variance = rowsum * colsum * (total - rowsum) * (total - colsum) / total ** 3 return (counts - expected_counts) / np.sqrt(variance)
def _array_type_std_res(self, counts, total, colsum, rowsum)
Return ndarray containing standard residuals for array values. The shape of the return value is the same as that of *counts*. Array variables require special processing because of the underlying math. Essentially, it boils down to the fact that the variable dimensions are mutually independent, and standard residuals are calculated for each of them separately, and then stacked together in the resulting array.
7.464437
7.428878
1.004786
if set(self.dim_types) & DT.ARRAY_TYPES: # ---has-mr-or-ca--- return self._array_type_std_res(counts, total, colsum, rowsum) return self._scalar_type_std_res(counts, total, colsum, rowsum)
def _calculate_std_res(self, counts, total, colsum, rowsum)
Return ndarray containing standard residuals. The shape of the return value is the same as that of *counts*.
7.25081
7.694875
0.942291
if self._cube.ndim < 3: if self.ca_as_0th and axis is None: # Special case for CA slices (in multitables). In this case, # we need to calculate a measurement across CA categories # dimension (and not across items, because it's not # allowed). The value for the axis parameter of None, would # imply both cat and items dimensions, and we don't want that. return 1 return axis # Expected usage of the 'axis' parameter from CubeSlice is 0, 1, or # None. CrunchCube handles all other logic. The only 'smart' thing # about the handling here, is that the axes are increased for 3D cubes. # This way the 3Dness is hidden from the user and he still sees 2D # crosstabs, with col and row axes (0 and 1), which are transformed to # corresponding numbers in case of 3D cubes (namely 1 and 2). In the # case of None, we need to analyze across all valid dimensions, and the # CrunchCube takes care of that (no need to update axis if it's None). # If the user provides a tuple, it's considered that he "knows" what # he's doing, and the axis argument is not updated in this case. if isinstance(axis, int): axis += 1 return axis
def _calculate_correct_axis_for_cube(self, axis)
Return correct axis for cube, based on ndim. If cube has 3 dimensions, increase axis by 1. This will translate the default 0 (cols direction) and 1 (rows direction) to actual 1 (cols direction) and 2 (rows direction). This is needed because the 0th dimension of the 3D cube is only used to slice across. The actual margins need to be calculated for each slice separately, and since they're implemented as an ndarray, the direction needs to be increased by one. For the value of `None`, don't modify the axis parameter. :param axis: 0, 1, or None. Axis that will be passed to self._cube methods. If the cube is 3D, the axis is typically increased by 1, to represent correct measure direction. :returns: int or None, representing the updated axis to pass to cube
13.767962
12.409173
1.109499
expected_counts = expected_freq(counts) residuals = counts - expected_counts variance = ( np.outer(rowsum, colsum) * np.outer(total - rowsum, total - colsum) / total ** 3 ) return residuals / np.sqrt(variance)
def _scalar_type_std_res(self, counts, total, colsum, rowsum)
Return ndarray containing standard residuals for category values. The shape of the return value is the same as that of *counts*.
4.452149
3.974455
1.120191
means = [] table = self._slice.as_array() products = self._inner_prods(table, self.values) for axis, product in enumerate(products): if product is None: means.append(product) continue # Calculate means valid_indices = self._valid_indices(axis) num = np.sum(product[valid_indices], axis) den = np.sum(table[valid_indices], axis) mean = num / den if not isinstance(mean, np.ndarray): mean = np.array([mean]) means.append(mean) return means
def data(self)
list of mean numeric values of categorical responses.
4.327613
4.020605
1.076359
def margin(self, axis):
    """Return marginal value of the current slice scale means.

    This value is the same as what you would get from a single variable
    (constituting a 2D cube/slice) when the "non-missing" filter of the
    opposite variable is applied. This behavior is consistent with what
    is visible in the front-end client.
    """
    if self._slice.ndim < 2:
        msg = (
            "Scale Means marginal cannot be calculated on 1D cubes, as"
            "the scale means already get reduced to a scalar value."
        )
        raise ValueError(msg)

    # scale values live on the dimension opposite to *axis*
    dimension_index = 1 - axis
    margin = self._slice.margin(axis=axis)
    if len(margin.shape) > 1:
        # NOTE(review): for MR dimensions the margin comes back with an
        # extra axis; this appears to select the first slab -- confirm.
        index = [
            0 if d.dimension_type == DT.MR else slice(None)
            for d in self._slice.dimensions
        ]
        margin = margin[index]

    total = np.sum(margin)
    values = self.values[dimension_index]
    if values is None:
        # opposite dimension has no numeric values -- no scale-mean margin
        return None
    # weighted average of the scale values, weighted by the margin
    return np.sum(values * margin) / total
Return marginal value of the current slice scaled means. This value is the the same what you would get from a single variable (constituting a 2D cube/slice), when the "non-missing" filter of the opposite variable would be applied. This behavior is consistent with what is visible in the front-end client.
6.170323
5.328402
1.158006
def values(self):
    """list of ndarray value-ids for each dimension in slice.

    Each entry is an ndarray of that dimension's numeric values, or None
    when the dimension has no numeric values or only NaN values.
    """
    dimension_values = []
    for dim in self._slice.dimensions:
        numeric_values = dim.numeric_values
        has_real_values = bool(numeric_values) and any(~np.isnan(numeric_values))
        dimension_values.append(
            np.array(numeric_values) if has_real_values else None
        )
    return dimension_values
list of ndarray value-ids for each dimension in slice. The values for each dimension appear as an ndarray. None appears instead of the array for each dimension having only NaN values.
6.144017
5.038944
1.219306
def compress_pruned(table):
    """Compress *table* based on its pruning mask.

    Rows/cols are dropped only when every one of their elements is
    masked. Plain (non-masked) inputs are returned unchanged.
    """
    if not isinstance(table, np.ma.core.MaskedArray):
        return table
    if table.ndim == 0:
        # 0D masked scalar -- unwrap the raw value
        return table.data
    if table.ndim == 1:
        return np.ma.compressed(table)

    # 2D: keep any row/col that has at least one unmasked element
    keep_rows = ~table.mask.all(axis=1)
    keep_cols = ~table.mask.all(axis=0)
    table = table[keep_rows, :][:, keep_cols]
    if table.dtype == float and table.mask.any():
        # residual masked cells in kept rows/cols become NaN
        table[table.mask] = np.nan
    return table
Compress table based on pruning mask. Only the rows/cols in which all of the elements are masked need to be pruned.
2.415282
2.486671
0.971292
def intersperse_hs_in_std_res(slice_, hs_dims, res):
    """Insert placeholder (NaN) rows/cols for H&S insertions into *res*."""
    for dim_idx, anchor_idxs in enumerate(slice_.inserted_hs_indices()):
        if dim_idx not in hs_dims:
            continue
        # Negative axis addresses the dimension from the end, which keeps
        # it correct regardless of any leading (slicing) dimensions.
        axis = dim_idx - slice_.ndim
        for anchor_idx in anchor_idxs:
            res = np.insert(res, anchor_idx, np.nan, axis=axis)
    return res
Perform the insertions of place-holding rows and cols for insertions.
4.695099
4.515057
1.039876
def inflate_parameter_leaf(sub_parameter, base_year, inflator, unit_type = 'unit'):
    """Inflate a Parameter leaf according to its unit type.

    The basic 'unit' type is assumed by default; other admissible unit
    types are 'threshold_unit' and 'rate_unit'.

    :param sub_parameter: parameter leaf (or Scale) to inflate in place
    :param base_year: last year whose values are kept; later values are
        replaced by the inflated base-year value
    :param inflator: inflation rate applied to the base-year value
    :param unit_type: 'unit' (default), 'threshold_unit' or 'rate_unit'
    """
    if isinstance(sub_parameter, Scale):
        if unit_type == 'threshold_unit':
            # Inflate each bracket threshold of the scale recursively
            for bracket in sub_parameter.brackets:
                threshold = bracket.children['threshold']
                inflate_parameter_leaf(threshold, base_year, inflator)
            return
    else:
        # Remove new values for year > base_year
        kept_instants_str = [
            parameter_at_instant.instant_str
            for parameter_at_instant in sub_parameter.values_list
            if periods.instant(parameter_at_instant.instant_str).year <= base_year
            ]
        if not kept_instants_str:
            # no value at or before base_year -- nothing to inflate
            return

        last_admissible_instant_str = max(kept_instants_str)
        # re-anchor the parameter on the last value known at base_year
        sub_parameter.update(
            start = last_admissible_instant_str,
            value = sub_parameter(last_admissible_instant_str)
            )
        restricted_to_base_year_value_list = [
            parameter_at_instant for parameter_at_instant in sub_parameter.values_list
            if periods.instant(parameter_at_instant.instant_str).year == base_year
            ]
        # When value is changed in the base year
        if restricted_to_base_year_value_list:
            for parameter_at_instant in reversed(restricted_to_base_year_value_list):
                if parameter_at_instant.instant_str.startswith(str(base_year)):
                    value = (
                        parameter_at_instant.value * (1 + inflator)
                        if parameter_at_instant.value is not None
                        else None
                        )
                    sub_parameter.update(
                        start = parameter_at_instant.instant_str.replace(
                            str(base_year), str(base_year + 1)
                            ),
                        value = value,
                        )
        # Or use the value at that instant even when it is defined earlier than the base year
        else:
            value = (
                sub_parameter("{}-12-31".format(base_year)) * (1 + inflator)
                if sub_parameter("{}-12-31".format(base_year)) is not None
                else None
                )
            sub_parameter.update(
                start = "{}-01-01".format(base_year + 1),
                value = value
                )
Inflate a Parameter leaf according to unit type Basic unit type are supposed by default Other admissible unit types are threshold_unit and rate_unit
2.943833
2.842006
1.035829
def calculate_variable(self, variable = None, period = None, use_baseline = False):
    """Compute and return the values of *variable* for *period*.

    :param variable: name of the variable to compute
    :param period: period (or period string) to compute it for
    :param use_baseline: when True use the baseline simulation,
        otherwise the (reform) simulation
    :raises AssertionError: for an unknown variable, or when no
        calculation rule matches the period/definition-period combination
    """
    if use_baseline:
        assert self.baseline_simulation is not None, "self.baseline_simulation is None"
        simulation = self.baseline_simulation
    else:
        assert self.simulation is not None
        simulation = self.simulation

    tax_benefit_system = simulation.tax_benefit_system

    assert period is not None
    if not isinstance(period, periods.Period):
        period = periods.period(period)
    assert simulation is not None
    assert tax_benefit_system is not None

    assert variable in tax_benefit_system.variables, "{} is not a valid variable".format(variable)
    period_size_independent = tax_benefit_system.get_variable(variable).is_period_size_independent
    definition_period = tax_benefit_system.get_variable(variable).definition_period

    # Period-size-dependent variables are summed over the requested
    # period; period-size-independent ones appear to be read at a single
    # representative sub-period (first month / containing year).
    if period_size_independent is False and definition_period != u'eternity':
        values = simulation.calculate_add(variable, period = period)
    elif period_size_independent is True and definition_period == u'month' and period.size_in_months > 1:
        values = simulation.calculate(variable, period = period.first_month)
    elif period_size_independent is True and definition_period == u'month' and period.size_in_months == 1:
        values = simulation.calculate(variable, period = period)
    elif period_size_independent is True and definition_period == u'year' and period.size_in_months > 12:
        values = simulation.calculate(variable, period = period.start.offset('first-of', 'year').period('year'))
    elif period_size_independent is True and definition_period == u'year' and period.size_in_months == 12:
        values = simulation.calculate(variable, period = period)
    elif period_size_independent is True and definition_period == u'year':
        values = simulation.calculate(variable, period = period.this_year)
    elif definition_period == u'eternity':
        values = simulation.calculate(variable, period = period)
    else:
        values = None

    assert values is not None, 'Unspecified calculation period for variable {}'.format(variable)
    return values
Compute and return the variable values for period and baseline or reform tax_benefit_system
2.503136
2.390462
1.047135
def filter_input_variables(self, input_data_frame = None, simulation = None):
    """Drop from *input_data_frame* the columns that won't be used.

    Columns are dropped when they are unknown to the tax-benefit system,
    or when the variable has formulas (is computed) and is not declared
    in ``used_as_input_variables``. Id and role columns are always kept.
    The dataframe is modified in place and also returned.
    """
    assert input_data_frame is not None
    assert simulation is not None
    id_variable_by_entity_key = self.id_variable_by_entity_key
    role_variable_by_entity_key = self.role_variable_by_entity_key
    used_as_input_variables = self.used_as_input_variables

    tax_benefit_system = simulation.tax_benefit_system
    variables = tax_benefit_system.variables

    id_variables = [
        id_variable_by_entity_key[_entity.key] for _entity in simulation.entities.values()
        if not _entity.is_person]
    role_variables = [
        role_variable_by_entity_key[_entity.key] for _entity in simulation.entities.values()
        if not _entity.is_person]

    log.debug('Variable used_as_input_variables in filter: \n {}'.format(used_as_input_variables))

    # pass 1: drop columns not known to the tax-benefit system
    unknown_columns = []
    for column_name in input_data_frame:
        if column_name in id_variables + role_variables:
            continue
        if column_name not in variables:
            unknown_columns.append(column_name)
            input_data_frame.drop(column_name, axis = 1, inplace = True)

    if unknown_columns:
        log.debug('The following unknown columns {}, are dropped from input table'.format(
            sorted(unknown_columns)))

    # pass 2: drop computed columns unless explicitly declared as input
    used_columns = []
    dropped_columns = []
    for column_name in input_data_frame:
        if column_name in id_variables + role_variables:
            continue
        variable = variables[column_name]
        # Keeping the calculated variables that are initialized by the input data
        if variable.formulas:
            if column_name in used_as_input_variables:
                used_columns.append(column_name)
                continue
            dropped_columns.append(column_name)
            input_data_frame.drop(column_name, axis = 1, inplace = True)

    if used_columns:
        log.debug(
            'These columns are not dropped because present in used_as_input_variables:\n {}'.format(
                sorted(used_columns)))
    if dropped_columns:
        log.debug(
            'These columns in survey are set to be calculated, we drop them from the input table:\n {}'.format(
                sorted(dropped_columns)))

    log.info('Keeping the following variables in the input_data_frame:\n {}'.format(
        sorted(list(input_data_frame.columns))))
    return input_data_frame
Filter the input data frame from variables that won't be used or are set to be computed
2.614572
2.57421
1.015679
def init_from_data(self, calibration_kwargs = None, inflation_kwargs = None,
        rebuild_input_data = False, rebuild_kwargs = None, data = None, memory_config = None):
    '''Initialises a survey scenario from data.

    :param rebuild_input_data: Whether or not to clean, format and save data. Take a look at :func:`build_input_data`
    :param data: Contains the data, or metadata needed to know where to find it.
    '''
    # Default to the scenario's own year; overridden when *data* carries one.
    # (Previously data_year was only bound when data was not None, so calling
    # with rebuild_input_data=True and data=None raised NameError.)
    data_year = self.year
    if data is not None:
        data_year = data.get("data_year", self.year)

    if calibration_kwargs is not None:
        assert set(calibration_kwargs.keys()).issubset(set(
            ['target_margins_by_variable', 'parameters', 'total_population']))

    if inflation_kwargs is not None:
        assert set(inflation_kwargs.keys()).issubset(set(['inflator_by_variable', 'target_by_variable']))

    self._set_id_variable_by_entity_key()
    self._set_role_variable_by_entity_key()
    self._set_used_as_input_variables_by_entity()

    # When True, assume raw data that must be (re)built first;
    # when False, assume data is ready for consumption.
    if rebuild_input_data:
        if rebuild_kwargs is not None:
            self.build_input_data(year = data_year, **rebuild_kwargs)
        else:
            self.build_input_data(year = data_year)

    debug = self.debug
    trace = self.trace

    # Build the baseline simulation first: we are more likely to reuse
    # baseline input in the reform than the other way around.
    if self.baseline_tax_benefit_system is not None:
        self.new_simulation(debug = debug, data = data, trace = trace, memory_config = memory_config,
            use_baseline = True)

    self.new_simulation(debug = debug, data = data, trace = trace, memory_config = memory_config)

    if calibration_kwargs:
        self.calibrate(**calibration_kwargs)

    if inflation_kwargs:
        self.inflate(**inflation_kwargs)
Initialises a survey scenario from data. :param rebuild_input_data: Whether or not to clean, format and save data. Take a look at :func:`build_input_data` :param data: Contains the data, or metadata needed to know where to find it.
5.696631
4.431943
1.285357
def init_entity(self, entity = None, input_data_frame = None, period = None, simulation = None):
    """Initialize one entity of *simulation* for *period* from *input_data_frame*.

    For a person entity, also wires up the membership (id/role) of every
    collective entity, then loads each remaining column into the
    simulation via ``init_variable_in_entity``.
    """
    assert entity is not None
    assert input_data_frame is not None
    assert period is not None
    assert simulation is not None
    used_as_input_variables = self.used_as_input_variables_by_entity[entity]

    variables_mismatch = set(used_as_input_variables).difference(set(input_data_frame.columns)) if used_as_input_variables else None
    if variables_mismatch:
        log.info(
            'The following variables are used as input variables are not present in the input data frame: \n {}'.format(
                sorted(variables_mismatch)))
    if variables_mismatch:
        log.debug('The following variables are used as input variables: \n {}'.format(
            sorted(used_as_input_variables)))
        log.debug('The input_data_frame contains the following variables: \n {}'.format(
            sorted(list(input_data_frame.columns))))

    # rebind *entity* from key to the actual entity object
    entity = simulation.entities[entity]
    id_variables = [
        self.id_variable_by_entity_key[_entity.key] for _entity in simulation.entities.values()
        if not _entity.is_person]
    role_variables = [
        self.role_variable_by_entity_key[_entity.key] for _entity in simulation.entities.values()
        if not _entity.is_person]

    if entity.is_person:
        for id_variable in id_variables + role_variables:
            assert id_variable in input_data_frame.columns, \
                "Variable {} is not present in input dataframe".format(id_variable)

    input_data_frame = self.filter_input_variables(input_data_frame = input_data_frame, simulation = simulation)

    if entity.is_person:
        entity.count = entity.step_size = len(input_data_frame)
        for collective_entity in simulation.entities.values():
            if collective_entity.is_person:
                continue
            _key = collective_entity.key
            _id_variable = self.id_variable_by_entity_key[_key]
            _role_variable = self.role_variable_by_entity_key[_key]
            collective_entity.count = len(input_data_frame[_id_variable].unique())
            collective_entity.members_entity_id = input_data_frame[_id_variable].astype('int').values
            # TODO remove legacy use
            collective_entity.members_legacy_role = input_data_frame[_role_variable].astype('int').values
            # Map each legacy role index onto the corresponding flattened role.
            # NOTE(review): the else branch looks unreachable, since
            # legacy_role comes from enumerate(flattened_roles) and is
            # therefore always < len(flattened_roles) -- confirm intent.
            for (legacy_role, flattened_role) in enumerate(collective_entity.flattened_roles):
                if legacy_role < len(collective_entity.flattened_roles):
                    collective_entity.members_role = np.where(
                        collective_entity.members_legacy_role == legacy_role,
                        flattened_role,
                        collective_entity.members_role,
                        )
                else:
                    collective_entity.members_role = np.where(
                        collective_entity.members_legacy_role >= len(collective_entity.flattened_roles),
                        collective_entity.flattened_roles[-1],
                        collective_entity.members_role,
                        )
    else:
        entity.count = entity.step_size = len(input_data_frame)

    # load every remaining (non-id, non-role) column into the simulation
    for column_name, column_serie in input_data_frame.iteritems():
        if column_name in (id_variables + role_variables):
            continue
        variable_instance = self.tax_benefit_system.variables.get(column_name)
        if variable_instance.entity.key != entity.key:
            log.info("Ignoring variable {} which is not part of entity {} but {}".format(
                column_name, entity.key, variable_instance.entity.key))
            continue
        init_variable_in_entity(
            entity = entity,
            variable_name = column_name,
            series = column_serie,
            period = period,
            )
Initialize the simulation period with current input_data_frame
2.378111
2.382457
0.998176
def neutralize_variables(self, tax_benefit_system):
    """Neutralize input variables that are absent from the input data.

    Variables with formulas, declared input variables, explicitly
    non-neutralizable variables and weight variables are left untouched.
    """
    used_as_input = self.used_as_input_variables or ()
    protected = self.non_neutralizable_variables or ()
    weight_names = (
        self.weight_column_name_by_entity.values()
        if self.weight_column_name_by_entity
        else ()
    )
    for variable_name, variable in tax_benefit_system.variables.items():
        if variable.formulas:
            continue
        if variable_name in used_as_input or variable_name in protected:
            continue
        if variable_name in weight_names:
            continue
        tax_benefit_system.neutralize_variable(variable_name)
Neutralizing input variables not in input dataframe and keep some crucial variables
3.085436
2.896783
1.065125
def set_tax_benefit_systems(self, tax_benefit_system = None, baseline_tax_benefit_system = None):
    """Set the tax-benefit system and, optionally, the baseline one.

    When a cache blacklist is configured on the scenario it is copied
    onto each system that is set.
    """
    assert tax_benefit_system is not None
    self.tax_benefit_system = tax_benefit_system
    if self.cache_blacklist is not None:
        self.tax_benefit_system.cache_blacklist = self.cache_blacklist
    if baseline_tax_benefit_system is None:
        return
    self.baseline_tax_benefit_system = baseline_tax_benefit_system
    if self.cache_blacklist is not None:
        self.baseline_tax_benefit_system.cache_blacklist = self.cache_blacklist
Set the tax and benefit system and eventually the baseline tax and benefit system
1.779652
1.742465
1.021341
def _set_id_variable_by_entity_key(self) -> Dict[str, str]:
    '''Identify and set the good ids for the different entities.

    Defaults to '<entity_key>_id' for each entity when not already
    configured, then returns the mapping.
    '''
    if self.id_variable_by_entity_key is None:
        self.id_variable_by_entity_key = dict(
            (entity.key, entity.key + '_id') for entity in self.tax_benefit_system.entities)
        log.debug("Use default id_variable names:\n {}".format(self.id_variable_by_entity_key))
    return self.id_variable_by_entity_key
Identify and set the good ids for the different entities
4.988366
3.780956
1.31934
def _set_role_variable_by_entity_key(self) -> Dict[str, str]:
    '''Identify and set the good roles for the different entities.

    Defaults to '<entity_key>_legacy_role' for each entity when not
    already configured, then returns the mapping.
    '''
    if self.role_variable_by_entity_key is None:
        self.role_variable_by_entity_key = dict(
            (entity.key, entity.key + '_legacy_role') for entity in self.tax_benefit_system.entities)
    return self.role_variable_by_entity_key
Identify and set the good roles for the different entities
5.674668
4.041657
1.404045
def _set_used_as_input_variables_by_entity(self) -> Dict[str, List[str]]:
    '''Identify and set the good input variables for the different entities.

    Groups ``used_as_input_variables`` by the entity each variable
    belongs to; no-op when the grouping already exists.
    '''
    if self.used_as_input_variables_by_entity is not None:
        return

    tax_benefit_system = self.tax_benefit_system

    assert set(self.used_as_input_variables) <= set(tax_benefit_system.variables.keys()), \
        "Some variables used as input variables are not part of the tax benefit system:\n {}".format(
            set(self.used_as_input_variables).difference(set(tax_benefit_system.variables.keys()))
            )

    self.used_as_input_variables_by_entity = dict()
    for entity in tax_benefit_system.entities:
        self.used_as_input_variables_by_entity[entity.key] = [
            variable
            for variable in self.used_as_input_variables
            if tax_benefit_system.get_variable(variable).entity == entity
            ]
    return self.used_as_input_variables_by_entity
Identify and set the good input variables for the different entities
2.674293
2.304962
1.160233
def _dimensions(self):
    """tuple of dimension objects in this collection.

    MR_CAT (selections) dimensions are excluded. This composed tuple is
    the source for the dimension objects in this collection.
    """
    return tuple(
        dimension
        for dimension in self._all_dimensions
        if dimension.dimension_type != DT.MR_CAT
    )
tuple of dimension objects in this collection. This composed tuple is the source for the dimension objects in this collection.
21.484261
17.652842
1.217043
def _iter_dimensions(self):
    """Generate a Dimension object for each raw dimension."""
    for raw_dimension in self._raw_dimensions:
        yield Dimension(
            raw_dimension.dimension_dict, raw_dimension.dimension_type
        )
Generate Dimension object for each dimension dict.
6.820291
4.524449
1.50743
def _raw_dimensions(self):
    """Sequence of _RawDimension objects wrapping each dimension dict."""
    dimension_dicts = self._dimension_dicts
    return tuple(
        _RawDimension(dimension_dict, dimension_dicts)
        for dimension_dict in dimension_dicts
    )
Sequence of _RawDimension objects wrapping each dimension dict.
7.309967
3.688987
1.981565
def dimension_type(self):
    """Return member of DIMENSION_TYPE appropriate to dimension_dict."""
    base_type = self._base_type
    # categorical and array types need further resolution
    if base_type == "categorical":
        return self._resolve_categorical()
    if base_type == "enum.variable":
        return self._resolve_array_type()
    # the remaining enum subtypes map directly to a dimension type
    static_types = {
        "enum.datetime": DT.DATETIME,
        "enum.numeric": DT.BINNED_NUMERIC,
        "enum.text": DT.TEXT,
    }
    if base_type in static_types:
        return static_types[base_type]
    raise NotImplementedError("unrecognized dimension type %s" % base_type)
Return member of DIMENSION_TYPE appropriate to dimension_dict.
4.142323
3.928003
1.054562
def _base_type(self):
    """Return str like 'enum.numeric' representing dimension type.

    Concatenates the type 'class' with the subtype class (when present)
    as 'class.subclass', matching the keys in the cube-response JSON.
    """
    type_dict = self._dimension_dict["type"]
    type_class = type_dict["class"]
    if type_class == "categorical":
        return "categorical"
    if type_class == "enum":
        return "enum.%s" % type_dict["subtype"]["class"]
    raise NotImplementedError("unexpected dimension type class '%s'" % type_class)
Return str like 'enum.numeric' representing dimension type. This string is a 'type.subclass' concatenation of the str keys used to identify the dimension type in the cube response JSON. The '.subclass' suffix only appears where a subtype is present.
4.455284
3.454287
1.289784
def _next_raw_dimension(self):
    """_RawDimension for next *dimension_dict* in sequence or None for last."""
    dimension_dicts = self._dimension_dicts
    next_idx = dimension_dicts.index(self._dimension_dict) + 1
    if next_idx >= len(dimension_dicts):
        return None
    return _RawDimension(dimension_dicts[next_idx], dimension_dicts)
_RawDimension for next *dimension_dict* in sequence or None for last. Returns None if this dimension is the last in sequence for this cube.
3.903221
3.101586
1.25846
def _resolve_array_type(self):
    """Return DT.MR or DT.CA for this array-type dimension.

    The dimension is MR when the following dimension is its selections
    dimension: categorical, with a selected category, sharing this
    dimension's alias. Otherwise it is CA.
    """
    following = self._next_raw_dimension
    if following is None:
        return DT.CA
    if (
        following._base_type == "categorical"
        and following._has_selected_category
        and following._alias == self._alias
    ):
        return DT.MR
    return DT.CA
Return one of the ARRAY_TYPES members of DIMENSION_TYPE. This method distinguishes between CA and MR dimensions. The return value is only meaningful if the dimension is known to be of array type (i.e. either CA or MR, base-type 'enum.variable').
6.861956
5.107459
1.343517
def _resolve_categorical(self):
    """Return one of the categorical members of DIMENSION_TYPE.

    Distinguishes CAT, CA_CAT, MR_CAT and LOGICAL, which all share the
    'categorical' base type.
    """
    has_selected = self._has_selected_category
    # an array categorical is either MR_CAT (selections) or CA_CAT
    if self._is_array_cat:
        return DT.MR_CAT if has_selected else DT.CA_CAT
    # otherwise it is logical or plain-old categorical
    return DT.LOGICAL if has_selected else DT.CAT
Return one of the categorical members of DIMENSION_TYPE. This method distinguishes between CAT, CA_CAT, MR_CAT, and LOGICAL dimension types, all of which have the base type 'categorical'. The return value is only meaningful if the dimension is known to be one of the categorical types (has base-type 'categorical').
13.972817
8.458959
1.651837
def hs_indices(self):
    """tuple of (anchor_idx, addend_idxs) pair for each subtotal.

    Empty for selections (MR_CAT) and logical dimensions. The anchor_idx
    can be 'top' or 'bottom' as well as an int.
    """
    if self.dimension_type in (DT.MR_CAT, DT.LOGICAL):
        return ()
    return tuple(
        (subtotal.anchor_idx, subtotal.addend_idxs)
        for subtotal in self._subtotals
    )
tuple of (anchor_idx, addend_idxs) pair for each subtotal. Example:: ( (2, (0, 1, 2)), (3, (3,)), ('bottom', (4, 5)) ) Note that the `anchor_idx` item in the first position of each pair can be 'top' or 'bottom' as well as an int. The `addend_idxs` tuple will always contains at least one index (a subtotal with no addends is ignored).
17.931236
9.821087
1.825789
def inserted_hs_indices(self):
    """list of int index of each inserted subtotal for the dimension.

    Each value is the position of a subtotal in the interleaved sequence
    of elements and subtotal items.
    """
    # H&S insertions are not performed for CA and MR subvar dimensions
    if self.dimension_type in DT.ARRAY_TYPES:
        return []
    interleaved = self._iter_interleaved_items(self.valid_elements)
    return [idx for idx, item in enumerate(interleaved) if item.is_insertion]
list of int index of each inserted subtotal for the dimension. Each value represents the position of a subtotal in the interleaved sequence of elements and subtotals items.
19.923937
15.576268
1.279121
def is_marginable(self):
    """True if adding counts across this dimension axis is meaningful."""
    non_marginable_types = (DT.CA, DT.MR, DT.MR_CAT, DT.LOGICAL)
    return self.dimension_type not in non_marginable_types
True if adding counts across this dimension axis is meaningful.
23.654371
15.011918
1.575706
def labels(
    self, include_missing=False, include_transforms=False, include_cat_ids=False
):
    """Return list of str labels for the elements of this dimension.

    Returns a list of (label, element_id) pairs if *include_cat_ids* is
    True; the `element_id` value is None for subtotal items (which have
    no element-id).
    """
    # TODO: Having an alternate return type triggered by a flag-parameter
    # (`include_cat_ids` in this case) is poor practice. Using flags like
    # that effectively squashes what should be two methods into one.
    # Either get rid of the need for that alternate return value type or
    # create a separate method for it.
    elements = self.all_elements if include_missing else self.valid_elements

    # subtotals are never shown for CA-subvariable dimensions
    include_subtotals = include_transforms and self.dimension_type != DT.CA_SUBVAR

    # ---items are elements or subtotals, interleaved in display order---
    interleaved_items = tuple(self._iter_interleaved_items(elements))

    labels = list(
        item.label
        for item in interleaved_items
        if include_subtotals or not item.is_insertion
    )

    if include_cat_ids:
        element_ids = tuple(
            None if item.is_insertion else item.element_id
            for item in interleaved_items
            if include_subtotals or not item.is_insertion
        )
        return list(zip(labels, element_ids))

    return labels
Return list of str labels for the elements of this dimension. Returns a list of (label, element_id) pairs if *include_cat_ids* is True. The `element_id` value in the second position of the pair is None for subtotal items (which don't have an element-id).
7.862132
6.739735
1.166534
def _iter_interleaved_items(self, elements):
    """Generate elements and subtotals in display order.

    Subtotals anchored 'top' come first, then each element followed by
    any subtotals anchored to it, then subtotals anchored 'bottom'.
    Multiple subtotals on one anchor keep their document order. Only
    elements in *elements* appear, allowing the caller to choose
    `.all_elements` or `.valid_elements`.
    """
    subtotals = self._subtotals
    for top_subtotal in subtotals.iter_for_anchor("top"):
        yield top_subtotal
    for element in elements:
        yield element
        for anchored_subtotal in subtotals.iter_for_anchor(element.element_id):
            yield anchored_subtotal
    for bottom_subtotal in subtotals.iter_for_anchor("bottom"):
        yield bottom_subtotal
Generate element or subtotal items in interleaved order. This ordering corresponds to how value "rows" (or columns) are to appear after subtotals have been inserted at their anchor locations. Where more than one subtotal is anchored to the same location, they appear in their document order in the cube response. Only elements in the passed *elements* collection appear, which allows control over whether missing elements are included by choosing `.all_elements` or `.valid_elements`.
3.350098
3.152176
1.062789
def _subtotals(self):
    """_Subtotals sequence object for this dimension.

    Provides access to any subtotal insertions defined on this
    dimension.
    """
    view = self._dimension_dict.get("references", {}).get("view", {})
    if view is None:
        # view can be explicitly null (not just absent) in the response
        insertion_dicts = []
    else:
        insertion_dicts = view.get("transform", {}).get("insertions", [])
    return _Subtotals(insertion_dicts, self.valid_elements)
_Subtotals sequence object for this dimension. The subtotals sequence provides access to any subtotal insertions defined on this dimension.
16.764797
13.674681
1.225974
def _element_makings(self):
    """(ElementCls, element_dicts) pair for this dimension's elements.

    All elements of a dimension share one class; this selects that class
    and the source dicts for the element factory.
    """
    type_dict = self._type_dict
    if type_dict["class"] == "categorical":
        return _Category, type_dict["categories"]
    return _Element, type_dict["elements"]
(ElementCls, element_dicts) pair for this dimension's elements. All the elements of a given dimension are the same type. This method determines the type (class) and source dicts for the elements of this dimension and provides them for the element factory.
7.871023
8.265862
0.952233
def _elements(self):
    """Composed tuple storing the actual sequence of element objects."""
    ElementCls, element_dicts = self._element_makings
    elements = []
    for idx, element_dict in enumerate(element_dicts):
        elements.append(ElementCls(element_dict, idx, element_dicts))
    return tuple(elements)
Composed tuple storing actual sequence of element objects.
7.531737
6.437346
1.170007
def numeric_value(self):
    """Numeric value assigned to element by user, np.nan if absent."""
    value = self._element_dict.get("numeric_value")
    if value is None:
        return np.nan
    return value
Numeric value assigned to element by user, np.nan if absent.
5.816726
3.389546
1.716078
def label(self):
    """str display-name for this element, '' when absent from cube response.

    Handles numeric, datetime and text variables, but also subvar
    dimensions (where the label lives under references.name).
    """
    value = self._element_dict.get("value")
    type_name = type(value).__name__

    if type_name == "NoneType":
        return ""
    if type_name == "list":
        # range-like values render as e.g. '10-15' or 'A-F'
        return "-".join(str(item) for item in value)
    if type_name in ("float", "int"):
        return str(value)
    if type_name in ("str", "unicode"):
        return value

    # ---CA and MR subvar dimensions: value is a dict---
    name = value.get("references", {}).get("name")
    return name if name else ""
str display-name for this element, '' when absent from cube response. This property handles numeric, datetime and text variables, but also subvar dimensions
5.964522
4.998851
1.193179
def iter_for_anchor(self, anchor):
    """Generate each subtotal having matching *anchor*."""
    for subtotal in self._subtotals:
        if subtotal.anchor == anchor:
            yield subtotal
Generate each subtotal having matching *anchor*.
7.744063
3.094155
2.502804
def _iter_valid_subtotal_dicts(self):
    """Generate each insertion dict that represents a valid subtotal."""
    required_keys = {"anchor", "args", "name"}
    for insertion_dict in self._insertion_dicts:
        # skip non-dicts and non-subtotal insertions
        if not isinstance(insertion_dict, dict):
            continue
        if insertion_dict.get("function") != "subtotal":
            continue
        # skip malformed subtotal-dicts
        if not required_keys.issubset(insertion_dict.keys()):
            continue
        # must reference at least one non-missing element
        if not self._element_ids.intersection(insertion_dict["args"]):
            continue
        yield insertion_dict
Generate each insertion dict that represents a valid subtotal.
6.123902
5.426286
1.128562
def _subtotals(self):
    """Composed tuple storing actual sequence of _Subtotal objects."""
    valid_elements = self.valid_elements
    return tuple(
        _Subtotal(subtotal_dict, valid_elements)
        for subtotal_dict in self._iter_valid_subtotal_dicts()
    )
Composed tuple storing actual sequence of _Subtotal objects.
8.865394
5.932077
1.494484
def anchor(self):
    """int or str indicating element under which to insert this subtotal.

    An int anchor is the id of the element to insert under; the value
    can also be 'top' or 'bottom'. Defaults to 'bottom' when the anchor
    refers to an element no longer present (or missing) in the
    dimension.
    """
    anchor = self._subtotal_dict["anchor"]
    try:
        element_id = int(anchor)
    except (TypeError, ValueError):
        # non-numeric anchors are operations like 'Top'/'Bottom'
        return anchor.lower()
    if element_id in self.valid_elements.element_ids:
        return element_id
    return "bottom"
int or str indicating element under which to insert this subtotal. An int anchor is the id of the dimension element (category or subvariable) under which to place this subtotal. The return value can also be one of 'top' or 'bottom'. The return value defaults to 'bottom' for an anchor referring to an element that is no longer present in the dimension or an element that represents missing data.
9.434911
6.938154
1.359859
def anchor_idx(self):
    """int offset of the anchor element, or the str 'top'/'bottom'."""
    anchor = self.anchor
    if anchor == "top" or anchor == "bottom":
        return anchor
    return self.valid_elements.get_by_id(anchor).index_in_valids
int or str representing index of anchor element in dimension. When the anchor is an operation, like 'top' or 'bottom'
12.241226
9.913991
1.234743
def addend_ids(self):
    """tuple of int ids of elements contributing to this subtotal.

    Ids not present in the dimension, or representing missing data, are
    excluded.
    """
    valid_ids = self.valid_elements.element_ids
    args = self._subtotal_dict.get("args", [])
    return tuple(arg for arg in args if arg in valid_ids)
tuple of int ids of elements contributing to this subtotal. Any element id not present in the dimension or present but representing missing data is excluded.
15.35349
11.156205
1.376229
def addend_idxs(self):
    """tuple of int offset of each addend element for this subtotal.

    Same length as `.addend_ids`, but each value is the element's offset
    within the valid elements rather than its element id.
    """
    get_by_id = self.valid_elements.get_by_id
    return tuple(
        get_by_id(addend_id).index_in_valids for addend_id in self.addend_ids
    )
tuple of int index of each addend element for this subtotal. The length of the tuple is the same as that for `.addend_ids`, but each value repesents the offset of that element within the dimension, rather than its element id.
6.784226
6.082621
1.115346
def create_data_file_by_format(directory_path = None):
    """Browse subdirectories of *directory_path* for Stata and SAS files.

    :returns: dict with keys 'stata' (paths of .dta files) and 'sas'
        (paths of .sas7bdat files)
    """
    data_files = {'stata': [], 'sas': []}
    for root, _subdirs, file_names in os.walk(directory_path):
        for file_name in file_names:
            file_path = os.path.join(root, file_name)
            if file_name.endswith(".dta"):
                log.info("Found stata file {}".format(file_path))
                data_files['stata'].append(file_path)
            elif file_name.endswith(".sas7bdat"):
                log.info("Found sas file {}".format(file_path))
                data_files['sas'].append(file_path)
    return data_files
Browse subdirectories to extract stata and sas files
1.91983
1.658262
1.157736
def as_array(
    self,
    include_missing=False,
    weighted=True,
    include_transforms_for_dims=None,
    prune=False,
):
    """Return `ndarray` representing cube values.

    The returned array has the same number of dimensions as the cube.

    :param include_missing: include rows/cols for missing values
    :param weighted: use weighted counts when available
    :param include_transforms_for_dims: dimensions for which H&S
        transforms are applied
    :param prune: apply pruning (via ``_prune_body``) to the result
    """
    array = self._as_array(
        include_missing=include_missing,
        weighted=weighted,
        include_transforms_for_dims=include_transforms_for_dims,
    )

    # ---prune array if pruning was requested---
    if prune:
        array = self._prune_body(array, transforms=include_transforms_for_dims)

    # drop MR category (selections) dimensions from the result
    return self._drop_mr_cat_dims(array)
Return `ndarray` representing cube values. Returns the tabular representation of the crunch cube. The returned array has the same number of dimensions as the cube. E.g. for a cross-tab representation of a categorical and numerical variable, the resulting cube will have two dimensions. *include_missing* (bool): Include rows/cols for missing values. Example 1 (Categorical x Categorical):: >>> cube = CrunchCube(response) >>> cube.as_array() np.array([ [5, 2], [5, 3], ]) Example 2 (Categorical x Categorical, include missing values):: >>> cube = CrunchCube(response) >>> cube.as_array(include_missing=True) np.array([ [5, 3, 2, 0], [5, 2, 3, 0], [0, 0, 0, 0], ])
4.063844
5.183751
0.783958
def count(self, weighted=True):
    """Return numeric count of rows considered for cube response."""
    measures = self._measures
    if weighted:
        return measures.weighted_n
    return measures.unweighted_n
Return numberic count of rows considered for cube response.
8.249582
7.118002
1.158974
def get_slices(self, ca_as_0th=False):
    """Return list of :class:`.CubeSlice` objects.

    A 1D or 2D cube yields a single slice; a 3D cube yields one slice
    per element of its first dimension.
    """
    if self.ndim >= 3 or ca_as_0th:
        first_dim_labels = self.labels()[0]
        return [
            CubeSlice(self, idx, ca_as_0th)
            for idx in range(len(first_dim_labels))
        ]
    return [CubeSlice(self, 0)]
Return list of :class:`.CubeSlice` objects. The number of slice objects in the returned list depends on the dimensionality of this cube. A 1D or 2D cube will return a list containing one slice object. A 3D cube will return a list of slices the same length as the first dimension.
4.935656
3.716521
1.328031
def index(self, weighted=True, prune=False):
    """Return cube index measurement.

    Deprecated: use ``CubeSlice.index_table()`` instead.
    """
    warnings.warn(
        "CrunchCube.index() is deprecated. Use CubeSlice.index_table().",
        DeprecationWarning,
    )
    return Index.data(self, weighted, prune)
Return cube index measurement. This function is deprecated. Use index_table from CubeSlice.
11.236129
5.365052
2.094319
def inserted_hs_indices(self, prune=False):
    """Get indices of the inserted H&S (for formatting purposes).

    :param prune: when True (2D cubes only), each inserted index is
        adjusted for the pruned rows/cols that precede it
    :returns: list with one entry per dimension, each the inserted
        subtotal indices for that dimension
    """
    if self.ndim == 2 and prune:
        # If pruning is applied, we need to subtract from the H&S index
        # the number of pruned rows (cols) that come before that index.
        pruning_bases = [self._pruning_base(axis=i, hs_dims=[0, 1]) for i in [1, 0]]
        # collapse 2D bases across the opposite axis to get row/col totals
        pruning_bases = [
            base if base.ndim == 1 else np.sum(base, axis=(1 - i))
            for i, base in enumerate(pruning_bases)
        ]
        # Obtain prune indices as subscripts (zero or NaN base => pruned)
        prune_indices_list = [
            np.arange(len(base))[np.logical_or(base == 0, np.isnan(base))]
            for base in pruning_bases
        ]
        inserted_indices_list = [dim.inserted_hs_indices for dim in self.dimensions]
        return self._adjust_inserted_indices(
            inserted_indices_list, prune_indices_list
        )
    return [dim.inserted_hs_indices for dim in self.dimensions]
Get indices of the inserted H&S (for formatting purposes).
4.590243
4.360991
1.052569
def is_univariate_ca(self):
    """True if cube only contains a CA dimension-pair, in either order."""
    if self.ndim != 2:
        return False
    return set(self.dim_types) == {DT.CA_SUBVAR, DT.CA_CAT}
True if cube only contains a CA dimension-pair, in either order.
11.485125
8.108836
1.416372
def labels(self, include_missing=False, include_transforms_for_dims=False):
    """Get labels for each of the cube's dimensions.

    :param include_missing: include labels for missing values
    :returns: list of label lists, one per dimension
    """
    labels_by_dimension = []
    for dimension in self.dimensions:
        labels_by_dimension.append(
            dimension.labels(include_missing, include_transforms_for_dims)
        )
    return labels_by_dimension
Gets labels for each cube's dimension. Args include_missing (bool): Include labels for missing values Returns labels (list of lists): Labels for each dimension
3.606422
5.082134
0.709628