sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def intersperse_hs_in_std_res(slice_, hs_dims, res):
    """Insert NaN place-holder rows/cols into *res* for each H&S insertion.

    For every dimension number present in *hs_dims*, a NaN slab is inserted
    into *res* at each index reported by ``slice_.inserted_hs_indices()``
    for that dimension. The (possibly enlarged) array is returned.
    """
    ndim = slice_.ndim
    for axis_num, insertion_indices in enumerate(slice_.inserted_hs_indices()):
        if axis_num in hs_dims:
            for insertion_index in insertion_indices:
                res = np.insert(res, insertion_index, np.nan, axis=axis_num - ndim)
    return res
Perform the insertions of place-holding rows and cols for insertions.
entailment
def inflate_parameter_leaf(sub_parameter, base_year, inflator, unit_type = 'unit'):
    """Inflate a Parameter leaf according to its unit type.

    The basic 'unit' type is assumed by default; other admissible unit
    types are 'threshold_unit' and 'rate_unit'.

    :param sub_parameter: the parameter (leaf, or Scale for thresholds) inflated in place
    :param int base_year: values starting after this year are discarded before inflating
    :param float inflator: inflation rate to apply (e.g. .02 for +2%)
    :param str unit_type: 'unit' (default), 'threshold_unit' or 'rate_unit'
    """
    if isinstance(sub_parameter, Scale):
        if unit_type == 'threshold_unit':
            # Inflate each bracket threshold of the scale; rates are not
            # touched here.
            for bracket in sub_parameter.brackets:
                threshold = bracket.children['threshold']
                inflate_parameter_leaf(threshold, base_year, inflator)
            return
    else:
        # Remove new values for year > base_year
        kept_instants_str = [
            parameter_at_instant.instant_str
            for parameter_at_instant in sub_parameter.values_list
            if periods.instant(parameter_at_instant.instant_str).year <= base_year
            ]
        # No value at or before base_year: nothing to inflate.
        if not kept_instants_str:
            return
        last_admissible_instant_str = max(kept_instants_str)
        # Re-assert the last admissible value so later values are dropped.
        sub_parameter.update(
            start = last_admissible_instant_str,
            value = sub_parameter(last_admissible_instant_str)
            )
        restricted_to_base_year_value_list = [
            parameter_at_instant
            for parameter_at_instant in sub_parameter.values_list
            if periods.instant(parameter_at_instant.instant_str).year == base_year
            ]
        # When value is changed in the base year
        if restricted_to_base_year_value_list:
            for parameter_at_instant in reversed(restricted_to_base_year_value_list):
                if parameter_at_instant.instant_str.startswith(str(base_year)):
                    value = (
                        parameter_at_instant.value * (1 + inflator)
                        if parameter_at_instant.value is not None
                        else None
                        )
                    # Shift the instant to the following year with the inflated value.
                    sub_parameter.update(
                        start = parameter_at_instant.instant_str.replace(
                            str(base_year), str(base_year + 1)
                            ),
                        value = value,
                        )
        # Or use the value at that instant even when it is defined earlier than the base year
        else:
            value = (
                sub_parameter("{}-12-31".format(base_year)) * (1 + inflator)
                if sub_parameter("{}-12-31".format(base_year)) is not None
                else None
                )
            sub_parameter.update(
                start = "{}-01-01".format(base_year + 1),
                value = value
                )
Inflate a Parameter leaf according to unit type. The basic unit type is assumed by default; other admissible unit types are threshold_unit and rate_unit
entailment
def calculate_variable(self, variable = None, period = None, use_baseline = False):
    """Compute and return the variable values for period and baseline or reform tax_benefit_system.

    :param str variable: name of the variable to compute
    :param period: period to compute for (anything ``periods.period()`` accepts)
    :param bool use_baseline: when True, compute on the baseline simulation
        instead of the (reform) simulation
    :return: the computed values array
    """
    if use_baseline:
        assert self.baseline_simulation is not None, "self.baseline_simulation is None"
        simulation = self.baseline_simulation
    else:
        assert self.simulation is not None
        simulation = self.simulation
    tax_benefit_system = simulation.tax_benefit_system
    assert period is not None
    if not isinstance(period, periods.Period):
        period = periods.period(period)
    assert simulation is not None
    assert tax_benefit_system is not None
    assert variable in tax_benefit_system.variables, "{} is not a valid variable".format(variable)
    # Pick the right calculation entry point from the variable's definition
    # period and whether it is period-size independent.
    period_size_independent = tax_benefit_system.get_variable(variable).is_period_size_independent
    definition_period = tax_benefit_system.get_variable(variable).definition_period
    if period_size_independent is False and definition_period != u'eternity':
        # Additive variable: sum over the sub-periods of the requested period.
        values = simulation.calculate_add(variable, period = period)
    elif period_size_independent is True and definition_period == u'month' and period.size_in_months > 1:
        # Size-independent monthly variable over a multi-month period: use the first month.
        values = simulation.calculate(variable, period = period.first_month)
    elif period_size_independent is True and definition_period == u'month' and period.size_in_months == 1:
        values = simulation.calculate(variable, period = period)
    elif period_size_independent is True and definition_period == u'year' and period.size_in_months > 12:
        # Multi-year period: use the first full calendar year.
        values = simulation.calculate(variable, period = period.start.offset('first-of', 'year').period('year'))
    elif period_size_independent is True and definition_period == u'year' and period.size_in_months == 12:
        values = simulation.calculate(variable, period = period)
    elif period_size_independent is True and definition_period == u'year':
        values = simulation.calculate(variable, period = period.this_year)
    elif definition_period == u'eternity':
        values = simulation.calculate(variable, period = period)
    else:
        # No rule matched: fail below with an explicit message.
        values = None
    assert values is not None, \
        'Unspecified calculation period for variable {}'.format(variable)
    return values
Compute and return the variable values for period and baseline or reform tax_benefit_system
entailment
def filter_input_variables(self, input_data_frame = None, simulation = None):
    """Filter the input data frame from variables that won't be used or are set to be computed.

    Drops (i) columns unknown to the tax and benefit system and (ii) columns
    of computed variables (ones that have formulas) that are not explicitly
    listed in ``used_as_input_variables``. Entity id and role columns are
    always kept.

    :param DataFrame input_data_frame: survey input data, modified in place
    :param simulation: openfisca simulation providing the tax_benefit_system
    :return: the filtered input data frame
    """
    assert input_data_frame is not None
    assert simulation is not None
    id_variable_by_entity_key = self.id_variable_by_entity_key
    role_variable_by_entity_key = self.role_variable_by_entity_key
    used_as_input_variables = self.used_as_input_variables
    tax_benefit_system = simulation.tax_benefit_system
    variables = tax_benefit_system.variables
    id_variables = [
        id_variable_by_entity_key[_entity.key] for _entity in simulation.entities.values()
        if not _entity.is_person]
    role_variables = [
        role_variable_by_entity_key[_entity.key] for _entity in simulation.entities.values()
        if not _entity.is_person]
    # Structural columns (ids and roles) are never dropped.
    structural_variables = set(id_variables + role_variables)
    log.debug('Variable used_as_input_variables in filter: \n {}'.format(used_as_input_variables))
    # BUGFIX: the original dropped columns inplace while iterating over the
    # DataFrame's columns; collect first, then drop once.
    unknown_columns = [
        column_name for column_name in input_data_frame
        if column_name not in structural_variables and column_name not in variables
        ]
    if unknown_columns:
        input_data_frame.drop(unknown_columns, axis = 1, inplace = True)
        log.debug('The following unknown columns {}, are dropped from input table'.format(
            sorted(unknown_columns)))
    used_columns = []
    dropped_columns = []
    for column_name in list(input_data_frame.columns):
        if column_name in structural_variables:
            continue
        variable = variables[column_name]
        # Keeping the calculated variables that are initialized by the input data
        if variable.formulas:
            if column_name in used_as_input_variables:
                used_columns.append(column_name)
                continue
            dropped_columns.append(column_name)
    if dropped_columns:
        input_data_frame.drop(dropped_columns, axis = 1, inplace = True)
    if used_columns:
        log.debug(
            'These columns are not dropped because present in used_as_input_variables:\n {}'.format(
                sorted(used_columns)))
    if dropped_columns:
        log.debug(
            'These columns in survey are set to be calculated, we drop them from the input table:\n {}'.format(
                sorted(dropped_columns)))
    log.info('Keeping the following variables in the input_data_frame:\n {}'.format(
        sorted(list(input_data_frame.columns))))
    return input_data_frame
Filter the input data frame from variables that won't be used or are set to be computed
entailment
def init_from_data(self, calibration_kwargs = None, inflation_kwargs = None, rebuild_input_data = False, rebuild_kwargs = None, data = None, memory_config = None):
    '''Initialises a survey scenario from data.

    :param rebuild_input_data: Whether or not to clean, format and save data. Take a look at :func:`build_input_data`
    :param data: Contains the data, or metadata needed to know where to find it.
    '''
    # When not ``None``, it'll try to get the data for *year*.
    # BUGFIX: default data_year to the scenario year so that
    # ``rebuild_input_data = True`` with ``data = None`` no longer raises
    # a NameError on the unbound name.
    data_year = self.year
    if data is not None:
        data_year = data.get("data_year", self.year)
    if calibration_kwargs is not None:
        assert set(calibration_kwargs.keys()).issubset(set(
            ['target_margins_by_variable', 'parameters', 'total_population']))
    if inflation_kwargs is not None:
        assert set(inflation_kwargs.keys()).issubset(set(['inflator_by_variable', 'target_by_variable']))
    self._set_id_variable_by_entity_key()
    self._set_role_variable_by_entity_key()
    self._set_used_as_input_variables_by_entity()
    # When ``True`` it'll assume it is raw data and do all that described supra.
    # When ``False``, it'll assume data is ready for consumption.
    if rebuild_input_data:
        if rebuild_kwargs is not None:
            self.build_input_data(year = data_year, **rebuild_kwargs)
        else:
            self.build_input_data(year = data_year)
    debug = self.debug
    trace = self.trace
    # Inverting reform and baseline because we are more likely
    # to use baseline input in reform than the other way around
    if self.baseline_tax_benefit_system is not None:
        self.new_simulation(debug = debug, data = data, trace = trace, memory_config = memory_config, use_baseline = True)
    # Note that a :class:`pd.DataFrame` can be passed directly via *data*
    # to avoid rebuilding the data.
    self.new_simulation(debug = debug, data = data, trace = trace, memory_config = memory_config)
    if calibration_kwargs:
        self.calibrate(**calibration_kwargs)
    if inflation_kwargs:
        self.inflate(**inflation_kwargs)
Initialises a survey scenario from data. :param rebuild_input_data: Whether or not to clean, format and save data. Take a look at :func:`build_input_data` :param data: Contains the data, or metadata needed to know where to find it.
entailment
def init_entity(self, entity = None, input_data_frame = None, period = None, simulation = None):
    """Initialize the simulation period with current input_data_frame.

    :param str entity: key of the entity the data frame describes
    :param DataFrame input_data_frame: input data for that entity
    :param period: period the variables are initialized for
    :param simulation: the openfisca simulation being populated
    """
    assert entity is not None
    assert input_data_frame is not None
    assert period is not None
    assert simulation is not None
    used_as_input_variables = self.used_as_input_variables_by_entity[entity]
    variables_mismatch = set(used_as_input_variables).difference(set(input_data_frame.columns)) if used_as_input_variables else None
    if variables_mismatch:
        log.info(
            'The following variables are used as input variables are not present in the input data frame: \n {}'.format(
                sorted(variables_mismatch)))
    if variables_mismatch:
        log.debug('The following variables are used as input variables: \n {}'.format(
            sorted(used_as_input_variables)))
        log.debug('The input_data_frame contains the following variables: \n {}'.format(
            sorted(list(input_data_frame.columns))))
    # Rebind *entity* from its key to the actual simulation Entity object.
    entity = simulation.entities[entity]
    id_variables = [
        self.id_variable_by_entity_key[_entity.key] for _entity in simulation.entities.values()
        if not _entity.is_person]
    role_variables = [
        self.role_variable_by_entity_key[_entity.key] for _entity in simulation.entities.values()
        if not _entity.is_person]
    if entity.is_person:
        # Person-level data must carry every collective id and role column.
        for id_variable in id_variables + role_variables:
            assert id_variable in input_data_frame.columns, \
                "Variable {} is not present in input dataframe".format(id_variable)
    input_data_frame = self.filter_input_variables(input_data_frame = input_data_frame, simulation = simulation)
    if entity.is_person:
        entity.count = entity.step_size = len(input_data_frame)
        # Derive every collective entity's membership from the person table.
        for collective_entity in simulation.entities.values():
            if collective_entity.is_person:
                continue
            _key = collective_entity.key
            _id_variable = self.id_variable_by_entity_key[_key]
            _role_variable = self.role_variable_by_entity_key[_key]
            collective_entity.count = len(input_data_frame[_id_variable].unique())
            collective_entity.members_entity_id = input_data_frame[_id_variable].astype('int').values
            # TODO remove legacy use
            collective_entity.members_legacy_role = input_data_frame[_role_variable].astype('int').values
            # Map each legacy role number onto its flattened role.
            for (legacy_role, flattened_role) in enumerate(collective_entity.flattened_roles):
                if legacy_role < len(collective_entity.flattened_roles):
                    collective_entity.members_role = np.where(
                        collective_entity.members_legacy_role == legacy_role,
                        flattened_role,
                        collective_entity.members_role,
                        )
                else:
                    # NOTE(review): this branch is unreachable — `legacy_role`
                    # comes from enumerate() so it is always < len(); the
                    # intent (clamping roles beyond the last flattened role)
                    # should be confirmed and the loop restructured.
                    collective_entity.members_role = np.where(
                        collective_entity.members_legacy_role >= len(collective_entity.flattened_roles),
                        collective_entity.flattened_roles[-1],
                        collective_entity.members_role,
                        )
    else:
        entity.count = entity.step_size = len(input_data_frame)
    # NOTE(review): indentation reconstructed — this loop is assumed to run
    # for person and collective entities alike; confirm against upstream.
    for column_name, column_serie in input_data_frame.iteritems():
        if column_name in (id_variables + role_variables):
            continue
        variable_instance = self.tax_benefit_system.variables.get(column_name)
        if variable_instance.entity.key != entity.key:
            log.info("Ignoring variable {} which is not part of entity {} but {}".format(
                column_name, entity.key, variable_instance.entity.key))
            continue
        init_variable_in_entity(
            entity = entity,
            variable_name = column_name,
            series = column_serie,
            period = period,
            )
Initialize the simulation period with current input_data_frame
entailment
def init_simulation_with_data_frame(self, input_data_frame = None, period = None, simulation = None, entity = None):
    """Initialize the simulation period with current input_data_frame for an entity if specified.

    :param DataFrame input_data_frame: person-level input data
    :param period: period the variables are initialized for
    :param simulation: the openfisca simulation being populated
    :param entity: optional entity restricting the id/role validation
    """
    assert input_data_frame is not None
    assert period is not None
    assert simulation is not None
    used_as_input_variables = self.used_as_input_variables
    id_variable_by_entity_key = self.id_variable_by_entity_key
    role_variable_by_entity_key = self.role_variable_by_entity_key
    variables_mismatch = set(used_as_input_variables).difference(set(input_data_frame.columns))
    if variables_mismatch:
        log.info(
            'The following variables used as input variables are not present in the input data frame: \n {}'.format(
                sorted(variables_mismatch)))
    if variables_mismatch:
        log.debug('The following variables are used as input variables: \n {}'.format(
            sorted(used_as_input_variables)))
        log.debug('The input_data_frame contains the following variables: \n {}'.format(
            sorted(list(input_data_frame.columns))))
    id_variables = [
        id_variable_by_entity_key[_entity.key] for _entity in simulation.entities.values()
        if not _entity.is_person]
    role_variables = [
        role_variable_by_entity_key[_entity.key] for _entity in simulation.entities.values()
        if not _entity.is_person]
    # Check that the id and role columns needed are present (or valid for
    # the restricted entity).
    for id_variable in id_variables + role_variables:
        entity_key = entity.key if entity is not None else None
        # NOTE(review): *entity* is used both as a mapping key and via
        # `.key` here — the expected type looks inconsistent; confirm.
        if (entity_key is not None) and (not simulation.entities[entity].is_person):
            assert id_variable in [id_variable_by_entity_key[entity], role_variable_by_entity_key[entity]], \
                "variable {} for entity {} is not valid (not {} nor {})".format(
                    id_variable,
                    entity_key,
                    id_variable_by_entity_key[entity_key],
                    role_variable_by_entity_key[entity_key],
                    )
            continue
        assert id_variable in input_data_frame.columns, \
            "Variable {} is not present in input dataframe".format(id_variable)
    input_data_frame = self.filter_input_variables(input_data_frame = input_data_frame, simulation = simulation)
    index_by_entity_key = dict()
    for key, entity in simulation.entities.items():
        if entity.is_person:
            entity.count = entity.step_size = len(input_data_frame)
        else:
            # Heads (role 0) define the collective entity count.
            entity.count = entity.step_size = \
                (input_data_frame[role_variable_by_entity_key[key]] == 0).sum()
            unique_ids_count = len(input_data_frame[id_variable_by_entity_key[key]].unique())
            assert entity.count == unique_ids_count, \
                "There are {0} person of role 0 in {1} but {2} {1}".format(
                    entity.count, entity.key, unique_ids_count)
            entity.members_entity_id = input_data_frame[id_variable_by_entity_key[key]].astype('int').values
            entity.members_legacy_role = input_data_frame[role_variable_by_entity_key[key]].astype('int').values
            # Row index of the head of each collective entity, ordered by id.
            index_by_entity_key[entity.key] = input_data_frame.loc[
                input_data_frame[role_variable_by_entity_key[entity.key]] == 0,
                id_variable_by_entity_key[key]
                ].sort_values().index
    # Initialize every non-structural column in its variable's entity.
    for column_name, column_serie in input_data_frame.iteritems():
        if role_variable_by_entity_key is not None:
            if column_name in role_variable_by_entity_key.values():
                continue
        if id_variable_by_entity_key is not None:
            if column_name in id_variable_by_entity_key.values():
                continue
        entity = simulation.get_variable_entity(column_name)
        if entity.is_person:
            init_variable_in_entity(entity, column_name, column_serie, period)
        else:
            # Collective variables are read on the head rows only.
            init_variable_in_entity(entity, column_name, column_serie[index_by_entity_key[entity.key]], period)
Initialize the simulation period with current input_data_frame for an entity if specified
entailment
def neutralize_variables(self, tax_benefit_system):
    """Neutralize input variables absent from the input dataframe, keeping crucial ones.

    A variable is left untouched when it has formulas, is listed in
    ``used_as_input_variables`` or ``non_neutralizable_variables``, or is
    one of the entity weight columns; every other variable is neutralized.
    """
    protected_names = set()
    if self.used_as_input_variables:
        protected_names.update(self.used_as_input_variables)
    if self.non_neutralizable_variables:
        protected_names.update(self.non_neutralizable_variables)
    if self.weight_column_name_by_entity:
        protected_names.update(self.weight_column_name_by_entity.values())
    for variable_name, variable in tax_benefit_system.variables.items():
        if variable.formulas or variable_name in protected_names:
            continue
        tax_benefit_system.neutralize_variable(variable_name)
Neutralizing input variables not in input dataframe and keep some crucial variables
entailment
def set_tax_benefit_systems(self, tax_benefit_system = None, baseline_tax_benefit_system = None):
    """Attach the (reform) tax and benefit system and, optionally, a baseline one.

    When the scenario carries a ``cache_blacklist``, it is propagated to
    every system that gets attached.
    """
    assert tax_benefit_system is not None
    blacklist = self.cache_blacklist
    self.tax_benefit_system = tax_benefit_system
    if blacklist is not None:
        tax_benefit_system.cache_blacklist = blacklist
    if baseline_tax_benefit_system is None:
        return
    self.baseline_tax_benefit_system = baseline_tax_benefit_system
    if blacklist is not None:
        baseline_tax_benefit_system.cache_blacklist = blacklist
Set the tax and benefit system and eventually the baseline tax and benefit system
entailment
def summarize_variable(self, variable = None, use_baseline = False, weighted = False, force_compute = False):
    """Prints a summary of a variable including its memory usage.

    :param string variable: the variable being summarized
    :param bool use_baseline: the tax-benefit-system considered
    :param bool weighted: whether the produced statistics should be weighted or not
    :param bool force_compute: whether the computation of the variable should be forced

    Example:
        >>> from openfisca_survey_manager.tests.test_scenario import create_randomly_initialized_survey_scenario
        >>> survey_scenario = create_randomly_initialized_survey_scenario()
        >>> survey_scenario.summarize_variable(variable = "housing_occupancy_status", force_compute = True)
        <BLANKLINE>
        housing_occupancy_status: 1 periods * 5 cells * item size 2 (<type 'numpy.int16'>, default = HousingOccupancyStatus.tenant) = 10B
        Details:
        2017-01: owner = 0.00e+00 (0.0%), tenant = 5.00e+00 (100.0%), free_lodger = 0.00e+00 (0.0%), homeless = 0.00e+00 (0.0%).
        >>> survey_scenario.summarize_variable(variable = "rent", force_compute = True)
        <BLANKLINE>
        rent: 2 periods * 5 cells * item size 4 (<type 'numpy.float32'>, default = 0) = 40B
        Details:
        2017-01: mean = 562.385070801, min = 156.01864624, max = 950.714294434, mass = 2.81e+03, default = 0.0%, median = 598.658508301
        2018-01: mean = 562.385070801, min = 156.01864624, max = 950.714294434, mass = 2.81e+03, default = 0.0%, median = 598.658508301
    """
    if use_baseline:
        simulation = self.baseline_simulation
    else:
        simulation = self.simulation
    tax_benefit_system = simulation.tax_benefit_system
    assert variable in tax_benefit_system.variables, "{} is not a valid variable".format(variable)
    variable_instance = tax_benefit_system.variables[variable]
    default_value = variable_instance.default_value
    value_type = variable_instance.value_type
    if weighted:
        weight_variable = self.weight_column_name_by_entity[variable_instance.entity.key]
        weights = simulation.calculate(weight_variable, simulation.period)
    infos = simulation.get_memory_usage(variables = [variable])['by_variable'].get(variable)
    if not infos:
        # The variable has no stored array yet: compute it first (when
        # allowed) and recurse, or tell the user how to force it.
        if force_compute:
            self.calculate_variable(variable = variable, period = simulation.period, use_baseline = use_baseline)
            self.summarize_variable(variable = variable, use_baseline = use_baseline, weighted = weighted)
            return
        else:
            print("{} is not computed yet. Use keyword argument force_compute = True".format(variable))
            return
    header_line = "{}: {} periods * {} cells * item size {} ({}, default = {}) = {}".format(
        variable,
        infos['nb_arrays'],
        infos['nb_cells_by_array'],
        infos['cell_size'],
        infos['dtype'],
        default_value,
        humanize.naturalsize(infos['total_nb_bytes'], gnu = True),
        )
    print("")
    print(header_line)
    print("Details:")
    holder = simulation.get_holder(variable)
    if holder is not None:
        if holder.variable.definition_period == ETERNITY:
            # Eternity variables have a single permanent array.
            array = holder.get_array(ETERNITY)
            print("permanent: mean = {}, min = {}, max = {}, median = {}, default = {:.1%}".format(
                array.mean() if not weighted else np.average(array, weights = weights),
                array.min(),
                array.max(),
                np.median(array),
                (
                    (array == default_value).sum() / len(array)
                    if not weighted
                    else ((array == default_value) * weights).sum() / weights.sum()
                    )
                ))
        else:
            for period in sorted(holder.get_known_periods()):
                array = holder.get_array(period)
                if array.shape == ():
                    # Scalar (0-d) array: one constant value for the period.
                    print("{}: always = {}".format(period, array))
                    continue
                if value_type == Enum:
                    # Enum variables: print per-category (weighted) counts.
                    possible_values = variable_instance.possible_values
                    categories_by_index = dict(zip(
                        range(len(possible_values._member_names_)),
                        possible_values._member_names_
                        ))
                    categories_type = pd.api.types.CategoricalDtype(categories = possible_values._member_names_, ordered = True)
                    df = pd.DataFrame({variable: array}).replace(categories_by_index).astype(categories_type)
                    df['weights'] = weights if weighted else 1
                    groupby = df.groupby(variable)['weights'].sum()
                    total = groupby.sum()
                    expr = [" {} = {:.2e} ({:.1%})".format(index, row, row / total) for index, row in groupby.iteritems()]
                    print("{}:{}.".format(period, ",".join(expr)))
                    continue
                print("{}: mean = {}, min = {}, max = {}, mass = {:.2e}, default = {:.1%}, median = {}".format(
                    period,
                    array.mean() if not weighted else np.average(array, weights = weights),
                    array.min(),
                    array.max(),
                    array.sum() if not weighted else np.sum(array * weights),
                    (
                        (array == default_value).sum() / len(array)
                        if not weighted
                        else ((array == default_value) * weights).sum() / weights.sum()
                        ),
                    np.median(array),
                    ))
Prints a summary of a variable including its memory usage. :param string variable: the variable being summarized :param bool use_baseline: the tax-benefit-system considered :param bool weighted: whether the produced statistics should be weigthted or not :param bool force_compute: whether the computation of the variable should be forced Example: >>> from openfisca_survey_manager.tests.test_scenario import create_randomly_initialized_survey_scenario >>> survey_scenario = create_randomly_initialized_survey_scenario() >>> survey_scenario.summarize_variable(variable = "housing_occupancy_status", force_compute = True) <BLANKLINE> housing_occupancy_status: 1 periods * 5 cells * item size 2 (<type 'numpy.int16'>, default = HousingOccupancyStatus.tenant) = 10B Details: 2017-01: owner = 0.00e+00 (0.0%), tenant = 5.00e+00 (100.0%), free_lodger = 0.00e+00 (0.0%), homeless = 0.00e+00 (0.0%). >>> survey_scenario.summarize_variable(variable = "rent", force_compute = True) <BLANKLINE> rent: 2 periods * 5 cells * item size 4 (<type 'numpy.float32'>, default = 0) = 40B Details: 2017-01: mean = 562.385070801, min = 156.01864624, max = 950.714294434, mass = 2.81e+03, default = 0.0%, median = 598.658508301 2018-01: mean = 562.385070801, min = 156.01864624, max = 950.714294434, mass = 2.81e+03, default = 0.0%, median = 598.658508301
entailment
def _set_id_variable_by_entity_key(self) -> Dict[str, str]:
    """Identify and set the id variable name for each entity.

    When not already configured, defaults to ``"<entity key>_id"``.
    """
    if self.id_variable_by_entity_key is None:
        self.id_variable_by_entity_key = {
            entity.key: entity.key + '_id'
            for entity in self.tax_benefit_system.entities
            }
        log.debug("Use default id_variable names:\n {}".format(self.id_variable_by_entity_key))
    return self.id_variable_by_entity_key
Identify and set the good ids for the different entities
entailment
def _set_role_variable_by_entity_key(self) -> Dict[str, str]: '''Identify and set the good roles for the different entities''' if self.role_variable_by_entity_key is None: self.role_variable_by_entity_key = dict( (entity.key, entity.key + '_legacy_role') for entity in self.tax_benefit_system.entities) return self.role_variable_by_entity_key
Identify and set the good roles for the different entities
entailment
def _set_used_as_input_variables_by_entity(self) -> Dict[str, List[str]]: '''Identify and set the good input variables for the different entities''' if self.used_as_input_variables_by_entity is not None: return tax_benefit_system = self.tax_benefit_system assert set(self.used_as_input_variables) <= set(tax_benefit_system.variables.keys()), \ "Some variables used as input variables are not part of the tax benefit system:\n {}".format( set(self.used_as_input_variables).difference(set(tax_benefit_system.variables.keys())) ) self.used_as_input_variables_by_entity = dict() for entity in tax_benefit_system.entities: self.used_as_input_variables_by_entity[entity.key] = [ variable for variable in self.used_as_input_variables if tax_benefit_system.get_variable(variable).entity == entity ] return self.used_as_input_variables_by_entity
Identify and set the good input variables for the different entities
entailment
def _dimensions(self):
    """tuple of dimension objects in this collection.

    This composed tuple is the source for the dimension objects in this
    collection; MR_CAT (selection) dimensions are excluded.
    """
    return tuple(
        dimension
        for dimension in self._all_dimensions
        if dimension.dimension_type != DT.MR_CAT
    )
tuple of dimension objects in this collection. This composed tuple is the source for the dimension objects in this collection.
entailment
def _iter_dimensions(self):
    """Generate a Dimension object for each raw dimension."""
    for raw_dimension in self._raw_dimensions:
        yield Dimension(raw_dimension.dimension_dict, raw_dimension.dimension_type)
Generate Dimension object for each dimension dict.
entailment
def _raw_dimensions(self):
    """Sequence of _RawDimension objects wrapping each dimension dict."""
    dimension_dicts = self._dimension_dicts
    return tuple(
        _RawDimension(dimension_dict, dimension_dicts)
        for dimension_dict in dimension_dicts
    )
Sequence of _RawDimension objects wrapping each dimension dict.
entailment
def dimension_type(self):
    """Return member of DIMENSION_TYPE appropriate to dimension_dict."""
    base_type = self._base_type
    # Categorical and array types require further resolution.
    if base_type == "categorical":
        return self._resolve_categorical()
    if base_type == "enum.variable":
        return self._resolve_array_type()
    # The remaining enum subtypes map directly onto a dimension type.
    direct_types = {
        "enum.datetime": DT.DATETIME,
        "enum.numeric": DT.BINNED_NUMERIC,
        "enum.text": DT.TEXT,
    }
    if base_type in direct_types:
        return direct_types[base_type]
    raise NotImplementedError("unrecognized dimension type %s" % base_type)
Return member of DIMENSION_TYPE appropriate to dimension_dict.
entailment
def _base_type(self): """Return str like 'enum.numeric' representing dimension type. This string is a 'type.subclass' concatenation of the str keys used to identify the dimension type in the cube response JSON. The '.subclass' suffix only appears where a subtype is present. """ type_class = self._dimension_dict["type"]["class"] if type_class == "categorical": return "categorical" if type_class == "enum": subclass = self._dimension_dict["type"]["subtype"]["class"] return "enum.%s" % subclass raise NotImplementedError("unexpected dimension type class '%s'" % type_class)
Return str like 'enum.numeric' representing dimension type. This string is a 'type.subclass' concatenation of the str keys used to identify the dimension type in the cube response JSON. The '.subclass' suffix only appears where a subtype is present.
entailment
def _next_raw_dimension(self):
    """_RawDimension for next *dimension_dict* in sequence or None for last.

    Returns None if this dimension is the last in sequence for this cube.
    """
    dimension_dicts = self._dimension_dicts
    next_idx = dimension_dicts.index(self._dimension_dict) + 1
    if next_idx >= len(dimension_dicts):
        return None
    return _RawDimension(dimension_dicts[next_idx], dimension_dicts)
_RawDimension for next *dimension_dict* in sequence or None for last. Returns None if this dimension is the last in sequence for this cube.
entailment
def _resolve_array_type(self):
    """Return one of the ARRAY_TYPES members of DIMENSION_TYPE.

    This method distinguishes between CA and MR dimensions. The return
    value is only meaningful if the dimension is known to be of array
    type (i.e. either CA or MR, base-type 'enum.variable').
    """
    next_raw_dimension = self._next_raw_dimension
    if next_raw_dimension is None:
        return DT.CA
    # An MR subvariables dimension is followed by a categorical dimension
    # that has a selected category and shares this dimension's alias.
    is_mr_subvar = (
        next_raw_dimension._base_type == "categorical"
        and next_raw_dimension._has_selected_category
        and next_raw_dimension._alias == self._alias
    )
    if is_mr_subvar:
        return DT.MR
    return DT.CA
Return one of the ARRAY_TYPES members of DIMENSION_TYPE. This method distinguishes between CA and MR dimensions. The return value is only meaningful if the dimension is known to be of array type (i.e. either CA or MR, base-type 'enum.variable').
entailment
def _resolve_categorical(self):
    """Return one of the categorical members of DIMENSION_TYPE.

    This method distinguishes between CAT, CA_CAT, MR_CAT, and LOGICAL
    dimension types, all of which have the base type 'categorical'. The
    return value is only meaningful if the dimension is known to be one
    of the categorical types (has base-type 'categorical').
    """
    has_selected = self._has_selected_category
    # An array categorical is either MR_CAT (has selection) or CA_CAT.
    if self._is_array_cat:
        return DT.MR_CAT if has_selected else DT.CA_CAT
    # What's left is LOGICAL (has selection) or plain-old categorical.
    return DT.LOGICAL if has_selected else DT.CAT
Return one of the categorical members of DIMENSION_TYPE. This method distinguishes between CAT, CA_CAT, MR_CAT, and LOGICAL dimension types, all of which have the base type 'categorical'. The return value is only meaningful if the dimension is known to be one of the categorical types (has base-type 'categorical').
entailment
def hs_indices(self):
    """tuple of (anchor_idx, addend_idxs) pair for each subtotal.

    Example::

        (
            (2, (0, 1, 2)),
            (3, (3,)),
            ('bottom', (4, 5))
        )

    Note that the `anchor_idx` item in the first position of each pair
    can be 'top' or 'bottom' as well as an int. The `addend_idxs` tuple
    will always contain at least one index (a subtotal with no addends
    is ignored).
    """
    # Selection/logical dimensions never carry subtotals.
    if self.dimension_type in (DT.MR_CAT, DT.LOGICAL):
        return ()
    return tuple(
        (subtotal.anchor_idx, subtotal.addend_idxs)
        for subtotal in self._subtotals
    )
tuple of (anchor_idx, addend_idxs) pair for each subtotal. Example:: ( (2, (0, 1, 2)), (3, (3,)), ('bottom', (4, 5)) ) Note that the `anchor_idx` item in the first position of each pair can be 'top' or 'bottom' as well as an int. The `addend_idxs` tuple will always contains at least one index (a subtotal with no addends is ignored).
entailment
def inserted_hs_indices(self):
    """list of int index of each inserted subtotal for the dimension.

    Each value represents the position of a subtotal in the interleaved
    sequence of elements and subtotals items.
    """
    # H&S insertions do not apply to CA and MR subvariable dimensions.
    if self.dimension_type in DT.ARRAY_TYPES:
        return []
    interleaved_items = self._iter_interleaved_items(self.valid_elements)
    return [
        idx for idx, item in enumerate(interleaved_items) if item.is_insertion
    ]
list of int index of each inserted subtotal for the dimension. Each value represents the position of a subtotal in the interleaved sequence of elements and subtotals items.
entailment
def is_marginable(self):
    """True if adding counts across this dimension axis is meaningful."""
    unmarginable_types = (DT.CA, DT.MR, DT.MR_CAT, DT.LOGICAL)
    return self.dimension_type not in unmarginable_types
True if adding counts across this dimension axis is meaningful.
entailment
def labels( self, include_missing=False, include_transforms=False, include_cat_ids=False ): """Return list of str labels for the elements of this dimension. Returns a list of (label, element_id) pairs if *include_cat_ids* is True. The `element_id` value in the second position of the pair is None for subtotal items (which don't have an element-id). """ # TODO: Having an alternate return type triggered by a flag-parameter # (`include_cat_ids` in this case) is poor practice. Using flags like # that effectively squashes what should be two methods into one. # Either get rid of the need for that alternate return value type or # create a separate method for it. elements = self.all_elements if include_missing else self.valid_elements include_subtotals = include_transforms and self.dimension_type != DT.CA_SUBVAR # ---items are elements or subtotals, interleaved in display order--- interleaved_items = tuple(self._iter_interleaved_items(elements)) labels = list( item.label for item in interleaved_items if include_subtotals or not item.is_insertion ) if include_cat_ids: element_ids = tuple( None if item.is_insertion else item.element_id for item in interleaved_items if include_subtotals or not item.is_insertion ) return list(zip(labels, element_ids)) return labels
Return list of str labels for the elements of this dimension. Returns a list of (label, element_id) pairs if *include_cat_ids* is True. The `element_id` value in the second position of the pair is None for subtotal items (which don't have an element-id).
entailment
def _iter_interleaved_items(self, elements): """Generate element or subtotal items in interleaved order. This ordering corresponds to how value "rows" (or columns) are to appear after subtotals have been inserted at their anchor locations. Where more than one subtotal is anchored to the same location, they appear in their document order in the cube response. Only elements in the passed *elements* collection appear, which allows control over whether missing elements are included by choosing `.all_elements` or `.valid_elements`. """ subtotals = self._subtotals for subtotal in subtotals.iter_for_anchor("top"): yield subtotal for element in elements: yield element for subtotal in subtotals.iter_for_anchor(element.element_id): yield subtotal for subtotal in subtotals.iter_for_anchor("bottom"): yield subtotal
Generate element or subtotal items in interleaved order. This ordering corresponds to how value "rows" (or columns) are to appear after subtotals have been inserted at their anchor locations. Where more than one subtotal is anchored to the same location, they appear in their document order in the cube response. Only elements in the passed *elements* collection appear, which allows control over whether missing elements are included by choosing `.all_elements` or `.valid_elements`.
entailment
def _subtotals(self): """_Subtotals sequence object for this dimension. The subtotals sequence provides access to any subtotal insertions defined on this dimension. """ view = self._dimension_dict.get("references", {}).get("view", {}) # ---view can be both None and {}, thus the edge case.--- insertion_dicts = ( [] if view is None else view.get("transform", {}).get("insertions", []) ) return _Subtotals(insertion_dicts, self.valid_elements)
_Subtotals sequence object for this dimension. The subtotals sequence provides access to any subtotal insertions defined on this dimension.
entailment
def _element_makings(self): """(ElementCls, element_dicts) pair for this dimension's elements. All the elements of a given dimension are the same type. This method determines the type (class) and source dicts for the elements of this dimension and provides them for the element factory. """ if self._type_dict["class"] == "categorical": return _Category, self._type_dict["categories"] return _Element, self._type_dict["elements"]
(ElementCls, element_dicts) pair for this dimension's elements. All the elements of a given dimension are the same type. This method determines the type (class) and source dicts for the elements of this dimension and provides them for the element factory.
entailment
def _elements(self): """Composed tuple storing actual sequence of element objects.""" ElementCls, element_dicts = self._element_makings return tuple( ElementCls(element_dict, idx, element_dicts) for idx, element_dict in enumerate(element_dicts) )
Composed tuple storing actual sequence of element objects.
entailment
def numeric_value(self): """Numeric value assigned to element by user, np.nan if absent.""" numeric_value = self._element_dict.get("numeric_value") return np.nan if numeric_value is None else numeric_value
Numeric value assigned to element by user, np.nan if absent.
entailment
def label(self): """str display-name for this element, '' when absent from cube response. This property handles numeric, datetime and text variables, but also subvar dimensions """ value = self._element_dict.get("value") type_name = type(value).__name__ if type_name == "NoneType": return "" if type_name == "list": # ---like '10-15' or 'A-F'--- return "-".join([str(item) for item in value]) if type_name in ("float", "int"): return str(value) if type_name in ("str", "unicode"): return value # ---For CA and MR subvar dimensions--- name = value.get("references", {}).get("name") return name if name else ""
str display-name for this element, '' when absent from cube response. This property handles numeric, datetime and text variables, but also subvar dimensions
entailment
def iter_for_anchor(self, anchor): """Generate each subtotal having matching *anchor*.""" return (subtotal for subtotal in self._subtotals if subtotal.anchor == anchor)
Generate each subtotal having matching *anchor*.
entailment
def _iter_valid_subtotal_dicts(self): """Generate each insertion dict that represents a valid subtotal.""" for insertion_dict in self._insertion_dicts: # ---skip any non-dicts--- if not isinstance(insertion_dict, dict): continue # ---skip any non-subtotal insertions--- if insertion_dict.get("function") != "subtotal": continue # ---skip any malformed subtotal-dicts--- if not {"anchor", "args", "name"}.issubset(insertion_dict.keys()): continue # ---skip if doesn't reference at least one non-missing element--- if not self._element_ids.intersection(insertion_dict["args"]): continue # ---an insertion-dict that successfully runs this gauntlet # ---is a valid subtotal dict yield insertion_dict
Generate each insertion dict that represents a valid subtotal.
entailment
def _subtotals(self): """Composed tuple storing actual sequence of _Subtotal objects.""" return tuple( _Subtotal(subtotal_dict, self.valid_elements) for subtotal_dict in self._iter_valid_subtotal_dicts() )
Composed tuple storing actual sequence of _Subtotal objects.
entailment
def anchor(self): """int or str indicating element under which to insert this subtotal. An int anchor is the id of the dimension element (category or subvariable) under which to place this subtotal. The return value can also be one of 'top' or 'bottom'. The return value defaults to 'bottom' for an anchor referring to an element that is no longer present in the dimension or an element that represents missing data. """ anchor = self._subtotal_dict["anchor"] try: anchor = int(anchor) if anchor not in self.valid_elements.element_ids: return "bottom" return anchor except (TypeError, ValueError): return anchor.lower()
int or str indicating element under which to insert this subtotal. An int anchor is the id of the dimension element (category or subvariable) under which to place this subtotal. The return value can also be one of 'top' or 'bottom'. The return value defaults to 'bottom' for an anchor referring to an element that is no longer present in the dimension or an element that represents missing data.
entailment
def anchor_idx(self): """int or str representing index of anchor element in dimension. When the anchor is an operation, like 'top' or 'bottom' """ anchor = self.anchor if anchor in ["top", "bottom"]: return anchor return self.valid_elements.get_by_id(anchor).index_in_valids
int or str representing index of anchor element in dimension. When the anchor is an operation, like 'top' or 'bottom'
entailment
def addend_ids(self): """tuple of int ids of elements contributing to this subtotal. Any element id not present in the dimension or present but representing missing data is excluded. """ return tuple( arg for arg in self._subtotal_dict.get("args", []) if arg in self.valid_elements.element_ids )
tuple of int ids of elements contributing to this subtotal. Any element id not present in the dimension or present but representing missing data is excluded.
entailment
def addend_idxs(self): """tuple of int index of each addend element for this subtotal. The length of the tuple is the same as that for `.addend_ids`, but each value repesents the offset of that element within the dimension, rather than its element id. """ return tuple( self.valid_elements.get_by_id(addend_id).index_in_valids for addend_id in self.addend_ids )
tuple of int index of each addend element for this subtotal. The length of the tuple is the same as that for `.addend_ids`, but each value repesents the offset of that element within the dimension, rather than its element id.
entailment
def create_data_file_by_format(directory_path = None): """ Browse subdirectories to extract stata and sas files """ stata_files = [] sas_files = [] for root, subdirs, files in os.walk(directory_path): for file_name in files: file_path = os.path.join(root, file_name) if os.path.basename(file_name).endswith(".dta"): log.info("Found stata file {}".format(file_path)) stata_files.append(file_path) if os.path.basename(file_name).endswith(".sas7bdat"): log.info("Found sas file {}".format(file_path)) sas_files.append(file_path) return {'stata': stata_files, 'sas': sas_files}
Browse subdirectories to extract stata and sas files
entailment
def as_array( self, include_missing=False, weighted=True, include_transforms_for_dims=None, prune=False, ): """Return `ndarray` representing cube values. Returns the tabular representation of the crunch cube. The returned array has the same number of dimensions as the cube. E.g. for a cross-tab representation of a categorical and numerical variable, the resulting cube will have two dimensions. *include_missing* (bool): Include rows/cols for missing values. Example 1 (Categorical x Categorical):: >>> cube = CrunchCube(response) >>> cube.as_array() np.array([ [5, 2], [5, 3], ]) Example 2 (Categorical x Categorical, include missing values):: >>> cube = CrunchCube(response) >>> cube.as_array(include_missing=True) np.array([ [5, 3, 2, 0], [5, 2, 3, 0], [0, 0, 0, 0], ]) """ array = self._as_array( include_missing=include_missing, weighted=weighted, include_transforms_for_dims=include_transforms_for_dims, ) # ---prune array if pruning was requested--- if prune: array = self._prune_body(array, transforms=include_transforms_for_dims) return self._drop_mr_cat_dims(array)
Return `ndarray` representing cube values. Returns the tabular representation of the crunch cube. The returned array has the same number of dimensions as the cube. E.g. for a cross-tab representation of a categorical and numerical variable, the resulting cube will have two dimensions. *include_missing* (bool): Include rows/cols for missing values. Example 1 (Categorical x Categorical):: >>> cube = CrunchCube(response) >>> cube.as_array() np.array([ [5, 2], [5, 3], ]) Example 2 (Categorical x Categorical, include missing values):: >>> cube = CrunchCube(response) >>> cube.as_array(include_missing=True) np.array([ [5, 3, 2, 0], [5, 2, 3, 0], [0, 0, 0, 0], ])
entailment
def count(self, weighted=True): """Return numberic count of rows considered for cube response.""" return self._measures.weighted_n if weighted else self._measures.unweighted_n
Return numberic count of rows considered for cube response.
entailment
def get_slices(self, ca_as_0th=False): """Return list of :class:`.CubeSlice` objects. The number of slice objects in the returned list depends on the dimensionality of this cube. A 1D or 2D cube will return a list containing one slice object. A 3D cube will return a list of slices the same length as the first dimension. """ if self.ndim < 3 and not ca_as_0th: return [CubeSlice(self, 0)] return [CubeSlice(self, i, ca_as_0th) for i, _ in enumerate(self.labels()[0])]
Return list of :class:`.CubeSlice` objects. The number of slice objects in the returned list depends on the dimensionality of this cube. A 1D or 2D cube will return a list containing one slice object. A 3D cube will return a list of slices the same length as the first dimension.
entailment
def index(self, weighted=True, prune=False): """Return cube index measurement. This function is deprecated. Use index_table from CubeSlice. """ warnings.warn( "CrunchCube.index() is deprecated. Use CubeSlice.index_table().", DeprecationWarning, ) return Index.data(self, weighted, prune)
Return cube index measurement. This function is deprecated. Use index_table from CubeSlice.
entailment
def inserted_hs_indices(self, prune=False): """Get indices of the inserted H&S (for formatting purposes).""" if self.ndim == 2 and prune: # If pruning is applied, we need to subtract from the H&S indes # the number of pruned rows (cols) that come before that index. pruning_bases = [self._pruning_base(axis=i, hs_dims=[0, 1]) for i in [1, 0]] pruning_bases = [ base if base.ndim == 1 else np.sum(base, axis=(1 - i)) for i, base in enumerate(pruning_bases) ] # Obtain prune indices as subscripts prune_indices_list = [ np.arange(len(base))[np.logical_or(base == 0, np.isnan(base))] for base in pruning_bases ] inserted_indices_list = [dim.inserted_hs_indices for dim in self.dimensions] return self._adjust_inserted_indices( inserted_indices_list, prune_indices_list ) return [dim.inserted_hs_indices for dim in self.dimensions]
Get indices of the inserted H&S (for formatting purposes).
entailment
def is_univariate_ca(self): """True if cube only contains a CA dimension-pair, in either order.""" return self.ndim == 2 and set(self.dim_types) == {DT.CA_SUBVAR, DT.CA_CAT}
True if cube only contains a CA dimension-pair, in either order.
entailment
def labels(self, include_missing=False, include_transforms_for_dims=False): """Gets labels for each cube's dimension. Args include_missing (bool): Include labels for missing values Returns labels (list of lists): Labels for each dimension """ return [ dim.labels(include_missing, include_transforms_for_dims) for dim in self.dimensions ]
Gets labels for each cube's dimension. Args include_missing (bool): Include labels for missing values Returns labels (list of lists): Labels for each dimension
entailment
def margin( self, axis=None, weighted=True, include_missing=False, include_transforms_for_dims=None, prune=False, include_mr_cat=False, ): """Get margin for the selected axis. the selected axis. For MR variables, this is the sum of the selected and non-selected slices. Args axis (int): Axis across the margin is calculated. If no axis is provided the margin is calculated across all axis. For Categoricals, Num, Datetime, and Text, this translates to sumation of all elements. Returns Calculated margin for the selected axis Example 1: >>> cube = CrunchCube(fixt_cat_x_cat) np.array([ [5, 2], [5, 3], ]) >>> cube.margin(axis=0) np.array([10, 5]) Example 2: >>> cube = CrunchCube(fixt_cat_x_num_x_datetime) np.array([ [[1, 1], [0, 0], [0, 0], [0, 0]], [[2, 1], [1, 1], [0, 0], [0, 0]], [[0, 0], [2, 3], [0, 0], [0, 0]], [[0, 0], [0, 0], [3, 2], [0, 0]], [[0, 0], [0, 0], [1, 1], [0, 1]] ]) >>> cube.margin(axis=0) np.array([ [3, 2], [3, 4], [4, 3], [0, 1], ]) """ table = self._counts(weighted).raw_cube_array new_axis = self._adjust_axis(axis) index = tuple( None if i in new_axis else slice(None) for i, _ in enumerate(table.shape) ) # Calculate denominator. Only include those H&S dimensions, across # which we DON'T sum. These H&S are needed because of the shape, when # dividing. Those across dims which are summed across MUST NOT be # included, because they would change the result. 
hs_dims = self._hs_dims_for_den(include_transforms_for_dims, axis) den = self._apply_subtotals( self._apply_missings(table, include_missing=include_missing), hs_dims ) # Apply correct mask (based on the as_array shape) arr = self._as_array( include_transforms_for_dims=hs_dims, include_missing=include_missing ) # ---prune array if pruning was requested--- if prune: arr = self._prune_body(arr, transforms=hs_dims) arr = self._drop_mr_cat_dims(arr, fix_valids=include_missing) if isinstance(arr, np.ma.core.MaskedArray): # Inflate the reduced version of the array, to match the # non-reduced version, for the purposes of creating the correct # mask. Create additional dimension (with no elements) where MR_CAT # dimensions should be. Don't inflate 0th dimension if it has only # a single element, because it's not being reduced # in self._drop_mr_cat_dims inflate_ind = tuple( ( None if ( d.dimension_type == DT.MR_CAT or i != 0 and (n <= 1 or len(d.valid_elements) <= 1) ) else slice(None) ) for i, (d, n) in enumerate(zip(self._all_dimensions, table.shape)) ) mask = np.logical_or(np.zeros(den.shape, dtype=bool), arr.mask[inflate_ind]) den = np.ma.masked_array(den, mask) if ( self.ndim != 1 or axis is None or axis == 0 and len(self._all_dimensions) == 1 ): # Special case for 1D cube wigh MR, for "Table" direction den = np.sum(den, axis=new_axis)[index] den = self._drop_mr_cat_dims( den, fix_valids=(include_missing or include_mr_cat) ) if den.shape[0] == 1 and len(den.shape) > 1 and self.ndim < 3: den = den.reshape(den.shape[1:]) return den
Get margin for the selected axis. the selected axis. For MR variables, this is the sum of the selected and non-selected slices. Args axis (int): Axis across the margin is calculated. If no axis is provided the margin is calculated across all axis. For Categoricals, Num, Datetime, and Text, this translates to sumation of all elements. Returns Calculated margin for the selected axis Example 1: >>> cube = CrunchCube(fixt_cat_x_cat) np.array([ [5, 2], [5, 3], ]) >>> cube.margin(axis=0) np.array([10, 5]) Example 2: >>> cube = CrunchCube(fixt_cat_x_num_x_datetime) np.array([ [[1, 1], [0, 0], [0, 0], [0, 0]], [[2, 1], [1, 1], [0, 0], [0, 0]], [[0, 0], [2, 3], [0, 0], [0, 0]], [[0, 0], [0, 0], [3, 2], [0, 0]], [[0, 0], [0, 0], [1, 1], [0, 1]] ]) >>> cube.margin(axis=0) np.array([ [3, 2], [3, 4], [4, 3], [0, 1], ])
entailment
def mr_dim_ind(self): """Return int, tuple of int, or None, representing MR indices. The return value represents the index of each multiple-response (MR) dimension in this cube. Return value is None if there are no MR dimensions, and int if there is one MR dimension, and a tuple of int when there are more than one. The index is the (zero-based) position of the MR dimensions in the _ApparentDimensions sequence returned by the :attr"`.dimensions` property. """ # TODO: rename to `mr_dim_idxs` or better yet get rid of need for # this as it's really a cube internal characteristic. # TODO: Make this return a tuple in all cases, like (), (1,), or (0, 2). indices = tuple( idx for idx, d in enumerate(self.dimensions) if d.dimension_type == DT.MR_SUBVAR ) if indices == (): return None if len(indices) == 1: return indices[0] return indices
Return int, tuple of int, or None, representing MR indices. The return value represents the index of each multiple-response (MR) dimension in this cube. Return value is None if there are no MR dimensions, and int if there is one MR dimension, and a tuple of int when there are more than one. The index is the (zero-based) position of the MR dimensions in the _ApparentDimensions sequence returned by the :attr"`.dimensions` property.
entailment
def population_counts( self, population_size, weighted=True, include_missing=False, include_transforms_for_dims=None, prune=False, ): """Return counts scaled in proportion to overall population. The return value is a numpy.ndarray object. Count values are scaled proportionally to approximate their value if the entire population had been sampled. This calculation is based on the estimated size of the population provided as *population size*. The remaining arguments have the same meaning as they do for the `.proportions()` method. Example:: >>> cube = CrunchCube(fixt_cat_x_cat) >>> cube.as_array() np.array([ [5, 2], [5, 3], ]) >>> cube.population_counts(9000) np.array([ [3000, 1200], [3000, 1800], ]) """ population_counts = [ slice_.population_counts( population_size, weighted=weighted, include_missing=include_missing, include_transforms_for_dims=include_transforms_for_dims, prune=prune, ) for slice_ in self.slices ] if len(population_counts) > 1: return np.array(population_counts) return population_counts[0]
Return counts scaled in proportion to overall population. The return value is a numpy.ndarray object. Count values are scaled proportionally to approximate their value if the entire population had been sampled. This calculation is based on the estimated size of the population provided as *population size*. The remaining arguments have the same meaning as they do for the `.proportions()` method. Example:: >>> cube = CrunchCube(fixt_cat_x_cat) >>> cube.as_array() np.array([ [5, 2], [5, 3], ]) >>> cube.population_counts(9000) np.array([ [3000, 1200], [3000, 1800], ])
entailment
def proportions( self, axis=None, weighted=True, include_transforms_for_dims=None, include_mr_cat=False, prune=False, ): """Return percentage values for cube as `numpy.ndarray`. This function calculates the proportions across the selected axis of a crunch cube. For most variable types, it means the value divided by the margin value. For a multiple-response variable, the value is divided by the sum of selected and non-selected slices. *axis* (int): base axis of proportions calculation. If no axis is provided, calculations are done across the entire table. *weighted* (bool): Specifies weighted or non-weighted proportions. *include_transforms_for_dims* (list): Also include headings and subtotals transformations for the provided dimensions. If the dimensions have the transformations, they'll be included in the resulting numpy array. If the dimensions don't have the transformations, nothing will happen (the result will be the same as if the argument weren't provided). *include_transforms_for_dims* (list): Include headers and subtotals (H&S) across various dimensions. The dimensions are provided as list elements. For example: "include_transforms_for_dims=[0, 1]" instructs the CrunchCube to return H&S for both rows and columns (if it's a 2D cube). *include_mr_cat* (bool): Include MR categories. *prune* (bool): Instructs the CrunchCube to prune empty rows/cols. Emptiness is determined by the state of the margin (if it's either 0 or nan at certain index). If it is, the corresponding row/col is not included in the result. Example 1:: >>> cube = CrunchCube(fixt_cat_x_cat) np.array([ [5, 2], [5, 3], ]) >>> cube.proportions() np.array([ [0.3333333, 0.1333333], [0.3333333, 0.2000000], ]) Example 2:: >>> cube = CrunchCube(fixt_cat_x_cat) np.array([ [5, 2], [5, 3], ]) >>> cube.proportions(axis=0) np.array([ [0.5, 0.4], [0.5, 0.6], ]) """ # Calculate numerator from table (include all H&S dimensions). 
table = self._measure(weighted).raw_cube_array num = self._apply_subtotals( self._apply_missings(table), include_transforms_for_dims ) proportions = num / self._denominator( weighted, include_transforms_for_dims, axis ) if not include_mr_cat: proportions = self._drop_mr_cat_dims(proportions) # Apply correct mask (based on the as_array shape) arr = self.as_array( prune=prune, include_transforms_for_dims=include_transforms_for_dims ) if isinstance(arr, np.ma.core.MaskedArray): proportions = np.ma.masked_array(proportions, arr.mask) return proportions
Return percentage values for cube as `numpy.ndarray`. This function calculates the proportions across the selected axis of a crunch cube. For most variable types, it means the value divided by the margin value. For a multiple-response variable, the value is divided by the sum of selected and non-selected slices. *axis* (int): base axis of proportions calculation. If no axis is provided, calculations are done across the entire table. *weighted* (bool): Specifies weighted or non-weighted proportions. *include_transforms_for_dims* (list): Also include headings and subtotals transformations for the provided dimensions. If the dimensions have the transformations, they'll be included in the resulting numpy array. If the dimensions don't have the transformations, nothing will happen (the result will be the same as if the argument weren't provided). *include_transforms_for_dims* (list): Include headers and subtotals (H&S) across various dimensions. The dimensions are provided as list elements. For example: "include_transforms_for_dims=[0, 1]" instructs the CrunchCube to return H&S for both rows and columns (if it's a 2D cube). *include_mr_cat* (bool): Include MR categories. *prune* (bool): Instructs the CrunchCube to prune empty rows/cols. Emptiness is determined by the state of the margin (if it's either 0 or nan at certain index). If it is, the corresponding row/col is not included in the result. Example 1:: >>> cube = CrunchCube(fixt_cat_x_cat) np.array([ [5, 2], [5, 3], ]) >>> cube.proportions() np.array([ [0.3333333, 0.1333333], [0.3333333, 0.2000000], ]) Example 2:: >>> cube = CrunchCube(fixt_cat_x_cat) np.array([ [5, 2], [5, 3], ]) >>> cube.proportions(axis=0) np.array([ [0.5, 0.4], [0.5, 0.6], ])
entailment
def _denominator(self, weighted, include_transforms_for_dims, axis): """Calculate denominator for percentages. Only include those H&S dimensions, across which we DON'T sum. These H&S are needed because of the shape, when dividing. Those across dims which are summed across MUST NOT be included, because they would change the result.""" table = self._measure(weighted).raw_cube_array new_axis = self._adjust_axis(axis) index = tuple( None if i in new_axis else slice(None) for i, _ in enumerate(table.shape) ) hs_dims = self._hs_dims_for_den(include_transforms_for_dims, axis) den = self._apply_subtotals(self._apply_missings(table), hs_dims) return np.sum(den, axis=new_axis)[index]
Calculate denominator for percentages. Only include those H&S dimensions, across which we DON'T sum. These H&S are needed because of the shape, when dividing. Those across dims which are summed across MUST NOT be included, because they would change the result.
entailment
def scale_means(self, hs_dims=None, prune=False): """Get cube means.""" slices_means = [ScaleMeans(slice_).data for slice_ in self.slices] if hs_dims and self.ndim > 1: # Intersperse scale means with nans if H&S specified, and 2D. No # need to modify 1D, as only one mean will ever be inserted. inserted_indices = self.inserted_hs_indices()[-2:] for scale_means in slices_means: # Scale means 0 corresonds to the column dimension (is # calculated by using its values). The result of it, however, # is a row. That's why we need to check the insertions on the # row dim (inserted columns). if scale_means[0] is not None and 1 in hs_dims and inserted_indices[1]: for i in inserted_indices[1]: scale_means[0] = np.insert(scale_means[0], i, np.nan) # Scale means 1 is a column, so we need to check # for row insertions. if scale_means[1] is not None and 0 in hs_dims and inserted_indices[0]: for i in inserted_indices[0]: scale_means[1] = np.insert(scale_means[1], i, np.nan) if prune: # Apply pruning arr = self.as_array(include_transforms_for_dims=hs_dims, prune=True) if isinstance(arr, np.ma.core.MaskedArray): mask = arr.mask for i, scale_means in enumerate(slices_means): if scale_means[0] is not None: row_mask = ( mask.all(axis=0) if self.ndim < 3 else mask.all(axis=1)[i] ) scale_means[0] = scale_means[0][~row_mask] if self.ndim > 1 and scale_means[1] is not None: col_mask = ( mask.all(axis=1) if self.ndim < 3 else mask.all(axis=2)[i] ) scale_means[1] = scale_means[1][~col_mask] return slices_means
Get cube means.
entailment
def zscore(self, weighted=True, prune=False, hs_dims=None): """Return ndarray with cube's zscore measurements. Zscore is a measure of statistical significance of observed vs. expected counts. It's only applicable to a 2D contingency tables. For 3D cubes, the measures of separate slices are stacked together and returned as the result. :param weighted: Use weighted counts for zscores :param prune: Prune based on unweighted counts :param hs_dims: Include headers and subtotals (as NaN values) :returns zscore: ndarray representing zscore measurements """ res = [s.zscore(weighted, prune, hs_dims) for s in self.slices] return np.array(res) if self.ndim == 3 else res[0]
Return ndarray with cube's zscore measurements. Zscore is a measure of statistical significance of observed vs. expected counts. It's only applicable to a 2D contingency tables. For 3D cubes, the measures of separate slices are stacked together and returned as the result. :param weighted: Use weighted counts for zscores :param prune: Prune based on unweighted counts :param hs_dims: Include headers and subtotals (as NaN values) :returns zscore: ndarray representing zscore measurements
entailment
def wishart_pairwise_pvals(self, axis=0): """Return matrices of column-comparison p-values as list of numpy.ndarrays. Square, symmetric matrix along *axis* of pairwise p-values for the null hypothesis that col[i] = col[j] for each pair of columns. *axis* (int): axis along which to perform comparison. Only columns (0) are implemented currently. """ return [slice_.wishart_pairwise_pvals(axis=axis) for slice_ in self.slices]
Return matrices of column-comparison p-values as list of numpy.ndarrays. Square, symmetric matrix along *axis* of pairwise p-values for the null hypothesis that col[i] = col[j] for each pair of columns. *axis* (int): axis along which to perform comparison. Only columns (0) are implemented currently.
entailment
def _adjust_axis(self, axis): """Return raw axis/axes corresponding to apparent axis/axes. This method adjusts user provided 'axis' parameter, for some of the cube operations, mainly 'margin'. The user never sees the MR selections dimension, and treats all MRs as single dimensions. Thus we need to adjust the values of axis (to sum across) to what the user would've specified if he were aware of the existence of the MR selections dimension. The reason for this adjustment is that all of the operations performed troughout the margin calculations will be carried on an internal array, containing all the data (together with all selections). For more info on how it needs to operate, check the unit tests. """ if not self._is_axis_allowed(axis): ca_error_msg = "Direction {} not allowed (items dimension)" raise ValueError(ca_error_msg.format(axis)) if isinstance(axis, int): # If single axis was provided, create a list out of it, so that # we can do the subsequent iteration. axis = list([axis]) elif axis is None: # If axis was None, create what user would expect in terms of # finding out the Total(s). In case of 2D cube, this will be the # axis of all the dimensions that the user can see, that is (0, 1), # because the selections dimension is invisible to the user. In # case of 3D cube, this will be the "total" across each slice, so # we need to drop the 0th dimension, and only take last two (1, 2). axis = range(self.ndim)[-2:] else: # In case of a tuple, just keep it as a list. axis = list(axis) axis = np.array(axis) # Create new array for storing updated values of axis. It's necessary # because it's hard to update the values in place. new_axis = np.array(axis) # Iterate over user-visible dimensions, and update axis when MR is # detected. For each detected MR, we need to increment all subsequent # axis (that were provided by the user). But we don't need to update # the axis that are "behind" the current MR. 
for i, dim in enumerate(self.dimensions): if dim.dimension_type == DT.MR_SUBVAR: # This formula updates only the axis that come "after" the # current MR (items) dimension. new_axis[axis >= i] += 1 return tuple(new_axis)
Return raw axis/axes corresponding to apparent axis/axes. This method adjusts user provided 'axis' parameter, for some of the cube operations, mainly 'margin'. The user never sees the MR selections dimension, and treats all MRs as single dimensions. Thus we need to adjust the values of axis (to sum across) to what the user would've specified if he were aware of the existence of the MR selections dimension. The reason for this adjustment is that all of the operations performed troughout the margin calculations will be carried on an internal array, containing all the data (together with all selections). For more info on how it needs to operate, check the unit tests.
entailment
def _adjust_inserted_indices(inserted_indices_list, prune_indices_list): """Adjust inserted indices, if there are pruned elements.""" # Created a copy, to preserve cached property updated_inserted = [[i for i in dim_inds] for dim_inds in inserted_indices_list] pruned_and_inserted = zip(prune_indices_list, updated_inserted) for prune_inds, inserted_inds in pruned_and_inserted: # Only prune indices if they're not H&S (inserted) prune_inds = prune_inds[~np.in1d(prune_inds, inserted_inds)] for i, ind in enumerate(inserted_inds): ind -= np.sum(prune_inds < ind) inserted_inds[i] = ind return updated_inserted
Adjust inserted indices, if there are pruned elements.
entailment
def _apply_missings(self, res, include_missing=False): """Return ndarray with missing and insertions as specified. The return value is the result of the following operations on *res*, which is a raw cube value array (raw meaning it has shape of original cube response). * Remove vectors (rows/cols) for missing elements if *include_missin* is False. Note that it does *not* include pruning. """ # --element idxs that satisfy `include_missing` arg. Note this # --includes MR_CAT elements so is essentially all-or-valid-elements element_idxs = tuple( ( d.all_elements.element_idxs if include_missing else d.valid_elements.element_idxs ) for d in self._all_dimensions ) return res[np.ix_(*element_idxs)] if element_idxs else res
Return ndarray with missing and insertions as specified. The return value is the result of the following operations on *res*, which is a raw cube value array (raw meaning it has shape of original cube response). * Remove vectors (rows/cols) for missing elements if *include_missin* is False. Note that it does *not* include pruning.
entailment
def _apply_subtotals(self, res, include_transforms_for_dims): """* Insert subtotals (and perhaps other insertions later) for dimensions having their apparent dimension-idx in *include_transforms_for_dims*. """ if not include_transforms_for_dims: return res suppressed_dim_count = 0 for (dim_idx, dim) in enumerate(self._all_dimensions): if dim.dimension_type == DT.MR_CAT: suppressed_dim_count += 1 # ---only marginable dimensions can be subtotaled--- if not dim.is_marginable: continue apparent_dim_idx = dim_idx - suppressed_dim_count transform = ( dim.has_transforms and apparent_dim_idx in include_transforms_for_dims ) if not transform: continue # ---insert subtotals into result array--- insertions = self._insertions(res, dim, dim_idx) res = self._update_result(res, insertions, dim_idx) return res
* Insert subtotals (and perhaps other insertions later) for dimensions having their apparent dimension-idx in *include_transforms_for_dims*.
entailment
def _as_array( self, include_missing=False, get_non_selected=False, weighted=True, include_transforms_for_dims=False, ): """Get crunch cube as ndarray. Args include_missing (bool): Include rows/cols for missing values. get_non_selected (bool): Get non-selected slices for MR vars. weighted (bool): Take weighted or unweighted counts. include_transforms_for_dims (list): For which dims to include headings & subtotals (H&S) transformations. Returns res (ndarray): Tabular representation of crunch cube """ return self._apply_subtotals( self._apply_missings( self._measure(weighted).raw_cube_array, include_missing=include_missing ), include_transforms_for_dims, )
Get crunch cube as ndarray. Args include_missing (bool): Include rows/cols for missing values. get_non_selected (bool): Get non-selected slices for MR vars. weighted (bool): Take weighted or unweighted counts. include_transforms_for_dims (list): For which dims to include headings & subtotals (H&S) transformations. Returns res (ndarray): Tabular representation of crunch cube
entailment
def _calculate_constraints_sum(cls, prop_table, prop_margin, axis): """Calculate sum of constraints (part of the standard error equation). This method calculates the sum of the cell proportions multiplied by row (or column) marginal proportions (margins divide by the total count). It does this by utilizing the matrix multiplication, which directly translates to the mathematical definition (the sum across i and j indices). """ if axis not in [0, 1]: raise ValueError("Unexpected value for `axis`: {}".format(axis)) V = prop_table * (1 - prop_table) if axis == 0: # If axis is 0, sumation is performed across the 'i' index, which # requires the matrix to be multiplied from the right # (because of the inner matrix dimensions). return np.dot(V, prop_margin) elif axis == 1: # If axis is 1, sumation is performed across the 'j' index, which # requires the matrix to be multiplied from the left # (because of the inner matrix dimensions). return np.dot(prop_margin, V)
Calculate sum of constraints (part of the standard error equation). This method calculates the sum of the cell proportions multiplied by row (or column) marginal proportions (margins divide by the total count). It does this by utilizing the matrix multiplication, which directly translates to the mathematical definition (the sum across i and j indices).
entailment
def _counts(self, weighted): """Return _BaseMeasure subclass for *weighted* counts. The return value is a _WeightedCountMeasure object if *weighted* is True and the cube response is weighted. Otherwise it is an _UnweightedCountMeasure object. Any means measure that may be present is not considered. Contrast with `._measure()` below. """ return ( self._measures.weighted_counts if weighted else self._measures.unweighted_counts )
Return _BaseMeasure subclass for *weighted* counts. The return value is a _WeightedCountMeasure object if *weighted* is True and the cube response is weighted. Otherwise it is an _UnweightedCountMeasure object. Any means measure that may be present is not considered. Contrast with `._measure()` below.
entailment
def _cube_dict(self): """dict containing raw cube response, parsed from JSON payload.""" try: cube_response = self._cube_response_arg # ---parse JSON to a dict when constructed with JSON--- cube_dict = ( cube_response if isinstance(cube_response, dict) else json.loads(cube_response) ) # ---cube is 'value' item in a shoji response--- return cube_dict.get("value", cube_dict) except TypeError: raise TypeError( "Unsupported type <%s> provided. Cube response must be JSON " "(str) or dict." % type(self._cube_response_arg).__name__ )
dict containing raw cube response, parsed from JSON payload.
entailment
def _drop_mr_cat_dims(self, array, fix_valids=False): """Return ndarray reflecting *array* with MR_CAT dims dropped. If any (except 1st) dimension has a single element, it is flattened in the resulting array (which is more convenient for the users of the CrunchCube). If the original shape of the cube is needed (e.g. to calculate the margins with correct axis arguments), this needs to happen before the call to this method '_drop_mr_cat_dims'. """ # TODO: We cannot arbitrarily drop any dimension simply because it # has a length (shape) of 1. We must target MR_CAT dimensions # specifically. Otherwise unexpected results can occur based on # accidents of cube category count etc. If "user-friendly" reshaping # needs be done, it should be as a very last step and much safer to # leave that to the cr.cube client; software being "helpful" almost # never is. if not array.shape or len(array.shape) != len(self._all_dimensions): # This condition covers two cases: # 1. In case of no dimensions, the shape of the array is empty # 2. If the shape was already fixed, we don't need to fix it again. # This might happen while constructing the masked arrays. In case # of MR, we will have the selections dimension included thoughout # the calculations, and will only remove it before returning the # result to the user. return array # We keep MR selections (MR_CAT) dimensions in the array, all the way # up to here. At this point, we need to remove the non-selected part of # selections dimension (and subsequently purge the dimension itself). display_ind = ( tuple( 0 if dim.dimension_type == DT.MR_CAT else slice(None) for dim, n in zip(self._all_dimensions, array.shape) ) if not fix_valids else np.ix_( *[ dim.valid_elements.element_idxs if n > 1 else [0] for dim, n in zip(self._all_dimensions, array.shape) ] ) ) array = array[display_ind] # If a first dimension only has one element, we don't want to # remove it from the shape. Hence the i == 0 part. 
For other dimensions # that have one element, it means that these are the remnants of the MR # selections, which we don't need as separate dimensions. new_shape = [ length for (i, length) in enumerate(array.shape) if length != 1 or i == 0 ] return array.reshape(new_shape)
Return ndarray reflecting *array* with MR_CAT dims dropped. If any (except 1st) dimension has a single element, it is flattened in the resulting array (which is more convenient for the users of the CrunchCube). If the original shape of the cube is needed (e.g. to calculate the margins with correct axis arguments), this needs to happen before the call to this method '_drop_mr_cat_dims'.
entailment
def _fix_valid_indices(cls, valid_indices, insertion_index, dim): """Add indices for H&S inserted elements.""" # TODO: make this accept an immutable sequence for valid_indices # (a tuple) and return an immutable sequence rather than mutating an # argument. indices = np.array(sorted(valid_indices[dim])) slice_index = np.sum(indices <= insertion_index) indices[slice_index:] += 1 indices = np.insert(indices, slice_index, insertion_index + 1) valid_indices[dim] = indices.tolist() return valid_indices
Add indices for H&S inserted elements.
entailment
def _insertions(self, result, dimension, dimension_index): """Return list of (idx, sum) pairs representing subtotals. *idx* is the int offset at which to insert the ndarray subtotal in *sum*. """ def iter_insertions(): for anchor_idx, addend_idxs in dimension.hs_indices: insertion_idx = ( -1 if anchor_idx == "top" else result.shape[dimension_index] - 1 if anchor_idx == "bottom" else anchor_idx ) addend_fancy_idx = tuple( [slice(None) for _ in range(dimension_index)] + [np.array(addend_idxs)] ) yield ( insertion_idx, np.sum(result[addend_fancy_idx], axis=dimension_index), ) return [insertion for insertion in iter_insertions()]
Return list of (idx, sum) pairs representing subtotals. *idx* is the int offset at which to insert the ndarray subtotal in *sum*.
entailment
def _is_axis_allowed(self, axis): """Check if axis are allowed. In case the calculation is requested over CA items dimension, it is not valid. It's valid in all other cases. """ if axis is None: # If table direction was requested, we must ensure that each slice # doesn't have the CA items dimension (thus the [-2:] part). It's # OK for the 0th dimension to be items, since no calculation is # performed over it. if DT.CA_SUBVAR in self.dim_types[-2:]: return False return True if isinstance(axis, int): if self.ndim == 1 and axis == 1: # Special allowed case of a 1D cube, where "row" # directions is requested. return True axis = [axis] # ---axis is a tuple--- for dim_idx in axis: if self.dim_types[dim_idx] == DT.CA_SUBVAR: # If any of the directions explicitly asked for directly # corresponds to the CA items dimension, the requested # calculation is not valid. return False return True
Check if axis are allowed. In case the calculation is requested over CA items dimension, it is not valid. It's valid in all other cases.
entailment
def _measure(self, weighted): """_BaseMeasure subclass representing primary measure for this cube. If the cube response includes a means measure, the return value is means. Otherwise it is counts, with the choice between weighted or unweighted determined by *weighted*. Note that weighted counts are provided on an "as-available" basis. When *weighted* is True and the cube response is not weighted, unweighted counts are returned. """ return ( self._measures.means if self._measures.means is not None else self._measures.weighted_counts if weighted else self._measures.unweighted_counts )
_BaseMeasure subclass representing primary measure for this cube. If the cube response includes a means measure, the return value is means. Otherwise it is counts, with the choice between weighted or unweighted determined by *weighted*. Note that weighted counts are provided on an "as-available" basis. When *weighted* is True and the cube response is not weighted, unweighted counts are returned.
entailment
def _prune_3d_body(self, res, transforms): """Return masked array where mask indicates pruned vectors. *res* is an ndarray (result). *transforms* is a list of ... """ mask = np.zeros(res.shape) mr_dim_idxs = self.mr_dim_ind for i, prune_inds in enumerate(self.prune_indices(transforms)): rows_pruned = prune_inds[0] cols_pruned = prune_inds[1] rows_pruned = np.repeat(rows_pruned[:, None], len(cols_pruned), axis=1) cols_pruned = np.repeat(cols_pruned[None, :], len(rows_pruned), axis=0) slice_mask = np.logical_or(rows_pruned, cols_pruned) # In case of MRs we need to "inflate" mask if mr_dim_idxs == (1, 2): slice_mask = slice_mask[:, np.newaxis, :, np.newaxis] elif mr_dim_idxs == (0, 1): slice_mask = slice_mask[np.newaxis, :, np.newaxis, :] elif mr_dim_idxs == (0, 2): slice_mask = slice_mask[np.newaxis, :, :, np.newaxis] elif mr_dim_idxs == 1 and self.ndim == 3: slice_mask = slice_mask[:, np.newaxis, :] elif mr_dim_idxs == 2 and self.ndim == 3: slice_mask = slice_mask[:, :, np.newaxis] mask[i] = slice_mask res = np.ma.masked_array(res, mask=mask) return res
Return masked array where mask indicates pruned vectors. *res* is an ndarray (result). *transforms* is a list of ...
entailment
def _prune_body(self, res, transforms=None): """Return a masked version of *res* where pruned rows/cols are masked. Return value is an `np.ma.MaskedArray` object. Pruning is the removal of rows or columns whose corresponding marginal elements are either 0 or not defined (np.nan). """ if self.ndim > 2: return self._prune_3d_body(res, transforms) res = self._drop_mr_cat_dims(res) # ---determine which rows should be pruned--- row_margin = self._pruning_base( hs_dims=transforms, axis=self.row_direction_axis ) # ---adjust special-case row-margin values--- item_types = (DT.MR, DT.CA_SUBVAR) if self.ndim > 1 and self.dim_types[1] in item_types and len(res.shape) > 1: # ---when row-dimension has only one category it gets squashed--- axis = 1 if res.shape[0] > 1 else None # ---in CAT x MR case (or if it has CA subvars) we get # a 2D margin (denom really)--- row_margin = np.sum(row_margin, axis=axis) row_prune_inds = self._margin_pruned_indices( row_margin, self._inserted_dim_inds(transforms, 0), 0 ) # ---a 1D only has rows, so mask only with row-prune-idxs--- if self.ndim == 1 or len(res.shape) == 1: # For 1D, margin is calculated as the row margin. return np.ma.masked_array(res, mask=row_prune_inds) # ---determine which columns should be pruned--- col_margin = self._pruning_base( hs_dims=transforms, axis=self._col_direction_axis ) if col_margin.ndim > 1: # In case of MR x CAT, we have 2D margin col_margin = np.sum(col_margin, axis=0) col_prune_inds = self._margin_pruned_indices( col_margin, self._inserted_dim_inds(transforms, 1), 1 ) # ---create rows x cols mask and mask the result array--- mask = self._create_mask(res, row_prune_inds, col_prune_inds) res = np.ma.masked_array(res, mask=mask) # ---return the masked array--- return res
Return a masked version of *res* where pruned rows/cols are masked. Return value is an `np.ma.MaskedArray` object. Pruning is the removal of rows or columns whose corresponding marginal elements are either 0 or not defined (np.nan).
entailment
def prune_indices(self, transforms=None): """Return indices of pruned rows and columns as list. The return value has one of three possible forms: * a 1-element list of row indices (in case of 1D cube) * 2-element list of row and col indices (in case of 2D cube) * n-element list of tuples of 2 elements (if it's 3D cube). For each case, the 2 elements are the ROW and COL indices of the elements that need to be pruned. If it's a 3D cube, these indices are calculated "per slice", that is NOT on the 0th dimension (as the 0th dimension represents the slices). """ if self.ndim >= 3: # In case of a 3D cube, return list of tuples # (of row and col pruned indices). return self._prune_3d_indices(transforms) def prune_non_3d_indices(transforms): row_margin = self._pruning_base( hs_dims=transforms, axis=self.row_direction_axis ) row_indices = self._margin_pruned_indices( row_margin, self._inserted_dim_inds(transforms, 0), 0 ) if row_indices.ndim > 1: # In case of MR, we'd have 2D prune indices row_indices = row_indices.all(axis=1) if self.ndim == 1: return [row_indices] col_margin = self._pruning_base( hs_dims=transforms, axis=self._col_direction_axis ) col_indices = self._margin_pruned_indices( col_margin, self._inserted_dim_inds(transforms, 1), 1 ) if col_indices.ndim > 1: # In case of MR, we'd have 2D prune indices col_indices = col_indices.all(axis=0) return [row_indices, col_indices] # In case of 1 or 2 D cubes, return a list of # row indices (or row and col indices) return prune_non_3d_indices(transforms)
Return indices of pruned rows and columns as list. The return value has one of three possible forms: * a 1-element list of row indices (in case of 1D cube) * 2-element list of row and col indices (in case of 2D cube) * n-element list of tuples of 2 elements (if it's 3D cube). For each case, the 2 elements are the ROW and COL indices of the elements that need to be pruned. If it's a 3D cube, these indices are calculated "per slice", that is NOT on the 0th dimension (as the 0th dimension represents the slices).
entailment
def _pruning_base(self, axis=None, hs_dims=None): """Gets margin if across CAT dimension. Gets counts if across items. Categorical variables are pruned based on their marginal values. If the marginal is a 0 or a NaN, the corresponding row/column is pruned. In case of a subvars (items) dimension, we only prune if all the counts of the corresponding row/column are zero. """ if not self._is_axis_allowed(axis): # In case we encountered axis that would go across items dimension, # we need to return at least some result, to prevent explicitly # checking for this condition, wherever self._margin is used return self.as_array(weighted=False, include_transforms_for_dims=hs_dims) # In case of allowed axis, just return the normal API margin. This call # would throw an exception when directly invoked with bad axis. This is # intended, because we want to be as explicit as possible. Margins # across items are not allowed. return self.margin( axis=axis, weighted=False, include_transforms_for_dims=hs_dims )
Gets margin if across CAT dimension. Gets counts if across items. Categorical variables are pruned based on their marginal values. If the marginal is a 0 or a NaN, the corresponding row/column is pruned. In case of a subvars (items) dimension, we only prune if all the counts of the corresponding row/column are zero.
entailment
def _update_result(self, result, insertions, dimension_index): """Insert subtotals into resulting ndarray.""" for j, (ind_insertion, value) in enumerate(insertions): result = np.insert( result, ind_insertion + j + 1, value, axis=dimension_index ) return result
Insert subtotals into resulting ndarray.
entailment
def is_weighted(self): """True if weights have been applied to the measure(s) for this cube. Unweighted counts are available for all cubes. Weighting applies to any other measures provided by the cube. """ cube_dict = self._cube_dict if cube_dict.get("query", {}).get("weight") is not None: return True if cube_dict.get("weight_var") is not None: return True if cube_dict.get("weight_url") is not None: return True unweighted_counts = cube_dict["result"]["counts"] count_data = cube_dict["result"]["measures"].get("count", {}).get("data") if unweighted_counts != count_data: return True return False
True if weights have been applied to the measure(s) for this cube. Unweighted counts are available for all cubes. Weighting applies to any other measures provided by the cube.
entailment
def means(self): """_MeanMeasure object providing access to means values. None when the cube response does not contain a mean measure. """ mean_measure_dict = ( self._cube_dict.get("result", {}).get("measures", {}).get("mean") ) if mean_measure_dict is None: return None return _MeanMeasure(self._cube_dict, self._all_dimensions)
_MeanMeasure object providing access to means values. None when the cube response does not contain a mean measure.
entailment
def missing_count(self): """numeric representing count of missing rows in cube response.""" if self.means: return self.means.missing_count return self._cube_dict["result"].get("missing", 0)
numeric representing count of missing rows in cube response.
entailment
def population_fraction(self): """The filtered/unfiltered ratio for cube response. This value is required for properly calculating population on a cube where a filter has been applied. Returns 1.0 for an unfiltered cube. Returns `np.nan` if the unfiltered count is zero, which would otherwise result in a divide-by-zero error. """ numerator = self._cube_dict["result"].get("filtered", {}).get("weighted_n") denominator = self._cube_dict["result"].get("unfiltered", {}).get("weighted_n") try: return numerator / denominator except ZeroDivisionError: return np.nan except Exception: return 1.0
The filtered/unfiltered ratio for cube response. This value is required for properly calculating population on a cube where a filter has been applied. Returns 1.0 for an unfiltered cube. Returns `np.nan` if the unfiltered count is zero, which would otherwise result in a divide-by-zero error.
entailment
def weighted_counts(self): """_WeightedCountMeasure object for this cube. This object provides access to weighted counts for this cube, if available. If the cube response is not weighted, the _UnweightedCountMeasure object for this cube is returned. """ if not self.is_weighted: return self.unweighted_counts return _WeightedCountMeasure(self._cube_dict, self._all_dimensions)
_WeightedCountMeasure object for this cube. This object provides access to weighted counts for this cube, if available. If the cube response is not weighted, the _UnweightedCountMeasure object for this cube is returned.
entailment
def weighted_n(self): """float count of returned rows adjusted for weighting.""" if not self.is_weighted: return float(self.unweighted_n) return float(sum(self._cube_dict["result"]["measures"]["count"]["data"]))
float count of returned rows adjusted for weighting.
entailment
def raw_cube_array(self): """Return read-only ndarray of measure values from cube-response. The shape of the ndarray mirrors the shape of the (raw) cube response. Specifically, it includes values for missing elements, any MR_CAT dimensions, and any prunable rows and columns. """ array = np.array(self._flat_values).reshape(self._all_dimensions.shape) # ---must be read-only to avoid hard-to-find bugs--- array.flags.writeable = False return array
Return read-only ndarray of measure values from cube-response. The shape of the ndarray mirrors the shape of the (raw) cube response. Specifically, it includes values for missing elements, any MR_CAT dimensions, and any prunable rows and columns.
entailment
def _flat_values(self): """Return tuple of mean values as found in cube response. Mean data may include missing items represented by a dict like {'?': -1} in the cube response. These are replaced by np.nan in the returned value. """ return tuple( np.nan if type(x) is dict else x for x in self._cube_dict["result"]["measures"]["mean"]["data"] )
Return tuple of mean values as found in cube response. Mean data may include missing items represented by a dict like {'?': -1} in the cube response. These are replaced by np.nan in the returned value.
entailment
def make_input_dataframe_by_entity(tax_benefit_system, nb_persons, nb_groups): """ Generate a dictionnary of dataframes containing nb_persons persons spread in nb_groups groups. :param TaxBenefitSystem tax_benefit_system: the tax_benefit_system to use :param int nb_persons: the number of persons in the system :param int nb_groups: the number of collective entities in the system :returns: A dictionary whose keys are entities and values the corresponding data frames Example: >>> from openfisca_survey_manager.input_dataframe_generator import make_input_dataframe_by_entity >>> from openfisca_country_template import CountryTaxBenefitSystem >>> tbs = CountryTaxBenefitSystem() >>> input_dataframe_by_entity = make_input_dataframe_by_entity(tbs, 400, 100) >>> sorted(input_dataframe_by_entity['person'].columns.tolist()) ['household_id', 'household_legacy_role', 'household_role', 'person_id'] >>> sorted(input_dataframe_by_entity['household'].columns.tolist()) [] """ input_dataframe_by_entity = dict() person_entity = [entity for entity in tax_benefit_system.entities if entity.is_person][0] person_id = np.arange(nb_persons) input_dataframe_by_entity = dict() input_dataframe_by_entity[person_entity.key] = pd.DataFrame({ person_entity.key + '_id': person_id, }) input_dataframe_by_entity[person_entity.key].set_index('person_id') # adults = [0] + sorted(random.sample(range(1, nb_persons), nb_groups - 1)) members_entity_id = np.empty(nb_persons, dtype = int) # A legacy role is an index that every person within an entity has. # For instance, the 'first_parent' has legacy role 0, the 'second_parent' 1, the first 'child' 2, the second 3, etc. 
members_legacy_role = np.empty(nb_persons, dtype = int) id_group = -1 for id_person in range(nb_persons): if id_person in adults: id_group += 1 legacy_role = 0 else: legacy_role = 2 if legacy_role == 0 else legacy_role + 1 members_legacy_role[id_person] = legacy_role members_entity_id[id_person] = id_group for entity in tax_benefit_system.entities: if entity.is_person: continue key = entity.key person_dataframe = input_dataframe_by_entity[person_entity.key] person_dataframe[key + '_id'] = members_entity_id person_dataframe[key + '_legacy_role'] = members_legacy_role person_dataframe[key + '_role'] = np.where( members_legacy_role == 0, entity.flattened_roles[0].key, entity.flattened_roles[-1].key) input_dataframe_by_entity[key] = pd.DataFrame({ key + '_id': range(nb_groups) }) input_dataframe_by_entity[key].set_index(key + '_id', inplace = True) return input_dataframe_by_entity
Generate a dictionnary of dataframes containing nb_persons persons spread in nb_groups groups. :param TaxBenefitSystem tax_benefit_system: the tax_benefit_system to use :param int nb_persons: the number of persons in the system :param int nb_groups: the number of collective entities in the system :returns: A dictionary whose keys are entities and values the corresponding data frames Example: >>> from openfisca_survey_manager.input_dataframe_generator import make_input_dataframe_by_entity >>> from openfisca_country_template import CountryTaxBenefitSystem >>> tbs = CountryTaxBenefitSystem() >>> input_dataframe_by_entity = make_input_dataframe_by_entity(tbs, 400, 100) >>> sorted(input_dataframe_by_entity['person'].columns.tolist()) ['household_id', 'household_legacy_role', 'household_role', 'person_id'] >>> sorted(input_dataframe_by_entity['household'].columns.tolist()) []
entailment
def randomly_init_variable(tax_benefit_system, input_dataframe_by_entity, variable_name, max_value, condition = None, seed = None): """ Initialise a variable with random values (from 0 to max_value). If a condition vector is provided, only set the value of persons or groups for which condition is True. Exemple: >>> from openfisca_survey_manager.input_dataframe_generator import make_input_dataframe_by_entity >>> from openfisca_country_template import CountryTaxBenefitSystem >>> tbs = CountryTaxBenefitSystem() >>> input_dataframe_by_entity = make_input_dataframe_by_entity(tbs, 400, 100) >>> randomly_init_variable(tbs, input_dataframe_by_entity, 'salary', max_value = 50000, condition = "household_role == 'first_parent'") # Randomly set a salaire_net for all persons between 0 and 50000? >>> sorted(input_dataframe_by_entity['person'].columns.tolist()) ['household_id', 'household_legacy_role', 'household_role', 'person_id', 'salary'] >>> input_dataframe_by_entity['person'].salary.max() <= 50000 True >>> len(input_dataframe_by_entity['person'].salary) 400 >>> randomly_init_variable(tbs, input_dataframe_by_entity, 'rent', max_value = 1000) >>> sorted(input_dataframe_by_entity['household'].columns.tolist()) ['rent'] >>> input_dataframe_by_entity['household'].rent.max() <= 1000 True >>> input_dataframe_by_entity['household'].rent.max() >= 1 True >>> len(input_dataframe_by_entity['household'].rent) 100 """ variable = tax_benefit_system.variables[variable_name] entity = variable.entity if condition is None: condition = True else: condition = input_dataframe_by_entity[entity.key].eval(condition).values if seed is None: seed = 42 np.random.seed(seed) count = len(input_dataframe_by_entity[entity.key]) value = (np.random.rand(count) * max_value * condition).astype(variable.dtype) input_dataframe_by_entity[entity.key][variable_name] = value
Initialise a variable with random values (from 0 to max_value). If a condition vector is provided, only set the value of persons or groups for which condition is True. Exemple: >>> from openfisca_survey_manager.input_dataframe_generator import make_input_dataframe_by_entity >>> from openfisca_country_template import CountryTaxBenefitSystem >>> tbs = CountryTaxBenefitSystem() >>> input_dataframe_by_entity = make_input_dataframe_by_entity(tbs, 400, 100) >>> randomly_init_variable(tbs, input_dataframe_by_entity, 'salary', max_value = 50000, condition = "household_role == 'first_parent'") # Randomly set a salaire_net for all persons between 0 and 50000? >>> sorted(input_dataframe_by_entity['person'].columns.tolist()) ['household_id', 'household_legacy_role', 'household_role', 'person_id', 'salary'] >>> input_dataframe_by_entity['person'].salary.max() <= 50000 True >>> len(input_dataframe_by_entity['person'].salary) 400 >>> randomly_init_variable(tbs, input_dataframe_by_entity, 'rent', max_value = 1000) >>> sorted(input_dataframe_by_entity['household'].columns.tolist()) ['rent'] >>> input_dataframe_by_entity['household'].rent.max() <= 1000 True >>> input_dataframe_by_entity['household'].rent.max() >= 1 True >>> len(input_dataframe_by_entity['household'].rent) 100
entailment
def get_value(self, variable = None, table = None): """ Get value Parameters ---------- variable : string name of the variable table : string, default None name of the table hosting the variable Returns ------- df : DataFrame, default None A DataFrame containing the variable """ assert variable is not None, "A variable is needed" if table not in self.tables: log.error("Table {} is not found in survey tables".format(table)) df = self.get_values([variable], table) return df
Get value Parameters ---------- variable : string name of the variable table : string, default None name of the table hosting the variable Returns ------- df : DataFrame, default None A DataFrame containing the variable
entailment
def get_values(self, variables = None, table = None, lowercase = False, rename_ident = True): """ Get values Parameters ---------- variables : list of strings, default None list of variables names, if None return the whole table table : string, default None name of the table hosting the variables lowercase : boolean, deflault True put variables of the table into lowercase rename_ident : boolean, deflault True rename variables ident+yr (e.g. ident08) into ident Returns ------- df : DataFrame, default None A DataFrame containing the variables """ assert self.hdf5_file_path is not None assert os.path.exists(self.hdf5_file_path), '{} is not a valid path'.format( self.hdf5_file_path) store = pandas.HDFStore(self.hdf5_file_path) try: df = store.select(table) except KeyError: log.error('No table {} in the file {}'.format(table, self.hdf5_file_path)) log.error('Table(s) available are: {}'.format(store.keys())) store.close() raise if lowercase: columns = dict((column_name, column_name.lower()) for column_name in df) df.rename(columns = columns, inplace = True) if rename_ident is True: for column_name in df: if ident_re.match(str(column_name)) is not None: df.rename(columns = {column_name: "ident"}, inplace = True) log.info("{} column have been replaced by ident".format(column_name)) break if variables is None: return df else: diff = set(variables) - set(df.columns) if diff: raise Exception("The following variable(s) {} are missing".format(diff)) variables = list(set(variables).intersection(df.columns)) df = df[variables] return df
Get values Parameters ---------- variables : list of strings, default None list of variables names, if None return the whole table table : string, default None name of the table hosting the variables lowercase : boolean, deflault True put variables of the table into lowercase rename_ident : boolean, deflault True rename variables ident+yr (e.g. ident08) into ident Returns ------- df : DataFrame, default None A DataFrame containing the variables
entailment
def insert_table(self, label = None, name = None, **kwargs): """ Insert a table in the Survey object """ data_frame = kwargs.pop('data_frame', None) if data_frame is None: data_frame = kwargs.pop('dataframe', None) to_hdf_kwargs = kwargs.pop('to_hdf_kwargs', dict()) if data_frame is not None: assert isinstance(data_frame, pandas.DataFrame) if data_frame is not None: if label is None: label = name table = Table(label = label, name = name, survey = self) assert table.survey.hdf5_file_path is not None log.debug("Saving table {} in {}".format(name, table.survey.hdf5_file_path)) table.save_data_frame(data_frame, **to_hdf_kwargs) if name not in self.tables: self.tables[name] = dict() for key, val in kwargs.items(): self.tables[name][key] = val
Insert a table in the Survey object
entailment
def quantile(q, variable, weight_variable = None, filter_variable = None): """ Return quantile of a variable with weight provided by a specific wieght variable potentially filtered """ def formula(entity, period): value = entity(variable, period) if weight_variable is not None: weight = entity(weight_variable, period) weight = entity.filled_array(1) if filter_variable is not None: filter_value = entity(filter_variable, period) weight = filter_value * weight labels = arange(1, q + 1) quantile, _ = weightedcalcs_quantiles( value, labels, weight, return_quantiles = True, ) if filter_variable is not None: quantile = where(weight > 0, quantile, -1) return quantile return formula
Return quantile of a variable with weight provided by a specific wieght variable potentially filtered
entailment
def _get_version(): """Get the version from package itself.""" with open("../waliki/__init__.py") as fh: for line in fh: if line.startswith("__version__ = "): return line.split("=")[-1].strip().strip("'").strip('"')
Get the version from package itself.
entailment
def clean_meta(rst_content): """remove moinmoin metada from the top of the file""" rst = rst_content.split('\n') for i, line in enumerate(rst): if line.startswith('#'): continue break return '\n'.join(rst[i:])
remove moinmoin metada from the top of the file
entailment
def entry_point(context, block_name): """include an snippet at the bottom of a block, if it exists For example, if the plugin with slug 'attachments' is registered waliki/attachments_edit_content.html will be included with {% entry_point 'edit_content' %} which is declared at the bottom of the block 'content' in edit.html """ from waliki.plugins import get_plugins includes = [] for plugin in get_plugins(): template_name = 'waliki/%s_%s.html' % (plugin.slug, block_name) try: # template exists template.loader.get_template(template_name) includes.append(template_name) except template.TemplateDoesNotExist: continue context.update({'includes': includes}) return context
include an snippet at the bottom of a block, if it exists For example, if the plugin with slug 'attachments' is registered waliki/attachments_edit_content.html will be included with {% entry_point 'edit_content' %} which is declared at the bottom of the block 'content' in edit.html
entailment
def check_perms(parser, token): """ Returns a list of permissions (as ``codename`` strings) for a given ``user``/``group`` and ``obj`` (Model instance). Parses ``check_perms`` tag which should be in format:: {% check_perms "perm1[, perm2, ...]" for user in slug as "context_var" %} or {% check_perms "perm1[, perm2, ...]" for user in "slug" as "context_var" %} .. note:: Make sure that you set and use those permissions in same template block (``{% block %}``). Example of usage (assuming ``page` objects are available from *context*):: {% check_perms "delete_page" for request.user in page.slug as "can_delete" %} {% if can_delete %} ... {% endif %} """ bits = token.split_contents() format = '{% check_perms "perm1[, perm2, ...]" for user in slug as "context_var" %}' if len(bits) != 8 or bits[2] != 'for' or bits[4] != "in" or bits[6] != 'as': raise template.TemplateSyntaxError("get_obj_perms tag should be in " "format: %s" % format) perms = bits[1] user = bits[3] slug = bits[5] context_var = bits[7] if perms[0] != perms[-1] or perms[0] not in ('"', "'"): raise template.TemplateSyntaxError("check_perms tag's perms " "argument should be in quotes") if context_var[0] != context_var[-1] or context_var[0] not in ('"', "'"): raise template.TemplateSyntaxError("check_perms tag's context_var " "argument should be in quotes") context_var = context_var[1:-1] return CheckPermissionsNode(perms, user, slug, context_var)
Returns a list of permissions (as ``codename`` strings) for a given ``user``/``group`` and ``obj`` (Model instance). Parses ``check_perms`` tag which should be in format:: {% check_perms "perm1[, perm2, ...]" for user in slug as "context_var" %} or {% check_perms "perm1[, perm2, ...]" for user in "slug" as "context_var" %} .. note:: Make sure that you set and use those permissions in same template block (``{% block %}``). Example of usage (assuming ``page` objects are available from *context*):: {% check_perms "delete_page" for request.user in page.slug as "can_delete" %} {% if can_delete %} ... {% endif %}
entailment
def waliki_box(context, slug, show_edit=True, *args, **kwargs): """ A templatetag to render a wiki page content as a box in any webpage, and allow rapid edition if you have permission. It's inspired in `django-boxes`_ .. _django-boxes: https://github.com/eldarion/django-boxes """ request = context["request"] try: page = Page.objects.get(slug=slug) except Page.DoesNotExist: page = None if (page and check_perms_helper('change_page', request.user, slug) or (not page and check_perms_helper('add_page', request.user, slug))): form = PageForm(instance=page, initial={'slug': slug}) form_action = reverse("waliki_edit", args=[slug]) else: form = None form_action = None return { "request": request, "slug": slug, "label": slug.replace('/', '_'), "page": page, "form": form, "form_action": form_action, }
A templatetag to render a wiki page content as a box in any webpage, and allow rapid edition if you have permission. It's inspired in `django-boxes`_ .. _django-boxes: https://github.com/eldarion/django-boxes
entailment
def check_perms(perms, user, slug, raise_exception=False): """a helper user to check if a user has the permissions for a given slug""" if isinstance(perms, string_types): perms = {perms} else: perms = set(perms) allowed_users = ACLRule.get_users_for(perms, slug) if allowed_users: return user in allowed_users if perms.issubset(set(WALIKI_ANONYMOUS_USER_PERMISSIONS)): return True if is_authenticated(user) and perms.issubset(set(WALIKI_LOGGED_USER_PERMISSIONS)): return True # First check if the user has the permission (even anon users) if user.has_perms(['waliki.%s' % p for p in perms]): return True # In case the 403 handler should be called raise the exception if raise_exception: raise PermissionDenied # As the last resort, show the login form return False
a helper user to check if a user has the permissions for a given slug
entailment
def permission_required(perms, login_url=None, raise_exception=False, redirect_field_name=REDIRECT_FIELD_NAME): """ this is analog to django's builtin ``permission_required`` decorator, but improved to check per slug ACLRules and default permissions for anonymous and logged in users if there is a rule affecting a slug, the user needs to be part of the rule's allowed users. If there isn't a matching rule, defaults permissions apply. """ def decorator(view_func): @wraps(view_func, assigned=available_attrs(view_func)) def _wrapped_view(request, *args, **kwargs): if check_perms(perms, request.user, kwargs['slug'], raise_exception=raise_exception): return view_func(request, *args, **kwargs) if is_authenticated(request.user): if WALIKI_RENDER_403: return render(request, 'waliki/403.html', kwargs, status=403) else: raise PermissionDenied path = request.build_absolute_uri() # urlparse chokes on lazy objects in Python 3, force to str resolved_login_url = force_str( resolve_url(login_url or settings.LOGIN_URL)) # If the login url is the same scheme and net location then just # use the path as the "next" url. login_scheme, login_netloc = urlparse(resolved_login_url)[:2] current_scheme, current_netloc = urlparse(path)[:2] if ((not login_scheme or login_scheme == current_scheme) and (not login_netloc or login_netloc == current_netloc)): path = request.get_full_path() from django.contrib.auth.views import redirect_to_login return redirect_to_login( path, resolved_login_url, redirect_field_name) return _wrapped_view return decorator
this is analog to django's builtin ``permission_required`` decorator, but improved to check per slug ACLRules and default permissions for anonymous and logged in users if there is a rule affecting a slug, the user needs to be part of the rule's allowed users. If there isn't a matching rule, defaults permissions apply.
entailment
def get_module(app, modname, verbose=False, failfast=False): """ Internal function to load a module from a single app. taken from https://github.com/ojii/django-load. """ module_name = '%s.%s' % (app, modname) try: module = import_module(module_name) except ImportError as e: if failfast: raise e elif verbose: print("Could not load %r from %r: %s" % (modname, app, e)) return None if verbose: print("Loaded %r from %r" % (modname, app)) return module
Internal function to load a module from a single app. taken from https://github.com/ojii/django-load.
entailment
def load(modname, verbose=False, failfast=False): """ Loads all modules with name 'modname' from all installed apps. If verbose is True, debug information will be printed to stdout. If failfast is True, import errors will not be surpressed. """ for app in settings.INSTALLED_APPS: get_module(app, modname, verbose, failfast)
Loads all modules with name 'modname' from all installed apps. If verbose is True, debug information will be printed to stdout. If failfast is True, import errors will not be surpressed.
entailment
def register(PluginClass): """ Register a plugin class. This function will call back your plugin's constructor. """ if PluginClass in _cache.keys(): raise Exception("Plugin class already registered") plugin = PluginClass() _cache[PluginClass] = plugin if getattr(PluginClass, 'extra_page_actions', False): for key in plugin.extra_page_actions: if key not in _extra_page_actions: _extra_page_actions[key] = [] _extra_page_actions[key].extend(plugin.extra_page_actions[key]) if getattr(PluginClass, 'extra_edit_actions', False): for key in plugin.extra_edit_actions: if key not in _extra_edit_actions: _extra_edit_actions[key] = [] _extra_edit_actions[key].extend(plugin.extra_edit_actions[key]) if getattr(PluginClass, 'navbar_links', False): _navbar_links.extend(list(plugin.navbar_links))
Register a plugin class. This function will call back your plugin's constructor.
entailment
def render_form(form): """same than {{ form|crispy }} if crispy_forms is installed. render using a bootstrap3 templating otherwise""" if 'crispy_forms' in settings.INSTALLED_APPS: from crispy_forms.templatetags.crispy_forms_filters import as_crispy_form return as_crispy_form(form) template = get_template("bootstrap/form.html") form = _preprocess_fields(form) return template.render({"form": form})
same than {{ form|crispy }} if crispy_forms is installed. render using a bootstrap3 templating otherwise
entailment
def settings(request): """inject few waliki's settings to the context to be used in templates""" from waliki.settings import WALIKI_USE_MATHJAX # NOQA return {k: v for (k, v) in locals().items() if k.startswith('WALIKI')}
inject few waliki's settings to the context to be used in templates
entailment
def smart_encode_str(s): """Create a UTF-16 encoded PDF string literal for `s`.""" try: utf16 = s.encode('utf_16_be') except AttributeError: # ints and floats utf16 = str(s).encode('utf_16_be') safe = utf16.replace(b'\x00)', b'\x00\\)').replace(b'\x00(', b'\x00\\(') return b''.join((codecs.BOM_UTF16_BE, safe))
Create a UTF-16 encoded PDF string literal for `s`.
entailment
def forge_fdf(pdf_form_url=None, fdf_data_strings=[], fdf_data_names=[], fields_hidden=[], fields_readonly=[], checkbox_checked_name=b"Yes"): """Generates fdf string from fields specified * pdf_form_url (default: None): just the url for the form. * fdf_data_strings (default: []): array of (string, value) tuples for the form fields (or dicts). Value is passed as a UTF-16 encoded string, unless True/False, in which case it is assumed to be a checkbox (and passes names, '/Yes' (by default) or '/Off'). * fdf_data_names (default: []): array of (string, value) tuples for the form fields (or dicts). Value is passed to FDF as a name, '/value' * fields_hidden (default: []): list of field names that should be set hidden. * fields_readonly (default: []): list of field names that should be set readonly. * checkbox_checked_value (default: "Yes"): By default means a checked checkboxes gets passed the value "/Yes". You may find that the default does not work with your PDF, in which case you might want to try "On". The result is a string suitable for writing to a .fdf file. """ fdf = [b'%FDF-1.2\x0a%\xe2\xe3\xcf\xd3\x0d\x0a'] fdf.append(b'1 0 obj\x0a<</FDF') fdf.append(b'<</Fields[') fdf.append(b''.join(handle_data_strings(fdf_data_strings, fields_hidden, fields_readonly, checkbox_checked_name))) fdf.append(b''.join(handle_data_names(fdf_data_names, fields_hidden, fields_readonly))) if pdf_form_url: fdf.append(b''.join(b'/F (', smart_encode_str(pdf_form_url), b')\x0a')) fdf.append(b']\x0a') fdf.append(b'>>\x0a') fdf.append(b'>>\x0aendobj\x0a') fdf.append(b'trailer\x0a\x0a<<\x0a/Root 1 0 R\x0a>>\x0a') fdf.append(b'%%EOF\x0a\x0a') return b''.join(fdf)
Generates fdf string from fields specified * pdf_form_url (default: None): just the url for the form. * fdf_data_strings (default: []): array of (string, value) tuples for the form fields (or dicts). Value is passed as a UTF-16 encoded string, unless True/False, in which case it is assumed to be a checkbox (and passes names, '/Yes' (by default) or '/Off'). * fdf_data_names (default: []): array of (string, value) tuples for the form fields (or dicts). Value is passed to FDF as a name, '/value' * fields_hidden (default: []): list of field names that should be set hidden. * fields_readonly (default: []): list of field names that should be set readonly. * checkbox_checked_value (default: "Yes"): By default means a checked checkboxes gets passed the value "/Yes". You may find that the default does not work with your PDF, in which case you might want to try "On". The result is a string suitable for writing to a .fdf file.
entailment