def get_expanded_schema(self, schema_name):
    """
    Return a schema file with all $ref properties expanded
    """
    if schema_name not in self.expanded_schemas:
        fn = self.get_schema_file(schema_name)
        schemas_folder = self.get_schemas_folder()
        base_uri = self.get_schema_path(schemas_folder)

        with open(fn) as f:
            jsn_schema = jsonref.load(f, base_uri=base_uri)

        # cache the schema for future use
        self.expanded_schemas[schema_name] = jsn_schema
    else:
        jsn_schema = self.expanded_schemas[schema_name]

    return jsn_schema
Return a schema file with all $ref properties expanded
entailment
def output(s):
    """
    Parse, transform, and pretty print the result
    """
    p = Parser()
    t = ExpressionsTransformer()

    ast = p.parse(s)
    logging.debug(ast.pretty())
    print(ast.pretty())

    d = t.transform(ast)
    print(json.dumps(d, indent=4))
    return d
Parse, transform, and pretty print the result
entailment
def comparison(self, t):
    """
    <PropertyIsEqualTo>
        <PropertyName>NAME</PropertyName>
        <Literal>Sydney</Literal>
    </PropertyIsEqualTo>
    """
    assert len(t) == 3
    d = {"PropertyIsEqualTo": [t[0], t[1], t[2]]}
    return d
<PropertyIsEqualTo> <PropertyName>NAME</PropertyName> <Literal>Sydney</Literal> </PropertyIsEqualTo>
entailment
def main(ctx, verbose, quiet):
    """
    Execute the main mappyfile command
    """
    verbosity = verbose - quiet
    configure_logging(verbosity)
    ctx.obj = {}
    ctx.obj['verbosity'] = verbosity
Execute the main mappyfile command
entailment
def format(ctx, input_mapfile, output_mapfile, indent, spacer, quote, newlinechar, expand, comments):
    """
    Format the input-mapfile and save as output-mapfile.
    Note: output-mapfile will be overwritten if it already exists.

    Example of formatting a single Mapfile:

        mappyfile format C:/Temp/valid.map C:/Temp/valid_formatted.map

    Example of formatting a single Mapfile with single quotes and tabs for indentation:

        mappyfile format C:/Temp/valid.map C:/Temp/valid_formatted.map --quote=\\' --indent=1 --spacer=\t

    Example of formatting a single Mapfile without expanding includes, but including comments:

        mappyfile format C:/Temp/valid.map C:/Temp/valid_formatted.map --no-expand --comments
    """
    quote = codecs.decode(quote, 'unicode_escape')  # ensure \t is handled as a tab
    spacer = codecs.decode(spacer, 'unicode_escape')  # ensure \t is handled as a tab
    newlinechar = codecs.decode(newlinechar, 'unicode_escape')  # ensure \n is handled as a newline

    d = mappyfile.open(input_mapfile, expand_includes=expand, include_comments=comments, include_position=True)
    mappyfile.save(d, output_mapfile, indent=indent, spacer=spacer, quote=quote, newlinechar=newlinechar)
    sys.exit(0)
Format the input-mapfile and save as output-mapfile. Note: output-mapfile will be overwritten if it already exists. Example of formatting a single Mapfile: mappyfile format C:/Temp/valid.map C:/Temp/valid_formatted.map Example of formatting a single Mapfile with single quotes and tabs for indentation: mappyfile format C:/Temp/valid.map C:/Temp/valid_formatted.map --quote=\\' --indent=1 --spacer=\t Example of formatting a single Mapfile without expanding includes, but including comments: mappyfile format C:/Temp/valid.map C:/Temp/valid_formatted.map --no-expand --comments
entailment
def validate(ctx, mapfiles, expand):
    """
    Validate Mapfile(s) against the Mapfile schema

    The MAPFILES argument is a list of paths, either to individual Mapfiles, or folders
    containing Mapfiles. Wildcards are supported (natively on Linux, and up to one level
    deep on Windows).

    Validation errors are reported to the console. The program returns the error count -
    this will be 0 if no validation errors are encountered.

    Example of validating a single Mapfile:

        mappyfile validate C:/Temp/valid.map

    Example of validating two folders containing Mapfiles, without expanding INCLUDES:

        mappyfile validate C:/Temp/*.map D:/GitHub/mappyfile/tests/mapfiles/*.map --no-expand
    """
    all_mapfiles = get_mapfiles(mapfiles)

    if len(all_mapfiles) == 0:
        click.echo("No Mapfiles found at the following paths: {}".format(",".join(mapfiles)))
        return

    validation_count = 0
    errors = 0

    for fn in all_mapfiles:
        fn = click.format_filename(fn)
        d = mappyfile.open(fn, expand_includes=expand, include_position=True)
        validation_messages = mappyfile.validate(d)
        if validation_messages:
            for v in validation_messages:
                v["fn"] = fn
                msg = "{fn} (Line: {line} Column: {column}) {message} - {error}".format(**v)
                click.echo(msg)
                errors += 1
        else:
            click.echo("{} validated successfully".format(fn))
            validation_count += 1

    click.echo("{} file(s) validated ({} successfully)".format(len(all_mapfiles), validation_count))
    sys.exit(errors)
Validate Mapfile(s) against the Mapfile schema The MAPFILES argument is a list of paths, either to individual Mapfiles, or folders containing Mapfiles. Wildcards are supported (natively on Linux, and up to one level deep on Windows). Validation errors are reported to the console. The program returns the error count - this will be 0 if no validation errors are encountered. Example of validating a single Mapfile: mappyfile validate C:/Temp/valid.map Example of validating two folders containing Mapfiles, without expanding INCLUDES: mappyfile validate C:/Temp/*.map D:/GitHub/mappyfile/tests/mapfiles/*.map --no-expand
entailment
def get_keyword(text):
    """
    Accept a string such as BACKGROUNDCOLOR [r] [g] [b] and return backgroundcolor
    """
    first_word = text.split(" ")[0]

    if len(first_word) > 1 and first_word.isupper():
        kwd = str(first_word.lower())
    else:
        kwd = None

    return kwd
Accept a string such as BACKGROUNDCOLOR [r] [g] [b] and return backgroundcolor
entailment
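A quick usage sketch for get_keyword as defined above (the input strings are illustrative):

# the keyword is the lowered first word, but only if it is all upper case
assert get_keyword("BACKGROUNDCOLOR [r] [g] [b]") == "backgroundcolor"
assert get_keyword("not a keyword") is None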
def get_values(text):
    """
    Accept a string such as BACKGROUNDCOLOR [r] [g] [b] and return ['r', 'g', 'b']
    """
    res = re.findall(r"\[(.*?)\]", text)
    values = []

    for r in res:
        if "|" in r:
            params = r.split("|")
            for p in params:
                values.append(p)
        else:
            values.append(r)

    values = [str(v.lower()) for v in values]
    return values
Accept a string such as BACKGROUNDCOLOR [r] [g] [b] and return ['r', 'g', 'b']
entailment
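A matching sketch for get_values; "|"-separated alternatives inside brackets are split into separate values (example inputs are illustrative):

assert get_values("BACKGROUNDCOLOR [r] [g] [b]") == ["r", "g", "b"]
assert get_values("STATUS [on|off]") == ["on", "off"]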
def process_doc(text):
    """
    The :ref: role is supported by Sphinx but not by plain docutils
    """
    # remove :ref: directives
    document = docutils.core.publish_doctree(text)
    # http://epydoc.sourceforge.net/docutils/private/docutils.nodes.document-class.html
    visitor = RefVisitor(document)
    document.walk(visitor)
    return visitor.kwd, visitor.values
The :ref: role is supported by Sphinx but not by plain docutils
entailment
def open(fn, expand_includes=True, include_comments=False, include_position=False, **kwargs):
    """
    Load a Mapfile from the supplied filename into a Python dictionary.

    Parameters
    ----------
    fn: string
        The path to the Mapfile, or partial Mapfile
    expand_includes: boolean
        Load any ``INCLUDE`` files in the MapFile
    include_comments: boolean
        Include or discard comment strings from the Mapfile - *experimental*
    include_position: boolean
        Include the position of the Mapfile tokens in the output

    Returns
    -------
    dict
        A Python dictionary representing the Mapfile in the mappyfile format

    Example
    -------
    To open a Mapfile from a filename and return it as a dictionary object::

        d = mappyfile.open('mymap.map')

    Notes
    -----
    Partial Mapfiles can also be opened, for example a file containing a ``LAYER`` object.
    """
    p = Parser(expand_includes=expand_includes, include_comments=include_comments, **kwargs)
    ast = p.parse_file(fn)
    m = MapfileToDict(include_position=include_position, include_comments=include_comments, **kwargs)
    d = m.transform(ast)
    return d
Load a Mapfile from the supplied filename into a Python dictionary. Parameters ---------- fn: string The path to the Mapfile, or partial Mapfile expand_includes: boolean Load any ``INCLUDE`` files in the MapFile include_comments: boolean Include or discard comment strings from the Mapfile - *experimental* include_position: boolean Include the position of the Mapfile tokens in the output Returns ------- dict A Python dictionary representing the Mapfile in the mappyfile format Example ------- To open a Mapfile from a filename and return it as a dictionary object:: d = mappyfile.open('mymap.map') Notes ----- Partial Mapfiles can also be opened, for example a file containing a ``LAYER`` object.
entailment
def load(fp, expand_includes=True, include_position=False, include_comments=False, **kwargs):
    """
    Load a Mapfile from an open file or file-like object.

    Parameters
    ----------
    fp: file
        A file-like object - as with all Mapfiles this should be encoded in "utf-8"
    expand_includes: boolean
        Load any ``INCLUDE`` files in the MapFile
    include_comments: boolean
        Include or discard comment strings from the Mapfile - *experimental*
    include_position: boolean
        Include the position of the Mapfile tokens in the output

    Returns
    -------
    dict
        A Python dictionary representing the Mapfile in the mappyfile format

    Example
    -------
    To open a Mapfile from a file and return it as a dictionary object::

        with open('mymap.map') as fp:
            d = mappyfile.load(fp)

    Notes
    -----
    Partial Mapfiles can also be opened, for example a file containing a ``LAYER`` object.
    """
    p = Parser(expand_includes=expand_includes, include_comments=include_comments, **kwargs)
    ast = p.load(fp)
    m = MapfileToDict(include_position=include_position, include_comments=include_comments, **kwargs)
    d = m.transform(ast)
    return d
Load a Mapfile from an open file or file-like object. Parameters ---------- fp: file A file-like object - as with all Mapfiles this should be encoded in "utf-8" expand_includes: boolean Load any ``INCLUDE`` files in the MapFile include_comments: boolean Include or discard comment strings from the Mapfile - *experimental* include_position: boolean Include the position of the Mapfile tokens in the output Returns ------- dict A Python dictionary representing the Mapfile in the mappyfile format Example ------- To open a Mapfile from a file and return it as a dictionary object:: with open('mymap.map') as fp: d = mappyfile.load(fp) Notes ----- Partial Mapfiles can also be opened, for example a file containing a ``LAYER`` object.
entailment
def loads(s, expand_includes=True, include_position=False, include_comments=False, **kwargs):
    """
    Load a Mapfile from a string

    Parameters
    ----------
    s: string
        The Mapfile, or partial Mapfile, text
    expand_includes: boolean
        Load any ``INCLUDE`` files in the MapFile
    include_comments: boolean
        Include or discard comment strings from the Mapfile - *experimental*
    include_position: boolean
        Include the position of the Mapfile tokens in the output

    Returns
    -------
    dict
        A Python dictionary representing the Mapfile in the mappyfile format

    Example
    -------
    To open a Mapfile from a string and return it as a dictionary object::

        s = '''MAP NAME "TEST" END'''
        d = mappyfile.loads(s)
        assert d["name"] == "TEST"
    """
    p = Parser(expand_includes=expand_includes, include_comments=include_comments, **kwargs)
    ast = p.parse(s)
    m = MapfileToDict(include_position=include_position, include_comments=include_comments, **kwargs)
    d = m.transform(ast)
    return d
Load a Mapfile from a string Parameters ---------- s: string The Mapfile, or partial Mapfile, text expand_includes: boolean Load any ``INCLUDE`` files in the MapFile include_comments: boolean Include or discard comment strings from the Mapfile - *experimental* include_position: boolean Include the position of the Mapfile tokens in the output Returns ------- dict A Python dictionary representing the Mapfile in the mappyfile format Example ------- To open a Mapfile from a string and return it as a dictionary object:: s = '''MAP NAME "TEST" END''' d = mappyfile.loads(s) assert d["name"] == "TEST"
entailment
def dump(d, fp, indent=4, spacer=" ", quote='"', newlinechar="\n", end_comment=False):
    """
    Write d (the Mapfile dictionary) as a formatted stream to fp

    Parameters
    ----------
    d: dict
        A Python dictionary based on the mappyfile schema
    fp: file
        A file-like object
    indent: int
        The number of ``spacer`` characters to indent structures in the Mapfile
    spacer: string
        The character to use for indenting structures in the Mapfile. Typically
        spaces or tab characters (``\\t``)
    quote: string
        The quote character to use in the Mapfile (double or single quotes)
    newlinechar: string
        The character used to insert newlines in the Mapfile
    end_comment: bool
        Add a comment with the block type at each closing END statement e.g. END # MAP

    Example
    -------
    To open a Mapfile from a string, and then dump it back out to an open file,
    using 2 spaces for indentation, and single-quotes for properties::

        s = '''MAP NAME "TEST" END'''
        d = mappyfile.loads(s)
        with open(fn, "w") as f:
            mappyfile.dump(d, f, indent=2, quote="'")
    """
    map_string = _pprint(d, indent, spacer, quote, newlinechar, end_comment)
    fp.write(map_string)
Write d (the Mapfile dictionary) as a formatted stream to fp Parameters ---------- d: dict A Python dictionary based on the mappyfile schema fp: file A file-like object indent: int The number of ``spacer`` characters to indent structures in the Mapfile spacer: string The character to use for indenting structures in the Mapfile. Typically spaces or tab characters (``\\t``) quote: string The quote character to use in the Mapfile (double or single quotes) newlinechar: string The character used to insert newlines in the Mapfile end_comment: bool Add a comment with the block type at each closing END statement e.g. END # MAP Example ------- To open a Mapfile from a string, and then dump it back out to an open file, using 2 spaces for indentation, and single-quotes for properties:: s = '''MAP NAME "TEST" END''' d = mappyfile.loads(s) with open(fn, "w") as f: mappyfile.dump(d, f, indent=2, quote="'")
entailment
def save(d, output_file, indent=4, spacer=" ", quote='"', newlinechar="\n", end_comment=False, **kwargs):
    """
    Write a dictionary to an output Mapfile on disk

    Parameters
    ----------
    d: dict
        A Python dictionary based on the mappyfile schema
    output_file: string
        The output filename
    indent: int
        The number of ``spacer`` characters to indent structures in the Mapfile
    spacer: string
        The character to use for indenting structures in the Mapfile. Typically
        spaces or tab characters (``\\t``)
    quote: string
        The quote character to use in the Mapfile (double or single quotes)
    newlinechar: string
        The character used to insert newlines in the Mapfile
    end_comment: bool
        Add a comment with the block type at each closing END statement e.g. END # MAP

    Returns
    -------
    string
        The output_file passed into the function

    Example
    -------
    To open a Mapfile from a string, and then save it to a file::

        s = '''MAP NAME "TEST" END'''
        d = mappyfile.loads(s)
        fn = "C:/Data/mymap.map"
        mappyfile.save(d, fn)
    """
    map_string = _pprint(d, indent, spacer, quote, newlinechar, end_comment)
    _save(output_file, map_string)
    return output_file
Write a dictionary to an output Mapfile on disk Parameters ---------- d: dict A Python dictionary based on the mappyfile schema output_file: string The output filename indent: int The number of ``spacer`` characters to indent structures in the Mapfile spacer: string The character to use for indenting structures in the Mapfile. Typically spaces or tab characters (``\\t``) quote: string The quote character to use in the Mapfile (double or single quotes) newlinechar: string The character used to insert newlines in the Mapfile end_comment: bool Add a comment with the block type at each closing END statement e.g. END # MAP Returns ------- string The output_file passed into the function Example ------- To open a Mapfile from a string, and then save it to a file:: s = '''MAP NAME "TEST" END''' d = mappyfile.loads(s) fn = "C:/Data/mymap.map" mappyfile.save(d, fn)
entailment
def dumps(d, indent=4, spacer=" ", quote='"', newlinechar="\n", end_comment=False, **kwargs):
    """
    Output a Mapfile dictionary as a string

    Parameters
    ----------
    d: dict
        A Python dictionary based on the mappyfile schema
    indent: int
        The number of ``spacer`` characters to indent structures in the Mapfile
    spacer: string
        The character to use for indenting structures in the Mapfile. Typically
        spaces or tab characters (``\\t``)
    quote: string
        The quote character to use in the Mapfile (double or single quotes)
    newlinechar: string
        The character used to insert newlines in the Mapfile
    end_comment: bool
        Add a comment with the block type at each closing END statement e.g. END # MAP

    Returns
    -------
    string
        The Mapfile as a string

    Example
    -------
    To open a Mapfile from a string, and then print it back out as a string using tabs::

        s = '''MAP NAME "TEST" END'''
        d = mappyfile.loads(s)
        print(mappyfile.dumps(d, indent=1, spacer="\\t"))
    """
    return _pprint(d, indent, spacer, quote, newlinechar, end_comment, **kwargs)
Output a Mapfile dictionary as a string Parameters ---------- d: dict A Python dictionary based on the mappyfile schema indent: int The number of ``spacer`` characters to indent structures in the Mapfile spacer: string The character to use for indenting structures in the Mapfile. Typically spaces or tab characters (``\\t``) quote: string The quote character to use in the Mapfile (double or single quotes) newlinechar: string The character used to insert newlines in the Mapfile end_comment: bool Add a comment with the block type at each closing END statement e.g. END # MAP Returns ------- string The Mapfile as a string Example ------- To open a Mapfile from a string, and then print it back out as a string using tabs:: s = '''MAP NAME "TEST" END''' d = mappyfile.loads(s) print(mappyfile.dumps(d, indent=1, spacer="\\t"))
entailment
def find(lst, key, value):
    """
    Find an item in a list of dicts using a key and a value

    Parameters
    ----------
    lst: list
        A list of composite dictionaries e.g. ``layers``, ``classes``
    key: string
        The key name to search each dictionary in the list
    value: string
        The value to search for

    Returns
    -------
    dict
        The first composite dictionary object with a key that matches the value

    Example
    -------
    To find the ``LAYER`` in a list of layers with ``NAME`` set to ``Layer2``::

        s = '''
        MAP
            LAYER
                NAME "Layer1"
                TYPE POLYGON
            END
            LAYER
                NAME "Layer2"
                TYPE POLYGON
                CLASS
                    NAME "Class1"
                    COLOR 0 0 -8
                END
            END
        END
        '''
        d = mappyfile.loads(s)
        cmp = mappyfile.find(d["layers"], "name", "Layer2")
        assert cmp["name"] == "Layer2"
    """
    return next((item for item in lst if item[key.lower()] == value), None)
Find an item in a list of dicts using a key and a value Parameters ---------- lst: list A list of composite dictionaries e.g. ``layers``, ``classes`` key: string The key name to search each dictionary in the list value: string The value to search for Returns ------- dict The first composite dictionary object with a key that matches the value Example ------- To find the ``LAYER`` in a list of layers with ``NAME`` set to ``Layer2``:: s = ''' MAP LAYER NAME "Layer1" TYPE POLYGON END LAYER NAME "Layer2" TYPE POLYGON CLASS NAME "Class1" COLOR 0 0 -8 END END END ''' d = mappyfile.loads(s) cmp = mappyfile.find(d["layers"], "name", "Layer2") assert cmp["name"] == "Layer2"
entailment
def findall(lst, key, value):
    """
    Find all items in lst where key matches value.
    For example find all ``LAYER`` s in a ``MAP`` where ``GROUP`` equals ``VALUE``

    Parameters
    ----------
    lst: list
        A list of composite dictionaries e.g. ``layers``, ``classes``
    key: string
        The key name to search each dictionary in the list
    value: string
        The value to search for

    Returns
    -------
    list
        A Python list containing the matching composite dictionaries

    Example
    -------
    To find all ``LAYER`` s with ``GROUP`` set to ``test``::

        s = '''
        MAP
            LAYER
                NAME "Layer1"
                TYPE POLYGON
                GROUP "test"
            END
            LAYER
                NAME "Layer2"
                TYPE POLYGON
                GROUP "test1"
            END
            LAYER
                NAME "Layer3"
                TYPE POLYGON
                GROUP "test2"
            END
            LAYER
                NAME "Layer4"
                TYPE POLYGON
                GROUP "test"
            END
        END
        '''
        d = mappyfile.loads(s)
        layers = mappyfile.findall(d["layers"], "group", "test")
        assert len(layers) == 2
    """
    return [item for item in lst if item[key.lower()] in value]
Find all items in lst where key matches value. For example find all ``LAYER`` s in a ``MAP`` where ``GROUP`` equals ``VALUE`` Parameters ---------- lst: list A list of composite dictionaries e.g. ``layers``, ``classes`` key: string The key name to search each dictionary in the list value: string The value to search for Returns ------- list A Python list containing the matching composite dictionaries Example ------- To find all ``LAYER`` s with ``GROUP`` set to ``test``:: s = ''' MAP LAYER NAME "Layer1" TYPE POLYGON GROUP "test" END LAYER NAME "Layer2" TYPE POLYGON GROUP "test1" END LAYER NAME "Layer3" TYPE POLYGON GROUP "test2" END LAYER NAME "Layer4" TYPE POLYGON GROUP "test" END END ''' d = mappyfile.loads(s) layers = mappyfile.findall(d["layers"], "group", "test") assert len(layers) == 2
entailment
def findunique(lst, key):
    """
    Find all unique key values for items in lst.

    Parameters
    ----------
    lst: list
        A list of composite dictionaries e.g. ``layers``, ``classes``
    key: string
        The key name to search each dictionary in the list

    Returns
    -------
    list
        A sorted Python list of unique keys in the list

    Example
    -------
    To find all ``GROUP`` values for ``CLASS`` in a ``LAYER``::

        s = '''
        LAYER
            CLASS
                GROUP "group1"
                NAME "Class1"
                COLOR 0 0 0
            END
            CLASS
                GROUP "group2"
                NAME "Class2"
                COLOR 0 0 0
            END
            CLASS
                GROUP "group1"
                NAME "Class3"
                COLOR 0 0 0
            END
        END
        '''
        d = mappyfile.loads(s)
        groups = mappyfile.findunique(d["classes"], "group")
        assert groups == ["group1", "group2"]
    """
    return sorted(set([item[key.lower()] for item in lst]))
Find all unique key values for items in lst. Parameters ---------- lst: list A list of composite dictionaries e.g. ``layers``, ``classes`` key: string The key name to search each dictionary in the list Returns ------- list A sorted Python list of unique keys in the list Example ------- To find all ``GROUP`` values for ``CLASS`` in a ``LAYER``:: s = ''' LAYER CLASS GROUP "group1" NAME "Class1" COLOR 0 0 0 END CLASS GROUP "group2" NAME "Class2" COLOR 0 0 0 END CLASS GROUP "group1" NAME "Class3" COLOR 0 0 0 END END ''' d = mappyfile.loads(s) groups = mappyfile.findunique(d["classes"], "group") assert groups == ["group1", "group2"]
entailment
def findkey(d, *keys):
    """
    Get a value from a dictionary based on a list of keys and/or list indexes.

    Parameters
    ----------
    d: dict
        A Python dictionary
    keys: list
        A list of key names, or list indexes

    Returns
    -------
    dict
        The composite dictionary object at the path specified by the keys

    Example
    -------
    To return the value of the first class of the first layer in a Mapfile::

        s = '''
        MAP
            LAYER
                NAME "Layer1"
                TYPE POLYGON
                CLASS
                    NAME "Class1"
                    COLOR 0 0 255
                END
            END
        END
        '''
        d = mappyfile.loads(s)
        pth = ["layers", 0, "classes", 0]
        cls1 = mappyfile.findkey(d, *pth)
        assert cls1["name"] == "Class1"
    """
    if keys:
        keys = list(keys)
        key = keys.pop(0)
        return findkey(d[key], *keys)
    else:
        return d
Get a value from a dictionary based on a list of keys and/or list indexes. Parameters ---------- d: dict A Python dictionary keys: list A list of key names, or list indexes Returns ------- dict The composite dictionary object at the path specified by the keys Example ------- To return the value of the first class of the first layer in a Mapfile:: s = ''' MAP LAYER NAME "Layer1" TYPE POLYGON CLASS NAME "Class1" COLOR 0 0 255 END END END ''' d = mappyfile.loads(s) pth = ["layers", 0, "classes", 0] cls1 = mappyfile.findkey(d, *pth) assert cls1["name"] == "Class1"
entailment
def update(d1, d2):
    """
    Update dict d1 with properties from d2

    Note
    ----
    Allows deletion of objects with a special ``__delete__`` key
    For any list of dicts new items can be added when updating

    Parameters
    ----------
    d1: dict
        A Python dictionary
    d2: dict
        A Python dictionary that will be used to update any keys with the same name in d1

    Returns
    -------
    dict
        The updated dictionary
    """
    NoneType = type(None)

    if d2.get("__delete__", False):
        return {}

    for k, v in d2.items():
        if isinstance(v, dict):
            if v.get("__delete__", False):
                # allow a __delete__ property to be set to delete objects
                del d1[k]
            else:
                d1[k] = update(d1.get(k, {}), v)
        elif isinstance(v, (tuple, list)) and all(isinstance(li, (NoneType, dict)) for li in v):
            # a list of dicts and/or NoneType
            orig_list = d1.get(k, [])
            new_list = []
            pairs = list(zip_longest(orig_list, v, fillvalue=None))
            for orig_item, new_item in pairs:
                if orig_item is None:
                    orig_item = {}  # can't use {} for fillvalue as only one dict created/modified!
                if new_item is None:
                    new_item = {}
                if new_item.get("__delete__", False):
                    d = None
                    # orig_list.remove(orig_item) # remove the item to delete
                else:
                    d = update(orig_item, new_item)
                if d is not None:
                    new_list.append(d)
            d1[k] = new_list
        else:
            if k in d1 and v == "__delete__":
                del d1[k]
            else:
                d1[k] = v

    return d1
Update dict d1 with properties from d2 Note ---- Allows deletion of objects with a special ``__delete__`` key For any list of dicts new items can be added when updating Parameters ---------- d1: dict A Python dictionary d2: dict A Python dictionary that will be used to update any keys with the same name in d1 Returns ------- dict The updated dictionary
entailment
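A minimal sketch of update's behaviour, including the special __delete__ key (the example data is hypothetical):

d1 = {"map": {"name": "Map1", "layers": [{"name": "Layer1"}, {"name": "Layer2"}]}}
d2 = {"map": {"name": "NewName", "layers": [{}, {"__delete__": True}]}}
# the first layer is kept unchanged, the second is deleted, and the name is replaced
assert update(d1, d2) == {"map": {"name": "NewName", "layers": [{"name": "Layer1"}]}}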
def erosion(mapfile, dilated):
    """
    We will continue to work with the modified Mapfile.
    If we wanted to start from scratch we could simply reread it.
    """
    ll = mappyfile.find(mapfile["layers"], "name", "line")
    ll["status"] = "OFF"

    pl = mappyfile.find(mapfile["layers"], "name", "polygon")

    # make a deep copy of the polygon layer in the Map
    # so any modifications are made to this layer only
    pl2 = deepcopy(pl)
    pl2["name"] = "newpolygon"
    mapfile["layers"].append(pl2)

    dilated = dilated.buffer(-0.3)
    pl2["features"][0]["wkt"] = dilated.wkt

    style = pl["classes"][0]["styles"][0]
    style["color"] = "#999999"
    style["outlinecolor"] = "#b2b2b2"
We will continue to work with the modified Mapfile. If we wanted to start from scratch we could simply reread it.
entailment
def _decode_response(response):
    """Strip off Gerrit's magic prefix and decode a response.

    :returns:
        Decoded JSON content as a dict, or raw text if content could not be
        decoded as JSON.

    :raises:
        requests.HTTPError if the response contains an HTTP error status code.
    """
    content_type = response.headers.get('content-type', '')
    logger.debug("status[%s] content_type[%s] encoding[%s]"
                 % (response.status_code, content_type, response.encoding))
    response.raise_for_status()
    content = response.content.strip()
    if response.encoding:
        content = content.decode(response.encoding)
    if not content:
        logger.debug("no content in response")
        return content
    if content_type.split(';')[0] != 'application/json':
        return content
    if content.startswith(GERRIT_MAGIC_JSON_PREFIX):
        content = content[len(GERRIT_MAGIC_JSON_PREFIX):]
    try:
        return json.loads(content)
    except ValueError:
        logger.error('Invalid json content: %s', content)
        raise
Strip off Gerrit's magic prefix and decode a response. :returns: Decoded JSON content as a dict, or raw text if content could not be decoded as JSON. :raises: requests.HTTPError if the response contains an HTTP error status code.
entailment
def translate_kwargs(self, **kwargs):
    """Translate kwargs, replacing `data` with `json` if necessary."""
    local_kwargs = self.kwargs.copy()
    local_kwargs.update(kwargs)
    if "data" in local_kwargs and "json" in local_kwargs:
        raise ValueError("Cannot use data and json together")
    if "data" in local_kwargs and isinstance(local_kwargs["data"], dict):
        local_kwargs.update({"json": local_kwargs["data"]})
        del local_kwargs["data"]
    headers = DEFAULT_HEADERS.copy()
    if "headers" in kwargs:
        headers.update(kwargs["headers"])
    if "json" in local_kwargs:
        headers.update({"Content-Type": "application/json;charset=UTF-8"})
    local_kwargs.update({"headers": headers})
    return local_kwargs
Translate kwargs replacing `data` with `json` if necessary.
entailment
def post(self, endpoint, return_response=False, **kwargs):
    """Send HTTP POST to the endpoint.

    :arg str endpoint: The endpoint to send to.

    :returns:
        JSON decoded result.

    :raises:
        requests.RequestException on timeout or connection error.
    """
    args = self.translate_kwargs(**kwargs)
    response = self.session.post(self.make_url(endpoint), **args)

    decoded_response = _decode_response(response)
    if return_response:
        return decoded_response, response
    return decoded_response
Send HTTP POST to the endpoint. :arg str endpoint: The endpoint to send to. :returns: JSON decoded result. :raises: requests.RequestException on timeout or connection error.
entailment
def escape_string(string):
    """Escape a string for use in Gerrit commands.

    :arg str string: The string to escape.

    :returns: The string with necessary escapes and surrounding double quotes
        so that it can be passed to any of the Gerrit commands that require
        double-quoted strings.
    """
    result = string
    result = result.replace('\\', '\\\\')
    result = result.replace('"', '\\"')
    return '"' + result + '"'
Escape a string for use in Gerrit commands. :arg str string: The string to escape. :returns: The string with necessary escapes and surrounding double quotes so that it can be passed to any of the Gerrit commands that require double-quoted strings.
entailment
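A usage sketch for escape_string:

assert escape_string('plain') == '"plain"'
# backslashes and double quotes are escaped before the surrounding quotes are added
assert escape_string('say "hi"') == '"say \\"hi\\""'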
def append(self, data):
    """Append the given `data` to the output.

    :arg data: If a list, it is formatted as a bullet list with each
        entry in the list being a separate bullet. Otherwise if it is a
        string, the string is added as a paragraph.

    :raises: ValueError if `data` is not a list or a string.
    """
    if not data:
        return

    if isinstance(data, list):
        # First we need to clean up the data.
        #
        # Gerrit creates new bullet items when it gets newline characters
        # within a bullet list paragraph, so unless we remove the newlines
        # from the texts the resulting bullet list will contain multiple
        # bullets and look crappy.
        #
        # We add the '*' character on the beginning of each bullet text in
        # the next step, so we strip off any existing leading '*' that the
        # caller has added, and then strip off any leading or trailing
        # whitespace.
        _items = [x.replace("\n", " ").strip().lstrip('*').strip()
                  for x in data]

        # Create the bullet list only with the items that still have any
        # text in them after cleaning up.
        _paragraph = "\n".join(["* %s" % x for x in _items if x])
        if _paragraph:
            self.paragraphs.append(_paragraph)
    elif isinstance(data, str):
        _paragraph = data.strip()
        if _paragraph:
            self.paragraphs.append(_paragraph)
    else:
        raise ValueError('Data must be a list or a string')
Append the given `data` to the output. :arg data: If a list, it is formatted as a bullet list with each entry in the list being a separate bullet. Otherwise if it is a string, the string is added as a paragraph. :raises: ValueError if `data` is not a list or a string.
entailment
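A standalone sketch of the bullet-list clean-up that append() performs on list input (the sample data is illustrative):

data = ["first item\nwrapped", "* second item ", "   "]
items = [x.replace("\n", " ").strip().lstrip("*").strip() for x in data]
paragraph = "\n".join("* %s" % x for x in items if x)
# newlines are collapsed, leading '*' stripped, and empty items dropped
assert paragraph == "* first item wrapped\n* second item"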
def format(self):
    """Format the message parts to a string.

    :Returns:
        A string of all the message parts separated into paragraphs, with
        header and footer paragraphs if they were specified in the
        constructor.
    """
    message = ""
    if self.paragraphs:
        if self.header:
            message += (self.header + '\n\n')
        message += "\n\n".join(self.paragraphs)
        if self.footer:
            message += ('\n\n' + self.footer)
    return message
Format the message parts to a string. :Returns: A string of all the message parts separated into paragraphs, with header and footer paragraphs if they were specified in the constructor.
entailment
def set_context_params(self, params):
    """ Set header context parameters.

    Refer to the top of <Zimbra Server-Root>/docs/soap.txt about specifics.

    The <format>-Parameter cannot be changed, because it is set by the
    implementing class.

    Should be called by implementing method to check for valid context
    params.

    :param params: A Dict containing context parameters.
    """
    for key, value in params.items():
        if key not in self.valid_context_params:
            raise RequestHeaderContextException(
                "%s is not a valid context parameter." % key
            )
Set header context parameters. Refer to the top of <Zimbra Server-Root>/docs/soap.txt about specifics. The <format>-Parameter cannot be changed, because it is set by the implementing class. Should be called by implementing method to check for valid context params. :param params: A Dict containing context parameters.
entailment
def enable_batch(self, onerror="continue"):
    """ Enables batch request gathering.

    Do this first and then consecutively call "add_request" to add more
    requests.

    :param onerror: "continue" (default) processing if one request fails
        (the response will contain soap Faults for the failed requests),
        or "stop" processing.
    """
    self.batch_request = True
    self.batch_request_id = 1

    self._create_batch_node(onerror)
Enables batch request gathering. Do this first and then consecutively call "add_request" to add more requests. :param onerror: "continue" (default) processing if one request fails (the response will contain soap Faults for the failed requests), or "stop" processing.
entailment
def is_fault(self):
    """ Checks whether this response has at least one fault response
    (supports both batch and single responses)
    """
    if self.is_batch():
        info = self.get_batch()
        return info['hasFault']
    else:
        my_response = self.get_response()
        if list(my_response.keys())[0] == "Fault":
            return True

    return False
Checks whether this response has at least one fault response (supports both batch and single responses)
entailment
def _filter_response(self, response_dict):
    """ Add additional filters to the response dictionary

    Currently the response dictionary is filtered like this:

    * If a list only has one item, the list is replaced by that item
    * Namespace-Keys (_jsns and xmlns) are removed

    :param response_dict: the pregenerated, but unfiltered response dict
    :type response_dict: dict
    :return: The filtered dictionary
    :rtype: dict
    """
    filtered_dict = {}

    for key, value in response_dict.items():
        if key == "_jsns":
            continue
        if key == "xmlns":
            continue
        if type(value) == list and len(value) == 1:
            filtered_dict[key] = value[0]
        elif type(value) == dict and len(value.keys()) == 1 \
                and "_content" in value.keys():
            filtered_dict[key] = value["_content"]
        elif type(value) == dict:
            tmp_dict = self._filter_response(value)
            filtered_dict[key] = tmp_dict
        else:
            filtered_dict[key] = value

    return filtered_dict
Add additional filters to the response dictionary Currently the response dictionary is filtered like this: * If a list only has one item, the list is replaced by that item * Namespace-Keys (_jsns and xmlns) are removed :param response_dict: the pregenerated, but unfiltered response dict :type response_dict: dict :return: The filtered dictionary :rtype: dict
entailment
def create_preauth(byval, key, by='name', expires=0, timestamp=None):
    """ Generates a zimbra preauth value

    :param byval: The value of the targeted user (according to the
        by-parameter). For example: The account name, if "by" is "name".
    :param key: The domain preauth key (you can retrieve that using zmprov gd)
    :param by: What type is the byval-parameter? Valid parameters are "name"
        (default), "id" and "foreignPrincipal"
    :param expires: Milliseconds when the auth token expires. Defaults to 0
        for default account expiration
    :param timestamp: Current timestamp (is calculated by default)
    :returns: The preauth value to be used in an AuthRequest
    :rtype: str
    """
    if timestamp is None:
        timestamp = int(datetime.now().strftime("%s")) * 1000

    pak = hmac.new(
        codecs.latin_1_encode(key)[0],
        ('%s|%s|%s|%s' % (byval, by, expires, timestamp)).encode("utf-8"),
        hashlib.sha1
    ).hexdigest()

    return pak
Generates a zimbra preauth value :param byval: The value of the targeted user (according to the by-parameter). For example: The account name, if "by" is "name". :param key: The domain preauth key (you can retrieve that using zmprov gd) :param by: What type is the byval-parameter? Valid parameters are "name" (default), "id" and "foreignPrincipal" :param expires: Milliseconds when the auth token expires. Defaults to 0 for default account expiration :param timestamp: Current timestamp (is calculated by default) :returns: The preauth value to be used in an AuthRequest :rtype: str
entailment
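A usage sketch for create_preauth with a fixed timestamp so the HMAC is reproducible (the account and key are hypothetical):

pak = create_preauth(
    "user@example.com",   # byval: the account name, since by="name"
    "0123456789abcdef",   # hypothetical domain preauth key
    by="name",
    expires=0,
    timestamp=1500000000000,
)
print(pak)  # a hex-encoded HMAC-SHA1 digest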
def zimbra_to_python(zimbra_dict, key_attribute="n", content_attribute="_content"):
    """ Converts single level Zimbra dicts to a standard python dict

    :param zimbra_dict: The dictionary in Zimbra-Format
    :return: A native python dict
    """
    local_dict = {}

    for item in zimbra_dict:
        local_dict[item[key_attribute]] = item[content_attribute]

    return local_dict
Converts single level Zimbra dicts to a standard python dict :param zimbra_dict: The dictionary in Zimbra-Format :return: A native python dict
entailment
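A usage sketch for zimbra_to_python (the attribute names and values are illustrative):

zimbra_attrs = [
    {"n": "displayName", "_content": "Jane Doe"},
    {"n": "zimbraMailQuota", "_content": "0"},
]
assert zimbra_to_python(zimbra_attrs) == {"displayName": "Jane Doe", "zimbraMailQuota": "0"}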
def get_value(haystack, needle, key_attribute="n", content_attribute="_content"):
    """ Fetch a value from a zimbra-like json dict (keys are "n", values
    are "_content")

    This function may be slightly faster than zimbra_to_python(haystack)[needle],
    because it doesn't necessarily iterate over the complete list.

    :param haystack: The list in zimbra-dict format
    :param needle: the key to search for
    :return: the value or None, if the key is not found
    """
    for value in haystack:
        if value[key_attribute] == needle:
            return value[content_attribute]

    return None
Fetch a value from a zimbra-like json dict (keys are "n", values are "_content") This function may be slightly faster than zimbra_to_python(haystack)[needle], because it doesn't necessarily iterate over the complete list. :param haystack: The list in zimbra-dict format :param needle: the key to search for :return: the value or None, if the key is not found
entailment
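A usage sketch for get_value, reusing the zimbra-style attribute list format (illustrative data):

attrs = [{"n": "displayName", "_content": "Jane Doe"}]
assert get_value(attrs, "displayName") == "Jane Doe"
assert get_value(attrs, "missing") is None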
def convert_to_str(input_string):
    """ Returns a string of the input compatible between py2 and py3

    :param input_string:
    :return:
    """
    if sys.version < '3':
        if isinstance(input_string, str) \
                or isinstance(input_string, unicode):  # pragma: no cover py3
            return input_string  # pragma: no cover py3
    else:
        if isinstance(input_string, str):  # pragma: no cover py3
            return input_string  # pragma: no cover py3

    return str(input_string)
Returns a string of the input compatible between py2 and py3 :param input_string: :return:
entailment
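A quick sketch of convert_to_str on Python 3:

assert convert_to_str(42) == "42"      # non-strings are coerced via str()
assert convert_to_str("abc") == "abc"  # strings pass through unchanged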
def dict_to_dom(root_node, xml_dict):
    """ Create a DOM node and optionally several subnodes from a dictionary.

    :param root_node: DOM-Node set the dictionary is applied upon
    :type root_node: xml.dom.Element
    :param xml_dict: The dictionary containing the nodes to process
    :type xml_dict: dict
    """
    if '_content' in list(xml_dict.keys()):
        root_node.appendChild(
            root_node.ownerDocument.createTextNode(
                convert_to_str(xml_dict['_content'])
            )
        )

    for key, value in xml_dict.items():
        if key == '_content':
            continue

        if type(value) == dict:
            # Root node
            tmp_node = root_node.ownerDocument.createElement(key)
            dict_to_dom(tmp_node, value)
            root_node.appendChild(tmp_node)
        elif type(value) == list:
            for multinode in value:
                tmp_node = root_node.ownerDocument.createElement(key)
                dict_to_dom(tmp_node, multinode)
                root_node.appendChild(tmp_node)
        else:
            # Attributes
            root_node.setAttribute(key, convert_to_str(value))
Create a DOM node and optionally several subnodes from a dictionary. :param root_node: DOM-Node set the dictionary is applied upon :type root_node: xml.dom.Element :param xml_dict: The dictionary containing the nodes to process :type xml_dict: dict
entailment
def dom_to_dict(root_node):
    """ Serializes the given node to the dictionary

    Serializes the given node to the documented dictionary format.

    :param root_node: Node to serialize
    :returns: The dictionary
    :rtype: dict
    """
    # Remove namespaces from tagname
    tag = root_node.tagName
    if ":" in tag:
        tag = tag.split(":")[1]

    root_dict = {
        tag: {}
    }
    node_dict = root_dict[tag]

    # Set attributes
    if root_node.hasAttributes():
        for key in list(root_node.attributes.keys()):
            node_dict[key] = root_node.getAttribute(key)

    # Check out child nodes
    for child in root_node.childNodes:
        if child.nodeType == root_node.TEXT_NODE:
            # This is the content
            node_dict['_content'] = child.data
        else:
            subnode_dict = dom_to_dict(child)
            child_tag = child.tagName
            if ":" in child_tag:
                child_tag = child_tag.split(":")[1]
            new_val = subnode_dict[child_tag]

            # If we have several children with the same name, put them in a list.
            if child_tag in node_dict:
                prev_val = node_dict[child_tag]
                if type(prev_val) != list:
                    node_dict[child_tag] = [prev_val]
                node_dict[child_tag].append(new_val)
            else:
                node_dict[child_tag] = new_val

    return root_dict
Serializes the given node to the dictionary Serializes the given node to the documented dictionary format. :param root_node: Node to serialize :returns: The dictionary :rtype: dict
entailment
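A round-trip sketch for dom_to_dict using minidom (the XML fragment is illustrative):

from xml.dom import minidom

doc = minidom.parseString('<a attr="1"><b>text</b><b>more</b></a>')
d = dom_to_dict(doc.documentElement)
# attributes become plain keys; repeated child tags are collected into a list
assert d == {"a": {"attr": "1", "b": [{"_content": "text"}, {"_content": "more"}]}}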
def connect(self):
    """Overrides HTTPSConnection.connect to specify TLS version"""
    # Standard implementation from HTTPSConnection, which is not
    # designed for extension, unfortunately
    sock = socket.create_connection((self.host, self.port),
                                    self.timeout, self.source_address)
    if getattr(self, '_tunnel_host', None):
        self.sock = sock  # pragma: no cover
        self._tunnel()  # pragma: no cover

    # This is the only difference; default wrap_socket uses SSLv23
    self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                ssl_version=ssl.PROTOCOL_TLSv1_2)
Overrides HTTPSConnection.connect to specify TLS version
entailment
def gen_request(self, request_type="json", token=None, set_batch=False, batch_onerror=None):
    """ Convenience method to quickly generate a request

    :param request_type: Type of request (defaults to json)
    :param token: Authentication token
    :param set_batch: Also set this request to batch mode?
    :param batch_onerror: Onerror-parameter for batch mode
    :return: The request
    """
    if request_type == "json":
        local_request = RequestJson()
    elif request_type == "xml":
        local_request = RequestXml()
    else:
        raise UnknownRequestType()

    if token is not None:
        local_request.set_auth_token(token)

    if set_batch:
        local_request.enable_batch(batch_onerror)

    return local_request
Convenience method to quickly generate a request :param request_type: Type of request (defaults to json) :param token: Authentication token :param set_batch: Also set this request to batch mode? :param batch_onerror: Onerror-parameter for batch mode :return: The request
entailment
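A usage sketch for gen_request, assuming it is a method of some communication/factory object (here called comm, a hypothetical instance):

request = comm.gen_request(
    request_type="json",
    token="0_hypothetical_auth_token",
    set_batch=True,
    batch_onerror="continue",
)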
def send_request(self, request, response=None):
    """ Send the request.

    Sends the request and retrieves the results, formats them and returns
    them in a dict or a list (when it's a batchresponse). If something goes
    wrong, raises a SoapFailure or a HTTPError on system-side failures.

    Note: AuthRequest raises an HTTPError on failed authentications!

    :param request: The request to send
    :type request: pythonzimbra.request.Request
    :param response: A prebuilt response object
    :type response: pythonzimbra.response.Response
    :raises: pythonzimbra.exceptions.communication.SoapFailure or
        urllib2.HTTPError
    """
    local_response = None

    if response is None:
        if request.request_type == "json":
            local_response = ResponseJson()
        elif request.request_type == "xml":
            local_response = ResponseXml()
        else:
            raise UnknownRequestType()

    try:
        server_request = ur.urlopen(
            self.url,
            request.get_request().encode("utf-8"),
            self.timeout
        )
        server_response = server_request.read()
        if isinstance(server_response, bytes):
            server_response = server_response.decode("utf-8")

        if response is None:
            local_response.set_response(server_response)
        else:
            response.set_response(server_response)
    except ue.HTTPError as e:
        if e.code == 500:
            # 500 codes normally return a SoapFault, that we can use
            server_response = e.fp.read()
            if isinstance(server_response, bytes):
                server_response = server_response.decode("utf-8")

            if response is None:
                local_response.set_response(server_response)
            else:
                response.set_response(server_response)
        else:
            raise e

    if response is None:
        return local_response
Send the request. Sends the request and retrieves the results, formats them and returns them in a dict or a list (when it's a batchresponse). If something goes wrong, raises a SoapFailure or a HTTPError on system-side failures. Note: AuthRequest raises an HTTPError on failed authentications! :param request: The request to send :type request: pythonzimbra.request.Request :param response: A prebuilt response object :type response: pythonzimbra.response.Response :raises: pythonzimbra.exceptions.communication.SoapFailure or urllib2.HTTPError
entailment
def authenticate(url, account, key, by='name', expires=0, timestamp=None,
                 timeout=None, request_type="xml", admin_auth=False,
                 use_password=False, raise_on_error=False):
    """ Authenticate to the Zimbra server

    :param url: URL of Zimbra SOAP service
    :param account: The account to be authenticated against
    :param key: The preauth key of the domain of the account or a password
        (if admin_auth or use_password is True)
    :param by: If the account is specified as a name, an ID or a
        ForeignPrincipal
    :param expires: When the token expires (or 0 for default expiration)
    :param timestamp: When the token was requested (None for "now")
    :param timeout: Timeout for the communication with the server. Defaults
        to the urllib2-default
    :param request_type: Which type of request to use ("xml" (default) or
        "json")
    :param admin_auth: This request should authenticate and generate an
        admin token. The "key"-parameter therefore holds the admin password
        (implies use_password)
    :param use_password: The "key"-parameter holds a password. Do a
        password-based user authentication.
    :param raise_on_error: Should I raise an exception when an
        authentication error occurs or just return None?
    :return: The authentication token or None
    :rtype: str or None or unicode
    """
    if timestamp is None:
        timestamp = int(time.time()) * 1000

    pak = ""
    if not admin_auth:
        pak = preauth.create_preauth(account, key, by, expires, timestamp)

    if request_type == 'xml':
        auth_request = RequestXml()
    else:
        auth_request = RequestJson()

    request_data = {
        'account': {
            'by': by,
            '_content': account
        }
    }

    ns = "urn:zimbraAccount"

    if admin_auth:
        ns = "urn:zimbraAdmin"
        request_data['password'] = key
    elif use_password:
        request_data['password'] = {
            "_content": key
        }
    else:
        request_data['preauth'] = {
            'timestamp': timestamp,
            'expires': expires,
            '_content': pak
        }

    auth_request.add_request(
        'AuthRequest',
        request_data,
        ns
    )

    server = Communication(url, timeout)

    if request_type == 'xml':
        response = ResponseXml()
    else:
        response = ResponseJson()

    server.send_request(auth_request, response)

    if response.is_fault():
        if raise_on_error:
            raise AuthenticationFailed(
                "Cannot authenticate user: (%s) %s" % (
                    response.get_fault_code(),
                    response.get_fault_message()
                )
            )
        return None

    return response.get_response()['AuthResponse']['authToken']
Authenticate to the Zimbra server :param url: URL of Zimbra SOAP service :param account: The account to be authenticated against :param key: The preauth key of the domain of the account or a password (if admin_auth or use_password is True) :param by: If the account is specified as a name, an ID or a ForeignPrincipal :param expires: When the token expires (or 0 for default expiration) :param timestamp: When the token was requested (None for "now") :param timeout: Timeout for the communication with the server. Defaults to the urllib2-default :param request_type: Which type of request to use ("xml" (default) or "json") :param admin_auth: This request should authenticate and generate an admin token. The "key"-parameter therefore holds the admin password (implies use_password) :param use_password: The "key"-parameter holds a password. Do a password- based user authentication. :param raise_on_error: Should I raise an exception when an authentication error occurs or just return None? :return: The authentication token or None :rtype: str or None or unicode
entailment
def read_dbf(dbf_path, index=None, cols=False, incl_index=False):
    """
    Read a dbf file as a pandas.DataFrame, optionally selecting the index
    variable and which columns are to be loaded.

    __author__ = "Dani Arribas-Bel <darribas@asu.edu>"
    ...

    Arguments
    ---------
    dbf_path    : str
                  Path to the DBF file to be read
    index       : str
                  Name of the column to be used as the index of the DataFrame
    cols        : list
                  List with the names of the columns to be read into the
                  DataFrame. Defaults to False, which reads the whole dbf
    incl_index  : Boolean
                  If True index is included in the DataFrame as a column too.
                  Defaults to False

    Returns
    -------
    df          : DataFrame
                  pandas.DataFrame object created
    """
    db = ps.open(dbf_path)
    if cols:
        if incl_index:
            cols.append(index)
        vars_to_read = cols
    else:
        vars_to_read = db.header
    data = dict([(var, db.by_col(var)) for var in vars_to_read])
    if index:
        index = db.by_col(index)
        db.close()
        return pd.DataFrame(data, index=index)
    else:
        db.close()
        return pd.DataFrame(data)
Read a dbf file as a pandas.DataFrame, optionally selecting the index variable and which columns are to be loaded. __author__ = "Dani Arribas-Bel <darribas@asu.edu> " ... Arguments --------- dbf_path : str Path to the DBF file to be read index : str Name of the column to be used as the index of the DataFrame cols : list List with the names of the columns to be read into the DataFrame. Defaults to False, which reads the whole dbf incl_index : Boolean If True index is included in the DataFrame as a column too. Defaults to False Returns ------- df : DataFrame pandas.DataFrame object created
entailment
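A usage sketch for read_dbf (the file path and column names are hypothetical):

df = read_dbf("data/municipalities.dbf", index="GEOID", cols=["NAME", "POP"])
print(df.head())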
def column_mask(self):
    """ndarray, True where column margin <= min_base_size, same shape as slice."""
    margin = compress_pruned(
        self._slice.margin(
            axis=0,
            weighted=False,
            include_transforms_for_dims=self._hs_dims,
            prune=self._prune,
        )
    )
    mask = margin < self._size
    if margin.shape == self._shape:
        # If margin shape is the same as slice's (such as in a col margin for
        # MR x CAT), don't broadcast the mask to the array shape, since
        # they're already the same.
        return mask

    # If the column margin is a row vector - broadcast its mask to the array shape
    return np.logical_or(np.zeros(self._shape, dtype=bool), mask)
ndarray, True where column margin <= min_base_size, same shape as slice.
entailment
def table_mask(self):
    """ndarray, True where table margin <= min_base_size, same shape as slice."""
    margin = compress_pruned(
        self._slice.margin(
            axis=None,
            weighted=False,
            include_transforms_for_dims=self._hs_dims,
            prune=self._prune,
        )
    )
    mask = margin < self._size
    if margin.shape == self._shape:
        return mask
    if self._slice.dim_types[0] == DT.MR:
        # If the margin is a column vector - broadcast its mask to the array shape
        return np.logical_or(np.zeros(self._shape, dtype=bool), mask[:, None])
    return np.logical_or(np.zeros(self._shape, dtype=bool), mask)
ndarray, True where table margin <= min_base_size, same shape as slice.
entailment
def values(self):
    """list of _ColumnPairwiseSignificance tests.

    Result has as many elements as there are columns in the slice. Each
    significance test contains `p_vals` and `t_stats` significance tests.
    """
    # TODO: Figure out how to intersperse pairwise objects for columns
    # that represent H&S
    return [
        _ColumnPairwiseSignificance(
            self._slice,
            col_idx,
            self._axis,
            self._weighted,
            self._alpha,
            self._only_larger,
            self._hs_dims,
        )
        for col_idx in range(self._slice.get_shape(hs_dims=self._hs_dims)[1])
    ]
list of _ColumnPairwiseSignificance tests. Result has as many elements as there are columns in the slice. Each significance test contains `p_vals` and `t_stats` significance tests.
entailment
def pairwise_indices(self):
    """ndarray containing tuples of pairwise indices."""
    return np.array([sig.pairwise_indices for sig in self.values]).T
ndarray containing tuples of pairwise indices.
entailment
def summary_pairwise_indices(self):
    """ndarray containing tuples of pairwise indices for the column summary."""
    summary_pairwise_indices = np.empty(
        self.values[0].t_stats.shape[1], dtype=object
    )
    summary_pairwise_indices[:] = [
        sig.summary_pairwise_indices for sig in self.values
    ]
    return summary_pairwise_indices
ndarray containing tuples of pairwise indices for the column summary.
entailment
def score(self):
    """
    Calculate and return a heuristic score for this Parser against the
    provided script source and path. This is used to order the
    ArgumentParsers as "most likely to work" against a given script/source
    file.

    Each parser has a calculate_score() function that returns a list of
    booleans representing the matches against conditions. This is converted
    into a % match and used to sort parse engines.

    :return: float
    """
    if self._heuristic_score is None:
        matches = self.heuristic()
        self._heuristic_score = float(sum(matches)) / float(len(matches))
    return self._heuristic_score
Calculate and return a heuristic score for this Parser against the provided script source and path. This is used to order the ArgumentParsers as "most likely to work" against a given script/source file. Each parser has a calculate_score() function that returns a list of booleans representing the matches against conditions. This is converted into a % match and used to sort parse engines. :return: float
entailment
def reset(self):
    """
    Reset the calibration to its initial state
    """
    simulation = self.survey_scenario.simulation
    holder = simulation.get_holder(self.weight_name)
    holder.array = numpy.array(self.initial_weight, dtype=holder.variable.dtype)
Reset the calibration to its initial state
entailment
def _set_survey_scenario(self, survey_scenario):
    """
    Set survey scenario

    :param survey_scenario: the survey scenario
    """
    self.survey_scenario = survey_scenario
    # TODO deal with baseline if reform is present
    if survey_scenario.simulation is None:
        survey_scenario.simulation = survey_scenario.new_simulation()
    period = self.period
    self.filter_by = filter_by = survey_scenario.calculate_variable(
        variable=self.filter_by_name, period=period)
    # TODO: should not be france specific
    self.weight_name = weight_name = self.survey_scenario.weight_column_name_by_entity['menage']
    self.initial_weight_name = weight_name + "_ini"
    self.initial_weight = initial_weight = survey_scenario.calculate_variable(
        variable=weight_name, period=period)
    self.initial_total_population = sum(initial_weight * filter_by)
    self.weight = survey_scenario.calculate_variable(variable=weight_name, period=period)
Set survey scenario :param survey_scenario: the survey scenario
entailment
def set_parameters(self, parameter, value):
    """
    Set parameters value

    :param parameter: the parameter to be set
    :param value: the value used to set the parameter
    """
    if parameter == 'lo':
        self.parameters['lo'] = 1 / value
    else:
        self.parameters[parameter] = value
Set parameters value :param parameter: the parameter to be set :param value: the value used to set the parameter
entailment
def _build_calmar_data(self):
    """
    Builds the data dictionary used as calmar input argument
    """
    # Select only filtered entities
    assert self.initial_weight_name is not None
    data = pd.DataFrame()
    data[self.initial_weight_name] = self.initial_weight * self.filter_by
    for variable in self.margins_by_variable:
        if variable == 'total_population':
            continue
        assert variable in self.survey_scenario.tax_benefit_system.variables
        period = self.period
        data[variable] = self.survey_scenario.calculate_variable(variable=variable, period=period)

    return data
Builds the data dictionary used as calmar input argument
entailment
def _update_weights(self, margins, parameters={}):
    """
    Run calmar, stores new weights and returns adjusted margins
    """
    data = self._build_calmar_data()
    assert self.initial_weight_name is not None
    parameters['initial_weight'] = self.initial_weight_name
    val_pondfin, lambdasol, updated_margins = calmar(data, margins, **parameters)
    # Update only after filtering weights
    self.weight = val_pondfin * self.filter_by + self.weight * (logical_not(self.filter_by))
    return updated_margins
Run calmar, stores new weights and returns adjusted margins
entailment
def set_calibrated_weights(self):
    """
    Modify the weights to use the calibrated weights
    """
    period = self.period
    survey_scenario = self.survey_scenario
    assert survey_scenario.simulation is not None
    for simulation in [survey_scenario.simulation, survey_scenario.baseline_simulation]:
        if simulation is None:
            continue
        simulation.set_input(self.weight_name, period, self.weight)
Modify the weights to use the calibrated weights
entailment
def get_parameter_action(action):
    """
    To foster a general schema that can accommodate multiple parsers, the
    general behavior here is described rather than the specific language of
    a given parser. For instance, the 'append' action of an argument
    collapses each argument given into a single argument.

    It also returns a set of actions, since presumably some actions can
    impact multiple parameter options.
    """
    actions = set()
    if isinstance(action, argparse._AppendAction):
        actions.add(SPECIFY_EVERY_PARAM)
    return actions
To foster a general schema that can accommodate multiple parsers, the general behavior here is described rather than the specific language of a given parser. For instance, the 'append' action of an argument collapses each argument given into a single argument. It also returns a set of actions, since presumably some actions can impact multiple parameter options
entailment
def nnd_hotdeck_using_feather(receiver=None, donor=None,
                              matching_variables=None, z_variables=None):
    """
    Not working
    """
    import feather

    assert receiver is not None and donor is not None
    assert matching_variables is not None

    temporary_directory_path = os.path.join(config_files_directory, 'tmp')
    assert os.path.exists(temporary_directory_path)
    receiver_path = os.path.join(temporary_directory_path, 'receiver.feather')
    donor_path = os.path.join(temporary_directory_path, 'donor.feather')
    feather.write_dataframe(receiver, receiver_path)
    feather.write_dataframe(donor, donor_path)

    if isinstance(matching_variables, str):
        match_vars = '"{}"'.format(matching_variables)
    elif len(matching_variables) == 1:
        match_vars = '"{}"'.format(matching_variables[0])
    else:
        match_vars = '"{}"'.format('todo')

    r_script = """
rm(list=ls())
gc()
devtools::install_github("wesm/feather/R")
library(feather)
library(StatMatch)
receiver <- read_feather({receiver_path})
donor <- read_feather({donor_path})
summary(receiver)
summary(donor)
# variables
receiver = as.data.frame(receiver)
donor = as.data.frame(donor)
gc()
match_vars = {match_vars}
# don_class = c("sexe")
out.nnd <- NND.hotdeck(
    data.rec = receiver,
    data.don = donor,
    match.vars = match_vars
    )
# out.nndsummary(out.nnd$mtc.ids)
# head(out.nnd$mtc.ids, 10)
# head(receiver, 10)
fused.nnd.m <- create.fused(
    data.rec = receiver,
    data.don = donor,
    mtc.ids = out.nnd$mtc.ids,
    z.vars = "{z_variables}"
    )
summary(fused.nnd.m)
""".format(
        receiver_path=receiver_path,
        donor_path=donor_path,
        match_vars=match_vars,
        z_variables=z_variables,
    )
    print(r_script)
Not working
entailment
def wishart_pfaffian(self): """ndarray of wishart pfaffian CDF, before normalization""" return np.array( [Pfaffian(self, val).value for i, val in np.ndenumerate(self._chisq)] ).reshape(self._chisq.shape)
ndarray of wishart pfaffian CDF, before normalization
entailment
def other_ind(self): """last row or column of square A""" return np.full(self.n_min, self.size - 1, dtype=int)  # np.int was removed in NumPy 1.24; use the builtin int
last row or column of square A
entailment
def K(self): """Normalizing constant for wishart CDF.""" K1 = np.float_power(pi, 0.5 * self.n_min * self.n_min) K1 /= ( np.float_power(2, 0.5 * self.n_min * self._n_max) * self._mgamma(0.5 * self._n_max, self.n_min) * self._mgamma(0.5 * self.n_min, self.n_min) ) K2 = np.float_power( 2, self.alpha * self.size + 0.5 * self.size * (self.size + 1) ) for i in xrange(self.size): K2 *= gamma(self.alpha + i + 1) return K1 * K2
Normalizing constant for wishart CDF.
entailment
def value(self): """return float Cumulative Distribution Function. The return value represents a floating point number of the CDF of the largest eigenvalue of a Wishart(n, p) evaluated at chisq_val. """ wishart = self._wishart_cdf # Prepare variables for integration algorithm A = self.A p = self._gammainc_a g = gamma(wishart.alpha_vec) q_ind = np.arange(2 * wishart.n_min - 2) q_vec = 2 * wishart.alpha + q_ind + 2 q = np.float_power(0.5, q_vec) * gamma(q_vec) * gammainc(q_vec, self._chisq_val) # Perform integration (i.e. calculate Pfaffian CDF) for i in xrange(wishart.n_min): # TODO consider index tricks instead of iteration here b = 0.5 * p[i] * p[i] for j in xrange(i, wishart.n_min - 1): b -= q[i + j] / (g[i] * g[j + 1]) A[j + 1, i] = p[i] * p[j + 1] - 2 * b A[i, j + 1] = -A[j + 1, i] if np.any(np.isnan(A)): return 0 return np.sqrt(det(A))
return float Cumulative Distribution Function. The return value represents a floating point number of the CDF of the largest eigenvalue of a Wishart(n, p) evaluated at chisq_val.
entailment
def A(self): """ndarray - a skew-symmetric matrix for integrating the target distribution""" wishart = self._wishart_cdf base = np.zeros([wishart.size, wishart.size]) if wishart.n_min % 2: # If matrix has odd number of elements, we need to append a # row and a col, in order for the pfaffian algorithm to work base = self._make_size_even(base) return base
ndarray - a skew-symmetric matrix for integrating the target distribution
entailment
def data(cls, cube, weighted, prune): """Return ndarray representing table index by margin.""" return cls()._data(cube, weighted, prune)
Return ndarray representing table index by margin.
entailment
def _data(self, cube, weighted, prune): """ndarray representing table index by margin.""" result = [] for slice_ in cube.slices: if cube.has_mr: return self._mr_index(cube, weighted, prune) num = slice_.margin(axis=0, weighted=weighted, prune=prune) den = slice_.margin(weighted=weighted, prune=prune) margin = num / den proportions = slice_.proportions(axis=1, weighted=weighted, prune=prune) result.append(proportions / margin) if len(result) == 1 and cube.ndim < 3: result = result[0] else: if prune: mask = np.array([slice_.mask for slice_ in result]) result = np.ma.masked_array(result, mask) else: result = np.array(result) return result
ndarray representing table index by margin.
entailment
def gini(values, weights = None, bin_size = None): """ Gini coefficient (normalized to 1), using the fastgini formula: G = 1 - 2 * sum_{i=1..N} W_i * (sum_{j=1..i} W_j * X_j - W_i * X_i / 2) / (sum_{i=1..N} W_i * X_i * sum_{i=1..N} W_i), where observations are sorted in ascending order of X. From http://fmwww.bc.edu/RePec/bocode/f/fastgini.html """ if weights is None: weights = ones(len(values)) df = pd.DataFrame({'x': values, 'w': weights}) df = df.sort_values(by='x') x = df['x'] w = df['w'] wx = w * x cdf = cumsum(wx) - 0.5 * wx numerator = (w * cdf).sum() denominator = ((wx).sum()) * (w.sum()) gini = 1 - 2 * (numerator / denominator) return gini
Gini coefficient (normalized to 1), using the fastgini formula: G = 1 - 2 * sum_{i=1..N} W_i * (sum_{j=1..i} W_j * X_j - W_i * X_i / 2) / (sum_{i=1..N} W_i * X_i * sum_{i=1..N} W_i), where observations are sorted in ascending order of X. From http://fmwww.bc.edu/RePec/bocode/f/fastgini.html
entailment
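Two sanity checks of the fastgini formula, restated self-containedly with numpy and pandas (mirroring the sort and cumulative-sum steps above):

```python
import numpy as np
import pandas as pd

def gini_check(values, weights):
    # Same steps as above: sort by value, accumulate weighted values.
    df = pd.DataFrame({"x": values, "w": weights}).sort_values(by="x")
    wx = df["w"] * df["x"]
    cdf = np.cumsum(wx) - 0.5 * wx
    return 1 - 2 * (df["w"] * cdf).sum() / (wx.sum() * df["w"].sum())

print(gini_check([10, 10, 10, 10], [1, 1, 1, 1]))  # 0.0 (perfect equality)
print(gini_check([0, 0, 0, 100], [1, 1, 1, 1]))    # 0.75, i.e. (n - 1) / n
```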
def kakwani(values, ineq_axis, weights = None): """ Computes the Kakwani index """ from scipy.integrate import simps if weights is None: weights = ones(len(values)) # sign = -1 # if tax == True: # sign = -1 # else: # sign = 1 PLCx, PLCy = pseudo_lorenz(values, ineq_axis, weights) LCx, LCy = lorenz(ineq_axis, weights) del PLCx return simps((LCy - PLCy), LCx)
Computes the Kakwani index
entailment
def lorenz(values, weights = None): """ Computes Lorenz Curve coordinates """ if weights is None: weights = ones(len(values)) df = pd.DataFrame({'v': values, 'w': weights}) df = df.sort_values(by = 'v') x = cumsum(df['w']) x = x / float(x[-1:]) y = cumsum(df['v'] * df['w']) y = y / float(y[-1:]) return x, y
Computes Lorenz Curve coordinates
entailment
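A worked example of the Lorenz coordinates, assuming unit weights; both cumulative shares end at 1, as expected:

```python
import numpy as np

values = np.array([1.0, 2.0, 3.0, 4.0])
weights = np.ones_like(values)

order = np.argsort(values)            # sort by value, as in lorenz()
w, v = weights[order], values[order]
x = np.cumsum(w) / w.sum()            # cumulative population share
y = np.cumsum(v * w) / (v * w).sum()  # cumulative value share
print(x)  # [0.25 0.5  0.75 1.  ]
print(y)  # [0.1  0.3  0.6  1.  ]
```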
def pvals(cls, slice_, axis=0, weighted=True): """Wishart CDF values for slice columns as square ndarray. Wishart CDF (Cumulative Distribution Function) is calculated to determine statistical significance of slice columns, in relation to all other columns. These values represent the answer to the question "How much is a particular column different from each other column in the slice". """ return cls._factory(slice_, axis, weighted).pvals
Wishart CDF values for slice columns as square ndarray. Wishart CDF (Cumulative Distribution Function) is calculated to determine statistical significance of slice columns, in relation to all other columns. These values represent the answer to the question "How much is a particular column different from each other column in the slice".
entailment
def _chi_squared(self, proportions, margin, observed): """return ndarray of chi-squared measures for proportions' columns. *proportions* (ndarray): The basis of chi-squared calculations *margin* (ndarray): Column margin for proportions (See `def _margin`) *observed* (ndarray): Row margin proportions (See `def _observed`) """ n = self._element_count chi_squared = np.zeros([n, n]) for i in xrange(1, n): for j in xrange(0, n - 1): denominator = 1 / margin[i] + 1 / margin[j] chi_squared[i, j] = chi_squared[j, i] = ( np.sum(np.square(proportions[:, i] - proportions[:, j]) / observed) / denominator ) return chi_squared
return ndarray of chi-squared measures for proportions' columns. *proportions* (ndarray): The basis of chi-squared calculations *margin* (ndarray): Column margin for proportions (See `def _margin`) *observed* (ndarray): Row margin proportions (See `def _observed`)
entailment
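A toy reproduction of the chi-squared loop above, with hypothetical proportions, column margins, and observed row shares (the loop is tightened to the lower triangle, which yields the same symmetric result):

```python
import numpy as np

proportions = np.array([[0.2, 0.5, 0.3],
                        [0.8, 0.5, 0.7]])   # rows x columns
margin = np.array([100.0, 80.0, 60.0])      # column margin
observed = np.array([0.5, 0.5])             # row margin proportions

n = proportions.shape[1]
chi_squared = np.zeros((n, n))
for i in range(1, n):
    for j in range(i):
        denominator = 1 / margin[i] + 1 / margin[j]
        chi_squared[i, j] = chi_squared[j, i] = np.sum(
            np.square(proportions[:, i] - proportions[:, j]) / observed
        ) / denominator
print(np.round(chi_squared, 2))  # e.g. chi_squared[1, 0] == 16.0
```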
def _pvals_from_chi_squared(self, pairwise_chisq): """return statistical significance for props' columns. *pairwise_chisq* (ndarray) Matrix of chi-squared values (bases for Wishart CDF) """ return self._intersperse_insertion_rows_and_columns( 1.0 - WishartCDF(pairwise_chisq, self._n_min, self._n_max).values )
return statistical significance for props' columns. *pairwise_chisq* (ndarray) Matrix of chi-squared values (bases for Wishart CDF)
entailment
def _factory(slice_, axis, weighted): """return subclass for PairwiseSignificance, based on slice dimension types.""" if slice_.dim_types[0] == DT.MR_SUBVAR: return _MrXCatPairwiseSignificance(slice_, axis, weighted) return _CatXCatPairwiseSignificance(slice_, axis, weighted)
return subclass for PairwiseSignificance, based on slice dimension types.
entailment
def _intersperse_insertion_rows_and_columns(self, pairwise_pvals): """Return pvals matrix with inserted NaN rows and columns, as numpy.ndarray. Each insertion (a header or a subtotal) creates an offset in the calculated pvals. These need to be taken into account when converting each pval to a corresponding column letter. For this reason, we need to insert an all-NaN row and a column at the right indices. These are the inserted indices of each insertion, along respective dimensions. """ for i in self._insertion_indices: pairwise_pvals = np.insert(pairwise_pvals, i, np.nan, axis=0) pairwise_pvals = np.insert(pairwise_pvals, i, np.nan, axis=1) return pairwise_pvals
Return pvals matrix with inserted NaN rows and columns, as numpy.ndarray. Each insertion (a header or a subtotal) creates an offset in the calculated pvals. These need to be taken into account when converting each pval to a corresponding column letter. For this reason, we need to insert an all-NaN row and a column at the right indices. These are the inserted indices of each insertion, along respective dimensions.
entailment
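The interspersing step in isolation, with a hypothetical insertion index of 1 applied to a 2x2 p-values matrix:

```python
import numpy as np

pairwise_pvals = np.array([[1.0, 0.2],
                           [0.2, 1.0]])
for i in [1]:  # hypothetical insertion index
    # np.insert with a scalar fill inserts a full NaN row, then column.
    pairwise_pvals = np.insert(pairwise_pvals, i, np.nan, axis=0)
    pairwise_pvals = np.insert(pairwise_pvals, i, np.nan, axis=1)
print(pairwise_pvals)
# [[1.  nan 0.2]
#  [nan nan nan]
#  [0.2 nan 1. ]]
```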
def _opposite_axis_margin(self): """ndarray representing margin along the axis opposite of self._axis In the process of calculating p-values for the column significance testing we need both the margin along the primary axis and the percentage margin along the opposite axis. """ off_axis = 1 - self._axis return self._slice.margin(axis=off_axis, include_mr_cat=self._include_mr_cat)
ndarray representing margin along the axis opposite of self._axis In the process of calculating p-values for the column significance testing we need both the margin along the primary axis and the percentage margin along the opposite axis.
entailment
def _proportions(self): """ndarray representing slice proportions along correct axis.""" return self._slice.proportions( axis=self._axis, include_mr_cat=self._include_mr_cat )
ndarray representing slice proportions along correct axis.
entailment
def _pairwise_chisq(self): """Pairwise comparisons (Chi-Square) along axis, as numpy.ndarray. Returns a square, symmetric matrix of test statistics for the null hypothesis that each vector along *axis* is equal to each other. """ return self._chi_squared(self._proportions, self._margin, self._observed)
Pairwise comparisons (Chi-Square) along axis, as numpy.ndarray. Returns a square, symmetric matrix of test statistics for the null hypothesis that each vector along *axis* is equal to each other.
entailment
def _pairwise_chisq(self): """Pairwise comparisons (Chi-Square) along axis, as numpy.ndarray. Returns a list of square and symmetric matrices of test statistics for the null hypothesis that each vector along *axis* is equal to each other. """ return [ self._chi_squared( mr_subvar_proportions, self._margin[idx], self._opposite_axis_margin[idx] / np.sum(self._opposite_axis_margin[idx]), ) for (idx, mr_subvar_proportions) in enumerate(self._proportions) ]
Pairwise comparisons (Chi-Square) along axis, as numpy.ndarray. Returns a list of square and symmetric matrices of test statistics for the null hypothesis that each vector along *axis* is equal to each other.
entailment
def process_parser(self): """ We can't use the exception catch trick for docopt because the module prevents access to its innards (__all__ = ['docopt']). Instead call with --help enforced, catch sys.exit and work up to the calling docopt function to pull out the elements. This is horrible. :return: """ try: # Parse with --help to enforce exit usage_sections = docopt.docopt(self.parser, ['--help']) except SystemExit as e: parser = inspect.trace()[-2][0].f_locals ''' docopt represents all values as strings and doesn't automatically cast, we probably want to do some testing to see if we can convert the default value (Option.value) to a particular type. ''' def guess_type(s): v = s  # fall back to str; the most specific successful conversion below wins try: v = float(s) v = int(s) except ValueError: pass return type(v) self.script_groups = ['Arguments'] self.nodes = OrderedDict() self.containers = OrderedDict() self.containers['default'] = [] for option in parser['options']: if option.long in ['--help', '--version']: continue option.type = guess_type(option.value) option_name = option.long.strip('-') node = DocOptNode(option_name, option=option) self.nodes[option_name] = node self.containers['default'].append(option_name) self.class_name = os.path.splitext(os.path.basename(self.script_path))[0] self.script_path = self.script_path self.script_description = self.parser
We can't use the exception catch trick for docopt because the module prevents access to its innards (__all__ = ['docopt']). Instead call with --help enforced, catch sys.exit and work up to the calling docopt function to pull out the elements. This is horrible. :return:
entailment
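The corrected `guess_type` idiom from `process_parser` can be checked on its own; the last successful conversion wins, falling back to `str`:

```python
def guess_type(s):
    v = s               # fall back to the string itself
    try:
        v = float(s)    # overwritten if s parses as a float
        v = int(s)      # overwritten again if s parses as an int
    except ValueError:
        pass
    return type(v)

print(guess_type("5"), guess_type("5.0"), guess_type("abc"))
# <class 'int'> <class 'float'> <class 'str'>
```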
def build_dummies_dict(data): """ Return a dict with unique values as keys and vectors as values """ unique_val_list = unique(data) output = {} for val in unique_val_list: output[val] = (data == val) return output
Return a dict with unique values as keys and vectors as values
entailment
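A quick example of the dummies dict on a small categorical vector:

```python
import numpy as np
from numpy import unique

data = np.array(["a", "b", "a", "c"])
# One boolean indicator vector per unique value, as built above.
dummies = {val: (data == val) for val in unique(data)}
print(dummies["a"])  # [ True False  True False]
print(dummies["c"])  # [False False False  True]
```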
def calmar(data_in, margins, initial_weight = 'wprm_init', method = 'linear', lo = None, up = None, use_proportions = False, xtol = 1.49012e-08, maxfev = 256): """ Calibrate weights to satisfy some margin constraints :param dataframe data_in: The observations data :param str initial_weight: The initial weight variable name :param dict margins: Margins is a dictionary containing for each variable as key the following values - a scalar for numeric variables - a dictionary with categories as keys and populations as values - optionally a key named `total_population` with value the total population. If absent it is initialized to the actual total population :param str method: Should be 'linear', 'raking ratio' or 'logit' :param float lo: lower bound on weights ratio. Mandatory when using logit method. Should be < 1. :param float up: upper bound on weights ratio. Mandatory when using logit method. Should be > 1. :param bool use_proportions: Defaults to False. If True, use proportions when the total population from the margins doesn't match the actual total population :param xtol: relative precision on Lagrangian multipliers. By default xtol = 1.49012e-08 (default fsolve xtol) :param maxfev: maximum number of function evaluations, defaults to 256 """ from scipy.optimize import fsolve # remove null weights and keep original data null_weight_observations = data_in[initial_weight].isnull().sum() if null_weight_observations > 0: log.info("{} observations have a NaN weight. Not used in the calibration.".format(null_weight_observations)) is_non_zero_weight = (data_in[initial_weight].fillna(0) > 0) if (data_in[initial_weight].fillna(0) <= 0).sum() > null_weight_observations: log.info("{} observations have a zero weight. Not used in the calibration.".format( (data_in[initial_weight].fillna(0) <= 0).sum() - null_weight_observations)) variables = set(margins.keys()).intersection(set(data_in.columns)) for variable in variables: null_value_observations = data_in[variable].isnull().sum() if null_value_observations > 0: log.info("For variable {}, {} observations have a NaN value. Not used in the calibration.".format( variable, null_value_observations)) is_non_zero_weight = is_non_zero_weight & data_in[variable].notnull() if not is_non_zero_weight.all(): log.info("We drop {} observations.".format((~is_non_zero_weight).sum())) data = dict() for a in data_in.columns: data[a] = data_in.loc[is_non_zero_weight, a].copy() if not margins: raise Exception("Calmar requires a non-empty dict of margins") # choose method assert method in ['linear', 'raking ratio', 'logit'], "method should be 'linear', 'raking ratio' or 'logit'" if method == 'linear': F = linear F_prime = linear_prime elif method == 'raking ratio': F = raking_ratio F_prime = raking_ratio_prime elif method == 'logit': assert up is not None, "When method == 'logit', a value > 1 for up is mandatory" assert up > 1, "up should be > 1" assert lo is not None, "When method == 'logit', a value < 1 for lo is mandatory" assert lo < 1, "lo should be < 1" def F(x): return logit(x, lo, up) def F_prime(x): return logit_prime(x, lo, up) # Build the observations matrix if 'total_population' in margins: total_population = margins.pop('total_population') else: total_population = data[initial_weight].fillna(0).sum() nk = len(data[initial_weight]) # number of Lagrange parameters (at least total population) nj = 1 margins_new = {} margins_new_dict = {} for var, val in margins.items(): if isinstance(val, dict): dummies_dict = build_dummies_dict(data[var]) k, pop = 0, 0 for cat, nb in val.items(): cat_varname = var + '_' + str(cat) data[cat_varname] = dummies_dict[cat] margins_new[cat_varname] = nb if var not in margins_new_dict: margins_new_dict[var] = {} margins_new_dict[var][cat] = nb pop += nb k += 1 nj += 1 # Check total population if pop != total_population: if use_proportions: log.info( 'calmar: categorical variable {} is inconsistent with population; using proportions'.format( var ) ) for cat, nb in val.items(): cat_varname = var + '_' + str(cat) margins_new[cat_varname] = nb * total_population / pop margins_new_dict[var][cat] = nb * total_population / pop else: raise Exception('calmar: categorical variable {} weights sum up to {} != {}'.format( var, pop, total_population)) else: margins_new[var] = val margins_new_dict[var] = val nj += 1 # Always keep the total population constraint if hasattr(data, 'dummy_is_in_pop'): raise Exception('dummy_is_in_pop is not a valid variable name') data['dummy_is_in_pop'] = ones(nk) margins_new['dummy_is_in_pop'] = total_population # Lagrange parameters initialized to zero lambda0 = zeros(nj) # initial weights d = data[initial_weight].values x = zeros((nk, nj)) # nb obs x nb constraints xmargins = zeros(nj) margins_dict = {} j = 0 for var, val in margins_new.items(): x[:, j] = data[var] xmargins[j] = val margins_dict[var] = val j += 1 # Solve the first-order conditions def constraint(l): return dot(d * F(dot(x, l)), x) - xmargins def constraint_prime(l): return dot(d * (x.T * F_prime(dot(x, l))), x) # the Jacobian above corresponds to: constraintprime = @(l) x*(d.*Fprime(x'*l)*x'); tries, ier = 0, 2 err_max = 1 conv = 1 while (ier == 2 or ier == 5 or ier == 4) and not (tries >= 10 or (err_max < 1e-6 and conv < 1e-8)): lambdasol, infodict, ier, mesg = fsolve( constraint, lambda0, fprime = constraint_prime, maxfev = maxfev, xtol = xtol, full_output = 1, ) lambda0 = 1 * lambdasol tries += 1 pondfin = d * F(dot(x, lambdasol)) rel_error = {} for var, val in margins_new.items(): rel_error[var] = abs((data[var] * pondfin).sum() - margins_dict[var]) / margins_dict[var] sorted_err = sorted(rel_error.items(), key = operator.itemgetter(1), reverse = True) conv = abs(err_max - sorted_err[0][1]) err_max = sorted_err[0][1] if (ier == 2 or ier == 5 or ier == 4): log.debug("optimization converged after {} tries".format(tries)) # rebuild a weight vector with the same size as the initial one pondfin_out = array(data_in[initial_weight], dtype = float64) pondfin_out[is_non_zero_weight] = pondfin return pondfin_out, lambdasol, margins_new_dict
Calibrate weights to satisfy some margin constraints :param dataframe data_in: The observations data :param str initial_weight: The initial weight variable name :param dict margins: Margins is a dictionary containing for each variable as key the following values - a scalar for numeric variables - a dictionary with categories as keys and populations as values - optionally a key named `total_population` with value the total population. If absent it is initialized to the actual total population :param str method: Should be 'linear', 'raking ratio' or 'logit' :param float lo: lower bound on weights ratio. Mandatory when using logit method. Should be < 1. :param float up: upper bound on weights ratio. Mandatory when using logit method. Should be > 1. :param bool use_proportions: Defaults to False. If True, use proportions when the total population from the margins doesn't match the actual total population :param xtol: relative precision on Lagrangian multipliers. By default xtol = 1.49012e-08 (default fsolve xtol) :param maxfev: maximum number of function evaluations, defaults to 256
entailment
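A hypothetical shape for the `margins` argument described in the docstring above; the column names and targets are invented for illustration, and the final call is left commented since `calmar` depends on module-level helpers:

```python
import pandas as pd

data_in = pd.DataFrame({
    "wprm_init": [1.0, 1.2, 0.8, 1.0],   # initial weights
    "age": [25, 40, 60, 35],             # numeric variable
    "sexe": [0, 1, 0, 1],                # categorical variable
})
margins = {
    "age": 170.0,                  # scalar target: sum(weight * age)
    "sexe": {0: 2.1, 1: 1.9},      # per-category population targets
    "total_population": 4.0,       # optional; defaults to the actual total
}
# new_weights, lambdas, adjusted = calmar(data_in, margins, method="linear")
```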
def ca_main_axis(self): """For univariate CA, the main axis is the categorical axis""" try: ca_ind = self.dim_types.index(DT.CA_SUBVAR) return 1 - ca_ind except ValueError: return None
For univariate CA, the main axis is the categorical axis
entailment
def can_compare_pairwise(self): """Return bool indicating if slice can compute pairwise comparisons. Currently, only the CAT x CAT slice can compute pairwise comparisons. This also includes the categorical array categories dimension (CA_CAT). """ if self.ndim != 2: return False return all(dt in DT.ALLOWED_PAIRWISE_TYPES for dt in self.dim_types)
Return bool indicating if slice can compute pairwise comparisons. Currently, only the CAT x CAT slice can compute pairwise comparisons. This also includes the categorical array categories dimension (CA_CAT).
entailment
def get_shape(self, prune=False, hs_dims=None): """Tuple of array dimensions' lengths. It returns a tuple of ints, each representing the length of a cube dimension, in the order those dimensions appear in the cube. Pruning is supported. Dimensions that get reduced to a single element (e.g. due to pruning) are removed from the returned shape, thus allowing for the differentiation between true 2D cubes (over which statistical testing can be performed) and essentially 1D cubes (over which it can't). Usage: >>> shape = get_shape() >>> pruned_shape = get_shape(prune=True) """ if not prune: return self.as_array(include_transforms_for_dims=hs_dims).shape shape = compress_pruned( self.as_array(prune=True, include_transforms_for_dims=hs_dims) ).shape # Eliminate dimensions that get reduced to 1 # (e.g. single element categoricals) return tuple(n for n in shape if n > 1)
Tuple of array dimensions' lengths. It returns a tuple of ints, each representing the length of a cube dimension, in the order those dimensions appear in the cube. Pruning is supported. Dimensions that get reduced to a single element (e.g. due to pruning) are removed from the returned shape, thus allowing for the differentiation between true 2D cubes (over which statistical testing can be performed) and essentially 1D cubes (over which it can't). Usage: >>> shape = get_shape() >>> pruned_shape = get_shape(prune=True)
entailment
def index_table(self, axis=None, baseline=None, prune=False): """Return index percentages for a given axis and baseline. The index values represent the ratio of the percentages to the corresponding baseline values, expressed as a percentage. The baseline values are the univariate percentages of the corresponding variable. """ proportions = self.proportions(axis=axis) baseline = ( baseline if baseline is not None else self._prepare_index_baseline(axis) ) # Fix the shape to enable correct broadcasting if ( axis == 0 and len(baseline.shape) <= 1 and self.ndim == len(self.get_shape()) ): baseline = baseline[:, None] indexes = proportions / baseline * 100 return self._apply_pruning_mask(indexes) if prune else indexes
Return index percentages for a given axis and baseline. The index values represent the ratio of the percentages to the corresponding baseline values, expressed as a percentage. The baseline values are the univariate percentages of the corresponding variable.
entailment
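The core of `index_table` in isolation, with toy proportions and a baseline reshaped for broadcasting as in the code above:

```python
import numpy as np

proportions = np.array([[0.25, 0.40],
                        [0.75, 0.60]])
baseline = np.array([0.30, 0.70])[:, None]  # reshaped as in the code above
print(np.round(proportions / baseline * 100, 1))
# [[ 83.3 133.3]
#  [107.1  85.7]]
```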
def labels(self, hs_dims=None, prune=False): """Get labels for the cube slice, and perform pruning by slice.""" if self.ca_as_0th: labels = self._cube.labels(include_transforms_for_dims=hs_dims)[1:] else: labels = self._cube.labels(include_transforms_for_dims=hs_dims)[-2:] if not prune: return labels def prune_dimension_labels(labels, prune_indices): """Get pruned labels for single dimension, based on prune indices.""" labels = [label for label, prune in zip(labels, prune_indices) if not prune] return labels labels = [ prune_dimension_labels(dim_labels, dim_prune_inds) for dim_labels, dim_prune_inds in zip(labels, self._prune_indices(hs_dims)) ] return labels
Get labels for the cube slice, and perform pruning by slice.
entailment
def margin( self, axis=None, weighted=True, include_missing=False, include_transforms_for_dims=None, prune=False, include_mr_cat=False, ): """Return ndarray representing slice margin across selected axis. A margin (or basis) can be calculated for a contingency table, provided that the dimensions of the desired directions are marginable. The dimensions are marginable if they represent mutually exclusive data, such as true categorical data. For array types the items dimensions are not marginable. Requesting a margin across these dimensions (e.g. slice.margin(axis=0) for a categorical array cube slice) will produce an error. For multiple response slices, the implicit convention is that the provided direction scales to the selections dimension of the slice. These cases produce meaningful data, but of a slightly different shape (e.g. slice.margin(0) for a MR x CAT slice will produce 2D ndarray (variable dimensions are never collapsed!)). :param axis: Axis across which to sum. Can be 0 (columns margin), 1 (rows margin) and None (table margin). If requested across variables dimension (e.g. requesting 0 margin for CA array) it will produce an error. :param weighted: Weighted or unweighted counts. :param include_missing: Include missing categories or not. :param include_transforms_for_dims: Indices of dimensions for which to include transformations :param prune: Perform pruning based on unweighted counts. :returns: (weighted or unweighted counts) summed across provided axis. For multiple response types, items dimensions are not collapsed. """ axis = self._calculate_correct_axis_for_cube(axis) hs_dims = self._hs_dims_for_cube(include_transforms_for_dims) margin = self._cube.margin( axis=axis, weighted=weighted, include_missing=include_missing, include_transforms_for_dims=hs_dims, prune=prune, include_mr_cat=include_mr_cat, ) return self._extract_slice_result_from_cube(margin)
Return ndarray representing slice margin across selected axis. A margin (or basis) can be calculated for a contingency table, provided that the dimensions of the desired directions are marginable. The dimensions are marginable if they represent mutually exclusive data, such as true categorical data. For array types the items dimensions are not marginable. Requesting a margin across these dimensions (e.g. slice.margin(axis=0) for a categorical array cube slice) will produce an error. For multiple response slices, the implicit convention is that the provided direction scales to the selections dimension of the slice. These cases produce meaningful data, but of a slightly different shape (e.g. slice.margin(0) for a MR x CAT slice will produce 2D ndarray (variable dimensions are never collapsed!)). :param axis: Axis across which to sum. Can be 0 (columns margin), 1 (rows margin) and None (table margin). If requested across variables dimension (e.g. requesting 0 margin for CA array) it will produce an error. :param weighted: Weighted or unweighted counts. :param include_missing: Include missing categories or not. :param include_transforms_for_dims: Indices of dimensions for which to include transformations :param prune: Perform pruning based on unweighted counts. :returns: (weighted or unweighted counts) summed across provided axis. For multiple response types, items dimensions are not collapsed.
entailment
def min_base_size_mask(self, size, hs_dims=None, prune=False): """Returns MinBaseSizeMask object with correct row, col and table masks. The returned object stores the necessary information about the base size, as well as about the base values. It can create corresponding masks in the row, column, and table directions, based on the corresponding base values (the values of the unweighted margins). Usage: >>> cube_slice = CrunchCube(response).slices[0] # obtain a valid cube slice >>> cube_slice.min_base_size_mask(30).row_mask >>> cube_slice.min_base_size_mask(50).column_mask >>> cube_slice.min_base_size_mask(22).table_mask """ return MinBaseSizeMask(self, size, hs_dims=hs_dims, prune=prune)
Returns MinBaseSizeMask object with correct row, col and table masks. The returned object stores the necessary information about the base size, as well as about the base values. It can create corresponding masks in the row, column, and table directions, based on the corresponding base values (the values of the unweighted margins). Usage: >>> cube_slice = CrunchCube(response).slices[0] # obtain a valid cube slice >>> cube_slice.min_base_size_mask(30).row_mask >>> cube_slice.min_base_size_mask(50).column_mask >>> cube_slice.min_base_size_mask(22).table_mask
entailment
def mr_dim_ind(self): """Get the correct index of the MR dimension in the cube slice.""" mr_dim_ind = self._cube.mr_dim_ind if self._cube.ndim == 3: if isinstance(mr_dim_ind, int): if mr_dim_ind == 0: # If only the 0th dimension of a 3D is an MR, the slices # don't actually have the MR... Thus return None. return None return mr_dim_ind - 1 elif isinstance(mr_dim_ind, tuple): # If MR dimension index is a tuple, that means that the cube # (only a 3D one if it reached this path) has 2 MR dimensions. # If any of those is the 0th dimension, we don't need to include # it in the slice dimensions (because the slice doesn't see the tab # that it's on). If it's the 1st and 2nd dimensions, then subtract 1 # from those, and present them as the 0th and 1st dimensions of the # slice. This can happen e.g. in a CAT x MR x MR cube (which # renders MR x MR slices). mr_dim_ind = tuple(i - 1 for i in mr_dim_ind if i) return mr_dim_ind if len(mr_dim_ind) > 1 else mr_dim_ind[0] return mr_dim_ind
Get the correct index of the MR dimension in the cube slice.
entailment
def scale_means(self, hs_dims=None, prune=False): """Return list of column and row scaled means for this slice. If a row/col doesn't have numerical values, return None for the corresponding dimension. If a slice has only one dimension, return only the column scaled mean (as a numpy array). If both row and col scaled means are present, return them as two numpy arrays inside of a list. """ scale_means = self._cube.scale_means(hs_dims, prune) if self.ca_as_0th: # If slice is used as 0th CA, then we need to observe the 1st dimension, # because the 0th dimension is CA items, which is only used for slicing # (and thus doesn't have numerical values, and also doesn't constitute any # dimension of the actual crosstabs that will be created in this case). scale_means = scale_means[0][-1] if scale_means is None: return [None] return [scale_means[self._index]] return scale_means[self._index]
Return list of column and row scaled means for this slice. If a row/col doesn't have numerical values, return None for the corresponding dimension. If a slice has only one dimension, return only the column scaled mean (as a numpy array). If both row and col scaled means are present, return them as two numpy arrays inside of a list.
entailment
def table_name(self): """Get slice name. In case of 2D return cube name. In case of 3D, return the combination of the cube name with the label of the corresponding slice (nth label of the 0th dimension). """ if self._cube.ndim < 3 and not self.ca_as_0th: return None title = self._cube.name table_name = self._cube.labels()[0][self._index] return "%s: %s" % (title, table_name)
Get slice name. In case of 2D return cube name. In case of 3D, return the combination of the cube name with the label of the corresponding slice (nth label of the 0th dimension).
entailment
def wishart_pairwise_pvals(self, axis=0): """Return square symmetric matrix of pairwise column-comparison p-values. Square, symmetric matrix along *axis* of pairwise p-values for the null hypothesis that col[i] = col[j] for each pair of columns. *axis* (int): axis along which to perform comparison. Only columns (0) are implemented currently. """ if axis != 0: raise NotImplementedError("Pairwise comparison only implemented for columns") return WishartPairwiseSignificance.pvals(self, axis=axis)
Return square symmetric matrix of pairwise column-comparison p-values. Square, symmetric matrix along *axis* of pairwise p-values for the null hypothesis that col[i] = col[j] for each pair of columns. *axis* (int): axis along which to perform comparison. Only columns (0) are implemented currently.
entailment
def pvals(self, weighted=True, prune=False, hs_dims=None): """Return 2D ndarray with calculated P values This function calculates statistically significant cells for categorical contingency tables under the null hypothesis that the row and column variables are independent (uncorrelated). The values are calculated for 2D tables only. :param weighted: Use weighted counts for zscores :param prune: Prune based on unweighted counts :param hs_dims: Include headers and subtotals (as NaN values) :returns: 2 or 3 Dimensional ndarray, representing the p-values for each cell of the table-like representation of the crunch cube. """ stats = self.zscore(weighted=weighted, prune=prune, hs_dims=hs_dims) pvals = 2 * (1 - norm.cdf(np.abs(stats))) return self._apply_pruning_mask(pvals, hs_dims) if prune else pvals
Return 2D ndarray with calculated P values This function calculates statistically significant cells for categorical contingency tables under the null hypothesis that the row and column variables are independent (uncorrelated). The values are calculated for 2D tables only. :param weighted: Use weighted counts for zscores :param prune: Prune based on unweighted counts :param hs_dims: Include headers and subtotals (as NaN values) :returns: 2 or 3 Dimensional ndarray, representing the p-values for each cell of the table-like representation of the crunch cube.
entailment
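The same two-tailed transform as in `pvals` above, applied to a few familiar z-scores:

```python
import numpy as np
from scipy.stats import norm

stats = np.array([[0.0, 1.96], [-2.58, 0.5]])
# Two-tailed p-value under the standard Normal null.
pvals = 2 * (1 - norm.cdf(np.abs(stats)))
print(np.round(pvals, 3))
# [[1.    0.05 ]
#  [0.01  0.617]]
```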
def zscore(self, weighted=True, prune=False, hs_dims=None): """Return ndarray with slice's standardized residuals (Z-scores). (Only applicable to 2D contingency tables.) The Z-score or standardized residual is the difference between observed and expected cell counts if row and column variables were independent, divided by the square root of the residual cell variance. They are assumed to come from a N(0,1) or standard Normal distribution, and can show which cells deviate from the null hypothesis that the row and column variables are uncorrelated. See also *pairwise_chisq*, *pairwise_pvals* for a pairwise column- or row-based test of statistical significance. :param weighted: Use weighted counts for zscores :param prune: Prune based on unweighted counts :param hs_dims: Include headers and subtotals (as NaN values) :returns zscore: ndarray representing cell standardized residuals (Z) """ counts = self.as_array(weighted=weighted) total = self.margin(weighted=weighted) colsum = self.margin(axis=0, weighted=weighted) rowsum = self.margin(axis=1, weighted=weighted) zscore = self._calculate_std_res(counts, total, colsum, rowsum) if hs_dims: zscore = intersperse_hs_in_std_res(self, hs_dims, zscore) if prune: return self._apply_pruning_mask(zscore, hs_dims) return zscore
Return ndarray with slice's standardized residuals (Z-scores). (Only applicable to 2D contingency tables.) The Z-score or standardized residual is the difference between observed and expected cell counts if row and column variables were independent, divided by the square root of the residual cell variance. They are assumed to come from a N(0,1) or standard Normal distribution, and can show which cells deviate from the null hypothesis that the row and column variables are uncorrelated. See also *pairwise_chisq*, *pairwise_pvals* for a pairwise column- or row-based test of statistical significance. :param weighted: Use weighted counts for zscores :param prune: Prune based on unweighted counts :param hs_dims: Include headers and subtotals (as NaN values) :returns zscore: ndarray representing cell standardized residuals (Z)
entailment
def pairwise_indices(self, alpha=0.05, only_larger=True, hs_dims=None): """Indices of columns where p < alpha for column-comparison t-tests Returns an array of tuples of columns that are significant at p < alpha, from a series of pairwise t-tests. If *only_larger* is True (the default), only the indices of columns with significantly larger values are indicated; if False, indices on both sides of the test statistic are included. """ return PairwiseSignificance( self, alpha=alpha, only_larger=only_larger, hs_dims=hs_dims ).pairwise_indices
Indices of columns where p < alpha for column-comparison t-tests Returns an array of tuples of columns that are significant at p < alpha, from a series of pairwise t-tests. If *only_larger* is True (the default), only the indices of columns with significantly larger values are indicated; if False, indices on both sides of the test statistic are included.
entailment
def _array_type_std_res(self, counts, total, colsum, rowsum): """Return ndarray containing standard residuals for array values. The shape of the return value is the same as that of *counts*. Array variables require special processing because of the underlying math. Essentially, it boils down to the fact that the variable dimensions are mutually independent, and standard residuals are calculated for each of them separately, and then stacked together in the resulting array. """ if self.mr_dim_ind == 0: # --This is a special case where broadcasting cannot be # --automatically done. We need to "inflate" the single dimensional # --ndarrays, to be able to treat them as "columns" (essentially a # --Nx1 ndarray). This is needed for subsequent multiplication # --that needs to happen column wise (rowsum * colsum) / total. total = total[:, np.newaxis] rowsum = rowsum[:, np.newaxis] expected_counts = rowsum * colsum / total variance = rowsum * colsum * (total - rowsum) * (total - colsum) / total ** 3 return (counts - expected_counts) / np.sqrt(variance)
Return ndarray containing standard residuals for array values. The shape of the return value is the same as that of *counts*. Array variables require special processing because of the underlying math. Essentially, it boils down to the fact that the variable dimensions are mutually independent, and standard residuals are calculated for each of them separately, and then stacked together in the resulting array.
entailment
def _calculate_std_res(self, counts, total, colsum, rowsum): """Return ndarray containing standard residuals. The shape of the return value is the same as that of *counts*. """ if set(self.dim_types) & DT.ARRAY_TYPES: # ---has-mr-or-ca--- return self._array_type_std_res(counts, total, colsum, rowsum) return self._scalar_type_std_res(counts, total, colsum, rowsum)
Return ndarray containing standard residuals. The shape of the return value is the same as that of *counts*.
entailment
def _calculate_correct_axis_for_cube(self, axis): """Return correct axis for cube, based on ndim. If cube has 3 dimensions, increase axis by 1. This will translate the default 0 (cols direction) and 1 (rows direction) to actual 1 (cols direction) and 2 (rows direction). This is needed because the 0th dimension of the 3D cube is only used to slice across. The actual margins need to be calculated for each slice separately, and since they're implemented as an ndarray, the direction needs to be increased by one. For the value of `None`, don't modify the axis parameter. :param axis: 0, 1, or None. Axis that will be passed to self._cube methods. If the cube is 3D, the axis is typically increased by 1, to represent correct measure direction. :returns: int or None, representing the updated axis to pass to cube """ if self._cube.ndim < 3: if self.ca_as_0th and axis is None: # Special case for CA slices (in multitables). In this case, # we need to calculate a measurement across CA categories # dimension (and not across items, because it's not # allowed). The value for the axis parameter of None, would # imply both cat and items dimensions, and we don't want that. return 1 return axis # Expected usage of the 'axis' parameter from CubeSlice is 0, 1, or # None. CrunchCube handles all other logic. The only 'smart' thing # about the handling here, is that the axes are increased for 3D cubes. # This way the 3Dness is hidden from the user and he still sees 2D # crosstabs, with col and row axes (0 and 1), which are transformed to # corresponding numbers in case of 3D cubes (namely 1 and 2). In the # case of None, we need to analyze across all valid dimensions, and the # CrunchCube takes care of that (no need to update axis if it's None). # If the user provides a tuple, it's considered that he "knows" what # he's doing, and the axis argument is not updated in this case. if isinstance(axis, int): axis += 1 return axis
Return correct axis for cube, based on ndim. If cube has 3 dimensions, increase axis by 1. This will translate the default 0 (cols direction) and 1 (rows direction) to actual 1 (cols direction) and 2 (rows direction). This is needed because the 0th dimension of the 3D cube is only used to slice across. The actual margins need to be calculated for each slice separately, and since they're implemented as an ndarray, the direction needs to be increased by one. For the value of `None`, don't modify the axis parameter. :param axis: 0, 1, or None. Axis that will be passed to self._cube methods. If the cube is 3D, the axis is typically increased by 1, to represent correct measure direction. :returns: int or None, representing the updated axis to pass to cube
entailment
def _scalar_type_std_res(self, counts, total, colsum, rowsum): """Return ndarray containing standard residuals for category values. The shape of the return value is the same as that of *counts*. """ expected_counts = expected_freq(counts) residuals = counts - expected_counts variance = ( np.outer(rowsum, colsum) * np.outer(total - rowsum, total - colsum) / total ** 3 ) return residuals / np.sqrt(variance)
Return ndarray containing standard residuals for category values. The shape of the return value is the same as that of *counts*.
entailment
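A toy 2x2 check of the scalar residual formula above, assuming `expected_freq` is scipy's `scipy.stats.contingency.expected_freq` (the code above calls it without a visible import):

```python
import numpy as np
from scipy.stats.contingency import expected_freq

counts = np.array([[20.0, 30.0],
                   [30.0, 20.0]])
total = counts.sum()
rowsum = counts.sum(axis=1)
colsum = counts.sum(axis=0)

residuals = counts - expected_freq(counts)
variance = (
    np.outer(rowsum, colsum)
    * np.outer(total - rowsum, total - colsum)
    / total ** 3
)
print(residuals / np.sqrt(variance))
# [[-2.  2.]
#  [ 2. -2.]]
```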
def data(self): """list of mean numeric values of categorical responses.""" means = [] table = self._slice.as_array() products = self._inner_prods(table, self.values) for axis, product in enumerate(products): if product is None: means.append(product) continue # Calculate means valid_indices = self._valid_indices(axis) num = np.sum(product[valid_indices], axis) den = np.sum(table[valid_indices], axis) mean = num / den if not isinstance(mean, np.ndarray): mean = np.array([mean]) means.append(mean) return means
list of mean numeric values of categorical responses.
entailment
def margin(self, axis): """Return marginal value of the current slice scaled means. This value is the same as what you would get from a single variable (constituting a 2D cube/slice) if the "non-missing" filter of the opposite variable were applied. This behavior is consistent with what is visible in the front-end client. """ if self._slice.ndim < 2: msg = ( "Scale Means marginal cannot be calculated on 1D cubes, as " "the scale means already get reduced to a scalar value." ) raise ValueError(msg) dimension_index = 1 - axis margin = self._slice.margin(axis=axis) if len(margin.shape) > 1: index = [ 0 if d.dimension_type == DT.MR else slice(None) for d in self._slice.dimensions ] margin = margin[index] total = np.sum(margin) values = self.values[dimension_index] if values is None: return None return np.sum(values * margin) / total
Return marginal value of the current slice scaled means. This value is the same as what you would get from a single variable (constituting a 2D cube/slice) if the "non-missing" filter of the opposite variable were applied. This behavior is consistent with what is visible in the front-end client.
entailment
def values(self): """list of ndarray value-ids for each dimension in slice. The values for each dimension appear as an ndarray. None appears instead of the array for each dimension having only NaN values. """ return [ ( np.array(dim.numeric_values) if (dim.numeric_values and any(~np.isnan(dim.numeric_values))) else None ) for dim in self._slice.dimensions ]
list of ndarray value-ids for each dimension in slice. The values for each dimension appear as an ndarray. None appears instead of the array for each dimension having only NaN values.
entailment
def compress_pruned(table): """Compress table based on pruning mask. Only the rows/cols in which all of the elements are masked need to be pruned. """ if not isinstance(table, np.ma.core.MaskedArray): return table if table.ndim == 0: return table.data if table.ndim == 1: return np.ma.compressed(table) row_inds = ~table.mask.all(axis=1) col_inds = ~table.mask.all(axis=0) table = table[row_inds, :][:, col_inds] if table.dtype == float and table.mask.any(): table[table.mask] = np.nan return table
Compress table based on pruning mask. Only the rows/cols in which all of the elements are masked need to be pruned.
entailment
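A minimal sketch of the pruning steps above on a hand-built masked array: the fully masked middle column is dropped and the remaining masked cell becomes NaN:

```python
import numpy as np

table = np.ma.masked_array(
    [[1.0, 2.0, 3.0],
     [4.0, 5.0, 6.0]],
    mask=[[False, True, False],
          [False, True, True]],
)
# Keep only rows/cols that are not entirely masked, as in compress_pruned.
row_inds = ~table.mask.all(axis=1)
col_inds = ~table.mask.all(axis=0)
pruned = table[row_inds, :][:, col_inds]
if pruned.dtype == float and pruned.mask.any():
    pruned[pruned.mask] = np.nan
print(pruned)
# [[1.0 3.0]
#  [4.0 nan]]
```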