| signature | body | docstring | id |
|---|---|---|---|
def __init__(self, headers={}): | self._headers = headers<EOL> | Initializer.
:param dict headers: A dictionary with custom headers which will be sent
in every request to the Transifex API. | f12474:c2:m0 |
def get_keywords(): | <EOL>git_refnames = "<STR_LIT>"<EOL>git_full = "<STR_LIT>"<EOL>git_date = "<STR_LIT>"<EOL>keywords = {"<STR_LIT>": git_refnames, "<STR_LIT>": git_full, "<STR_LIT:date>": git_date}<EOL>return keywords<EOL> | Get the keywords needed to look up the version information. | f12477:m0 |
def get_config(): | <EOL>cfg = VersioneerConfig()<EOL>cfg.VCS = "<STR_LIT>"<EOL>cfg.style = "<STR_LIT>"<EOL>cfg.tag_prefix = "<STR_LIT:v>"<EOL>cfg.parentdir_prefix = "<STR_LIT:None>"<EOL>cfg.versionfile_source = "<STR_LIT>"<EOL>cfg.verbose = False<EOL>return cfg<EOL> | Create, populate and return the VersioneerConfig() object. | f12477:m1 |
def register_vcs_handler(vcs, method): | def decorate(f):<EOL><INDENT>"""<STR_LIT>"""<EOL>if vcs not in HANDLERS:<EOL><INDENT>HANDLERS[vcs] = {}<EOL><DEDENT>HANDLERS[vcs][method] = f<EOL>return f<EOL><DEDENT>return decorate<EOL> | Decorator to mark a method as the handler for a particular VCS. | f12477:m2 |
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,<EOL>env=None): | assert isinstance(commands, list)<EOL>p = None<EOL>for c in commands:<EOL><INDENT>try:<EOL><INDENT>dispcmd = str([c] + args)<EOL>p = subprocess.Popen([c] + args, cwd=cwd, env=env,<EOL>stdout=subprocess.PIPE,<EOL>stderr=(subprocess.PIPE if hide_stderr<EOL>else None))<EOL>break<EOL><DEDENT>except EnvironmentError:<EOL><INDENT>e = sys.exc_info()[<NUM_LIT:1>]<EOL>if e.errno == errno.ENOENT:<EOL><INDENT>continue<EOL><DEDENT>if verbose:<EOL><INDENT>print("<STR_LIT>" % dispcmd)<EOL>print(e)<EOL><DEDENT>return None, None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if verbose:<EOL><INDENT>print("<STR_LIT>" % (commands,))<EOL><DEDENT>return None, None<EOL><DEDENT>stdout = p.communicate()[<NUM_LIT:0>].strip()<EOL>if sys.version_info[<NUM_LIT:0>] >= <NUM_LIT:3>:<EOL><INDENT>stdout = stdout.decode()<EOL><DEDENT>if p.returncode != <NUM_LIT:0>:<EOL><INDENT>if verbose:<EOL><INDENT>print("<STR_LIT>" % dispcmd)<EOL>print("<STR_LIT>" % stdout)<EOL><DEDENT>return None, p.returncode<EOL><DEDENT>return stdout, p.returncode<EOL> | Call the given command(s). | f12477:m3 |
def versions_from_parentdir(parentdir_prefix, root, verbose): | rootdirs = []<EOL>for i in range(<NUM_LIT:3>):<EOL><INDENT>dirname = os.path.basename(root)<EOL>if dirname.startswith(parentdir_prefix):<EOL><INDENT>return {"<STR_LIT:version>": dirname[len(parentdir_prefix):],<EOL>"<STR_LIT>": None,<EOL>"<STR_LIT>": False, "<STR_LIT:error>": None, "<STR_LIT:date>": None}<EOL><DEDENT>else:<EOL><INDENT>rootdirs.append(root)<EOL>root = os.path.dirname(root) <EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>print("<STR_LIT>" %<EOL>(str(rootdirs), parentdir_prefix))<EOL><DEDENT>raise NotThisMethod("<STR_LIT>")<EOL> | Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory | f12477:m4 |
@register_vcs_handler("<STR_LIT>", "<STR_LIT>")<EOL>def git_get_keywords(versionfile_abs): | <EOL>keywords = {}<EOL>try:<EOL><INDENT>f = open(versionfile_abs, "<STR_LIT:r>")<EOL>for line in f.readlines():<EOL><INDENT>if line.strip().startswith("<STR_LIT>"):<EOL><INDENT>mo = re.search(r'<STR_LIT>', line)<EOL>if mo:<EOL><INDENT>keywords["<STR_LIT>"] = mo.group(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>if line.strip().startswith("<STR_LIT>"):<EOL><INDENT>mo = re.search(r'<STR_LIT>', line)<EOL>if mo:<EOL><INDENT>keywords["<STR_LIT>"] = mo.group(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>if line.strip().startswith("<STR_LIT>"):<EOL><INDENT>mo = re.search(r'<STR_LIT>', line)<EOL>if mo:<EOL><INDENT>keywords["<STR_LIT:date>"] = mo.group(<NUM_LIT:1>)<EOL><DEDENT><DEDENT><DEDENT>f.close()<EOL><DEDENT>except EnvironmentError:<EOL><INDENT>pass<EOL><DEDENT>return keywords<EOL> | Extract version information from the given file. | f12477:m5 |
@register_vcs_handler("<STR_LIT>", "<STR_LIT>")<EOL>def git_versions_from_keywords(keywords, tag_prefix, verbose): | if not keywords:<EOL><INDENT>raise NotThisMethod("<STR_LIT>")<EOL><DEDENT>date = keywords.get("<STR_LIT:date>")<EOL>if date is not None:<EOL><INDENT>date = date.strip().replace("<STR_LIT:U+0020>", "<STR_LIT:T>", <NUM_LIT:1>).replace("<STR_LIT:U+0020>", "<STR_LIT>", <NUM_LIT:1>)<EOL><DEDENT>refnames = keywords["<STR_LIT>"].strip()<EOL>if refnames.startswith("<STR_LIT>"):<EOL><INDENT>if verbose:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT>raise NotThisMethod("<STR_LIT>")<EOL><DEDENT>refs = set([r.strip() for r in refnames.strip("<STR_LIT>").split("<STR_LIT:U+002C>")])<EOL>TAG = "<STR_LIT>"<EOL>tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])<EOL>if not tags:<EOL><INDENT>tags = set([r for r in refs if re.search(r'<STR_LIT>', r)])<EOL>if verbose:<EOL><INDENT>print("<STR_LIT>" % "<STR_LIT:U+002C>".join(refs - tags))<EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>print("<STR_LIT>" % "<STR_LIT:U+002C>".join(sorted(tags)))<EOL><DEDENT>for ref in sorted(tags):<EOL><INDENT>if ref.startswith(tag_prefix):<EOL><INDENT>r = ref[len(tag_prefix):]<EOL>if verbose:<EOL><INDENT>print("<STR_LIT>" % r)<EOL><DEDENT>return {"<STR_LIT:version>": r,<EOL>"<STR_LIT>": keywords["<STR_LIT>"].strip(),<EOL>"<STR_LIT>": False, "<STR_LIT:error>": None,<EOL>"<STR_LIT:date>": date}<EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT>return {"<STR_LIT:version>": "<STR_LIT>",<EOL>"<STR_LIT>": keywords["<STR_LIT>"].strip(),<EOL>"<STR_LIT>": False, "<STR_LIT:error>": "<STR_LIT>", "<STR_LIT:date>": None}<EOL> | Get version information from git keywords. | f12477:m6 |
@register_vcs_handler("<STR_LIT>", "<STR_LIT>")<EOL>def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): | GITS = ["<STR_LIT>"]<EOL>if sys.platform == "<STR_LIT:win32>":<EOL><INDENT>GITS = ["<STR_LIT>", "<STR_LIT>"]<EOL><DEDENT>out, rc = run_command(GITS, ["<STR_LIT>", "<STR_LIT>"], cwd=root,<EOL>hide_stderr=True)<EOL>if rc != <NUM_LIT:0>:<EOL><INDENT>if verbose:<EOL><INDENT>print("<STR_LIT>" % root)<EOL><DEDENT>raise NotThisMethod("<STR_LIT>")<EOL><DEDENT>describe_out, rc = run_command(GITS, ["<STR_LIT>", "<STR_LIT>", "<STR_LIT>",<EOL>"<STR_LIT>", "<STR_LIT>",<EOL>"<STR_LIT>", "<STR_LIT>" % tag_prefix],<EOL>cwd=root)<EOL>if describe_out is None:<EOL><INDENT>raise NotThisMethod("<STR_LIT>")<EOL><DEDENT>describe_out = describe_out.strip()<EOL>full_out, rc = run_command(GITS, ["<STR_LIT>", "<STR_LIT>"], cwd=root)<EOL>if full_out is None:<EOL><INDENT>raise NotThisMethod("<STR_LIT>")<EOL><DEDENT>full_out = full_out.strip()<EOL>pieces = {}<EOL>pieces["<STR_LIT>"] = full_out<EOL>pieces["<STR_LIT>"] = full_out[:<NUM_LIT:7>] <EOL>pieces["<STR_LIT:error>"] = None<EOL>git_describe = describe_out<EOL>dirty = git_describe.endswith("<STR_LIT>")<EOL>pieces["<STR_LIT>"] = dirty<EOL>if dirty:<EOL><INDENT>git_describe = git_describe[:git_describe.rindex("<STR_LIT>")]<EOL><DEDENT>if "<STR_LIT:->" in git_describe:<EOL><INDENT>mo = re.search(r'<STR_LIT>', git_describe)<EOL>if not mo:<EOL><INDENT>pieces["<STR_LIT:error>"] = ("<STR_LIT>"<EOL>% describe_out)<EOL>return pieces<EOL><DEDENT>full_tag = mo.group(<NUM_LIT:1>)<EOL>if not full_tag.startswith(tag_prefix):<EOL><INDENT>if verbose:<EOL><INDENT>fmt = "<STR_LIT>"<EOL>print(fmt % (full_tag, tag_prefix))<EOL><DEDENT>pieces["<STR_LIT:error>"] = ("<STR_LIT>"<EOL>% (full_tag, tag_prefix))<EOL>return pieces<EOL><DEDENT>pieces["<STR_LIT>"] = full_tag[len(tag_prefix):]<EOL>pieces["<STR_LIT>"] = int(mo.group(<NUM_LIT:2>))<EOL>pieces["<STR_LIT>"] = mo.group(<NUM_LIT:3>)<EOL><DEDENT>else:<EOL><INDENT>pieces["<STR_LIT>"] = None<EOL>count_out, rc = run_command(GITS, ["<STR_LIT>", "<STR_LIT>", "<STR_LIT>"],<EOL>cwd=root)<EOL>pieces["<STR_LIT>"] = int(count_out) <EOL><DEDENT>date = run_command(GITS, ["<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>"],<EOL>cwd=root)[<NUM_LIT:0>].strip()<EOL>pieces["<STR_LIT:date>"] = date.strip().replace("<STR_LIT:U+0020>", "<STR_LIT:T>", <NUM_LIT:1>).replace("<STR_LIT:U+0020>", "<STR_LIT>", <NUM_LIT:1>)<EOL>return pieces<EOL> | Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree. | f12477:m7 |
def plus_or_dot(pieces): | if "<STR_LIT:+>" in pieces.get("<STR_LIT>", "<STR_LIT>"):<EOL><INDENT>return "<STR_LIT:.>"<EOL><DEDENT>return "<STR_LIT:+>"<EOL> | Return a + if we don't already have one, else return a . | f12477:m8 |
def render_pep440(pieces): | if pieces["<STR_LIT>"]:<EOL><INDENT>rendered = pieces["<STR_LIT>"]<EOL>if pieces["<STR_LIT>"] or pieces["<STR_LIT>"]:<EOL><INDENT>rendered += plus_or_dot(pieces)<EOL>rendered += "<STR_LIT>" % (pieces["<STR_LIT>"], pieces["<STR_LIT>"])<EOL>if pieces["<STR_LIT>"]:<EOL><INDENT>rendered += "<STR_LIT>"<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>rendered = "<STR_LIT>" % (pieces["<STR_LIT>"],<EOL>pieces["<STR_LIT>"])<EOL>if pieces["<STR_LIT>"]:<EOL><INDENT>rendered += "<STR_LIT>"<EOL><DEDENT><DEDENT>return rendered<EOL> | Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] | f12477:m9 |
def render_pep440_pre(pieces): | if pieces["<STR_LIT>"]:<EOL><INDENT>rendered = pieces["<STR_LIT>"]<EOL>if pieces["<STR_LIT>"]:<EOL><INDENT>rendered += "<STR_LIT>" % pieces["<STR_LIT>"]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rendered = "<STR_LIT>" % pieces["<STR_LIT>"]<EOL><DEDENT>return rendered<EOL> | TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE | f12477:m10 |
def render_pep440_post(pieces): | if pieces["<STR_LIT>"]:<EOL><INDENT>rendered = pieces["<STR_LIT>"]<EOL>if pieces["<STR_LIT>"] or pieces["<STR_LIT>"]:<EOL><INDENT>rendered += "<STR_LIT>" % pieces["<STR_LIT>"]<EOL>if pieces["<STR_LIT>"]:<EOL><INDENT>rendered += "<STR_LIT>"<EOL><DEDENT>rendered += plus_or_dot(pieces)<EOL>rendered += "<STR_LIT>" % pieces["<STR_LIT>"]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rendered = "<STR_LIT>" % pieces["<STR_LIT>"]<EOL>if pieces["<STR_LIT>"]:<EOL><INDENT>rendered += "<STR_LIT>"<EOL><DEDENT>rendered += "<STR_LIT>" % pieces["<STR_LIT>"]<EOL><DEDENT>return rendered<EOL> | TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0] | f12477:m11 |
def render_pep440_old(pieces): | if pieces["<STR_LIT>"]:<EOL><INDENT>rendered = pieces["<STR_LIT>"]<EOL>if pieces["<STR_LIT>"] or pieces["<STR_LIT>"]:<EOL><INDENT>rendered += "<STR_LIT>" % pieces["<STR_LIT>"]<EOL>if pieces["<STR_LIT>"]:<EOL><INDENT>rendered += "<STR_LIT>"<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>rendered = "<STR_LIT>" % pieces["<STR_LIT>"]<EOL>if pieces["<STR_LIT>"]:<EOL><INDENT>rendered += "<STR_LIT>"<EOL><DEDENT><DEDENT>return rendered<EOL> | TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0] | f12477:m12 |
def render_git_describe(pieces): | if pieces["<STR_LIT>"]:<EOL><INDENT>rendered = pieces["<STR_LIT>"]<EOL>if pieces["<STR_LIT>"]:<EOL><INDENT>rendered += "<STR_LIT>" % (pieces["<STR_LIT>"], pieces["<STR_LIT>"])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rendered = pieces["<STR_LIT>"]<EOL><DEDENT>if pieces["<STR_LIT>"]:<EOL><INDENT>rendered += "<STR_LIT>"<EOL><DEDENT>return rendered<EOL> | TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix) | f12477:m13 |
def render_git_describe_long(pieces): | if pieces["<STR_LIT>"]:<EOL><INDENT>rendered = pieces["<STR_LIT>"]<EOL>rendered += "<STR_LIT>" % (pieces["<STR_LIT>"], pieces["<STR_LIT>"])<EOL><DEDENT>else:<EOL><INDENT>rendered = pieces["<STR_LIT>"]<EOL><DEDENT>if pieces["<STR_LIT>"]:<EOL><INDENT>rendered += "<STR_LIT>"<EOL><DEDENT>return rendered<EOL> | TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix) | f12477:m14 |
def render(pieces, style): | if pieces["<STR_LIT:error>"]:<EOL><INDENT>return {"<STR_LIT:version>": "<STR_LIT>",<EOL>"<STR_LIT>": pieces.get("<STR_LIT>"),<EOL>"<STR_LIT>": None,<EOL>"<STR_LIT:error>": pieces["<STR_LIT:error>"],<EOL>"<STR_LIT:date>": None}<EOL><DEDENT>if not style or style == "<STR_LIT:default>":<EOL><INDENT>style = "<STR_LIT>" <EOL><DEDENT>if style == "<STR_LIT>":<EOL><INDENT>rendered = render_pep440(pieces)<EOL><DEDENT>elif style == "<STR_LIT>":<EOL><INDENT>rendered = render_pep440_pre(pieces)<EOL><DEDENT>elif style == "<STR_LIT>":<EOL><INDENT>rendered = render_pep440_post(pieces)<EOL><DEDENT>elif style == "<STR_LIT>":<EOL><INDENT>rendered = render_pep440_old(pieces)<EOL><DEDENT>elif style == "<STR_LIT>":<EOL><INDENT>rendered = render_git_describe(pieces)<EOL><DEDENT>elif style == "<STR_LIT>":<EOL><INDENT>rendered = render_git_describe_long(pieces)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError("<STR_LIT>" % style)<EOL><DEDENT>return {"<STR_LIT:version>": rendered, "<STR_LIT>": pieces["<STR_LIT>"],<EOL>"<STR_LIT>": pieces["<STR_LIT>"], "<STR_LIT:error>": None,<EOL>"<STR_LIT:date>": pieces.get("<STR_LIT:date>")}<EOL> | Render the given version pieces into the requested style. | f12477:m15 |
def get_versions(): | <EOL>cfg = get_config()<EOL>verbose = cfg.verbose<EOL>try:<EOL><INDENT>return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,<EOL>verbose)<EOL><DEDENT>except NotThisMethod:<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>root = os.path.realpath(__file__)<EOL>for i in cfg.versionfile_source.split('<STR_LIT:/>'):<EOL><INDENT>root = os.path.dirname(root)<EOL><DEDENT><DEDENT>except NameError:<EOL><INDENT>return {"<STR_LIT:version>": "<STR_LIT>", "<STR_LIT>": None,<EOL>"<STR_LIT>": None,<EOL>"<STR_LIT:error>": "<STR_LIT>",<EOL>"<STR_LIT:date>": None}<EOL><DEDENT>try:<EOL><INDENT>pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)<EOL>return render(pieces, cfg.style)<EOL><DEDENT>except NotThisMethod:<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>if cfg.parentdir_prefix:<EOL><INDENT>return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)<EOL><DEDENT><DEDENT>except NotThisMethod:<EOL><INDENT>pass<EOL><DEDENT>return {"<STR_LIT:version>": "<STR_LIT>", "<STR_LIT>": None,<EOL>"<STR_LIT>": None,<EOL>"<STR_LIT:error>": "<STR_LIT>", "<STR_LIT:date>": None}<EOL> | Get version information or return default if unable to do so. | f12477:m16 |
def __getattr__(self, name): | res = self.responsibilities.get(name, None)<EOL>if res is None:<EOL><INDENT>msg = "<STR_LIT>" % name<EOL>_logger.warning(msg)<EOL><DEDENT>return res<EOL> | Return the class for the various responsibilities. | f12480:c0:m0 |
def setup(self, responsibilities): | self.responsibilities.update(responsibilities)<EOL> | Initial setup of the responsibilities.
Allows overriding the defaults and/or adding new ones.
Args:
`responsibilities`: A dictionary of responsibilities to define. | f12480:c0:m1 |
def remove(self, key): | try:<EOL><INDENT>del self.responsibilities[key]<EOL>return True<EOL><DEDENT>except KeyError:<EOL><INDENT>return False<EOL><DEDENT> | Remove the responsibility with the given key.
Args:
`key`: The name of the responsibility to remove
Returns:
True if found, False otherwise | f12480:c0:m2 |
def retrieve_content(self): | path = self._construct_path_to_source_content()<EOL>res = self._http.get(path)<EOL>self._populated_fields['<STR_LIT:content>'] = res['<STR_LIT:content>']<EOL>return res['<STR_LIT:content>']<EOL> | Retrieve the content of a resource. | f12482:c0:m0 |
def _update(self, **kwargs): | if '<STR_LIT:content>' in kwargs:<EOL><INDENT>content = kwargs.pop('<STR_LIT:content>')<EOL>path = self._construct_path_to_source_content()<EOL>self._http.put(path, json.dumps({'<STR_LIT:content>': content}))<EOL><DEDENT>super(Resource, self)._update(**kwargs)<EOL> | Use separate URL for updating the source file. | f12482:c0:m1 |
def _construct_path_to_source_content(self): | template = self.get_path_to_source_content_template() <EOL>return template % self.get_url_parameters()<EOL> | Construct the path to the source content for an actual resource. | f12482:c0:m2 |
def get_path_to_source_content_template(self): | return self._join_subpaths(self._prefix, self._path_to_source_language)<EOL> | Return the path to the source language content. | f12482:c0:m3 |
def setup_registry(): | conn = HttpRequest('<STR_LIT>')<EOL>registry.setup({'<STR_LIT>': conn})<EOL> | Initializes the registry and sets up an `http_handler`. | f12487:m0 |
def get_mock_response(status_code, content): | if not registry.http_handler:<EOL><INDENT>setup_registry()<EOL><DEDENT>return TestResponse(status_code, content.encode('<STR_LIT:utf-8>'))<EOL> | Return a test response with the given parameters.
If the registry hasn't been set up yet, it sets it up
and adds an http_handler entry.
:param int status_code: the HTTP status code of the response
:param str content: the content of the response
:return: the response object for testing
:rtype: TestResponse | f12487:m1 |
def clean_registry(): | registry.remove('<STR_LIT>')<EOL> | Run the test and then remove the `http_handler` entry from
the registry. | f12487:m2 |
def _create(self, **kwargs): | path = self._construct_path_to_collection()<EOL>for field in self.writable_fields:<EOL><INDENT>try:<EOL><INDENT>value = getattr(self, field)<EOL>kwargs[field] = value<EOL><DEDENT>except AttributeError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return self._http.put(path, json.dumps(kwargs))<EOL> | Create the translation of a resource.
The _create function differs from the one in the BaseModel
in the HTTP request that takes place in the end. In the Translation
object's case, it needs to be `PUT`, while in the BaseModel it is `POST`. | f12488:c0:m0 |
@classmethod<EOL><INDENT>def get(cls, **kwargs):<DEDENT> | fields = {}<EOL>for field in cls.url_fields:<EOL><INDENT>value = kwargs.pop(field, None)<EOL>if value is None:<EOL><INDENT>cls._handle_wrong_field(field, ATTR_TYPE_URL)<EOL><DEDENT>fields[field] = value<EOL><DEDENT>model = cls(**fields)<EOL>model._populate(**kwargs)<EOL>return model<EOL> | Retrieve an object by making a GET request to Transifex.
Each value in `kwargs` that corresponds to a field
defined in `self.url_fields` will be used in the URL path
of the request, so that a particular entry of this model
is identified and retrieved.
Raises:
AttributeError: if not all values for parameters in `url_fields`
are passed as kwargs
txlib.http.exceptions.NotFoundError: if the object with these
attributes is not found on the remote server
txlib.http.exceptions.ServerError subclass: depending on
the particular server response
Example:
# Note: also catch exceptions
>>> obj = MyModel.get(attr1=value1, attr2=value2) | f12489:c0:m0 |
def __init__(self, prefix='<STR_LIT>', **url_values): | self._http = registry.http_handler<EOL>self._prefix = prefix<EOL>self._modified_fields = {}<EOL>self._populated_fields = {}<EOL>for field in url_values:<EOL><INDENT>if field in self.url_fields:<EOL><INDENT>setattr(self, field, url_values[field])<EOL><DEDENT>else:<EOL><INDENT>self._handle_wrong_field(field, ATTR_TYPE_URL)<EOL><DEDENT><DEDENT>self._is_initialized = True<EOL> | Constructor.
Initializes various variables, sets up the HTTP handler and
stores all values.
Args:
prefix: The prefix of the urls.
Raises:
AttributeError: if not all values for parameters in `url_fields`
are passed | f12489:c0:m1 |
def __getattr__(self, name, default=None): | if name in self._modified_fields:<EOL><INDENT>return self._modified_fields[name]<EOL><DEDENT>elif name in self._populated_fields:<EOL><INDENT>return self._populated_fields[name]<EOL><DEDENT>else:<EOL><INDENT>self._handle_wrong_field(name, ATTR_TYPE_READ)<EOL><DEDENT> | Return the value of the field with the given name.
Looks in `self._modified_fields` and `self._populated_fields`.
Raises:
AttributeError: if the requested attribute does not exist | f12489:c0:m2 |
def __setattr__(self, name, value): | <EOL>if ('<STR_LIT>' not in self.__dict__) or (name in self.__dict__):<EOL><INDENT>return super(BaseModel, self).__setattr__(name, value)<EOL><DEDENT>elif name in self.writable_fields:<EOL><INDENT>self._modified_fields[name] = value<EOL><DEDENT>else:<EOL><INDENT>self._handle_wrong_field(name, ATTR_TYPE_WRITE)<EOL><DEDENT> | Set the value of a field.
This method only allows certain attributes to be set:
a) Any attribute that is defined in `__init__()`
b) Any attribute found in `self.writable_fields`
For the rest it will raise an AttributeError.
For case (a), the attribute is saved directly on this object.
For case (b), the attribute is saved in `self._modified_fields`.
Raises:
AttributeError: if a given field is not included in
`self.writable_fields`. | f12489:c0:m3 |
def save(self, **fields): | for field in fields:<EOL><INDENT>if field in self.writable_fields:<EOL><INDENT>setattr(self, field, fields[field])<EOL><DEDENT>else:<EOL><INDENT>self._handle_wrong_field(field, ATTR_TYPE_WRITE)<EOL><DEDENT><DEDENT>if self._populated_fields:<EOL><INDENT>self._update(**self._modified_fields)<EOL><DEDENT>else:<EOL><INDENT>self._create(**self._modified_fields)<EOL><DEDENT> | Save the instance to the remote Transifex server.
If it was pre-populated, it updates the instance on the server,
otherwise it creates a new object.
An attempt will be made to save any values given in `fields`
on the object. The same goes for any other values already set
on the object via `model_instance.attr = value`.
Raises:
AttributeError: if a given field is not included in
`self.writable_fields`. | f12489:c0:m4 |
def delete(self): | self._delete()<EOL> | Delete the instance from the remote Transifex server. | f12489:c0:m5 |
def _populate(self, **kwargs): | self._populated_fields = self._get(**kwargs)<EOL> | Populate the instance with the values from the server. | f12489:c0:m6 |
def _get(self, **kwargs): | path = self._construct_path_to_item()<EOL>return self._http.get(path)<EOL> | Get the resource from a remote Transifex server. | f12489:c0:m7 |
def _create(self, **kwargs): | path = self._construct_path_to_collection()<EOL>for field in self.writable_fields:<EOL><INDENT>try:<EOL><INDENT>value = getattr(self, field)<EOL>kwargs[field] = value<EOL><DEDENT>except AttributeError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return self._http.post(path, json.dumps(kwargs))<EOL> | Create a resource in the remote Transifex server. | f12489:c0:m8 |
def _update(self, **kwargs): | path = self._construct_path_to_item()<EOL>if not kwargs:<EOL><INDENT>return<EOL><DEDENT>return self._http.put(path, json.dumps(kwargs))<EOL> | Update a resource in a remote Transifex server. | f12489:c0:m9 |
def _delete(self, **kwargs): | path = self._construct_path_to_item()<EOL>return self._http.delete(path)<EOL> | Delete a resource from a remote Transifex server. | f12489:c0:m10 |
def _construct_path_to_collection(self): | template = self.get_path_to_collection_template() <EOL>return template % self.get_url_parameters()<EOL> | Construct the path to an actual collection. | f12489:c0:m11 |
def _construct_path_to_item(self): | return self.get_path_to_item_template() % self.get_url_parameters()<EOL> | Construct the path to an actual item. | f12489:c0:m12 |
def get_url_parameters(self): | url_fields = {}<EOL>for field in self.url_fields:<EOL><INDENT>url_fields[field] = getattr(self, field)<EOL><DEDENT>return url_fields<EOL> | Create a dictionary of parameters used in URLs for this model. | f12489:c0:m13 |
def get_path_to_collection_template(self): | return self._join_subpaths(self._prefix, self._path_to_collection)<EOL> | The URL to access the collection of the model. | f12489:c0:m14 |
def get_path_to_item_template(self): | return self._join_subpaths(self._prefix, self._path_to_item)<EOL> | The URL to access a specific item of the model. | f12489:c0:m15 |
def _join_subpaths(self, *args): | return '<STR_LIT:/>'.join(args).replace('<STR_LIT>', '<STR_LIT:/>').replace('<STR_LIT>', '<STR_LIT:/>')<EOL> | Join subpaths (given as arguments) to form a
well-defined URL path. | f12489:c0:m16 |
@classmethod<EOL><INDENT>def _handle_wrong_field(cls, field_name, field_type):<DEDENT> | if field_type == ATTR_TYPE_READ:<EOL><INDENT>field_type = '<STR_LIT>'<EOL><DEDENT>elif field_type == ATTR_TYPE_WRITE:<EOL><INDENT>field_type = '<STR_LIT>'<EOL><DEDENT>elif field_type == ATTR_TYPE_URL:<EOL><INDENT>field_type = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>raise AttributeError('<STR_LIT>'.format(<EOL>field_type<EOL>))<EOL><DEDENT>msg = '<STR_LIT>'.format(<EOL>cls.__name__,<EOL>field_type,<EOL>field_name<EOL>)<EOL>_logger.error(msg)<EOL>raise AttributeError(msg)<EOL> | Raise an exception whenever an invalid attribute with
the given name was attempted to be set to or retrieved from
this model class.
Assumes that the given field is invalid, without making any checks.
Also adds an entry to the logs. | f12489:c0:m17 |
def gfm(text): | extractions = {}<EOL>def extract_pre_block(matchobj):<EOL><INDENT>match = matchobj.group(<NUM_LIT:0>)<EOL>hashed_match = hashlib.md5(match.encode('<STR_LIT:utf-8>')).hexdigest()<EOL>extractions[hashed_match] = match<EOL>result = "<STR_LIT>" % hashed_match<EOL>return result<EOL><DEDENT>def escape_underscore(matchobj):<EOL><INDENT>match = matchobj.group(<NUM_LIT:0>)<EOL>if match.count('<STR_LIT:_>') > <NUM_LIT:1>:<EOL><INDENT>return re.sub('<STR_LIT:_>', '<STR_LIT>', match)<EOL><DEDENT>else:<EOL><INDENT>return match<EOL><DEDENT><DEDENT>def newlines_to_brs(matchobj):<EOL><INDENT>match = matchobj.group(<NUM_LIT:0>)<EOL>if re.search("<STR_LIT>", match):<EOL><INDENT>return match<EOL><DEDENT>else:<EOL><INDENT>match = match.strip()<EOL>return match + "<STR_LIT>"<EOL><DEDENT><DEDENT>def insert_pre_block(matchobj):<EOL><INDENT>string = "<STR_LIT>" + extractions[matchobj.group(<NUM_LIT:1>)]<EOL>return string<EOL><DEDENT>text = re.sub("<STR_LIT>", extract_pre_block, text)<EOL>text = re.sub("<STR_LIT>", escape_underscore, text)<EOL>text = re.sub("<STR_LIT>", newlines_to_brs, text)<EOL>text = re.sub("<STR_LIT>", insert_pre_block, text)<EOL>return text<EOL> | Processes Markdown according to GitHub Flavored Markdown spec. | f12495:m0 |
def markdown(text): | text = gfm(text)<EOL>text = markdown_lib.markdown(text)<EOL>return text<EOL> | Processes GFM then converts it to HTML. | f12495:m1 |
def nt2js_static_resources(): | return abspath(join(dirname(__file__), "<STR_LIT>"))<EOL> | Returns the absolute filesystem path for the nt2js static resources. | f12499:m0 |
def __init__(self, update_callback): | self.update_callback = update_callback<EOL>NetworkTables.addGlobalListener(self._nt_on_change, immediateNotify=True)<EOL>NetworkTables.addConnectionListener(self._nt_connected, immediateNotify=True)<EOL> | :param update_callback: A callable with signature ```callable(update)``` for processing outgoing updates
formatted as strings. | f12500:c0:m0 |
def process_update(self, update): | data = json.loads(update)<EOL>NetworkTables.getEntry(data["<STR_LIT:k>"]).setValue(data["<STR_LIT:v>"])<EOL> | Process an incoming update from a remote NetworkTables | f12500:c0:m1 |
def _send_update(self, data): | if isinstance(data, dict):<EOL><INDENT>data = json.dumps(data)<EOL><DEDENT>self.update_callback(data)<EOL> | Send a NetworkTables update via the stored send_update callback | f12500:c0:m2 |
def _nt_on_change(self, key, value, isNew): | self._send_update({"<STR_LIT:k>": key, "<STR_LIT:v>": value, "<STR_LIT:n>": isNew})<EOL> | NetworkTables global listener callback | f12500:c0:m3 |
def close(self): | NetworkTables.removeGlobalListener(self._nt_on_change)<EOL>NetworkTables.removeConnectionListener(self._nt_connected)<EOL> | Clean up NetworkTables listeners | f12500:c0:m5 |
def get_handlers(): | js_path_opts = {"<STR_LIT:path>": abspath(join(dirname(__file__), "<STR_LIT>"))}<EOL>return [<EOL>("<STR_LIT>", NetworkTablesWebSocket),<EOL>("<STR_LIT>", NonCachingStaticFileHandler, js_path_opts),<EOL>]<EOL> | Returns a list that can be concatenated to the list of handlers
passed to the ``tornado.web.Application`` object. This list contains
handlers for the NetworkTables websocket and the necessary javascript
to use it.
Example usage::
import pynetworktables2js
import tornado.web
...
app = tornado.web.Application(
pynetworktables2js.get_handlers() + [
# tornado handlers here
]) | f12503:m0 |
def check_origin(self, origin): | return True<EOL> | Allow CORS requests | f12503:c0:m1 |
def confusion(a, p): | m = a == p <EOL>f = np.logical_not(m) <EOL>tp = np.sum(np.logical_and(m, a)) <EOL>tn = np.sum(np.logical_and(m, np.logical_not(a))) <EOL>fn = np.sum(np.logical_and(f, a)) <EOL>fp = np.sum(np.logical_and(f, p)) <EOL>return np.array([[tn, fp], [fn, tp]])<EOL> | Confusion Matrix
- confusion matrix, error matrix, matching matrix (unsupervised)
- is a special case: 2x2 contingency table (xtab, RxC table)
- both dimensions are variables with the same classes/labels,
i.e. actual and predicted variables are binary [0,1]
Parameters:
-----------
a : ndarray
Actual values, binary [0,1]
p : ndarray
Predicted values, binary [0,1]
Returns:
--------
cm : ndarray
Confusion matrix
predicted=0 predicted=1
actual=0 tn fp
actual=1 fn tp
Example:
--------
import korr
cm = korr.confusion(a, p)
tn, fp, fn, tp = cm.ravel()
Alternatives:
-------------
import pandas as pd
cm = pd.crosstab(a, p)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(a, p) | f12508:m0 |
def corr_vs_pval(r, pval, plim=<NUM_LIT>, rlim=<NUM_LIT>, dpi=<NUM_LIT>): | <EOL>if len(r.shape) == <NUM_LIT:2>:<EOL><INDENT>idx = (np.tri(N=r.shape[<NUM_LIT:0>], k=-<NUM_LIT:1>) == <NUM_LIT:1>)<EOL>r = r[idx]<EOL>pval = pval[idx]<EOL><DEDENT>i1 = (pval >= plim)<EOL>i2 = (pval < plim) & (np.abs(r) > rlim)<EOL>i3 = (pval < plim) & (np.abs(r) <= rlim)<EOL>absmax = np.max(np.abs(r))<EOL>b = (np.arange(<NUM_LIT:0>, <NUM_LIT>) / <NUM_LIT:10> - <NUM_LIT:1>) * absmax<EOL>c = plt.get_cmap('<STR_LIT>').colors<EOL>c = (c[<NUM_LIT:1>], c[<NUM_LIT:8>], c[<NUM_LIT:7>])<EOL>fig, ax = plt.subplots(dpi=dpi)<EOL>ax.hist((r[i1], r[i2], r[i3]), histtype='<STR_LIT:bar>',<EOL>stacked=True, bins=b, color=c)<EOL>ax.legend(['<STR_LIT>' + str(plim),<EOL>'<STR_LIT>' + str(plim) + '<STR_LIT>' + str(rlim),<EOL>'<STR_LIT>' + str(plim) + '<STR_LIT>' + str(rlim)],<EOL>loc=<NUM_LIT:2>, bbox_to_anchor=(<NUM_LIT>, <NUM_LIT>))<EOL>ax.set_ylabel('<STR_LIT>')<EOL>ax.set_xlabel('<STR_LIT>')<EOL>ax.grid(color='<STR_LIT>', linestyle='<STR_LIT>')<EOL>for edge, spine in ax.spines.items():<EOL><INDENT>spine.set_visible(False)<EOL><DEDENT>return fig, ax<EOL> | Histogram for correlation coefficients and its p-values (colored)
Parameters:
-----------
r : np.ndarray
Correlation coefficient matrix.
The upper triangular elements are extracted if a NxN is provided.
Otherwise provide a plain vector.
pval : np.ndarray
NxN matrix with p-values
plim : float
Discretionary alpha threshold to judge if a p-value is considered
"significant" or not. (Default: 0.01 or 1%)
rlim : float
Discretionary threshold to judge if an absolute correlation
coefficient is big enough. (Default: 0.4)
dpi : int
Set the resolution of the matplotlib graphics.
Return:
-------
fig, ax
matplotlib figure and axes for further tweaking | f12509:m0 |
def kendall(x, axis=<NUM_LIT:0>): | <EOL>if axis is not <NUM_LIT:0>:<EOL><INDENT>x = x.T<EOL><DEDENT>n, c = x.shape<EOL>if c < <NUM_LIT:2>:<EOL><INDENT>raise Exception(<EOL>"<STR_LIT>" + str(c) + "<STR_LIT>")<EOL><DEDENT>r = np.ones((c, c))<EOL>p = np.zeros((c, c))<EOL>for i in range(<NUM_LIT:0>, c):<EOL><INDENT>for j in range(i + <NUM_LIT:1>, c):<EOL><INDENT>r[i, j], p[i, j] = scipy.stats.kendalltau(x[:, i], x[:, j])<EOL>r[j, i] = r[i, j]<EOL>p[j, i] = p[i, j]<EOL><DEDENT><DEDENT>return r, p<EOL> | Kendall's tau (Rank) Correlation Matrix (for ordinal data)
Parameters
----------
x : ndarray
data set
axis : int, optional
Variables as columns is the default (axis=0). If variables are
in the rows use axis=1
Returns
-------
r : ndarray
Correlation Matrix (Kendall tau)
p : ndarray
p-values | f12510:m0 |
def pearson(x, axis=<NUM_LIT:0>): | <EOL>if axis is not <NUM_LIT:0>:<EOL><INDENT>x = x.T<EOL><DEDENT>n, c = x.shape<EOL>if c < <NUM_LIT:2>:<EOL><INDENT>raise Exception(<EOL>"<STR_LIT>" + str(c) + "<STR_LIT>")<EOL><DEDENT>r = np.ones((c, c))<EOL>p = np.zeros((c, c))<EOL>for i in range(<NUM_LIT:0>, c):<EOL><INDENT>for j in range(i + <NUM_LIT:1>, c):<EOL><INDENT>r[i, j], p[i, j] = scipy.stats.pearsonr(x[:, i], x[:, j])<EOL>r[j, i] = r[i, j]<EOL>p[j, i] = p[i, j]<EOL><DEDENT><DEDENT>return r, p<EOL> | Sample Correlation Matrix
Parameters
----------
x : ndarray
data set
axis : int, optional
Variables as columns is the default (axis=0). If variables
are in the rows use axis=1
Returns
-------
r : ndarray
Sample Correlation Matrix (Pearson)
p : ndarray
p-values | f12512:m0 |
def confusion_to_mcc(*args): | if len(args) is <NUM_LIT:1>:<EOL><INDENT>tn, fp, fn, tp = args[<NUM_LIT:0>].ravel().astype(float)<EOL><DEDENT>elif len(args) is <NUM_LIT:4>:<EOL><INDENT>tn, fp, fn, tp = [float(a) for a in args]<EOL><DEDENT>else:<EOL><INDENT>raise Exception((<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"))<EOL><DEDENT>return (tp * tn - fp * fn) / np.sqrt(<EOL>(tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))<EOL> | Convert the confusion matrix to the Matthews correlation coefficient
Parameters:
-----------
cm : ndarray
2x2 confusion matrix with np.array([[tn, fp], [fn, tp]])
tn, fp, fn, tp : float
four scalar variables
- tn : number of true negatives
- fp : number of false positives
- fn : number of false negatives
- tp : number of true positives
Return:
-------
r : float
Matthews correlation coefficient | f12515:m0 |
def mcc(x, axis=<NUM_LIT:0>, autocorrect=False): | <EOL>if axis is not <NUM_LIT:0>:<EOL><INDENT>x = x.T<EOL><DEDENT>n, c = x.shape<EOL>if c < <NUM_LIT:2>:<EOL><INDENT>raise Exception(<EOL>"<STR_LIT>" + str(c) + "<STR_LIT>")<EOL><DEDENT>r = np.ones((c, c))<EOL>p = np.zeros((c, c))<EOL>for i in range(<NUM_LIT:0>, c):<EOL><INDENT>for j in range(i + <NUM_LIT:1>, c):<EOL><INDENT>cm = confusion(x[:, i], x[:, j])<EOL>r[i, j] = confusion_to_mcc(cm)<EOL>r[j, i] = r[i, j]<EOL>p[i, j] = <NUM_LIT:1> - scipy.stats.chi2.cdf(r[i, j] * r[i, j] * n, <NUM_LIT:1>)<EOL>p[j, i] = p[i, j]<EOL><DEDENT><DEDENT>if autocorrect:<EOL><INDENT>r = np.nan_to_num(r)<EOL><DEDENT>return r, p<EOL> | Matthews correlation
Parameters
----------
x : ndarray
dataset of binary [0,1] values
axis : int, optional
Variables as columns is the default (axis=0). If variables
are in the rows use axis=1
autocorrect : bool, optional
If all predictions are True or all are False, then MCC
returns np.NaN
Set autocorrect=True to return a 0.0 correlation instead.
Returns
-------
r : ndarray
Matthews correlation
p : ndarray
p-values of the Chi^2 test statistics
Notes:
------
(1) We cannot directly transform the Chi^2 test statistics to
the Matthews correlation because the relationship is
|r| = sqrt(chi2 / n)
chi2 = r * r * n
(2) The sign would be missing. Therefore, as a rule of thumb,
if you want to optimize ABS(r_mcc) then just use the Chi2/n
directly (Divide Chi^2 by the number of observations)
Examples:
---------
import korr
r, pval = korr.mcc(X)
Alternatives:
-------------
from sklearn.metrics import matthews_corrcoef
r = matthews_corrcoef(y_true, y_pred) | f12515:m1 |
def slice_yx(r, pval, ydim=<NUM_LIT:1>): | if ydim is <NUM_LIT:1>:<EOL><INDENT>return (<EOL>r[<NUM_LIT:1>:, :<NUM_LIT:1>].reshape(-<NUM_LIT:1>, ), pval[<NUM_LIT:1>:, :<NUM_LIT:1>].reshape(-<NUM_LIT:1>, ),<EOL>r[<NUM_LIT:1>:, <NUM_LIT:1>:], pval[<NUM_LIT:1>:, <NUM_LIT:1>:])<EOL><DEDENT>else:<EOL><INDENT>return (<EOL>r[ydim:, :ydim], pval[ydim:, :ydim],<EOL>r[ydim:, ydim:], pval[ydim:, ydim:])<EOL><DEDENT> | slice a correlation and p-value matrix of a (y,X) dataset
into a (y,x_i) vector and (x_j, x_k) matrices
Parameters
----------
r : ndarray
Correlation Matrix of a (y,X) dataset
pval : ndarray
p-values
ydim : int
Number of target variables y, i.e. the first ydim-th columns
and rows are (y, x_i) correlations
Returns
-------
y_r : ndarray
1D vector or ydim-column array with (y,x) correlations
y_pval : ndarray
1D vector or ydim-column array with (y,x) p-values
x_r : ndarray
correlation matrix (x_j, x_k)
x_pval : ndarar
matrix with p-values
Example
-------
import korr
r, pval = korr.mcc(np.c_[y, X])
y_r, y_pval, x_r, x_pval = slice_yx(r, pval, ydim=1)
print(np.c_[y_r, y_pval])
korr.corrgram(x_r, x_pval) | f12516:m0 |
def spearman(x, axis=<NUM_LIT:0>): | <EOL>if axis is not <NUM_LIT:0>:<EOL><INDENT>x = x.T<EOL><DEDENT>n, c = x.shape<EOL>if c < <NUM_LIT:2>:<EOL><INDENT>raise Exception(<EOL>"<STR_LIT>" + str(c) + "<STR_LIT>")<EOL><DEDENT>r = np.ones((c, c))<EOL>p = np.zeros((c, c))<EOL>for i in range(<NUM_LIT:0>, c):<EOL><INDENT>for j in range(i + <NUM_LIT:1>, c):<EOL><INDENT>r[i, j], p[i, j] = scipy.stats.spearmanr(x[:, i], x[:, j])<EOL>r[j, i] = r[i, j]<EOL>p[j, i] = p[i, j]<EOL><DEDENT><DEDENT>return r, p<EOL> | Spearman's Rho as Rank Correlation Matrix (for ordinal data)
Parameters
----------
x : ndarray
data set
axis : int, optional
Variables as columns is the default (axis=0).
If variables are in the rows use axis=1
Returns
-------
r : ndarray
Rank Correlation Matrix (Spearman rho)
p : ndarray
p-values | f12517:m0 |
def find_worst(rho, pval, m=<NUM_LIT:1>, rlim=<NUM_LIT>, plim=<NUM_LIT>): | <EOL>n = len(rho)<EOL>r = list(np.abs(rho))<EOL>p = list(pval)<EOL>i = list(range(n))<EOL>if m > n:<EOL><INDENT>warnings.warn(<EOL>'<STR_LIT>')<EOL>m = n<EOL><DEDENT>selected = list()<EOL>it = <NUM_LIT:0><EOL>while (len(selected) < m) and (it < n):<EOL><INDENT>temp = p.index(max(p)) <EOL>worst = i[temp] <EOL>if (r[temp] <= rlim) and (p[temp] > plim):<EOL><INDENT>r.pop(temp)<EOL>p.pop(temp)<EOL>i.pop(temp)<EOL>selected.append(worst)<EOL><DEDENT>it = it + <NUM_LIT:1><EOL><DEDENT>it = <NUM_LIT:0><EOL>n2 = len(i)<EOL>while (len(selected) < m) and (it < n2):<EOL><INDENT>temp = p.index(max(p)) <EOL>worst = i[temp] <EOL>if (r[temp] <= rlim):<EOL><INDENT>r.pop(temp)<EOL>p.pop(temp)<EOL>i.pop(temp)<EOL>selected.append(worst)<EOL><DEDENT>it = it + <NUM_LIT:1><EOL><DEDENT>it = <NUM_LIT:0><EOL>n3 = len(i)<EOL>while (len(selected) < m) and (it < n3):<EOL><INDENT>temp = r.index(min(r))<EOL>worst = i[temp]<EOL>r.pop(temp)<EOL>p.pop(temp)<EOL>i.pop(temp)<EOL>selected.append(worst)<EOL>it = it + <NUM_LIT:1><EOL><DEDENT>return selected<EOL> | Find the N "worst", i.e. insignificant/random and low, correlations
Parameters
----------
rho : ndarray, list
1D array with correlation coefficients
pval : ndarray, list
1D array with p-values
m : int
The desired number of indices to return
(How many "worst" correlations to find?)
rlim : float
Desired maximum absolute correlation coefficient
(Default: 0.10)
plim : float
Desired minimum p-value
(Default: 0.35)
Return
------
selected : list
Indices of rho and pval of the "worst" correlations. | f12518:m0 |
def find_best(rho, pval, m=<NUM_LIT:1>, rlim=<NUM_LIT>, plim=<NUM_LIT>): | <EOL>n = len(rho)<EOL>r = list(np.abs(rho))<EOL>p = list(pval)<EOL>i = list(range(n))<EOL>if m > n:<EOL><INDENT>warnings.warn(<EOL>'<STR_LIT>')<EOL>m = n<EOL><DEDENT>selected = list()<EOL>it = <NUM_LIT:0><EOL>while (len(selected) < m) and (it < n):<EOL><INDENT>temp = r.index(max(r)) <EOL>best = i[temp] <EOL>if (r[temp] >= rlim) and (p[temp] < plim):<EOL><INDENT>r.pop(temp)<EOL>p.pop(temp)<EOL>i.pop(temp)<EOL>selected.append(best)<EOL><DEDENT>it = it + <NUM_LIT:1><EOL><DEDENT>it = <NUM_LIT:0><EOL>n2 = len(i)<EOL>while (len(selected) < m) and (it < n2):<EOL><INDENT>temp = r.index(max(r)) <EOL>best = i[temp] <EOL>if (p[temp] < plim):<EOL><INDENT>r.pop(temp)<EOL>p.pop(temp)<EOL>i.pop(temp)<EOL>selected.append(best)<EOL><DEDENT>it = it + <NUM_LIT:1><EOL><DEDENT>it = <NUM_LIT:0><EOL>n3 = len(i)<EOL>while (len(selected) < m) and (it < n3):<EOL><INDENT>temp = p.index(min(p))<EOL>best = i[temp]<EOL>r.pop(temp)<EOL>p.pop(temp)<EOL>i.pop(temp)<EOL>selected.append(best)<EOL>it = it + <NUM_LIT:1><EOL><DEDENT>return selected<EOL> | Find the N "best", i.e. high and most significant, correlations
Parameters
----------
rho : ndarray, list
1D array with correlation coefficients
pval : ndarray, list
1D array with p-values
m : int
The desired number of indices to return
(How many "best" correlations to find?)
rlim : float
Desired minimum absolute correlation coefficient
(Default: 0.40)
plim : float
Desired maximum p-value
(Default: 0.01)
Return
------
selected : list
Indices of rho and pval of the "best" correlations. | f12519:m0 |
def find_unrelated(x, plim=<NUM_LIT:0.1>, axis=<NUM_LIT:0>): | <EOL>if axis is not <NUM_LIT:0>:<EOL><INDENT>x = x.T<EOL><DEDENT>_, c = x.shape<EOL>pairs = []<EOL>for i in range(<NUM_LIT:0>, c):<EOL><INDENT>for j in range(i + <NUM_LIT:1>, c):<EOL><INDENT>_, p = scipy.stats.pearsonr(x[:, i], x[:, j])<EOL>if p > plim:<EOL><INDENT>pairs.append((i, j))<EOL><DEDENT><DEDENT><DEDENT>return tuple(pairs)<EOL> | Find indices of insignificant un-/correlated variables
Example:
--------
pairs = find_unrelated(x, plim)
def flatten(rho, pval, sortby="<STR_LIT>"): | n = rho.shape[<NUM_LIT:0>]<EOL>idx = np.triu_indices(n, k=<NUM_LIT:1>)<EOL>tab = pd.DataFrame(<EOL>columns=['<STR_LIT:i>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'],<EOL>data=np.c_[idx[<NUM_LIT:0>], idx[<NUM_LIT:1>], rho[idx], pval[idx]])<EOL>tab[['<STR_LIT:i>', "<STR_LIT>"]] = tab[['<STR_LIT:i>', "<STR_LIT>"]].astype(int)<EOL>if sortby == "<STR_LIT>":<EOL><INDENT>tab['<STR_LIT>'] = np.abs(tab['<STR_LIT>'])<EOL>tab.sort_values(by='<STR_LIT>', inplace=True, ascending=False)<EOL><DEDENT>elif sortby == "<STR_LIT>":<EOL><INDENT>tab.sort_values(by='<STR_LIT>', inplace=True, ascending=True)<EOL><DEDENT>return tab[["<STR_LIT:i>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>"]]<EOL> | Flatten correlation and p-value matrix
Parameters:
-----------
rho : ndarray
Correlation Matrix
pval : ndarray
Matrix with p-values
sortby : str
sort the output table by
- "cor" the highest absolute correlation coefficient
- "pval" the lowest p-value
Return:
-------
tab : ndarray
Table with (i, j, cor, pval) rows
Example:
--------
from korr import pearson, flatten
rho, pval = pearson(X)
tab = flatten(rho, pval, sortby="pval")
tab.values | f12522:m0 |
def register(name): | def decorator(class_):<EOL><INDENT>if name in known_resolvers:<EOL><INDENT>raise ValueError('<STR_LIT>' % name)<EOL><DEDENT>known_resolvers[name] = class_<EOL><DEDENT>return decorator<EOL> | Return a decorator that registers the decorated class as a
resolver with the given *name*. | f12524:m0 |
def get_resolver(order=None, options=None, modules=None): | if not known_resolvers:<EOL><INDENT>from . import resolvers as carmen_resolvers<EOL>modules = [carmen_resolvers] + (modules or [])<EOL>for module in modules:<EOL><INDENT>for loader, name, _ in pkgutil.iter_modules(module.__path__):<EOL><INDENT>full_name = module.__name__ + '<STR_LIT:.>' + name<EOL>loader.find_module(full_name).load_module(full_name)<EOL><DEDENT><DEDENT><DEDENT>if order is None:<EOL><INDENT>order = ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>order = tuple(order)<EOL><DEDENT>if options is None:<EOL><INDENT>options = {}<EOL><DEDENT>resolvers = []<EOL>for resolver_name in order:<EOL><INDENT>if resolver_name not in known_resolvers:<EOL><INDENT>raise ValueError('<STR_LIT>' % resolver_name)<EOL><DEDENT>resolvers.append((<EOL>resolver_name,<EOL>known_resolvers[resolver_name](**options.get(resolver_name, {}))))<EOL><DEDENT>return ResolverCollection(resolvers)<EOL> | Return a location resolver. The *order* argument, if given,
should be a list of resolver names; results from resolvers named
earlier in the list are preferred over later ones. For a list of
built-in resolver names, see :doc:`/resolvers`. The *options*
argument can be used to pass configuration options to individual
resolvers, in the form of a dictionary mapping resolver names to
keyword arguments::
{'geocode': {'max_distance': 50}}
The *modules* argument can be used to specify a list of additional
modules to look for resolvers in. See :doc:`/develop` for details. | f12524:m1 |
@abstractmethod<EOL><INDENT>def add_location(self, location):<DEDENT> | pass<EOL> | Add an individual :py:class:`.Location` object to this
resolver's set of known locations. | f12524:c0:m0 |
def load_locations(self, location_file=None): | if location_file is None:<EOL><INDENT>contents = pkgutil.get_data(__package__, '<STR_LIT>')<EOL>contents_string = contents.decode("<STR_LIT:ascii>")<EOL>locations = contents_string.split('<STR_LIT:\n>')<EOL><DEDENT>else:<EOL><INDENT>from .cli import open_file<EOL>with open_file(location_file, '<STR_LIT:rb>') as input:<EOL><INDENT>locations = input.readlines()<EOL><DEDENT><DEDENT>for location_string in locations:<EOL><INDENT>if location_string.strip():<EOL><INDENT>location = Location(known=True, **json.loads(location_string))<EOL>self.location_id_to_location[location.id] = location<EOL>self.add_location(location)<EOL><DEDENT><DEDENT> | Load locations into this resolver from the given
*location_file*, which should contain one JSON object per line
representing a location. If *location_file* is not specified,
an internal location database is used. | f12524:c0:m1 |
@abstractmethod<EOL><INDENT>def resolve_tweet(self, tweet):<DEDENT> | pass<EOL> | Find the best known location for the given *tweet*, which is
provided as a deserialized JSON object, and return a tuple
containing two elements: a boolean indicating whether the
resolution is *provisional*, and a :py:class:`.Location` object.
Provisional resolutions may be overridden by non-provisional
resolutions returned by a less preferred resolver (i.e., one
that comes later in the resolver order), and should be used when
returning locations with low confidence, such as those found by
using larger "backed-off" administrative units.
If no suitable locations are found, ``None`` may be returned. | f12524:c0:m2 |
def normalize(location_name, preserve_commas=False): | def replace(match):<EOL><INDENT>if preserve_commas and '<STR_LIT:U+002C>' in match.group(<NUM_LIT:0>):<EOL><INDENT>return '<STR_LIT:U+002C>'<EOL><DEDENT>return '<STR_LIT:U+0020>'<EOL><DEDENT>return NORMALIZATION_RE.sub(replace, location_name).strip().lower()<EOL> | Normalize *location_name* by stripping punctuation and collapsing
runs of whitespace, and return the normalized name. | f12525:m0 |
def _cells_for(self, latitude, longitude): | latitude = latitude * self.cell_size<EOL>longitude = longitude * self.cell_size<EOL>shift_size = self.cell_size / <NUM_LIT:2><EOL>for latitude_cell in (latitude - shift_size,<EOL>latitude, latitude + shift_size):<EOL><INDENT>for longitude_cell in (longitude - shift_size,<EOL>longitude, longitude + shift_size):<EOL><INDENT>yield (int(latitude_cell / self.cell_size),<EOL>int(longitude_cell / self.cell_size))<EOL><DEDENT><DEDENT> | Return a list of cells containing the location at *latitude*
and *longitude*. | f12528:c0:m1 |
def canonical(self): | try:<EOL><INDENT>return tuple(map(lambda x: x.lower(), self.name()))<EOL><DEDENT>except:<EOL><INDENT>return tuple([x.lower() for x in self.name()])<EOL><DEDENT> | Return a tuple containing a canonicalized version of this
location's country, state, county, and city names. | f12530:c0:m3 |
def name(self): | try:<EOL><INDENT>return tuple(<EOL>getattr(self, x) if getattr(self, x) else u'<STR_LIT>'<EOL>for x in ('<STR_LIT>', '<STR_LIT:state>', '<STR_LIT>', '<STR_LIT>'))<EOL><DEDENT>except:<EOL><INDENT>return tuple(<EOL>getattr(self, x) if getattr(self, x) else '<STR_LIT>'<EOL>for x in ('<STR_LIT>', '<STR_LIT:state>', '<STR_LIT>', '<STR_LIT>'))<EOL><DEDENT> | Return a tuple containing this location's country, state,
county, and city names. | f12530:c0:m4 |
def parent(self): | if self.city:<EOL><INDENT>return Location(<EOL>country=self.country, state=self.state, county=self.county)<EOL><DEDENT>if self.county:<EOL><INDENT>return Location(country=self.country, state=self.state)<EOL><DEDENT>if self.state:<EOL><INDENT>return Location(country=self.country)<EOL><DEDENT>return Location()<EOL> | Return a location representing the administrative unit above
the one represented by this location. | f12530:c0:m5 |
@contextmanager<EOL>def dynamic_automocking_module(): | tmpdir = tempfile.mkdtemp()<EOL>module_name = '<STR_LIT:_>'.join(fake.words(nb=<NUM_LIT:3>))<EOL>assert module_name not in sys.modules<EOL>module_filepath = os.path.join(tmpdir, '<STR_LIT>'.format(module_name))<EOL>assert not os.path.exists(module_filepath)<EOL>with open(module_filepath, '<STR_LIT>') as tmp:<EOL><INDENT>tmp.write( | Dynamically create a temporary module file, (but don't import it).
Then clean up the temporary module after use. | f12536:m0 |
def _get_from_path(import_path):<EOL> | module_name, obj_name = import_path.rsplit('<STR_LIT:.>', <NUM_LIT:1>)<EOL>module = import_module(module_name)<EOL>return getattr(module, obj_name)<EOL> | Kwargs:
import_path: full import path (to a mock factory function)
Returns:
(the mock factory function) | f12543:m0 |
def register(func_path, factory=mock.MagicMock):<EOL> | global _factory_map<EOL>_factory_map[func_path] = factory<EOL>def decorator(decorated_factory):<EOL><INDENT>_factory_map[func_path] = decorated_factory<EOL>return decorated_factory<EOL><DEDENT>return decorator<EOL> | Kwargs:
func_path: import path to mock (as you would give to `mock.patch`)
factory: function that returns a mock for the patched func
Returns:
(decorator)
Usage:
automock.register('path.to.func.to.mock') # default MagicMock
automock.register('path.to.func.to.mock', CustomMockFactory)
@automock.register('path.to.func.to.mock')
def custom_mock(result):
return mock.MagicMock(return_value=result) | f12543:m1 |
def _pre_import():<EOL> | for import_path in settings.REGISTRATION_IMPORTS:<EOL><INDENT>import_module(import_path)<EOL><DEDENT> | Ensure that modules containing mock factories get imported so that their
calls to `register` are made.
(modules which are configured in `TEST_MOCK_FACTORY_MAP` do not need to
be pre-imported, only those which rely on `register`) | f12543:m2 |
def start_patching(name=None):<EOL> | global _factory_map, _patchers, _mocks<EOL>if _patchers and name is None:<EOL><INDENT>warnings.warn('<STR_LIT>')<EOL><DEDENT>_pre_import()<EOL>if name is not None:<EOL><INDENT>factory = _factory_map[name]<EOL>items = [(name, factory)]<EOL><DEDENT>else:<EOL><INDENT>items = _factory_map.items()<EOL><DEDENT>for name, factory in items:<EOL><INDENT>patcher = mock.patch(name, new=factory())<EOL>mocked = patcher.start()<EOL>_patchers[name] = patcher<EOL>_mocks[name] = mocked<EOL><DEDENT> | Initiate mocking of the functions listed in `_factory_map`.
For this to work reliably all mocked helper functions should be imported
and used like this:
import dp_paypal.client as paypal
res = paypal.do_paypal_express_checkout(...)
(i.e. don't use `from dp_paypal.client import x` import style)
Kwargs:
name (Optional[str]): if given, only patch the specified path, else all
defined default mocks | f12543:m3 |
def stop_patching(name=None):<EOL> | global _patchers, _mocks<EOL>if not _patchers:<EOL><INDENT>warnings.warn('<STR_LIT>')<EOL><DEDENT>if name is not None:<EOL><INDENT>items = [(name, _patchers[name])]<EOL><DEDENT>else:<EOL><INDENT>items = list(_patchers.items())<EOL><DEDENT>for name, patcher in items:<EOL><INDENT>patcher.stop()<EOL>del _patchers[name]<EOL>del _mocks[name]<EOL><DEDENT> | Finish the mocking initiated by `start_patching`
Kwargs:
name (Optional[str]): if given, only unpatch the specified path, else all
defined default mocks | f12543:m4 |
def get_mock(name):<EOL> | return _mocks[name]<EOL> | Intended for use in test cases e.g. to check if/how a mock was called
Emphasises that `_mocks` is a private value. If you need to customise
mocks use the `swap_mock` helper where possible. | f12543:m5 |
def get_called_mocks():<EOL> | return {<EOL>name: mock<EOL>for name, mock in _mocks.items()<EOL>if mock.called<EOL>}<EOL> | Intended for use in test cases e.g. to check if/how a mock was called
Emphasises that `_mocks` is a private value. If you need to customise
mocks use the `swap_mock` helper where possible. | f12543:m6 |
def __init__(self, _path, *args, **kwargs):<EOL> | global _factory_map, _mocks<EOL>_pre_import()<EOL>factory = _factory_map[_path]<EOL>new_mock = factory(*args, **kwargs)<EOL>super(SwapMockContextDecorator, self).__init__(<EOL>mock.patch(_path, new=new_mock),<EOL>mock.patch.dict(_mocks, {_path: new_mock}),<EOL>)<EOL> | Kwargs:
_path: key in `_factory_map` dict of the method to swap mock for
(should be an import path)
*args, **kwargs: passed through to the mock factory used to generate
a replacement mock to swap in | f12543:c0:m0 |
def __enter__(self):<EOL> | super(SwapMockContextDecorator, self).__enter__()<EOL>return self[<NUM_LIT:0>]<EOL> | Returns the context we care about, i.e. the result of the first patch,
which is the mocked object itself. | f12543:c0:m1 |
def __init__(self, name):<EOL> | _pre_import()<EOL>self.name = name<EOL> | Kwargs:
_path (str): key in `_factory_map` dict of the method to swap mock for
(should be an import path)
*args, **kwargs: passed through to the mock factory used to generate
a replacement mock to swap in | f12543:c1:m0 |
def __enter__(self):<EOL> | stop_patching(self.name)<EOL>return _get_from_path(self.name)<EOL> | Returns the restored function (calling code may still have a reference
to the mock even though we stopped patching the source path) | f12543:c1:m1 |
def __call__(self, f):<EOL> | @wraps(f)<EOL>def decorator(*args):<EOL><INDENT>stop_patching(self.name)<EOL>restored = _get_from_path(self.name)<EOL>args += (restored,)<EOL>ret = f(*args)<EOL>start_patching(self.name)<EOL>return ret<EOL><DEDENT>return decorator<EOL> | Only really useful for test methods, this will inject the restored
function as an arg (in the same way that @mock.patch decorator does
with mocked items) | f12543:c1:m3 |
def __enter__(self):<EOL> | start_patching()<EOL>return self<EOL> | Returns the restored function (calling code may still have a reference
to the mock even though we stopped patching the source path) | f12543:c4:m0 |
def __init__(self,<EOL>condition, <EOL>context_object, <EOL>):<EOL> | self._condition = condition<EOL>self.context_object = context_object<EOL> | Kwargs:
condition: whether to use the context
context_object: a context manager or decorator function, or an
object that can function as both | f12546:c1:m0 |
@property<EOL><INDENT>def condition(self):<EOL><DEDENT> | if callable(self._condition):<EOL><INDENT>return self._condition()<EOL><DEDENT>else:<EOL><INDENT>return self._condition<EOL><DEDENT> | NOTE:
We don't need to use a lazy callable when supplying a django settings
value as the condition, because the `settings` object already
implements lazy property access for us. | f12546:c1:m1 |
def __call__(self, f):<EOL> | @wraps(f)<EOL>def wrapped(*args, **kwargs):<EOL><INDENT>if self.condition:<EOL><INDENT>return self.context_object(f)(*args, **kwargs)<EOL><DEDENT>else:<EOL><INDENT>return f(*args, **kwargs)<EOL><DEDENT><DEDENT>return wrapped<EOL> | Evaluate `condition` on every call to decorated function
(because this method is being called at import time, so would not
be possible to eg override_settings to modify the condition value
if we didn't do it inside `wrapped`) | f12546:c1:m4 |
def read_vint32(self): | result = <NUM_LIT:0><EOL>count = <NUM_LIT:0><EOL>while True:<EOL><INDENT>if count > <NUM_LIT:4>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>b = self.read_byte()<EOL>result = result | (b & <NUM_LIT>) << (<NUM_LIT:7> * count)<EOL>count += <NUM_LIT:1><EOL>if not b & <NUM_LIT>:<EOL><INDENT>return result<EOL><DEDENT><DEDENT> | This seems to be a variable length integer ala utf-8 style | f12550:c2:m7 |
def read_message(self, message_type, compressed=False, read_size=True): | if read_size:<EOL><INDENT>size = self.read_vint32()<EOL>b = self.read(size)<EOL><DEDENT>else:<EOL><INDENT>b = self.read()<EOL><DEDENT>if compressed:<EOL><INDENT>b = snappy.decompress(b)<EOL><DEDENT>m = message_type()<EOL>m.ParseFromString(b)<EOL>return m<EOL> | Read a protobuf message | f12550:c2:m8 |
def log(self, level, message): | if level <= self.verbosity:<EOL><INDENT>print(message)<EOL><DEDENT> | Log a message if our verbosity permits it | f12550:c3:m1 |
def run_hooks(self, packet): | if packet.__class__ in self.internal_hooks:<EOL><INDENT>self.internal_hooks[packet.__class__](packet)<EOL><DEDENT>if packet.__class__ in self.hooks:<EOL><INDENT>self.hooks[packet.__class__](packet)<EOL><DEDENT> | Run any additional functions that want to process this type of packet.
These can be internal parser hooks, or external hooks that process
information. | f12550:c3:m2 |