| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q22100
|
Remote._assert_refspec
|
train
|
def _assert_refspec(self):
"""Turns out we can't deal with remotes if the refspec is missing"""
config = self.config_reader
unset = 'placeholder'
try:
if config.get_value('fetch', default=unset) is unset:
msg = "Remote '%s' has no refspec set.\n"
msg += "You can set it as follows:"
msg += " 'git config --add \"remote.%s.fetch +refs/heads/*:refs/heads/*\"'."
raise AssertionError(msg % (self.name, self.name))
finally:
config.release()
|
python
|
{
"resource": ""
}
|
q22101
|
Remote.fetch
|
train
|
def fetch(self, refspec=None, progress=None, **kwargs):
"""Fetch the latest changes for this remote
:param refspec:
A "refspec" is used by fetch and push to describe the mapping
between remote ref and local ref. They are combined with a colon in
the format <src>:<dst>, preceded by an optional plus sign, +.
For example: git fetch $URL refs/heads/master:refs/heads/origin means
"grab the master branch head from the $URL and store it as my origin
branch head". And git push $URL refs/heads/master:refs/heads/to-upstream
means "publish my master branch head as to-upstream branch at $URL".
See also git-push(1).
Taken from the git manual
Fetch supports multiple refspecs (as the
underlying git-fetch does) - supplying a list rather than a string
for 'refspec' will make use of this facility.
:param progress: See 'push' method
:param kwargs: Additional arguments to be passed to git-fetch
:return:
IterableList(FetchInfo, ...) list of FetchInfo instances providing detailed
information about the fetch results
:note:
As fetch does not provide progress information to non-ttys, we cannot make
it available here the way we do in the 'push' method."""
if refspec is None:
# No argument refspec, then ensure the repo's config has a fetch refspec.
self._assert_refspec()
kwargs = add_progress(kwargs, self.repo.git, progress)
if isinstance(refspec, list):
args = refspec
else:
args = [refspec]
proc = self.repo.git.fetch(self, *args, as_process=True, with_stdout=False,
universal_newlines=True, v=True, **kwargs)
res = self._get_fetch_info_from_stderr(proc, progress)
if hasattr(self.repo.odb, 'update_cache'):
self.repo.odb.update_cache()
return res
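
# --- Added usage sketch (not part of the original snippet) ---
# Minimal illustration of calling fetch() through GitPython's public API; the
# repository path and refspec below are placeholders.
def example_fetch(repo_path='.'):
    import git
    repo = git.Repo(repo_path)
    origin = repo.remotes.origin
    # A single refspec or a list of refspecs may be passed, mirroring git-fetch.
    for info in origin.fetch('+refs/heads/master:refs/remotes/origin/master'):
        print(info.ref, info.commit, info.flags)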
|
python
|
{
"resource": ""
}
|
q22102
|
Remote.push
|
train
|
def push(self, refspec=None, progress=None, **kwargs):
"""Push changes from source branch in refspec to target branch in refspec.
:param refspec: see 'fetch' method
:param progress:
Can take one of many value types:
* None to discard progress information
* A function (callable) that is called with the progress information.
Signature: ``progress(op_code, cur_count, max_count=None, message='')``.
`Click here <http://goo.gl/NPa7st>`_ for a description of all arguments
given to the function.
* An instance of a class derived from ``git.RemoteProgress`` that
overrides the ``update()`` function.
:note: No further progress information is returned after push returns.
:param kwargs: Additional arguments to be passed to git-push
:return:
IterableList(PushInfo, ...) iterable list of PushInfo instances, each
one informing about an individual head which had been updated on the remote
side.
If the push contains rejected heads, these will have the PushInfo.ERROR bit set
in their flags.
If the operation fails completely, the length of the returned IterableList will
be zero."""
kwargs = add_progress(kwargs, self.repo.git, progress)
proc = self.repo.git.push(self, refspec, porcelain=True, as_process=True,
universal_newlines=True, **kwargs)
return self._get_push_info(proc, progress)
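
# --- Added usage sketch (not part of the original snippet) ---
# Shows the two documented ways of receiving progress: a git.RemoteProgress
# subclass overriding update(), or a plain callable. Names are illustrative.
def example_push(repo_path='.'):
    import git

    class PrintProgress(git.RemoteProgress):
        def update(self, op_code, cur_count, max_count=None, message=''):
            print(op_code, cur_count, max_count, message)

    repo = git.Repo(repo_path)
    results = repo.remotes.origin.push('refs/heads/master:refs/heads/master',
                                       progress=PrintProgress())
    for info in results:
        if info.flags & info.ERROR:
            print("push rejected:", info.summary)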
|
python
|
{
"resource": ""
}
|
q22103
|
Submodule.move
|
train
|
def move(self, module_path, configuration=True, module=True):
"""Move the submodule to a another module path. This involves physically moving
the repository at our current path, changing the configuration, as well as
adjusting our index entry accordingly.
:param module_path: the path to which to move our module in the parent repository's working tree,
given as repository-relative or absolute path. Intermediate directories will be created
accordingly. If the path already exists, it must be empty.
Trailing (back)slashes are removed automatically
:param configuration: if True, the configuration will be adjusted to let
the submodule point to the given path.
:param module: if True, the repository managed by this submodule
will be moved as well. If False, we don't move the submodule's checkout, which may leave
the parent repository in an inconsistent state.
:return: self
:raise ValueError: if the module path existed and was not empty, or was a file
:note: Currently the method is not atomic, and it could leave the repository
in an inconsistent state if a sub-step fails for some reason
"""
if module + configuration < 1:
raise ValueError("You must specify to move at least the module or the configuration of the submodule")
# END handle input
module_checkout_path = self._to_relative_path(self.repo, module_path)
# VERIFY DESTINATION
if module_checkout_path == self.path:
return self
# END handle no change
module_checkout_abspath = join_path_native(self.repo.working_tree_dir, module_checkout_path)
if osp.isfile(module_checkout_abspath):
raise ValueError("Cannot move repository onto a file: %s" % module_checkout_abspath)
# END handle target files
index = self.repo.index
tekey = index.entry_key(module_checkout_path, 0)
# if the target item already exists, fail
if configuration and tekey in index.entries:
raise ValueError("Index entry for target path did already exist")
# END handle index key already there
# remove existing destination
if module:
if osp.exists(module_checkout_abspath):
if len(os.listdir(module_checkout_abspath)):
raise ValueError("Destination module directory was not empty")
# END handle non-emptiness
if osp.islink(module_checkout_abspath):
os.remove(module_checkout_abspath)
else:
os.rmdir(module_checkout_abspath)
# END handle link
else:
# recreate parent directories
# NOTE: renames() does that now
pass
# END handle existence
# END handle module
# move the module into place if possible
cur_path = self.abspath
renamed_module = False
if module and osp.exists(cur_path):
os.renames(cur_path, module_checkout_abspath)
renamed_module = True
if osp.isfile(osp.join(module_checkout_abspath, '.git')):
module_abspath = self._module_abspath(self.repo, self.path, self.name)
self._write_git_file_and_module_config(module_checkout_abspath, module_abspath)
# end handle git file rewrite
# END move physical module
# rename the index entry - have to manipulate the index directly as
# git-mv cannot be used on submodules ... yeah
previous_sm_path = self.path
try:
if configuration:
try:
ekey = index.entry_key(self.path, 0)
entry = index.entries[ekey]
del(index.entries[ekey])
nentry = git.IndexEntry(entry[:3] + (module_checkout_path,) + entry[4:])
index.entries[tekey] = nentry
except KeyError:
raise InvalidGitRepositoryError("Submodule's entry at %r did not exist" % (self.path))
# END handle submodule doesn't exist
# update configuration
with self.config_writer(index=index) as writer: # auto-write
writer.set_value('path', module_checkout_path)
self.path = module_checkout_path
# END handle configuration flag
except Exception:
if renamed_module:
os.renames(module_checkout_abspath, cur_path)
# END undo module renaming
raise
# END handle undo rename
# Auto-rename submodule if its name was 'default', that is, the checkout directory
if previous_sm_path == self.name:
self.rename(module_checkout_path)
# end
return self
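
# --- Added usage sketch (not part of the original snippet) ---
# Illustrative only: relocates an existing submodule checkout. The submodule
# name 'lib' and the target path are placeholders.
def example_move_submodule(repo_path='.'):
    import git
    repo = git.Repo(repo_path)
    sm = repo.submodule('lib')   # look the submodule up by name
    sm.move('vendor/lib')        # moves the checkout, updates .gitmodules and the index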
|
python
|
{
"resource": ""
}
|
q22104
|
Diffable.diff
|
train
|
def diff(self, other=Index, paths=None, create_patch=False, **kwargs):
"""Creates diffs between two items being trees, trees and index or an
index and the working tree. It will detect renames automatically.
:param other:
Is the item to compare us with.
If None, we will be compared to the working tree.
If Treeish, it will be compared against the respective tree
If Index ( type ), it will be compared against the index.
If git.NULL_TREE, it will compare against the empty tree.
It defaults to Index so that the method does not fail by default
on bare repositories.
:param paths:
is a list of paths or a single path to limit the diff to.
The diff will be limited to entries matching at least one of the given paths.
:param create_patch:
If True, the returned Diff contains a detailed patch that, if applied,
transforms self into other. Patches are somewhat costly as blobs have to be read
and diffed.
:param kwargs:
Additional arguments passed to git-diff, such as
R=True to swap both sides of the diff.
:return: git.DiffIndex
:note:
On a bare repository, 'other' needs to be provided as Index or
as Tree/Commit, or a git command error will occur"""
args = []
args.append("--abbrev=40") # we need full shas
args.append("--full-index") # get full index paths, not only filenames
args.append("-M") # check for renames, in both formats
if create_patch:
args.append("-p")
else:
args.append("--raw")
# in any way, assure we don't see colored output,
# fixes https://github.com/gitpython-developers/GitPython/issues/172
args.append('--no-color')
if paths is not None and not isinstance(paths, (tuple, list)):
paths = [paths]
diff_cmd = self.repo.git.diff
if other is self.Index:
args.insert(0, '--cached')
elif other is NULL_TREE:
args.insert(0, '-r') # recursive diff-tree
args.insert(0, '--root')
diff_cmd = self.repo.git.diff_tree
elif other is not None:
args.insert(0, '-r') # recursive diff-tree
args.insert(0, other)
diff_cmd = self.repo.git.diff_tree
args.insert(0, self)
# paths is list here or None
if paths:
args.append("--")
args.extend(paths)
# END paths handling
kwargs['as_process'] = True
proc = diff_cmd(*self._process_diff_args(args), **kwargs)
diff_method = (Diff._index_from_patch_format
if create_patch
else Diff._index_from_raw_format)
index = diff_method(self.repo, proc)
proc.wait()
return index
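
# --- Added usage sketch (not part of the original snippet) ---
# A few common invocations of diff(); the repository path is a placeholder.
def example_diff(repo_path='.'):
    import git
    repo = git.Repo(repo_path)
    staged = repo.index.diff(repo.head.commit)      # index vs. HEAD
    unstaged = repo.index.diff(None)                # index vs. working tree
    patch = repo.head.commit.diff('HEAD~1', create_patch=True)  # detailed patch between commits
    for d in patch:
        print(d.a_path, '->', d.b_path)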
|
python
|
{
"resource": ""
}
|
q22105
|
TagObject._set_cache_
|
train
|
def _set_cache_(self, attr):
"""Cache all our attributes at once"""
if attr in TagObject.__slots__:
ostream = self.repo.odb.stream(self.binsha)
lines = ostream.read().decode(defenc).splitlines()
obj, hexsha = lines[0].split(" ") # object <hexsha> @UnusedVariable
type_token, type_name = lines[1].split(" ") # type <type_name> @UnusedVariable
self.object = \
get_object_type_by_name(type_name.encode('ascii'))(self.repo, hex_to_bin(hexsha))
self.tag = lines[2][4:] # tag <tag name>
tagger_info = lines[3] # tagger <actor> <date>
self.tagger, self.tagged_date, self.tagger_tz_offset = parse_actor_and_date(tagger_info)
# line 4 empty - it could mark the beginning of the next header
# in case there really is no message, it would not exist. Otherwise
# a newline separates header from message
if len(lines) > 5:
self.message = "\n".join(lines[5:])
else:
self.message = ''
# END check our attributes
else:
super(TagObject, self)._set_cache_(attr)
|
python
|
{
"resource": ""
}
|
q22106
|
require_remote_ref_path
|
train
|
def require_remote_ref_path(func):
"""A decorator raising a TypeError if we are not a valid remote, based on the path"""
def wrapper(self, *args):
if not self.is_remote():
raise ValueError("ref path does not point to a remote reference: %s" % self.path)
return func(self, *args)
# END wrapper
wrapper.__name__ = func.__name__
return wrapper
|
python
|
{
"resource": ""
}
|
q22107
|
post_clear_cache
|
train
|
def post_clear_cache(func):
"""Decorator for functions that alter the index using the git command. This would
invalidate our possibly existing entries dictionary which is why it must be
deleted to allow it to be lazily reread later.
:note:
This decorator will not be required once all functions are implemented
natively which in fact is possible, but probably not feasible performance wise.
"""
@wraps(func)
def post_clear_cache_if_not_raised(self, *args, **kwargs):
rval = func(self, *args, **kwargs)
self._delete_entries_cache()
return rval
# END wrapper method
return post_clear_cache_if_not_raised
|
python
|
{
"resource": ""
}
|
q22108
|
default_index
|
train
|
def default_index(func):
"""Decorator assuring the wrapped method may only run if we are the default
repository index. This is as we rely on git commands that operate
on that index only. """
@wraps(func)
def check_default_index(self, *args, **kwargs):
if self._file_path != self._index_path():
raise AssertionError(
"Cannot call %r on indices that do not represent the default git index" % func.__name__)
return func(self, *args, **kwargs)
# END wrapper method
return check_default_index
|
python
|
{
"resource": ""
}
|
q22109
|
git_working_dir
|
train
|
def git_working_dir(func):
"""Decorator which changes the current working dir to the one of the git
repository in order to assure relative paths are handled correctly"""
@wraps(func)
def set_git_working_dir(self, *args, **kwargs):
cur_wd = os.getcwd()
os.chdir(self.repo.working_tree_dir)
try:
return func(self, *args, **kwargs)
finally:
os.chdir(cur_wd)
# END handle working dir
# END wrapper
return set_git_working_dir
|
python
|
{
"resource": ""
}
|
q22110
|
find_first_remote_branch
|
train
|
def find_first_remote_branch(remotes, branch_name):
"""Find the remote branch matching the name of the given branch or raise InvalidGitRepositoryError"""
for remote in remotes:
try:
return remote.refs[branch_name]
except IndexError:
continue
# END exception handling
# END for remote
raise InvalidGitRepositoryError("Didn't find remote branch '%r' in any of the given remotes" % branch_name)
|
python
|
{
"resource": ""
}
|
q22111
|
SubmoduleConfigParser.flush_to_index
|
train
|
def flush_to_index(self):
"""Flush changes in our configuration file to the index"""
assert self._smref is not None
# should always have a file here
assert not isinstance(self._file_or_files, BytesIO)
sm = self._smref()
if sm is not None:
index = self._index
if index is None:
index = sm.repo.index
# END handle index
index.add([sm.k_modules_file], write=self._auto_write)
sm._clear_cache()
|
python
|
{
"resource": ""
}
|
q22112
|
RefLog.append_entry
|
train
|
def append_entry(cls, config_reader, filepath, oldbinsha, newbinsha, message):
"""Append a new log entry to the revlog at filepath.
:param config_reader: configuration reader of the repository - used to obtain
user information. May also be an Actor instance identifying the committer directly.
May also be None
:param filepath: full path to the log file
:param oldbinsha: binary sha of the previous commit
:param newbinsha: binary sha of the current commit
:param message: message describing the change to the reference
:return: RefLogEntry object which was appended to the log
:note: As we are append-only, concurrent access is not a problem as we
do not interfere with readers."""
if len(oldbinsha) != 20 or len(newbinsha) != 20:
raise ValueError("Shas need to be given in binary format")
# END handle sha type
assure_directory_exists(filepath, is_file=True)
first_line = message.split('\n')[0]
committer = isinstance(config_reader, Actor) and config_reader or Actor.committer(config_reader)
entry = RefLogEntry((
bin_to_hex(oldbinsha).decode('ascii'),
bin_to_hex(newbinsha).decode('ascii'),
committer, (int(time.time()), time.altzone), first_line
))
lf = LockFile(filepath)
lf._obtain_lock_or_raise()
fd = open(filepath, 'ab')
try:
fd.write(entry.format().encode(defenc))
finally:
fd.close()
lf._release_lock()
# END handle write operation
return entry
|
python
|
{
"resource": ""
}
|
q22113
|
unbare_repo
|
train
|
def unbare_repo(func):
"""Methods with this decorator raise InvalidGitRepositoryError if they
encounter a bare repository"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if self.repo.bare:
raise InvalidGitRepositoryError("Method '%s' cannot operate on bare repositories" % func.__name__)
# END bare method
return func(self, *args, **kwargs)
# END wrapper
return wrapper
|
python
|
{
"resource": ""
}
|
q22114
|
rmtree
|
train
|
def rmtree(path):
"""Remove the given recursively.
:note: we use shutil rmtree but adjust its behaviour to see whether files that
couldn't be deleted are read-only. Windows will not remove them in that case"""
def onerror(func, path, exc_info):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
try:
func(path) # Will scream if still not possible to delete.
except Exception as ex:
if HIDE_WINDOWS_KNOWN_ERRORS:
raise SkipTest("FIXME: fails with: PermissionError\n %s", ex)
else:
raise
return shutil.rmtree(path, False, onerror)
|
python
|
{
"resource": ""
}
|
q22115
|
stream_copy
|
train
|
def stream_copy(source, destination, chunk_size=512 * 1024):
"""Copy all data from the source stream into the destination stream in chunks
of size chunk_size
:return: amount of bytes written"""
br = 0
while True:
chunk = source.read(chunk_size)
destination.write(chunk)
br += len(chunk)
if len(chunk) < chunk_size:
break
# END reading output stream
return br
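
# --- Added usage sketch (not part of the original snippet) ---
# Self-contained round trip through in-memory streams.
def example_stream_copy():
    from io import BytesIO
    src, dst = BytesIO(b'x' * (1024 * 1024)), BytesIO()
    written = stream_copy(src, dst, chunk_size=64 * 1024)
    assert written == 1024 * 1024 and dst.getvalue() == src.getvalue()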
|
python
|
{
"resource": ""
}
|
q22116
|
assure_directory_exists
|
train
|
def assure_directory_exists(path, is_file=False):
"""Assure that the directory pointed to by path exists.
:param is_file: If True, path is assumed to be a file and handled correctly.
Otherwise it must be a directory
:return: True if the directory was created, False if it already existed"""
if is_file:
path = osp.dirname(path)
# END handle file
if not osp.isdir(path):
os.makedirs(path)
return True
return False
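
# --- Added usage sketch (not part of the original snippet) ---
# The path below is a placeholder; with is_file=True only the parent directory
# ('/tmp/example/locks') is created, never the file itself.
def example_assure_directory_exists():
    created = assure_directory_exists('/tmp/example/locks/repo.lock', is_file=True)
    print('created parent directory' if created else 'parent directory already existed')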
|
python
|
{
"resource": ""
}
|
q22117
|
RemoteProgress._parse_progress_line
|
train
|
def _parse_progress_line(self, line):
"""Parse progress information from the given line as retrieved by git-push
or git-fetch.
- Lines that do not contain progress info are stored in :attr:`other_lines`.
- Lines that seem to contain an error (i.e. start with error: or fatal:) are stored
in :attr:`error_lines`.
:return: list(line, ...) list of lines that could not be processed"""
# handle
# Counting objects: 4, done.
# Compressing objects: 50% (1/2) \rCompressing objects: 100% (2/2) \rCompressing objects: 100% (2/2), done.
self._cur_line = line = line.decode('utf-8') if isinstance(line, bytes) else line
if len(self.error_lines) > 0 or self._cur_line.startswith(('error:', 'fatal:')):
self.error_lines.append(self._cur_line)
return []
sub_lines = line.split('\r')
failed_lines = []
for sline in sub_lines:
# find escape characters and cut them away - regex will not work with
# them as they are non-ascii. As git might expect a tty, it will send them
last_valid_index = None
for i, c in enumerate(reversed(sline)):
if ord(c) < 32:
# its a slice index
last_valid_index = -i - 1
# END character was non-ascii
# END for each character in sline
if last_valid_index is not None:
sline = sline[:last_valid_index]
# END cut away invalid part
sline = sline.rstrip()
cur_count, max_count = None, None
match = self.re_op_relative.match(sline)
if match is None:
match = self.re_op_absolute.match(sline)
if not match:
self.line_dropped(sline)
failed_lines.append(sline)
continue
# END could not get match
op_code = 0
remote, op_name, percent, cur_count, max_count, message = match.groups() # @UnusedVariable
# get operation id
if op_name == "Counting objects":
op_code |= self.COUNTING
elif op_name == "Compressing objects":
op_code |= self.COMPRESSING
elif op_name == "Writing objects":
op_code |= self.WRITING
elif op_name == 'Receiving objects':
op_code |= self.RECEIVING
elif op_name == 'Resolving deltas':
op_code |= self.RESOLVING
elif op_name == 'Finding sources':
op_code |= self.FINDING_SOURCES
elif op_name == 'Checking out files':
op_code |= self.CHECKING_OUT
else:
# Note: On windows it can happen that partial lines are sent
# Hence we get something like "CompreReceiving objects", which is
# a blend of "Compressing objects" and "Receiving objects".
# This can't really be prevented, so we drop the line verbosely
# to make sure we get informed in case the process spits out new
# commands at some point.
self.line_dropped(sline)
# Note: Don't add this line to the failed lines, as we have to silently
# drop it
self.other_lines.extend(failed_lines)
return failed_lines
# END handle op code
# figure out stage
if op_code not in self._seen_ops:
self._seen_ops.append(op_code)
op_code |= self.BEGIN
# END begin opcode
if message is None:
message = ''
# END message handling
message = message.strip()
if message.endswith(self.DONE_TOKEN):
op_code |= self.END
message = message[:-len(self.DONE_TOKEN)]
# END end message handling
message = message.strip(self.TOKEN_SEPARATOR)
self.update(op_code,
cur_count and float(cur_count),
max_count and float(max_count),
message)
# END for each sub line
self.other_lines.extend(failed_lines)
return failed_lines
|
python
|
{
"resource": ""
}
|
q22118
|
Stats._list_from_string
|
train
|
def _list_from_string(cls, repo, text):
"""Create a Stat object from output retrieved by git-diff.
:return: git.Stat"""
hsh = {'total': {'insertions': 0, 'deletions': 0, 'lines': 0, 'files': 0}, 'files': {}}
for line in text.splitlines():
(raw_insertions, raw_deletions, filename) = line.split("\t")
insertions = raw_insertions != '-' and int(raw_insertions) or 0
deletions = raw_deletions != '-' and int(raw_deletions) or 0
hsh['total']['insertions'] += insertions
hsh['total']['deletions'] += deletions
hsh['total']['lines'] += insertions + deletions
hsh['total']['files'] += 1
hsh['files'][filename.strip()] = {'insertions': insertions,
'deletions': deletions,
'lines': insertions + deletions}
return Stats(hsh['total'], hsh['files'])
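
# --- Added usage note (not part of the original snippet) ---
# The parser consumes git-diff --numstat output, one "<insertions>\t<deletions>\t<path>"
# entry per line ('-' marks binary files). It is normally reached through the public
# API, e.g. Commit.stats, as sketched below.
def example_stats(repo):
    stats = repo.head.commit.stats    # GitPython builds this via _list_from_string
    print(stats.total['insertions'], stats.total['deletions'], stats.total['files'])
    for path, counts in stats.files.items():
        print(path, counts['lines'])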
|
python
|
{
"resource": ""
}
|
q22119
|
BlockingLockFile._obtain_lock
|
train
|
def _obtain_lock(self):
"""This method blocks until it obtained the lock, or raises IOError if
it ran out of time or if the parent directory was not available anymore.
If this method returns, you are guaranteed to own the lock"""
starttime = time.time()
maxtime = starttime + float(self._max_block_time)
while True:
try:
super(BlockingLockFile, self)._obtain_lock()
except IOError:
# sanity check: if the directory leading to the lockfile is not
# readable anymore, raise an exception
curtime = time.time()
if not osp.isdir(osp.dirname(self._lock_file_path())):
msg = "Directory containing the lockfile %r was not readable anymore after waiting %g seconds" % (
self._lock_file_path(), curtime - starttime)
raise IOError(msg)
# END handle missing directory
if curtime >= maxtime:
msg = "Waited %g seconds for lock at %r" % (maxtime - starttime, self._lock_file_path())
raise IOError(msg)
# END abort if we wait too long
time.sleep(self._check_interval)
else:
break
|
python
|
{
"resource": ""
}
|
q22120
|
Iterable.list_items
|
train
|
def list_items(cls, repo, *args, **kwargs):
"""
Find all items of this type - subclasses can specify args and kwargs differently.
If no args are given, subclasses are obliged to return all items.
:note: Favor the iter_items method as it yields items lazily instead of building the full list
:return:list(Item,...) list of item instances"""
out_list = IterableList(cls._id_attribute_)
out_list.extend(cls.iter_items(repo, *args, **kwargs))
return out_list
|
python
|
{
"resource": ""
}
|
q22121
|
main
|
train
|
def main(testfiles=None, action=printer):
"""testfiles can be None, in which case the command line arguments are used as filenames.
testfiles can be a string, in which case that file is parsed.
testfiles can be a list.
In all cases, the filenames will be globbed.
If more than one file is parsed successfully, a dictionary of ParseResults is returned.
Otherwise, a simple ParseResults is returned.
"""
testfiles = get_filename_list(testfiles)
print(testfiles)
if action:
for i in (simple_identifier, value, item_list):
i.setParseAction(action)
success = 0
failures = []
retval = {}
for f in testfiles:
try:
retval[f] = object_definition.parseFile(f)
success += 1
except Exception:
failures.append(f)
if failures:
print('\nfailed while processing %s' % ', '.join(failures))
print('\nsucceeded on %d of %d files' %(success, len(testfiles)))
if len(retval) == 1 and len(testfiles) == 1:
# if only one file is parsed, return the parseResults directly
return retval[list(retval.keys())[0]]
# else, return a dictionary of parseResults
return retval
|
python
|
{
"resource": ""
}
|
q22122
|
expand_state_definition
|
train
|
def expand_state_definition(source, loc, tokens):
"""
Parse action to convert statemachine to corresponding Python classes and methods
"""
indent = " " * (pp.col(loc, source) - 1)
statedef = []
# build list of states
states = set()
fromTo = {}
for tn in tokens.transitions:
states.add(tn.from_state)
states.add(tn.to_state)
fromTo[tn.from_state] = tn.to_state
# define base class for state classes
baseStateClass = tokens.name
statedef.extend([
"class %s(object):" % baseStateClass,
" def __str__(self):",
" return self.__class__.__name__",
" @classmethod",
" def states(cls):",
" return list(cls.__subclasses__())",
" def next_state(self):",
" return self._next_state_class()",
])
# define all state classes
statedef.extend("class {0}({1}): pass".format(s, baseStateClass) for s in states)
# define state->state transitions
statedef.extend("{0}._next_state_class = {1}".format(s, fromTo[s]) for s in states if s in fromTo)
statedef.extend([
"class {baseStateClass}Mixin:".format(baseStateClass=baseStateClass),
" def __init__(self):",
" self._state = None",
" def initialize_state(self, init_state):",
" self._state = init_state",
" @property",
" def state(self):",
" return self._state",
" # get behavior/properties from current state",
" def __getattr__(self, attrname):",
" attr = getattr(self._state, attrname)",
" return attr",
" def __str__(self):",
" return '{0}: {1}'.format(self.__class__.__name__, self._state)",
])
return indent + ("\n" + indent).join(statedef) + "\n"
|
python
|
{
"resource": ""
}
|
q22123
|
debug
|
train
|
def debug(ftn, txt):
"""Used for debugging."""
if debug_p:
sys.stdout.write("{0}.{1}:{2}\n".format(modname, ftn, txt))
sys.stdout.flush()
|
python
|
{
"resource": ""
}
|
q22124
|
fatal
|
train
|
def fatal(ftn, txt):
"""If can't continue."""
msg = "{0}.{1}:FATAL:{2}\n".format(modname, ftn, txt)
raise SystemExit(msg)
|
python
|
{
"resource": ""
}
|
q22125
|
main
|
train
|
def main(pargs):
"""This should only be used for testing. The primary mode of operation is
as an imported library.
"""
input_file = sys.argv[1]
fp = ParseFileLineByLine(input_file)
for i in fp:
print(i)
|
python
|
{
"resource": ""
}
|
q22126
|
SearchQueryParser.evaluateQuotes
|
train
|
def evaluateQuotes(self, argument):
"""Evaluate quoted strings
First it does an 'and' on the individual search terms, then it asks the
function GetQuotes to only return the subset of IDs that contain the
literal string.
"""
r = set()
search_terms = []
for item in argument:
search_terms.append(item[0])
if len(r) == 0:
r = self.evaluate(item)
else:
r = r.intersection(self.evaluate(item))
return self.GetQuotes(' '.join(search_terms), r)
|
python
|
{
"resource": ""
}
|
q22127
|
ExceptionSharedData.setpos
|
train
|
def setpos(self, location, text):
"""Helper function for setting curently parsed text and position"""
self.location = location
self.text = text
|
python
|
{
"resource": ""
}
|
q22128
|
SymbolTableEntry.set_attribute
|
train
|
def set_attribute(self, name, value):
"""Sets attribute's name and value"""
self.attribute_name = name
self.attribute = value
|
python
|
{
"resource": ""
}
|
q22129
|
SymbolTable.display
|
train
|
def display(self):
"""Displays the symbol table content"""
#Finding the maximum length for each column
sym_name = "Symbol name"
sym_len = max(max(len(i.name) for i in self.table),len(sym_name))
kind_name = "Kind"
kind_len = max(max(len(SharedData.KINDS[i.kind]) for i in self.table),len(kind_name))
type_name = "Type"
type_len = max(max(len(SharedData.TYPES[i.type]) for i in self.table),len(type_name))
attr_name = "Attribute"
attr_len = max(max(len(i.attribute_str()) for i in self.table),len(attr_name))
#print table header
print("{0:3s} | {1:^{2}s} | {3:^{4}s} | {5:^{6}s} | {7:^{8}} | {9:s}".format(" No", sym_name, sym_len, kind_name, kind_len, type_name, type_len, attr_name, attr_len, "Parameters"))
print("-----------------------------" + "-" * (sym_len + kind_len + type_len + attr_len))
#print symbol table
for i,sym in enumerate(self.table):
parameters = ""
for p in sym.param_types:
if parameters == "":
parameters = "{0}".format(SharedData.TYPES[p])
else:
parameters += ", {0}".format(SharedData.TYPES[p])
print("{0:3d} | {1:^{2}s} | {3:^{4}s} | {5:^{6}s} | {7:^{8}} | ({9})".format(i, sym.name, sym_len, SharedData.KINDS[sym.kind], kind_len, SharedData.TYPES[sym.type], type_len, sym.attribute_str(), attr_len, parameters))
|
python
|
{
"resource": ""
}
|
q22130
|
SymbolTable.insert_symbol
|
train
|
def insert_symbol(self, sname, skind, stype):
"""Inserts new symbol at the end of the symbol table.
Returns symbol index
sname - symbol name
skind - symbol kind
stype - symbol type
"""
self.table.append(SymbolTableEntry(sname, skind, stype))
self.table_len = len(self.table)
return self.table_len-1
|
python
|
{
"resource": ""
}
|
q22131
|
SymbolTable.clear_symbols
|
train
|
def clear_symbols(self, index):
"""Clears all symbols begining with the index to the end of table"""
try:
del self.table[index:]
except Exception:
self.error()
self.table_len = len(self.table)
|
python
|
{
"resource": ""
}
|
q22132
|
SymbolTable.insert_id
|
train
|
def insert_id(self, sname, skind, skinds, stype):
"""Inserts a new identifier at the end of the symbol table, if possible.
Returns symbol index, or raises an exception if the symbol already exists
sname - symbol name
skind - symbol kind
skinds - symbol kinds to check for
stype - symbol type
"""
index = self.lookup_symbol(sname, skinds)
if index == None:
index = self.insert_symbol(sname, skind, stype)
return index
else:
raise SemanticException("Redefinition of '%s'" % sname)
|
python
|
{
"resource": ""
}
|
q22133
|
SymbolTable.insert_global_var
|
train
|
def insert_global_var(self, vname, vtype):
"Inserts a new global variable"
return self.insert_id(vname, SharedData.KINDS.GLOBAL_VAR, [SharedData.KINDS.GLOBAL_VAR, SharedData.KINDS.FUNCTION], vtype)
|
python
|
{
"resource": ""
}
|
q22134
|
SymbolTable.insert_local_var
|
train
|
def insert_local_var(self, vname, vtype, position):
"Inserts a new local variable"
index = self.insert_id(vname, SharedData.KINDS.LOCAL_VAR, [SharedData.KINDS.LOCAL_VAR, SharedData.KINDS.PARAMETER], vtype)
self.table[index].attribute = position
|
python
|
{
"resource": ""
}
|
q22135
|
SymbolTable.insert_parameter
|
train
|
def insert_parameter(self, pname, ptype):
"Inserts a new parameter"
index = self.insert_id(pname, SharedData.KINDS.PARAMETER, SharedData.KINDS.PARAMETER, ptype)
#set parameter's attribute to its ordinal number
self.table[index].set_attribute("Index", self.shared.function_params)
#set parameter's type in param_types list of a function
self.table[self.shared.function_index].param_types.append(ptype)
return index
|
python
|
{
"resource": ""
}
|
q22136
|
SymbolTable.insert_function
|
train
|
def insert_function(self, fname, ftype):
"Inserts a new function"
index = self.insert_id(fname, SharedData.KINDS.FUNCTION, [SharedData.KINDS.GLOBAL_VAR, SharedData.KINDS.FUNCTION], ftype)
self.table[index].set_attribute("Params",0)
return index
|
python
|
{
"resource": ""
}
|
q22137
|
SymbolTable.same_types
|
train
|
def same_types(self, index1, index2):
"""Returns True if both symbol table elements are of the same type"""
try:
same = self.table[index1].type == self.table[index2].type != SharedData.TYPES.NO_TYPE
except Exception:
self.error()
return same
|
python
|
{
"resource": ""
}
|
q22138
|
CodeGenerator.take_register
|
train
|
def take_register(self, rtype = SharedData.TYPES.NO_TYPE):
"""Reserves one working register and sets its type"""
if len(self.free_registers) == 0:
self.error("no more free registers")
reg = self.free_registers.pop()
self.used_registers.append(reg)
self.symtab.set_type(reg, rtype)
return reg
|
python
|
{
"resource": ""
}
|
q22139
|
CodeGenerator.take_function_register
|
train
|
def take_function_register(self, rtype = SharedData.TYPES.NO_TYPE):
"""Reserves register for function return value and sets its type"""
reg = SharedData.FUNCTION_REGISTER
if reg not in self.free_registers:
self.error("function register already taken")
self.free_registers.remove(reg)
self.used_registers.append(reg)
self.symtab.set_type(reg, rtype)
return reg
|
python
|
{
"resource": ""
}
|
q22140
|
CodeGenerator.free_register
|
train
|
def free_register(self, reg):
"""Releases working register"""
if reg not in self.used_registers:
self.error("register %s is not taken" % self.REGISTERS[reg])
self.used_registers.remove(reg)
self.free_registers.append(reg)
self.free_registers.sort(reverse = True)
|
python
|
{
"resource": ""
}
|
q22141
|
CodeGenerator.symbol
|
train
|
def symbol(self, index):
"""Generates symbol name from index"""
#if index is actually a string, just return it
if isinstance(index, str):
return index
elif (index < 0) or (index >= self.symtab.table_len):
self.error("symbol table index out of range")
sym = self.symtab.table[index]
#local variables are located at negative offset from frame pointer register
if sym.kind == SharedData.KINDS.LOCAL_VAR:
return "-{0}(1:%14)".format(sym.attribute * 4 + 4)
#parameters are located at positive offset from frame pointer register
elif sym.kind == SharedData.KINDS.PARAMETER:
return "{0}(1:%14)".format(8 + sym.attribute * 4)
elif sym.kind == SharedData.KINDS.CONSTANT:
return "${0}".format(sym.name)
else:
return "{0}".format(sym.name)
|
python
|
{
"resource": ""
}
|
q22142
|
CodeGenerator.save_used_registers
|
train
|
def save_used_registers(self):
"""Pushes all used working registers before function call"""
used = self.used_registers[:]
del self.used_registers[:]
self.used_registers_stack.append(used[:])
used.sort()
for reg in used:
self.newline_text("PUSH\t%s" % SharedData.REGISTERS[reg], True)
self.free_registers.extend(used)
self.free_registers.sort(reverse = True)
|
python
|
{
"resource": ""
}
|
q22143
|
CodeGenerator.restore_used_registers
|
train
|
def restore_used_registers(self):
"""Pops all used working registers after function call"""
used = self.used_registers_stack.pop()
self.used_registers = used[:]
used.sort(reverse = True)
for reg in used:
self.newline_text("POP \t%s" % SharedData.REGISTERS[reg], True)
self.free_registers.remove(reg)
|
python
|
{
"resource": ""
}
|
q22144
|
CodeGenerator.arithmetic_mnemonic
|
train
|
def arithmetic_mnemonic(self, op_name, op_type):
"""Generates an arithmetic instruction mnemonic"""
return self.OPERATIONS[op_name] + self.OPSIGNS[op_type]
|
python
|
{
"resource": ""
}
|
q22145
|
CodeGenerator.arithmetic
|
train
|
def arithmetic(self, operation, operand1, operand2, operand3 = None):
"""Generates an arithmetic instruction
operation - one of the supported operations
operandX - index in symbol table or text representation of operand
First two operands are input, third one is output
"""
if isinstance(operand1, int):
output_type = self.symtab.get_type(operand1)
self.free_if_register(operand1)
else:
output_type = None
if isinstance(operand2, int):
output_type = self.symtab.get_type(operand2) if output_type == None else output_type
self.free_if_register(operand2)
else:
output_type = SharedData.TYPES.NO_TYPE if output_type == None else output_type
#if operand3 is not defined, reserve one free register for it
output = self.take_register(output_type) if operand3 == None else operand3
mnemonic = self.arithmetic_mnemonic(operation, output_type)
self.newline_text("{0}\t{1},{2},{3}".format(mnemonic, self.symbol(operand1), self.symbol(operand2), self.symbol(output)), True)
return output
|
python
|
{
"resource": ""
}
|
q22146
|
CodeGenerator.relop_code
|
train
|
def relop_code(self, relop, operands_type):
"""Returns code for relational operator
relop - relational operator
operands_type - int or unsigned
"""
code = self.RELATIONAL_DICT[relop]
offset = 0 if operands_type == SharedData.TYPES.INT else len(SharedData.RELATIONAL_OPERATORS)
return code + offset
|
python
|
{
"resource": ""
}
|
q22147
|
CodeGenerator.jump
|
train
|
def jump(self, relcode, opposite, label):
"""Generates a jump instruction
relcode - relational operator code
opposite - generate normal or opposite jump
label - jump label
"""
jump = self.OPPOSITE_JUMPS[relcode] if opposite else self.CONDITIONAL_JUMPS[relcode]
self.newline_text("{0}\t{1}".format(jump, label), True)
|
python
|
{
"resource": ""
}
|
q22148
|
CodeGenerator.compare
|
train
|
def compare(self, operand1, operand2):
"""Generates a compare instruction
operandX - index in symbol table
"""
typ = self.symtab.get_type(operand1)
self.free_if_register(operand1)
self.free_if_register(operand2)
self.newline_text("CMP{0}\t{1},{2}".format(self.OPSIGNS[typ], self.symbol(operand1), self.symbol(operand2)), True)
|
python
|
{
"resource": ""
}
|
q22149
|
CodeGenerator.function_begin
|
train
|
def function_begin(self):
"""Inserts function name label and function frame initialization"""
self.newline_label(self.shared.function_name, False, True)
self.push("%14")
self.move("%15", "%14")
|
python
|
{
"resource": ""
}
|
q22150
|
CodeGenerator.function_body
|
train
|
def function_body(self):
"""Inserts a local variable initialization and body label"""
if self.shared.function_vars > 0:
const = self.symtab.insert_constant("0{}".format(self.shared.function_vars * 4), SharedData.TYPES.UNSIGNED)
self.arithmetic("-", "%15", const, "%15")
self.newline_label(self.shared.function_name + "_body", True, True)
|
python
|
{
"resource": ""
}
|
q22151
|
CodeGenerator.function_end
|
train
|
def function_end(self):
"""Inserts an exit label and function return instructions"""
self.newline_label(self.shared.function_name + "_exit", True, True)
self.move("%14", "%15")
self.pop("%14")
self.newline_text("RET", True)
|
python
|
{
"resource": ""
}
|
q22152
|
MicroC.warning
|
train
|
def warning(self, message, print_location=True):
"""Displays warning message. Uses exshared for current location of parsing"""
msg = "Warning"
if print_location and (exshared.location != None):
wline = lineno(exshared.location, exshared.text)
wcol = col(exshared.location, exshared.text)
wtext = line(exshared.location, exshared.text)
msg += " at line %d, col %d" % (wline, wcol)
msg += ": %s" % message
if print_location and (exshared.location != None):
msg += "\n%s" % wtext
print(msg)
|
python
|
{
"resource": ""
}
|
q22153
|
MicroC.global_variable_action
|
train
|
def global_variable_action(self, text, loc, var):
"""Code executed after recognising a global variable"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("GLOBAL_VAR:",var)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
index = self.symtab.insert_global_var(var.name, var.type)
self.codegen.global_var(var.name)
return index
|
python
|
{
"resource": ""
}
|
q22154
|
MicroC.local_variable_action
|
train
|
def local_variable_action(self, text, loc, var):
"""Code executed after recognising a local variable"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("LOCAL_VAR:",var, var.name, var.type)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
index = self.symtab.insert_local_var(var.name, var.type, self.shared.function_vars)
self.shared.function_vars += 1
return index
|
python
|
{
"resource": ""
}
|
q22155
|
MicroC.parameter_action
|
train
|
def parameter_action(self, text, loc, par):
"""Code executed after recognising a parameter"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("PARAM:",par)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
index = self.symtab.insert_parameter(par.name, par.type)
self.shared.function_params += 1
return index
|
python
|
{
"resource": ""
}
|
q22156
|
MicroC.constant_action
|
train
|
def constant_action(self, text, loc, const):
"""Code executed after recognising a constant"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("CONST:",const)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
return self.symtab.insert_constant(const[0], const[1])
|
python
|
{
"resource": ""
}
|
q22157
|
MicroC.function_body_action
|
train
|
def function_body_action(self, text, loc, fun):
"""Code executed after recognising the beginning of function's body"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("FUN_BODY:",fun)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
self.codegen.function_body()
|
python
|
{
"resource": ""
}
|
q22158
|
MicroC.function_end_action
|
train
|
def function_end_action(self, text, loc, fun):
"""Code executed at the end of function definition"""
if DEBUG > 0:
print("FUN_END:",fun)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#set function's attribute to number of function parameters
self.symtab.set_attribute(self.shared.function_index, self.shared.function_params)
#clear local function symbols (but leave function name)
self.symtab.clear_symbols(self.shared.function_index + 1)
self.codegen.function_end()
|
python
|
{
"resource": ""
}
|
q22159
|
MicroC.return_action
|
train
|
def return_action(self, text, loc, ret):
"""Code executed after recognising a return statement"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("RETURN:",ret)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
if not self.symtab.same_types(self.shared.function_index, ret.exp[0]):
raise SemanticException("Incompatible type in return")
#set register for function's return value to expression value
reg = self.codegen.take_function_register()
self.codegen.move(ret.exp[0], reg)
#after return statement, register for function's return value is available again
self.codegen.free_register(reg)
#jump to function's exit
self.codegen.unconditional_jump(self.codegen.label(self.shared.function_name+"_exit", True))
|
python
|
{
"resource": ""
}
|
q22160
|
MicroC.lookup_id_action
|
train
|
def lookup_id_action(self, text, loc, var):
"""Code executed after recognising an identificator in expression"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("EXP_VAR:",var)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
var_index = self.symtab.lookup_symbol(var.name, [SharedData.KINDS.GLOBAL_VAR, SharedData.KINDS.PARAMETER, SharedData.KINDS.LOCAL_VAR])
if var_index == None:
raise SemanticException("'%s' undefined" % var.name)
return var_index
|
python
|
{
"resource": ""
}
|
q22161
|
MicroC.assignment_action
|
train
|
def assignment_action(self, text, loc, assign):
"""Code executed after recognising an assignment statement"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("ASSIGN:",assign)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
var_index = self.symtab.lookup_symbol(assign.var, [SharedData.KINDS.GLOBAL_VAR, SharedData.KINDS.PARAMETER, SharedData.KINDS.LOCAL_VAR])
if var_index == None:
raise SemanticException("Undefined lvalue '%s' in assignment" % assign.var)
if not self.symtab.same_types(var_index, assign.exp[0]):
raise SemanticException("Incompatible types in assignment")
self.codegen.move(assign.exp[0], var_index)
|
python
|
{
"resource": ""
}
|
q22162
|
MicroC.argument_action
|
train
|
def argument_action(self, text, loc, arg):
"""Code executed after recognising each of function's arguments"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("ARGUMENT:",arg.exp)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
arg_ordinal = len(self.function_arguments)
#check argument's type
if not self.symtab.same_type_as_argument(arg.exp, self.function_call_index, arg_ordinal):
raise SemanticException("Incompatible type for argument %d in '%s'" % (arg_ordinal + 1, self.symtab.get_name(self.function_call_index)))
self.function_arguments.append(arg.exp)
|
python
|
{
"resource": ""
}
|
q22163
|
MicroC.function_call_action
|
train
|
def function_call_action(self, text, loc, fun):
"""Code executed after recognising the whole function call"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("FUN_CALL:",fun)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#check number of arguments
if len(self.function_arguments) != self.symtab.get_attribute(self.function_call_index):
raise SemanticException("Wrong number of arguments for function '%s'" % fun.name)
#arguments should be pushed to stack in reverse order
self.function_arguments.reverse()
self.codegen.function_call(self.function_call_index, self.function_arguments)
self.codegen.restore_used_registers()
return_type = self.symtab.get_type(self.function_call_index)
#restore previous function call data
self.function_call_index = self.function_call_stack.pop()
self.function_arguments = self.function_arguments_stack.pop()
register = self.codegen.take_register(return_type)
#move result to a new free register, to allow the next function call
self.codegen.move(self.codegen.take_function_register(return_type), register)
return register
|
python
|
{
"resource": ""
}
|
q22164
|
MicroC.if_body_action
|
train
|
def if_body_action(self, text, loc, arg):
"""Code executed after recognising if statement's body"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("IF_BODY:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#generate conditional jump (based on last compare)
label = self.codegen.label("false{0}".format(self.false_label_number), True, False)
self.codegen.jump(self.relexp_code, True, label)
#generate 'true' label (executes if condition is satisfied)
self.codegen.newline_label("true{0}".format(self.label_number), True, True)
#save label numbers (needed for nested if/while statements)
self.label_stack.append(self.false_label_number)
self.label_stack.append(self.label_number)
|
python
|
{
"resource": ""
}
|
q22165
|
MicroC.if_else_action
|
train
|
def if_else_action(self, text, loc, arg):
"""Code executed after recognising if statement's else body"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("IF_ELSE:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#jump to exit after all statements for true condition are executed
self.label_number = self.label_stack.pop()
label = self.codegen.label("exit{0}".format(self.label_number), True, False)
self.codegen.unconditional_jump(label)
#generate final 'false' label (executes if condition isn't satisfied)
self.codegen.newline_label("false{0}".format(self.label_stack.pop()), True, True)
self.label_stack.append(self.label_number)
|
python
|
{
"resource": ""
}
|
q22166
|
MicroC.if_end_action
|
train
|
def if_end_action(self, text, loc, arg):
"""Code executed after recognising a whole if statement"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("IF_END:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
self.codegen.newline_label("exit{0}".format(self.label_stack.pop()), True, True)
|
python
|
{
"resource": ""
}
|
q22167
|
MicroC.program_end_action
|
train
|
def program_end_action(self, text, loc, arg):
"""Checks if there is a 'main' function and the type of 'main' function"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("PROGRAM_END:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
index = self.symtab.lookup_symbol("main",SharedData.KINDS.FUNCTION)
if index == None:
raise SemanticException("Undefined reference to 'main'", False)
elif self.symtab.get_type(index) != SharedData.TYPES.INT:
self.warning("Return type of 'main' is not int", False)
|
python
|
{
"resource": ""
}
|
q22168
|
encode_cookie
|
train
|
def encode_cookie(payload, key=None):
'''
This will encode a ``unicode`` value into a cookie, and sign that cookie
with the app's secret key.
:param payload: The value to encode, as `unicode`.
:type payload: unicode
:param key: The key to use when creating the cookie digest. If not
specified, the SECRET_KEY value from app config will be used.
:type key: str
'''
return u'{0}|{1}'.format(payload, _cookie_digest(payload, key=key))
|
python
|
{
"resource": ""
}
|
q22169
|
decode_cookie
|
train
|
def decode_cookie(cookie, key=None):
'''
This decodes a cookie given by `encode_cookie`. If verification of the
cookie fails, ``None`` will be implicitly returned.
:param cookie: An encoded cookie.
:type cookie: str
:param key: The key to use when creating the cookie digest. If not
specified, the SECRET_KEY value from app config will be used.
:type key: str
'''
try:
payload, digest = cookie.rsplit(u'|', 1)
if hasattr(digest, 'decode'):
digest = digest.decode('ascii') # pragma: no cover
except ValueError:
return
if safe_str_cmp(_cookie_digest(payload, key=key), digest):
return payload
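
# --- Added usage sketch (not part of the original snippet) ---
# Round trip of decode_cookie with encode_cookie from the previous snippet; both
# read SECRET_KEY via current_app, so an application context is required (the
# helpers are assumed importable from flask_login.utils).
def example_cookie_roundtrip():
    from flask import Flask
    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'change-me'
    with app.app_context():
        token = encode_cookie(u'42')               # "42|<hmac digest>"
        assert decode_cookie(token) == u'42'
        assert decode_cookie(token + 'x') is None  # tampered digest fails verification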
|
python
|
{
"resource": ""
}
|
q22170
|
make_next_param
|
train
|
def make_next_param(login_url, current_url):
'''
Reduces the scheme and host from a given URL so it can be passed to
the given `login` URL more efficiently.
:param login_url: The login URL being redirected to.
:type login_url: str
:param current_url: The URL to reduce.
:type current_url: str
'''
l = urlparse(login_url)
c = urlparse(current_url)
if (not l.scheme or l.scheme == c.scheme) and \
(not l.netloc or l.netloc == c.netloc):
return urlunparse(('', '', c.path, c.params, c.query, ''))
return current_url
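
# --- Added worked example (not part of the original snippet) ---
# Scheme and host are dropped when they match (or are absent from) the login URL;
# a differing host is kept verbatim so the redirect target stays explicit.
def example_make_next_param():
    assert make_next_param('/login', 'https://example.com/profile?tab=1') == '/profile?tab=1'
    assert make_next_param('https://a.example.com/login',
                           'https://b.example.com/profile') == 'https://b.example.com/profile'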
|
python
|
{
"resource": ""
}
|
q22171
|
login_url
|
train
|
def login_url(login_view, next_url=None, next_field='next'):
'''
Creates a URL for redirecting to a login page. If only `login_view` is
provided, this will just return the URL for it. If `next_url` is provided,
however, this will append a ``next=URL`` parameter to the query string
so that the login view can redirect back to that URL. Flask-Login's default
unauthorized handler uses this function when redirecting to your login url.
To force the host name used, set `FORCE_HOST_FOR_REDIRECTS` to a host. This
prevents redirecting to external sites if the request headers Host or
X-Forwarded-For are present.
:param login_view: The name of the login view. (Alternately, the actual
URL to the login view.)
:type login_view: str
:param next_url: The URL to give the login view for redirection.
:type next_url: str
:param next_field: What field to store the next URL in. (It defaults to
``next``.)
:type next_field: str
'''
base = expand_login_view(login_view)
if next_url is None:
return base
parsed_result = urlparse(base)
md = url_decode(parsed_result.query)
md[next_field] = make_next_param(base, next_url)
netloc = current_app.config.get('FORCE_HOST_FOR_REDIRECTS') or \
parsed_result.netloc
parsed_result = parsed_result._replace(netloc=netloc,
query=url_encode(md, sort=True))
return urlunparse(parsed_result)
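
# --- Added usage sketch (not part of the original snippet) ---
# Typically called from an unauthorized handler; a request/application context is
# needed because the helper reads current_app config. Values are illustrative.
def example_login_url():
    from flask import Flask
    app = Flask(__name__)
    with app.test_request_context('/settings'):
        print(login_url('/login', next_url='/settings'))
        # -> something like '/login?next=%2Fsettings'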
|
python
|
{
"resource": ""
}
|
q22172
|
login_user
|
train
|
def login_user(user, remember=False, duration=None, force=False, fresh=True):
'''
Logs a user in. You should pass the actual user object to this. If the
user's `is_active` property is ``False``, they will not be logged in
unless `force` is ``True``.
This will return ``True`` if the log in attempt succeeds, and ``False`` if
it fails (i.e. because the user is inactive).
:param user: The user object to log in.
:type user: object
:param remember: Whether to remember the user after their session expires.
Defaults to ``False``.
:type remember: bool
:param duration: The amount of time before the remember cookie expires. If
``None`` the value set in the settings is used. Defaults to ``None``.
:type duration: :class:`datetime.timedelta`
:param force: If the user is inactive, setting this to ``True`` will log
them in regardless. Defaults to ``False``.
:type force: bool
:param fresh: setting this to ``False`` will log in the user with a session
marked as not "fresh". Defaults to ``True``.
:type fresh: bool
'''
if not force and not user.is_active:
return False
user_id = getattr(user, current_app.login_manager.id_attribute)()
session['user_id'] = user_id
session['_fresh'] = fresh
session['_id'] = current_app.login_manager._session_identifier_generator()
if remember:
session['remember'] = 'set'
if duration is not None:
try:
# equal to timedelta.total_seconds() but works with Python 2.6
session['remember_seconds'] = (duration.microseconds +
(duration.seconds +
duration.days * 24 * 3600) *
10**6) / 10.0**6
except AttributeError:
raise Exception('duration must be a datetime.timedelta, '
'instead got: {0}'.format(duration))
current_app.login_manager._update_request_context_with_user(user)
user_logged_in.send(current_app._get_current_object(), user=_get_user())
return True
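
# --- Added usage sketch (not part of the original snippet) ---
# Typical login view body; find_user_by_email and check_password are placeholder
# helpers standing in for whatever user model your application uses.
def example_login_view():
    from flask import request, redirect, url_for, flash
    user = find_user_by_email(request.form['email'])       # placeholder lookup
    if user is not None and user.check_password(request.form['password']):  # placeholder check
        login_user(user, remember=True)                    # set up the session (and remember cookie)
        return redirect(url_for('index'))
    flash('Invalid credentials')
    return redirect(url_for('login'))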
|
python
|
{
"resource": ""
}
|
q22173
|
confirm_login
|
train
|
def confirm_login():
'''
This sets the current session as fresh. Sessions become stale when they
are reloaded from a cookie.
'''
session['_fresh'] = True
session['_id'] = current_app.login_manager._session_identifier_generator()
user_login_confirmed.send(current_app._get_current_object())
|
python
|
{
"resource": ""
}
|
q22174
|
fresh_login_required
|
train
|
def fresh_login_required(func):
'''
If you decorate a view with this, it will ensure that the current user's
login is fresh - i.e. their session was not restored from a 'remember me'
cookie. Sensitive operations, like changing a password or e-mail, should
be protected with this, to impede the efforts of cookie thieves.
If the user is not authenticated, :meth:`LoginManager.unauthorized` is
called as normal. If they are authenticated, but their session is not
fresh, it will call :meth:`LoginManager.needs_refresh` instead. (In that
case, you will need to provide a :attr:`LoginManager.refresh_view`.)
Behaves identically to the :func:`login_required` decorator with respect
to configuration variables.
.. Note ::
Per `W3 guidelines for CORS preflight requests
<http://www.w3.org/TR/cors/#cross-origin-request-with-preflight-0>`_,
HTTP ``OPTIONS`` requests are exempt from login checks.
:param func: The view function to decorate.
:type func: function
'''
@wraps(func)
def decorated_view(*args, **kwargs):
if request.method in EXEMPT_METHODS:
return func(*args, **kwargs)
elif current_app.config.get('LOGIN_DISABLED'):
return func(*args, **kwargs)
elif not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
elif not login_fresh():
return current_app.login_manager.needs_refresh()
return func(*args, **kwargs)
return decorated_view
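
# --- Added usage sketch (not part of the original snippet) ---
# Typically combined with an @app.route(...) registration; the view body is a placeholder.
@fresh_login_required
def change_password():
    ...  # only reachable when the session is fresh (not restored from a remember cookie)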
|
python
|
{
"resource": ""
}
|
q22175
|
set_login_view
|
train
|
def set_login_view(login_view, blueprint=None):
'''
Sets the login view for the app or blueprint. If a blueprint is passed,
the login view is set for this blueprint on ``blueprint_login_views``.
:param login_view: The name of the login view to set.
:type login_view: str
:param blueprint: The blueprint which this login view should be set on.
Defaults to ``None``.
:type blueprint: object
'''
num_login_views = len(current_app.login_manager.blueprint_login_views)
if blueprint is not None or num_login_views != 0:
(current_app.login_manager
.blueprint_login_views[blueprint.name]) = login_view
if (current_app.login_manager.login_view is not None and
None not in current_app.login_manager.blueprint_login_views):
(current_app.login_manager
.blueprint_login_views[None]) = (current_app.login_manager
.login_view)
current_app.login_manager.login_view = None
else:
current_app.login_manager.login_view = login_view
|
python
|
{
"resource": ""
}
|
q22176
|
LoginManager._update_request_context_with_user
|
train
|
def _update_request_context_with_user(self, user=None):
'''Store the given user as ctx.user.'''
ctx = _request_ctx_stack.top
ctx.user = self.anonymous_user() if user is None else user
|
python
|
{
"resource": ""
}
|
q22177
|
LoginManager._load_user
|
train
|
def _load_user(self):
'''Loads user from session or remember_me cookie as applicable'''
if self._user_callback is None and self._request_callback is None:
raise Exception(
"Missing user_loader or request_loader. Refer to "
"http://flask-login.readthedocs.io/#how-it-works "
"for more info.")
user_accessed.send(current_app._get_current_object())
# Check SESSION_PROTECTION
if self._session_protection_failed():
return self._update_request_context_with_user()
user = None
# Load user from Flask Session
user_id = session.get('user_id')
if user_id is not None and self._user_callback is not None:
user = self._user_callback(user_id)
# Load user from Remember Me Cookie or Request Loader
if user is None:
config = current_app.config
cookie_name = config.get('REMEMBER_COOKIE_NAME', COOKIE_NAME)
header_name = config.get('AUTH_HEADER_NAME', AUTH_HEADER_NAME)
has_cookie = (cookie_name in request.cookies and
session.get('remember') != 'clear')
if has_cookie:
cookie = request.cookies[cookie_name]
user = self._load_user_from_remember_cookie(cookie)
elif self._request_callback:
user = self._load_user_from_request(request)
elif header_name in request.headers:
header = request.headers[header_name]
user = self._load_user_from_header(header)
return self._update_request_context_with_user(user)
|
python
|
{
"resource": ""
}
|
q22178
|
_tree_to_labels
|
train
|
def _tree_to_labels(X, single_linkage_tree, min_cluster_size=10,
cluster_selection_method='eom',
allow_single_cluster=False,
match_reference_implementation=False):
"""Converts a pretrained tree and cluster size into a
set of labels and probabilities.
"""
condensed_tree = condense_tree(single_linkage_tree,
min_cluster_size)
stability_dict = compute_stability(condensed_tree)
labels, probabilities, stabilities = get_clusters(condensed_tree,
stability_dict,
cluster_selection_method,
allow_single_cluster,
match_reference_implementation)
return (labels, probabilities, stabilities, condensed_tree,
single_linkage_tree)
|
python
|
{
"resource": ""
}
|
q22179
|
HDBSCAN.fit
|
train
|
def fit(self, X, y=None):
"""Perform HDBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
Returns
-------
self : object
Returns self
"""
if self.metric != 'precomputed':
X = check_array(X, accept_sparse='csr')
self._raw_data = X
elif issparse(X):
# Handle sparse precomputed distance matrices separately
X = check_array(X, accept_sparse='csr')
else:
# Only non-sparse, precomputed distance matrices are allowed
# to have numpy.inf values indicating missing distances
check_precomputed_distance_matrix(X)
kwargs = self.get_params()
# prediction data only applies to the persistent model, so remove
        # it from the keyword args we pass on to the function
kwargs.pop('prediction_data', None)
kwargs.update(self._metric_kwargs)
(self.labels_,
self.probabilities_,
self.cluster_persistence_,
self._condensed_tree,
self._single_linkage_tree,
self._min_spanning_tree) = hdbscan(X, **kwargs)
if self.prediction_data:
self.generate_prediction_data()
return self
|
python
|
{
"resource": ""
}
|
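A usage sketch for the ``fit`` method above on synthetic data; the parameter values are arbitrary, illustrative choices:

import numpy as np
import hdbscan
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=500, centers=4, random_state=0)
clusterer = hdbscan.HDBSCAN(min_cluster_size=15, prediction_data=True)
clusterer.fit(X)
print(np.unique(clusterer.labels_))   # -1 marks noise points
print(clusterer.probabilities_[:5])   # per-point membership strengths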
q22180
|
_bfs_from_cluster_tree
|
train
|
def _bfs_from_cluster_tree(tree, bfs_root):
"""
Perform a breadth first search on a tree in condensed tree format
"""
result = []
to_process = [bfs_root]
while to_process:
result.extend(to_process)
to_process = tree['child'][np.in1d(tree['parent'], to_process)].tolist()
return result
|
python
|
{
"resource": ""
}
|
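A self-contained illustration of the same breadth-first expansion on a toy condensed-tree-like record array. Only the ``parent`` and ``child`` fields are needed, and the node numbers are made up:

import numpy as np

toy_tree = np.array([(5, 6), (5, 7), (6, 0), (6, 1), (7, 2), (7, 3)],
                    dtype=[('parent', np.intp), ('child', np.intp)])

def bfs(tree, root):
    result, to_process = [], [root]
    while to_process:
        result.extend(to_process)
        # children of every node processed in this round
        to_process = tree['child'][np.in1d(tree['parent'], to_process)].tolist()
    return result

print(bfs(toy_tree, 5))   # [5, 6, 7, 0, 1, 2, 3]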
q22181
|
CondensedTree.to_pandas
|
train
|
def to_pandas(self):
"""Return a pandas dataframe representation of the condensed tree.
Each row of the dataframe corresponds to an edge in the tree.
The columns of the dataframe are `parent`, `child`, `lambda_val`
and `child_size`.
The `parent` and `child` are the ids of the
parent and child nodes in the tree. Node ids less than the number
of points in the original dataset represent individual points, while
ids greater than the number of points are clusters.
The `lambda_val` value is the value (1/distance) at which the `child`
node leaves the cluster.
The `child_size` is the number of points in the `child` node.
"""
try:
from pandas import DataFrame, Series
except ImportError:
raise ImportError('You must have pandas installed to export pandas DataFrames')
result = DataFrame(self._raw_tree)
return result
|
python
|
{
"resource": ""
}
|
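A sketch of exporting the condensed tree of a fitted clusterer (requires pandas; the data and parameters are illustrative):

import hdbscan
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=300, centers=3, random_state=1)
clusterer = hdbscan.HDBSCAN(min_cluster_size=10).fit(X)
df = clusterer.condensed_tree_.to_pandas()
print(df.columns.tolist())   # ['parent', 'child', 'lambda_val', 'child_size']
print(df.head())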
q22182
|
CondensedTree.to_networkx
|
train
|
def to_networkx(self):
"""Return a NetworkX DiGraph object representing the condensed tree.
        Edge weights in the graph are the lambda values at which child nodes
'leave' the parent cluster.
Nodes have a `size` attribute attached giving the number of points
that are in the cluster (or 1 if it is a singleton point) at the
point of cluster creation (fewer points may be in the cluster at
larger lambda values).
"""
try:
from networkx import DiGraph, set_node_attributes
except ImportError:
raise ImportError('You must have networkx installed to export networkx graphs')
result = DiGraph()
for row in self._raw_tree:
result.add_edge(row['parent'], row['child'], weight=row['lambda_val'])
set_node_attributes(result, dict(self._raw_tree[['child', 'child_size']]), 'size')
return result
|
python
|
{
"resource": ""
}
|
q22183
|
SingleLinkageTree.to_pandas
|
train
|
def to_pandas(self):
"""Return a pandas dataframe representation of the single linkage tree.
Each row of the dataframe corresponds to an edge in the tree.
The columns of the dataframe are `parent`, `left_child`,
`right_child`, `distance` and `size`.
The `parent`, `left_child` and `right_child` are the ids of the
parent and child nodes in the tree. Node ids less than the number
of points in the original dataset represent individual points, while
ids greater than the number of points are clusters.
        The `distance` value is the distance at which the child nodes merge to form
the parent node.
The `size` is the number of points in the `parent` node.
"""
try:
from pandas import DataFrame, Series
except ImportError:
raise ImportError('You must have pandas installed to export pandas DataFrames')
max_node = 2 * self._linkage.shape[0]
num_points = max_node - (self._linkage.shape[0] - 1)
parent_array = np.arange(num_points, max_node + 1)
result = DataFrame({
'parent': parent_array,
'left_child': self._linkage.T[0],
'right_child': self._linkage.T[1],
'distance': self._linkage.T[2],
'size': self._linkage.T[3]
})[['parent', 'left_child', 'right_child', 'distance', 'size']]
return result
|
python
|
{
"resource": ""
}
|
q22184
|
SingleLinkageTree.to_networkx
|
train
|
def to_networkx(self):
"""Return a NetworkX DiGraph object representing the single linkage tree.
Edge weights in the graph are the distance values at which child nodes
merge to form the parent cluster.
Nodes have a `size` attribute attached giving the number of points
that are in the cluster.
"""
try:
from networkx import DiGraph, set_node_attributes
except ImportError:
raise ImportError('You must have networkx installed to export networkx graphs')
max_node = 2 * self._linkage.shape[0]
num_points = max_node - (self._linkage.shape[0] - 1)
result = DiGraph()
for parent, row in enumerate(self._linkage, num_points):
result.add_edge(parent, row[0], weight=row[2])
result.add_edge(parent, row[1], weight=row[2])
size_dict = {parent: row[3] for parent, row in enumerate(self._linkage, num_points)}
set_node_attributes(result, size_dict, 'size')
return result
|
python
|
{
"resource": ""
}
|
q22185
|
MinimumSpanningTree.to_pandas
|
train
|
def to_pandas(self):
"""Return a Pandas dataframe of the minimum spanning tree.
Each row is an edge in the tree; the columns are `from`,
`to`, and `distance` giving the two vertices of the edge
which are indices into the dataset, and the distance
between those datapoints.
"""
try:
from pandas import DataFrame
except ImportError:
raise ImportError('You must have pandas installed to export pandas DataFrames')
result = DataFrame({'from': self._mst.T[0].astype(int),
'to': self._mst.T[1].astype(int),
'distance': self._mst.T[2]})
return result
|
python
|
{
"resource": ""
}
|
q22186
|
MinimumSpanningTree.to_networkx
|
train
|
def to_networkx(self):
"""Return a NetworkX Graph object representing the minimum spanning tree.
Edge weights in the graph are the distance between the nodes they connect.
Nodes have a `data` attribute attached giving the data vector of the
associated point.
"""
try:
from networkx import Graph, set_node_attributes
except ImportError:
raise ImportError('You must have networkx installed to export networkx graphs')
result = Graph()
for row in self._mst:
result.add_edge(row[0], row[1], weight=row[2])
data_dict = {index: tuple(row) for index, row in enumerate(self._data)}
set_node_attributes(result, data_dict, 'data')
return result
|
python
|
{
"resource": ""
}
|
q22187
|
all_points_core_distance
|
train
|
def all_points_core_distance(distance_matrix, d=2.0):
"""
Compute the all-points-core-distance for all the points of a cluster.
Parameters
----------
distance_matrix : array (cluster_size, cluster_size)
The pairwise distance matrix between points in the cluster.
    d : float (default 2.0)
        The dimension of the data set, which is used in the computation
        of the all-points-core-distance as per the paper.
Returns
-------
core_distances : array (cluster_size,)
The all-points-core-distance of each point in the cluster
References
----------
Moulavi, D., Jaskowiak, P.A., Campello, R.J., Zimek, A. and Sander, J.,
2014. Density-Based Clustering Validation. In SDM (pp. 839-847).
"""
distance_matrix[distance_matrix != 0] = (1.0 / distance_matrix[
distance_matrix != 0]) ** d
result = distance_matrix.sum(axis=1)
result /= distance_matrix.shape[0] - 1
result **= (-1.0 / d)
return result
|
python
|
{
"resource": ""
}
|
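A small worked check of the formula implemented above, reimplemented locally so it can run without hdbscan installed; the 3x3 distance matrix is made up:

import numpy as np

def all_points_core_distance_ref(distance_matrix, d=2.0):
    dm = distance_matrix.copy()
    dm[dm != 0] = (1.0 / dm[dm != 0]) ** d
    return (dm.sum(axis=1) / (dm.shape[0] - 1)) ** (-1.0 / d)

D = np.array([[0.0, 1.0, 2.0],
              [1.0, 0.0, 2.0],
              [2.0, 2.0, 0.0]])
# For point 0: ((1/1)**2 + (1/2)**2) / 2 = 0.625, and 0.625 ** -0.5 ~= 1.265
print(all_points_core_distance_ref(D, d=2.0))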
q22188
|
all_points_mutual_reachability
|
train
|
def all_points_mutual_reachability(X, labels, cluster_id,
metric='euclidean', d=None, **kwd_args):
"""
Compute the all-points-mutual-reachability distances for all the points of
a cluster.
If metric is 'precomputed' then assume X is a distance matrix for the full
dataset. Note that in this case you must pass in 'd' the dimension of the
dataset.
Parameters
----------
X : array (n_samples, n_features) or (n_samples, n_samples)
The input data of the clustering. This can be the data, or, if
metric is set to `precomputed` the pairwise distance matrix used
for the clustering.
labels : array (n_samples)
The label array output by the clustering, providing an integral
cluster label to each data point, with -1 for noise points.
cluster_id : integer
The cluster label for which to compute the all-points
mutual-reachability (which should be done on a cluster
by cluster basis).
metric : string
The metric used to compute distances for the clustering (and
to be re-used in computing distances for mr distance). If
set to `precomputed` then X is assumed to be the precomputed
distance matrix between samples.
d : integer (or None)
The number of features (dimension) of the dataset. This need only
be set in the case of metric being set to `precomputed`, where
the ambient dimension of the data is unknown to the function.
**kwd_args :
Extra arguments to pass to the distance computation for other
        metrics, such as Minkowski, Mahalanobis etc.
Returns
-------
    mutual_reachability : array (n_samples, n_samples)
The pairwise mutual reachability distances between all points in `X`
with `label` equal to `cluster_id`.
core_distances : array (n_samples,)
The all-points-core_distance of all points in `X` with `label` equal
to `cluster_id`.
References
----------
Moulavi, D., Jaskowiak, P.A., Campello, R.J., Zimek, A. and Sander, J.,
2014. Density-Based Clustering Validation. In SDM (pp. 839-847).
"""
if metric == 'precomputed':
if d is None:
raise ValueError('If metric is precomputed a '
'd value must be provided!')
distance_matrix = X[labels == cluster_id, :][:, labels == cluster_id]
else:
subset_X = X[labels == cluster_id, :]
distance_matrix = pairwise_distances(subset_X, metric=metric,
**kwd_args)
d = X.shape[1]
core_distances = all_points_core_distance(distance_matrix.copy(), d=d)
core_dist_matrix = np.tile(core_distances, (core_distances.shape[0], 1))
result = np.dstack(
[distance_matrix, core_dist_matrix, core_dist_matrix.T]).max(axis=-1)
return result, core_distances
|
python
|
{
"resource": ""
}
|
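A usage sketch, assuming the function is importable from ``hdbscan.validity`` as in recent hdbscan releases; the data is synthetic:

from sklearn.datasets import make_blobs
from hdbscan.validity import all_points_mutual_reachability

X, labels = make_blobs(n_samples=200, centers=3, random_state=2)
mr_dists, core_dists = all_points_mutual_reachability(X, labels, cluster_id=0)
print(mr_dists.shape, core_dists.shape)   # (n_0, n_0) and (n_0,) for cluster 0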
q22189
|
internal_minimum_spanning_tree
|
train
|
def internal_minimum_spanning_tree(mr_distances):
"""
Compute the 'internal' minimum spanning tree given a matrix of mutual
reachability distances. Given a minimum spanning tree the 'internal'
graph is the subgraph induced by vertices of degree greater than one.
Parameters
----------
mr_distances : array (cluster_size, cluster_size)
The pairwise mutual reachability distances, inferred to be the edge
weights of a complete graph. Since MSTs are computed per cluster
        this is the all-points-mutual-reachability for points within a single
cluster.
Returns
-------
internal_nodes : array
An array listing the indices of the internal nodes of the MST
internal_edges : array (?, 3)
An array of internal edges in weighted edge list format; that is
an edge is an array of length three listing the two vertices
forming the edge and weight of the edge.
References
----------
Moulavi, D., Jaskowiak, P.A., Campello, R.J., Zimek, A. and Sander, J.,
2014. Density-Based Clustering Validation. In SDM (pp. 839-847).
"""
single_linkage_data = mst_linkage_core(mr_distances)
min_span_tree = single_linkage_data.copy()
for index, row in enumerate(min_span_tree[1:], 1):
candidates = np.where(isclose(mr_distances[int(row[1])], row[2]))[0]
candidates = np.intersect1d(candidates,
single_linkage_data[:index, :2].astype(
int))
candidates = candidates[candidates != row[1]]
assert len(candidates) > 0
row[0] = candidates[0]
vertices = np.arange(mr_distances.shape[0])[
np.bincount(min_span_tree.T[:2].flatten().astype(np.intp)) > 1]
# A little "fancy" we select from the flattened array reshape back
# (Fortran format to get indexing right) and take the product to do an and
# then convert back to boolean type.
edge_selection = np.prod(np.in1d(min_span_tree.T[:2], vertices).reshape(
(min_span_tree.shape[0], 2), order='F'), axis=1).astype(bool)
# Density sparseness is not well defined if there are no
# internal edges (as per the referenced paper). However
# MATLAB code from the original authors simply selects the
# largest of *all* the edges in the case that there are
# no internal edges, so we do the same here
if np.any(edge_selection):
# If there are any internal edges, then subselect them out
edges = min_span_tree[edge_selection]
else:
# If there are no internal edges then we want to take the
# max over all the edges that exist in the MST, so we simply
# do nothing and return all the edges in the MST.
edges = min_span_tree.copy()
return vertices, edges
|
python
|
{
"resource": ""
}
|
q22190
|
density_separation
|
train
|
def density_separation(X, labels, cluster_id1, cluster_id2,
internal_nodes1, internal_nodes2,
core_distances1, core_distances2,
metric='euclidean', **kwd_args):
"""
Compute the density separation between two clusters. This is the minimum
all-points mutual reachability distance between pairs of points, one from
internal nodes of MSTs of each cluster.
Parameters
----------
X : array (n_samples, n_features) or (n_samples, n_samples)
The input data of the clustering. This can be the data, or, if
metric is set to `precomputed` the pairwise distance matrix used
for the clustering.
labels : array (n_samples)
The label array output by the clustering, providing an integral
cluster label to each data point, with -1 for noise points.
cluster_id1 : integer
The first cluster label to compute separation between.
cluster_id2 : integer
The second cluster label to compute separation between.
internal_nodes1 : array
The vertices of the MST for `cluster_id1` that were internal vertices.
internal_nodes2 : array
The vertices of the MST for `cluster_id2` that were internal vertices.
core_distances1 : array (size of cluster_id1,)
The all-points-core_distances of all points in the cluster
specified by cluster_id1.
core_distances2 : array (size of cluster_id2,)
The all-points-core_distances of all points in the cluster
specified by cluster_id2.
metric : string
The metric used to compute distances for the clustering (and
to be re-used in computing distances for mr distance). If
set to `precomputed` then X is assumed to be the precomputed
distance matrix between samples.
**kwd_args :
Extra arguments to pass to the distance computation for other
        metrics, such as Minkowski, Mahalanobis etc.
Returns
-------
The 'density separation' between the clusters specified by
`cluster_id1` and `cluster_id2`.
References
----------
Moulavi, D., Jaskowiak, P.A., Campello, R.J., Zimek, A. and Sander, J.,
2014. Density-Based Clustering Validation. In SDM (pp. 839-847).
"""
if metric == 'precomputed':
sub_select = X[labels == cluster_id1, :][:, labels == cluster_id2]
distance_matrix = sub_select[internal_nodes1, :][:, internal_nodes2]
else:
cluster1 = X[labels == cluster_id1][internal_nodes1]
cluster2 = X[labels == cluster_id2][internal_nodes2]
distance_matrix = cdist(cluster1, cluster2, metric, **kwd_args)
core_dist_matrix1 = np.tile(core_distances1[internal_nodes1],
(distance_matrix.shape[1], 1)).T
core_dist_matrix2 = np.tile(core_distances2[internal_nodes2],
(distance_matrix.shape[0], 1))
mr_dist_matrix = np.dstack([distance_matrix,
core_dist_matrix1,
core_dist_matrix2]).max(axis=-1)
return mr_dist_matrix.min()
|
python
|
{
"resource": ""
}
|
q22191
|
validity_index
|
train
|
def validity_index(X, labels, metric='euclidean',
d=None, per_cluster_scores=False, **kwd_args):
"""
Compute the density based cluster validity index for the
clustering specified by `labels` and for each cluster in `labels`.
Parameters
----------
X : array (n_samples, n_features) or (n_samples, n_samples)
The input data of the clustering. This can be the data, or, if
metric is set to `precomputed` the pairwise distance matrix used
for the clustering.
labels : array (n_samples)
The label array output by the clustering, providing an integral
cluster label to each data point, with -1 for noise points.
metric : optional, string (default 'euclidean')
The metric used to compute distances for the clustering (and
to be re-used in computing distances for mr distance). If
set to `precomputed` then X is assumed to be the precomputed
distance matrix between samples.
d : optional, integer (or None) (default None)
The number of features (dimension) of the dataset. This need only
be set in the case of metric being set to `precomputed`, where
the ambient dimension of the data is unknown to the function.
per_cluster_scores : optional, boolean (default False)
Whether to return the validity index for individual clusters.
Defaults to False with the function returning a single float
value for the whole clustering.
**kwd_args :
Extra arguments to pass to the distance computation for other
        metrics, such as Minkowski, Mahalanobis etc.
Returns
-------
validity_index : float
The density based cluster validity index for the clustering. This
is a numeric value between -1 and 1, with higher values indicating
a 'better' clustering.
per_cluster_validity_index : array (n_clusters,)
The cluster validity index of each individual cluster as an array.
The overall validity index is the weighted average of these values.
Only returned if per_cluster_scores is set to True.
References
----------
Moulavi, D., Jaskowiak, P.A., Campello, R.J., Zimek, A. and Sander, J.,
2014. Density-Based Clustering Validation. In SDM (pp. 839-847).
"""
core_distances = {}
density_sparseness = {}
mst_nodes = {}
mst_edges = {}
max_cluster_id = labels.max() + 1
density_sep = np.inf * np.ones((max_cluster_id, max_cluster_id),
dtype=np.float64)
cluster_validity_indices = np.empty(max_cluster_id, dtype=np.float64)
for cluster_id in range(max_cluster_id):
if np.sum(labels == cluster_id) == 0:
continue
mr_distances, core_distances[
cluster_id] = all_points_mutual_reachability(
X,
labels,
cluster_id,
metric,
d,
**kwd_args
)
mst_nodes[cluster_id], mst_edges[cluster_id] = \
internal_minimum_spanning_tree(mr_distances)
density_sparseness[cluster_id] = mst_edges[cluster_id].T[2].max()
for i in range(max_cluster_id):
if np.sum(labels == i) == 0:
continue
internal_nodes_i = mst_nodes[i]
for j in range(i + 1, max_cluster_id):
if np.sum(labels == j) == 0:
continue
internal_nodes_j = mst_nodes[j]
density_sep[i, j] = density_separation(
X, labels, i, j,
internal_nodes_i, internal_nodes_j,
core_distances[i], core_distances[j],
metric=metric, **kwd_args
)
density_sep[j, i] = density_sep[i, j]
n_samples = float(X.shape[0])
result = 0
for i in range(max_cluster_id):
if np.sum(labels == i) == 0:
continue
min_density_sep = density_sep[i].min()
cluster_validity_indices[i] = (
(min_density_sep - density_sparseness[i]) /
max(min_density_sep, density_sparseness[i])
)
cluster_size = np.sum(labels == i)
result += (cluster_size / n_samples) * cluster_validity_indices[i]
if per_cluster_scores:
return result, cluster_validity_indices
else:
return result
|
python
|
{
"resource": ""
}
|
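A usage sketch scoring an HDBSCAN clustering with the index above; the ``hdbscan.validity`` import path is assumed and the data and parameters are illustrative:

import hdbscan
from hdbscan.validity import validity_index
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=400, centers=4, random_state=3)
labels = hdbscan.HDBSCAN(min_cluster_size=15).fit_predict(X)
overall, per_cluster = validity_index(X, labels, per_cluster_scores=True)
print(overall)        # single score in [-1, 1]; higher is better
print(per_cluster)    # one score per cluster, weighted-averaged into `overall`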
q22192
|
RobustSingleLinkage.fit
|
train
|
def fit(self, X, y=None):
"""Perform robust single linkage clustering from features or
distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
Returns
-------
self : object
Returns self
"""
X = check_array(X, accept_sparse='csr')
kwargs = self.get_params()
del kwargs['metric_params']
kwargs.update(self.metric_params)
self.labels_, self._cluster_hierarchy = robust_single_linkage(
X, **kwargs)
return self
|
python
|
{
"resource": ""
}
|
q22193
|
_find_neighbor_and_lambda
|
train
|
def _find_neighbor_and_lambda(neighbor_indices, neighbor_distances,
core_distances, min_samples):
"""
Find the nearest mutual reachability neighbor of a point, and compute
the associated lambda value for the point, given the mutual reachability
distance to a nearest neighbor.
Parameters
----------
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
min_samples : int
The min_samples value used to generate core distances.
Returns
-------
neighbor : int
The index into the full raw data set of the nearest mutual reachability
distance neighbor of the point.
lambda_ : float
The lambda value at which this point joins/merges with `neighbor`.
"""
neighbor_core_distances = core_distances[neighbor_indices]
point_core_distances = neighbor_distances[min_samples] * np.ones(
neighbor_indices.shape[0])
mr_distances = np.vstack((
neighbor_core_distances,
point_core_distances,
neighbor_distances
)).max(axis=0)
nn_index = mr_distances.argmin()
nearest_neighbor = neighbor_indices[nn_index]
if mr_distances[nn_index] > 0.0:
lambda_ = 1. / mr_distances[nn_index]
else:
lambda_ = np.finfo(np.double).max
return nearest_neighbor, lambda_
|
python
|
{
"resource": ""
}
|
q22194
|
membership_vector
|
train
|
def membership_vector(clusterer, points_to_predict):
"""Predict soft cluster membership. The result produces a vector
for each point in ``points_to_predict`` that gives a probability that
the given point is a member of a cluster for each of the selected clusters
of the ``clusterer``.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
points_to_predict : array, or array-like (n_samples, n_features)
The new data points to predict cluster labels for. They should
have the same dimensionality as the original dataset over which
clusterer was fit.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` is a member of cluster ``j`` is
in ``membership_vectors[i, j]``.
See Also
--------
:py:func:`hdbscan.predict.predict`
:py:func:`hdbscan.predict.all_points_membership_vectors`
"""
points_to_predict = points_to_predict.astype(np.float64)
clusters = np.array(
sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)
result = np.empty((points_to_predict.shape[0], clusters.shape[0]),
dtype=np.float64)
min_samples = clusterer.min_samples or clusterer.min_cluster_size
neighbor_distances, neighbor_indices = \
clusterer.prediction_data_.tree.query(points_to_predict,
k=2 * min_samples)
for i in range(points_to_predict.shape[0]):
# We need to find where in the tree the new point would go
# for the purposes of outlier membership approximation
nearest_neighbor, lambda_ = \
_find_neighbor_and_lambda(
neighbor_indices[i],
neighbor_distances[i],
clusterer.prediction_data_.core_distances,
min_samples)
neighbor_tree_row = get_tree_row_with_child(
clusterer.condensed_tree_._raw_tree, nearest_neighbor)
if neighbor_tree_row['lambda_val'] <= lambda_:
lambda_ = neighbor_tree_row['lambda_val']
distance_vec = dist_membership_vector(
points_to_predict[i],
clusterer.prediction_data_.exemplars,
clusterer.prediction_data_.dist_metric)
outlier_vec = outlier_membership_vector(
nearest_neighbor,
lambda_,
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
result[i] = distance_vec ** 0.5 * outlier_vec ** 2.0
result[i] /= result[i].sum()
result[i] *= prob_in_some_cluster(
nearest_neighbor,
lambda_,
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
return result
|
python
|
{
"resource": ""
}
|
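A usage sketch for soft prediction on new points; the clusterer must have been fit with ``prediction_data=True``, and the ``hdbscan.prediction`` import path is assumed:

import numpy as np
import hdbscan
from hdbscan.prediction import membership_vector
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=500, centers=4, random_state=4)
clusterer = hdbscan.HDBSCAN(min_cluster_size=15, prediction_data=True).fit(X)
new_points = np.array([[0.0, 0.0], [5.0, 5.0]])
soft = membership_vector(clusterer, new_points)
print(soft.shape)   # (2, n_clusters); each row is scaled by the probability
                    # of the point belonging to any cluster at all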
q22195
|
all_points_membership_vectors
|
train
|
def all_points_membership_vectors(clusterer):
"""Predict soft cluster membership vectors for all points in the
    original dataset the clusterer was trained on. This function is more
    efficient than predicting point by point, because all points are already
    in the condensed tree and can be processed in bulk.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
This method does not work if the clusterer was trained
with ``metric='precomputed'``.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` of the original dataset is a member of
cluster ``j`` is in ``membership_vectors[i, j]``.
See Also
--------
:py:func:`hdbscan.predict.predict`
:py:func:`hdbscan.predict.all_points_membership_vectors`
"""
clusters = np.array(sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)
all_points = clusterer.prediction_data_.raw_data
# When no clusters found, return array of 0's
if clusters.size == 0:
return np.zeros(all_points.shape[0])
distance_vecs = all_points_dist_membership_vector(
all_points,
clusterer.prediction_data_.exemplars,
clusterer.prediction_data_.dist_metric)
outlier_vecs = all_points_outlier_membership_vector(
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
in_cluster_probs = all_points_prob_in_some_cluster(
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
result = distance_vecs * outlier_vecs
row_sums = result.sum(axis=1)
result = result / row_sums[:, np.newaxis]
result *= in_cluster_probs[:, np.newaxis]
return result
|
python
|
{
"resource": ""
}
|
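A usage sketch for soft membership of every training point, under the same assumptions as above (``prediction_data=True`` and the ``hdbscan.prediction`` import path):

import hdbscan
from hdbscan.prediction import all_points_membership_vectors
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=500, centers=4, random_state=5)
clusterer = hdbscan.HDBSCAN(min_cluster_size=15, prediction_data=True).fit(X)
soft = all_points_membership_vectors(clusterer)
hard = soft.argmax(axis=1)   # most probable cluster per point
print(soft.shape, hard[:10])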
q22196
|
filter_cells
|
train
|
def filter_cells(
data: AnnData,
min_counts: Optional[int] = None,
min_genes: Optional[int] = None,
max_counts: Optional[int] = None,
max_genes: Optional[int] = None,
inplace: bool = True,
copy: bool = False,
) -> Optional[Tuple[np.ndarray, np.ndarray]]:
"""Filter cell outliers based on counts and numbers of genes expressed.
For instance, only keep cells with at least `min_counts` counts or
`min_genes` genes expressed. This is to filter measurement outliers,
i.e. “unreliable” observations.
Only provide one of the optional parameters ``min_counts``, ``min_genes``,
``max_counts``, ``max_genes`` per call.
Parameters
----------
data
The (annotated) data matrix of shape ``n_obs`` × ``n_vars``.
Rows correspond to cells and columns to genes.
min_counts
Minimum number of counts required for a cell to pass filtering.
min_genes
Minimum number of genes expressed required for a cell to pass filtering.
max_counts
Maximum number of counts required for a cell to pass filtering.
max_genes
Maximum number of genes expressed required for a cell to pass filtering.
inplace
Perform computation inplace or return result.
Returns
-------
Depending on ``inplace``, returns the following arrays or directly subsets
and annotates the data matrix:
cells_subset : numpy.ndarray
Boolean index mask that does filtering. ``True`` means that the
cell is kept. ``False`` means the cell is removed.
number_per_cell : numpy.ndarray
        Depending on what was thresholded (``counts`` or ``genes``), the array stores
        ``n_counts`` or ``n_genes`` per cell.
Examples
--------
>>> adata = sc.datasets.krumsiek11()
>>> adata.n_obs
640
>>> adata.var_names
['Gata2' 'Gata1' 'Fog1' 'EKLF' 'Fli1' 'SCL' 'Cebpa'
'Pu.1' 'cJun' 'EgrNab' 'Gfi1']
>>> # add some true zeros
>>> adata.X[adata.X < 0.3] = 0
>>> # simply compute the number of genes per cell
>>> sc.pp.filter_cells(adata, min_genes=0)
>>> adata.n_obs
640
>>> adata.obs['n_genes'].min()
1
>>> # filter manually
>>> adata_copy = adata[adata.obs['n_genes'] >= 3]
    >>> adata_copy.n_obs
    554
    >>> adata_copy.obs['n_genes'].min()
    3
>>> # actually do some filtering
>>> sc.pp.filter_cells(adata, min_genes=3)
>>> adata.n_obs
554
>>> adata.obs['n_genes'].min()
3
"""
if copy:
logg.warn('`copy` is deprecated, use `inplace` instead.')
n_given_options = sum(
option is not None for option in
[min_genes, min_counts, max_genes, max_counts])
if n_given_options != 1:
raise ValueError(
'Only provide one of the optional parameters `min_counts`,'
'`min_genes`, `max_counts`, `max_genes` per call.')
if isinstance(data, AnnData):
adata = data.copy() if copy else data
cell_subset, number = materialize_as_ndarray(filter_cells(adata.X, min_counts, min_genes, max_counts, max_genes))
if not inplace:
return cell_subset, number
        if min_genes is None and max_genes is None:
            adata.obs['n_counts'] = number
        else:
            adata.obs['n_genes'] = number
adata._inplace_subset_obs(cell_subset)
return adata if copy else None
X = data # proceed with processing the data matrix
min_number = min_counts if min_genes is None else min_genes
max_number = max_counts if max_genes is None else max_genes
number_per_cell = np.sum(X if min_genes is None and max_genes is None
else X > 0, axis=1)
if issparse(X): number_per_cell = number_per_cell.A1
if min_number is not None:
cell_subset = number_per_cell >= min_number
if max_number is not None:
cell_subset = number_per_cell <= max_number
s = np.sum(~cell_subset)
if s > 0:
logg.info('filtered out {} cells that have'.format(s), end=' ')
if min_genes is not None or min_counts is not None:
logg.info('less than',
str(min_genes) + ' genes expressed'
if min_counts is None else str(min_counts) + ' counts', no_indent=True)
if max_genes is not None or max_counts is not None:
logg.info('more than ',
str(max_genes) + ' genes expressed'
if max_counts is None else str(max_counts) + ' counts', no_indent=True)
return cell_subset, number_per_cell
|
python
|
{
"resource": ""
}
|
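A usage sketch mirroring the docstring example above; it assumes scanpy is importable as ``sc``:

import scanpy as sc

adata = sc.datasets.krumsiek11()
adata.X[adata.X < 0.3] = 0                    # introduce true zeros
sc.pp.filter_cells(adata, min_genes=3)        # annotates adata.obs['n_genes']
print(adata.n_obs, adata.obs['n_genes'].min())
# Passing the bare matrix returns the boolean mask and the per-cell counts
cell_mask, genes_per_cell = sc.pp.filter_cells(adata.X, min_genes=3,
                                               inplace=False)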
q22197
|
filter_genes
|
train
|
def filter_genes(
data: AnnData,
min_counts: Optional[int] = None,
min_cells: Optional[int] = None,
max_counts: Optional[int] = None,
max_cells: Optional[int] = None,
inplace: bool = True,
copy: bool = False,
) -> Union[AnnData, None, Tuple[np.ndarray, np.ndarray]]:
"""Filter genes based on number of cells or counts.
Keep genes that have at least ``min_counts`` counts or are expressed in at
least ``min_cells`` cells or have at most ``max_counts`` counts or are expressed
in at most ``max_cells`` cells.
Only provide one of the optional parameters ``min_counts``, ``min_cells``,
``max_counts``, ``max_cells`` per call.
Parameters
----------
data
An annotated data matrix of shape `n_obs` × `n_vars`. Rows correspond
to cells and columns to genes.
min_counts
Minimum number of counts required for a gene to pass filtering.
min_cells
Minimum number of cells expressed required for a gene to pass filtering.
max_counts
Maximum number of counts required for a gene to pass filtering.
max_cells
Maximum number of cells expressed required for a gene to pass filtering.
inplace
Perform computation inplace or return result.
Returns
-------
Depending on `inplace`, returns the following arrays or directly subsets
and annotates the data matrix
gene_subset : numpy.ndarray
Boolean index mask that does filtering. `True` means that the
gene is kept. `False` means the gene is removed.
number_per_gene : numpy.ndarray
        Depending on what was thresholded (`counts` or `cells`), the array stores
`n_counts` or `n_cells` per gene.
"""
if copy:
logg.warn('`copy` is deprecated, use `inplace` instead.')
n_given_options = sum(
option is not None for option in
[min_cells, min_counts, max_cells, max_counts])
if n_given_options != 1:
raise ValueError(
'Only provide one of the optional parameters `min_counts`,'
'`min_cells`, `max_counts`, `max_cells` per call.')
if isinstance(data, AnnData):
adata = data.copy() if copy else data
gene_subset, number = materialize_as_ndarray(
filter_genes(adata.X, min_cells=min_cells,
min_counts=min_counts, max_cells=max_cells,
max_counts=max_counts))
if not inplace:
return gene_subset, number
if min_cells is None and max_cells is None:
adata.var['n_counts'] = number
else:
adata.var['n_cells'] = number
adata._inplace_subset_var(gene_subset)
return adata if copy else None
X = data # proceed with processing the data matrix
min_number = min_counts if min_cells is None else min_cells
max_number = max_counts if max_cells is None else max_cells
number_per_gene = np.sum(X if min_cells is None and max_cells is None
else X > 0, axis=0)
if issparse(X):
number_per_gene = number_per_gene.A1
if min_number is not None:
gene_subset = number_per_gene >= min_number
if max_number is not None:
gene_subset = number_per_gene <= max_number
s = np.sum(~gene_subset)
if s > 0:
logg.info('filtered out {} genes that are detected'.format(s), end=' ')
if min_cells is not None or min_counts is not None:
logg.info('in less than',
str(min_cells) + ' cells'
if min_counts is None else str(min_counts) + ' counts', no_indent=True)
if max_cells is not None or max_counts is not None:
logg.info('in more than ',
str(max_cells) + ' cells'
if max_counts is None else str(max_counts) + ' counts', no_indent=True)
return gene_subset, number_per_gene
|
python
|
{
"resource": ""
}
|
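A usage sketch for gene filtering, under the same assumptions (scanpy as ``sc``, illustrative thresholds):

import scanpy as sc

adata = sc.datasets.krumsiek11()
adata.X[adata.X < 0.3] = 0
sc.pp.filter_genes(adata, min_cells=3)        # annotates adata.var['n_cells']
print(adata.n_vars, adata.var['n_cells'].min())
gene_mask, cells_per_gene = sc.pp.filter_genes(adata.X, min_cells=3,
                                               inplace=False)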
q22198
|
log1p
|
train
|
def log1p(
data: Union[AnnData, np.ndarray, spmatrix],
copy: bool = False,
chunked: bool = False,
chunk_size: Optional[int] = None,
) -> Optional[AnnData]:
"""Logarithmize the data matrix.
    Computes :math:`X = \\log(X + 1)`, where :math:`\\log` denotes the natural logarithm.
Parameters
----------
data
The (annotated) data matrix of shape ``n_obs`` × ``n_vars``.
Rows correspond to cells and columns to genes.
copy
If an :class:`~anndata.AnnData` is passed, determines whether a copy
is returned.
chunked
Process the data matrix in chunks, which will save memory.
Applies only to :class:`~anndata.AnnData`.
chunk_size
``n_obs`` of the chunks to process the data in.
Returns
-------
Returns or updates ``data``, depending on ``copy``.
"""
if copy:
if not isinstance(data, AnnData):
data = data.astype(np.floating)
else:
data = data.copy()
elif not isinstance(data, AnnData) and np.issubdtype(data.dtype, np.integer):
raise TypeError("Cannot perform inplace log1p on integer array")
def _log1p(X):
if issparse(X):
np.log1p(X.data, out=X.data)
else:
np.log1p(X, out=X)
return X
if isinstance(data, AnnData):
if not np.issubdtype(data.X.dtype, np.floating):
data.X = data.X.astype(np.float32)
if chunked:
for chunk, start, end in data.chunked_X(chunk_size):
data.X[start:end] = _log1p(chunk)
else:
_log1p(data.X)
else:
_log1p(data)
return data if copy else None
|
python
|
{
"resource": ""
}
|
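A usage sketch for ``log1p`` on a small random count matrix; it assumes scanpy is importable as ``sc`` and anndata is installed, and the data is made up:

import numpy as np
import scanpy as sc
from anndata import AnnData

counts = np.random.poisson(1.0, size=(50, 20)).astype(np.float32)
adata = AnnData(counts)
logged = sc.pp.log1p(adata, copy=True)     # returns a transformed copy
sc.pp.log1p(adata)                         # transforms in place, returns None
tmp = AnnData(counts.copy())
sc.pp.log1p(tmp, chunked=True, chunk_size=10)   # chunked, memory-friendly path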
q22199
|
sqrt
|
train
|
def sqrt(
data: AnnData,
copy: bool = False,
chunked: bool = False,
chunk_size: Optional[int] = None,
) -> Optional[AnnData]:
"""Square root the data matrix.
    Computes :math:`X = \\sqrt{X}`.
Parameters
----------
data
The (annotated) data matrix of shape ``n_obs`` × ``n_vars``.
Rows correspond to cells and columns to genes.
copy
        If an :class:`~anndata.AnnData` is passed,
determines whether a copy is returned.
chunked
Process the data matrix in chunks, which will save memory.
Applies only to :class:`~anndata.AnnData`.
chunk_size
``n_obs`` of the chunks to process the data in.
Returns
-------
Returns or updates `data`, depending on `copy`.
"""
if isinstance(data, AnnData):
adata = data.copy() if copy else data
if chunked:
for chunk, start, end in adata.chunked_X(chunk_size):
adata.X[start:end] = sqrt(chunk)
else:
adata.X = sqrt(data.X)
return adata if copy else None
X = data # proceed with data matrix
if not issparse(X):
return np.sqrt(X)
else:
return X.sqrt()
|
python
|
{
"resource": ""
}
|
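A usage sketch for ``sqrt``, under the same assumptions as above; note that passing a bare matrix returns the transformed matrix instead of updating in place:

import numpy as np
import scanpy as sc
from anndata import AnnData
from scipy.sparse import csr_matrix

adata = AnnData(np.abs(np.random.randn(30, 10)).astype(np.float32))
sc.pp.sqrt(adata)                      # in place on the AnnData
adata_sqrt = sc.pp.sqrt(adata, copy=True)
X_sqrt = sc.pp.sqrt(csr_matrix(np.eye(4, dtype=np.float32)))   # sparse path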