partition
stringclasses
3 values
func_name
stringlengths
1
134
docstring
stringlengths
1
46.9k
path
stringlengths
4
223
original_string
stringlengths
75
104k
code
stringlengths
75
104k
docstring_tokens
listlengths
1
1.97k
repo
stringlengths
7
55
language
stringclasses
1 value
url
stringlengths
87
315
code_tokens
listlengths
19
28.4k
sha
stringlengths
40
40
valid
urlsplit
Parse a URL into 5 components: <scheme>://<netloc>/<path>?<query>#<fragment> Return a 5-tuple: (scheme, netloc, path, query, fragment). Note that we don't break the components up in smaller bits (e.g. netloc is a single string) and we don't expand % escapes.
third_party/stdlib/urlparse.py
def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL into 5 components:
    <scheme>://<netloc>/<path>?<query>#<fragment>

    Return a 5-tuple: (scheme, netloc, path, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes.
    """
    allow_fragments = bool(allow_fragments)
    # Cache key includes the argument types so str and unicode inputs
    # (which produce differently-typed results) never collide.
    key = url, scheme, allow_fragments, type(url), type(scheme)
    cached = _parse_cache.get(key, None)
    if cached:
        return cached
    if len(_parse_cache) >= MAX_CACHE_SIZE:  # avoid runaway growth
        clear_cache()
    netloc = query = fragment = ''
    i = url.find(':')
    if i > 0:
        if url[:i] == 'http':  # optimize the common case
            scheme = url[:i].lower()
            url = url[i+1:]
            if url[:2] == '//':
                netloc, url = _splitnetloc(url, 2)
                # A '[' or ']' must come as a matched pair (IPv6 literal).
                if (('[' in netloc and ']' not in netloc) or
                        (']' in netloc and '[' not in netloc)):
                    raise ValueError("Invalid IPv6 URL")
            if allow_fragments and '#' in url:
                url, fragment = url.split('#', 1)
            if '?' in url:
                url, query = url.split('?', 1)
            v = SplitResult(scheme, netloc, url, query, fragment)
            _parse_cache[key] = v
            return v
        for c in url[:i]:
            if c not in scheme_chars:
                break
        else:
            # make sure "url" is not actually a port number (in which case
            # "scheme" is really part of the path)
            rest = url[i+1:]
            if not rest or any(c not in '0123456789' for c in rest):
                # not a port number
                scheme, url = url[:i].lower(), rest

    if url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
        if (('[' in netloc and ']' not in netloc) or
                (']' in netloc and '[' not in netloc)):
            raise ValueError("Invalid IPv6 URL")
    if allow_fragments and '#' in url:
        url, fragment = url.split('#', 1)
    if '?' in url:
        url, query = url.split('?', 1)
    v = SplitResult(scheme, netloc, url, query, fragment)
    _parse_cache[key] = v
    return v
[ "Parse", "a", "URL", "into", "5", "components", ":", "<scheme", ">", ":", "//", "<netloc", ">", "/", "<path", ">", "?<query", ">", "#<fragment", ">", "Return", "a", "5", "-", "tuple", ":", "(", "scheme", "netloc", "path", "query", "fragment", ")", "....
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/urlparse.py#L290-L343
[ "def", "urlsplit", "(", "url", ",", "scheme", "=", "''", ",", "allow_fragments", "=", "True", ")", ":", "allow_fragments", "=", "bool", "(", "allow_fragments", ")", "key", "=", "url", ",", "scheme", ",", "allow_fragments", ",", "type", "(", "url", ")", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
urlunparse
Put a parsed URL back together again. This may result in a slightly different, but equivalent URL, if the URL that was parsed originally had redundant delimiters, e.g. a ? with an empty query (the draft states that these are equivalent).
third_party/stdlib/urlparse.py
def urlunparse(data):
    """Put a parsed URL back together again.

    This may result in a slightly different, but equivalent URL, if the
    URL that was parsed originally had redundant delimiters, e.g. a ?
    with an empty query (the draft states that these are equivalent).
    """
    scheme, netloc, url, params, query, fragment = data
    if params:
        # Re-attach path parameters (";params") before delegating to the
        # 5-component joiner.
        url = "%s;%s" % (url, params)
    return urlunsplit((scheme, netloc, url, query, fragment))
[ "Put", "a", "parsed", "URL", "back", "together", "again", ".", "This", "may", "result", "in", "a", "slightly", "different", "but", "equivalent", "URL", "if", "the", "URL", "that", "was", "parsed", "originally", "had", "redundant", "delimiters", "e", ".", "...
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/urlparse.py#L345-L353
[ "def", "urlunparse", "(", "data", ")", ":", "scheme", ",", "netloc", ",", "url", ",", "params", ",", "query", ",", "fragment", "=", "data", "if", "params", ":", "url", "=", "\"%s;%s\"", "%", "(", "url", ",", "params", ")", "return", "urlunsplit", "("...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
urlunsplit
Combine the elements of a tuple as returned by urlsplit() into a complete URL as a string. The data argument can be any five-item iterable. This may result in a slightly different, but equivalent URL, if the URL that was parsed originally had unnecessary delimiters (for example, a ? with an empty query; the RFC states that these are equivalent).
third_party/stdlib/urlparse.py
def urlunsplit(data):
    """Combine the elements of a tuple as returned by urlsplit() into a
    complete URL as a string.

    The data argument can be any five-item iterable. This may result in a
    slightly different, but equivalent URL, if the URL that was parsed
    originally had unnecessary delimiters (for example, a ? with an empty
    query; the RFC states that these are equivalent).
    """
    scheme, netloc, url, query, fragment = data
    # Emit "//" either when there is a netloc, or when the scheme
    # conventionally uses one and the path doesn't already start with it.
    if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
        if url and url[:1] != '/':
            url = '/' + url
        url = '//' + (netloc or '') + url
    if scheme:
        url = scheme + ':' + url
    if query:
        url = url + '?' + query
    if fragment:
        url = url + '#' + fragment
    return url
[ "Combine", "the", "elements", "of", "a", "tuple", "as", "returned", "by", "urlsplit", "()", "into", "a", "complete", "URL", "as", "a", "string", ".", "The", "data", "argument", "can", "be", "any", "five", "-", "item", "iterable", ".", "This", "may", "r...
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/urlparse.py#L355-L371
[ "def", "urlunsplit", "(", "data", ")", ":", "scheme", ",", "netloc", ",", "url", ",", "query", ",", "fragment", "=", "data", "if", "netloc", "or", "(", "scheme", "and", "scheme", "in", "uses_netloc", "and", "url", "[", ":", "2", "]", "!=", "'//'", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
urljoin
Join a base URL and a possibly relative URL to form an absolute interpretation of the latter.
third_party/stdlib/urlparse.py
def urljoin(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter."""
    # Degenerate cases: an empty side contributes nothing.
    if not base:
        return url
    if not url:
        return base
    bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
            urlparse(base, '', allow_fragments)
    scheme, netloc, path, params, query, fragment = \
            urlparse(url, bscheme, allow_fragments)
    if scheme != bscheme or scheme not in uses_relative:
        # Different (or non-relative) scheme: the second URL stands alone.
        return url
    if scheme in uses_netloc:
        if netloc:
            return urlunparse((scheme, netloc, path,
                               params, query, fragment))
        netloc = bnetloc
    if path[:1] == '/':
        # Absolute path: keep it as-is, only inherit scheme/netloc.
        return urlunparse((scheme, netloc, path,
                           params, query, fragment))
    if not path and not params:
        # Empty relative reference: inherit path/params, and query too
        # unless the reference supplied its own.
        path = bpath
        params = bparams
        if not query:
            query = bquery
        return urlunparse((scheme, netloc, path,
                           params, query, fragment))
    # Merge the base's directory with the relative path, then resolve
    # '.' and '..' segments.
    segments = bpath.split('/')[:-1] + path.split('/')
    # XXX The stuff below is bogus in various ways...
    if segments[-1] == '.':
        segments[-1] = ''
    while '.' in segments:
        segments.remove('.')
    while 1:
        i = 1
        n = len(segments) - 1
        while i < n:
            if (segments[i] == '..'
                and segments[i-1] not in ('', '..')):
                # "dir/.." collapses; restart the scan from the left.
                del segments[i-1:i+1]
                break
            i = i+1
        else:
            break
    if segments == ['', '..']:
        segments[-1] = ''
    elif len(segments) >= 2 and segments[-1] == '..':
        segments[-2:] = ['']
    return urlunparse((scheme, netloc, '/'.join(segments),
                       params, query, fragment))
[ "Join", "a", "base", "URL", "and", "a", "possibly", "relative", "URL", "to", "form", "an", "absolute", "interpretation", "of", "the", "latter", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/urlparse.py#L373-L423
[ "def", "urljoin", "(", "base", ",", "url", ",", "allow_fragments", "=", "True", ")", ":", "if", "not", "base", ":", "return", "url", "if", "not", "url", ":", "return", "base", "bscheme", ",", "bnetloc", ",", "bpath", ",", "bparams", ",", "bquery", ",...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
urldefrag
Removes any existing fragment from URL. Returns a tuple of the defragmented URL and the fragment. If the URL contained no fragments, the second element is the empty string.
third_party/stdlib/urlparse.py
def urldefrag(url):
    """Removes any existing fragment from URL.

    Returns a tuple of the defragmented URL and the fragment.  If
    the URL contained no fragments, the second element is the
    empty string.
    """
    if '#' in url:
        s, n, p, a, q, frag = urlparse(url)
        defrag = urlunparse((s, n, p, a, q, ''))
        return defrag, frag
    else:
        return url, ''
[ "Removes", "any", "existing", "fragment", "from", "URL", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/urlparse.py#L425-L437
[ "def", "urldefrag", "(", "url", ")", ":", "if", "'#'", "in", "url", ":", "s", ",", "n", ",", "p", ",", "a", ",", "q", ",", "frag", "=", "urlparse", "(", "url", ")", "defrag", "=", "urlunparse", "(", "(", "s", ",", "n", ",", "p", ",", "a", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
unquote
unquote('abc%20def') -> 'abc def'.
third_party/stdlib/urlparse.py
def unquote(s):
    """unquote('abc%20def') -> 'abc def'."""
    if _is_unicode(s):
        if '%' not in s:
            return s
        # Split on runs of ASCII characters; the escapes can only live in
        # the ASCII pieces, which are decoded recursively as byte strings
        # and mapped back to unicode via latin-1 (a 1:1 byte<->char map).
        bits = _asciire.split(s)
        res = [bits[0]]
        append = res.append
        for i in range(1, len(bits), 2):
            append(unquote(str(bits[i])).decode('latin1'))
            append(bits[i + 1])
        return ''.join(res)

    bits = s.split('%')
    # fastpath: no '%' at all means nothing to unquote
    if len(bits) == 1:
        return s
    res = [bits[0]]
    append = res.append
    for item in bits[1:]:
        try:
            # _hextochr maps two hex digits to the corresponding character;
            # a KeyError means the '%' was not a valid escape, so keep it.
            append(_hextochr[item[:2]])
            append(item[2:])
        except KeyError:
            append('%')
            append(item)
    return ''.join(res)
[ "unquote", "(", "abc%20def", ")", "-", ">", "abc", "def", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/urlparse.py#L458-L484
[ "def", "unquote", "(", "s", ")", ":", "if", "_is_unicode", "(", "s", ")", ":", "if", "'%'", "not", "in", "s", ":", "return", "s", "bits", "=", "_asciire", ".", "split", "(", "s", ")", "res", "=", "[", "bits", "[", "0", "]", "]", "append", "="...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
parse_qs
Parse a query given as a string argument. Arguments: qs: percent-encoded query string to be parsed keep_blank_values: flag indicating whether blank values in percent-encoded queries should be treated as blank strings. A true value indicates that blanks should be retained as blank strings. The default false value indicates that blank values are to be ignored and treated as if they were not included. strict_parsing: flag indicating what to do with parsing errors. If false (the default), errors are silently ignored. If true, errors raise a ValueError exception.
third_party/stdlib/urlparse.py
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query given as a string argument.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.  A
        true value indicates that blanks should be retained as blank
        strings.  The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    strict_parsing: flag indicating what to do with parsing errors. If
        false (the default), errors are silently ignored. If true,
        errors raise a ValueError exception.

    Returns a dict mapping each name to the list of values it appeared with.
    """
    # Renamed from upstream's `dict` to avoid shadowing the builtin.
    result = {}
    for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
        if name in result:
            result[name].append(value)
        else:
            result[name] = [value]
    return result
[ "Parse", "a", "query", "given", "as", "a", "string", "argument", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/urlparse.py#L486-L510
[ "def", "parse_qs", "(", "qs", ",", "keep_blank_values", "=", "0", ",", "strict_parsing", "=", "0", ")", ":", "dict", "=", "{", "}", "for", "name", ",", "value", "in", "parse_qsl", "(", "qs", ",", "keep_blank_values", ",", "strict_parsing", ")", ":", "i...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
parse_qsl
Parse a query given as a string argument. Arguments: qs: percent-encoded query string to be parsed keep_blank_values: flag indicating whether blank values in percent-encoded queries should be treated as blank strings. A true value indicates that blanks should be retained as blank strings. The default false value indicates that blank values are to be ignored and treated as if they were not included. strict_parsing: flag indicating what to do with parsing errors. If false (the default), errors are silently ignored. If true, errors raise a ValueError exception. Returns a list, as G-d intended.
third_party/stdlib/urlparse.py
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query given as a string argument.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.  A
        true value indicates that blanks should be retained as blank
        strings.  The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    strict_parsing: flag indicating what to do with parsing errors. If
        false (the default), errors are silently ignored. If true,
        errors raise a ValueError exception.

    Returns a list, as G-d intended.
    """
    # Both '&' and ';' are accepted as pair separators.
    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
    r = []
    for name_value in pairs:
        if not name_value and not strict_parsing:
            continue
        nv = name_value.split('=', 1)
        if len(nv) != 2:
            if strict_parsing:
                # Parenthesized raise form: valid in Python 2 and 3,
                # unlike upstream's `raise ValueError, msg` statement.
                raise ValueError("bad query field: %r" % (name_value,))
            # Handle case of a control-name with no equal sign
            if keep_blank_values:
                nv.append('')
            else:
                continue
        if len(nv[1]) or keep_blank_values:
            # '+' means space in form encoding; decode it before unquoting.
            name = unquote(nv[0].replace('+', ' '))
            value = unquote(nv[1].replace('+', ' '))
            r.append((name, value))
    return r
[ "Parse", "a", "query", "given", "as", "a", "string", "argument", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/urlparse.py#L512-L550
[ "def", "parse_qsl", "(", "qs", ",", "keep_blank_values", "=", "0", ",", "strict_parsing", "=", "0", ")", ":", "pairs", "=", "[", "s2", "for", "s1", "in", "qs", ".", "split", "(", "'&'", ")", "for", "s2", "in", "s1", ".", "split", "(", "';'", ")",...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
_SplitResult._replace
Return a new SplitResult object replacing specified fields with new values
third_party/stdlib/urlparse.py
def _replace(_self, **kwds):
    'Return a new SplitResult object replacing specified fields with new values'
    # Pop each field from kwds (falling back to the current value in
    # _self) and rebuild via the namedtuple-style _make constructor.
    result = _self._make(map(kwds.pop,
                             ('scheme', 'netloc', 'path', 'query', 'fragment'),
                             _self))
    if kwds:
        # Anything left over was not a recognized field name.
        raise ValueError('Got unexpected field names: %r' % kwds.keys())
    return result
[ "Return", "a", "new", "SplitResult", "object", "replacing", "specified", "fields", "with", "new", "values" ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/urlparse.py#L158-L163
[ "def", "_replace", "(", "_self", ",", "*", "*", "kwds", ")", ":", "result", "=", "_self", ".", "_make", "(", "map", "(", "kwds", ".", "pop", ",", "(", "'scheme'", ",", "'netloc'", ",", "'path'", ",", "'query'", ",", "'fragment'", ")", ",", "_self",...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
getlines
Get the lines for a file from the cache. Update the cache if it doesn't contain an entry for this file already.
third_party/stdlib/linecache.py
def getlines(filename, module_globals=None):
    """Get the lines for a file from the cache.
    Update the cache if it doesn't contain an entry for this file already."""
    if filename in cache:
        return cache[filename][2]
    try:
        return updatecache(filename, module_globals)
    except MemoryError:
        # Reading the file blew the heap: drop the whole cache and give up
        # on this file rather than propagate the MemoryError.
        clearcache()
        return []
[ "Get", "the", "lines", "for", "a", "file", "from", "the", "cache", ".", "Update", "the", "cache", "if", "it", "doesn", "t", "contain", "an", "entry", "for", "this", "file", "already", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/linecache.py#L33-L44
[ "def", "getlines", "(", "filename", ",", "module_globals", "=", "None", ")", ":", "if", "filename", "in", "cache", ":", "return", "cache", "[", "filename", "]", "[", "2", "]", "try", ":", "return", "updatecache", "(", "filename", ",", "module_globals", "...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
checkcache
Discard cache entries that are out of date. (This is not checked upon each call!)
third_party/stdlib/linecache.py
def checkcache(filename=None):
    """Discard cache entries that are out of date.
    (This is not checked upon each call!)"""
    if filename is None:
        # Snapshot the keys: we delete entries while iterating, which is
        # safe on Python 2 (keys() is a list) and, with list(), on 3 too.
        filenames = list(cache.keys())
    else:
        if filename in cache:
            filenames = [filename]
        else:
            return

    for filename in filenames:
        size, mtime, lines, fullname = cache[filename]
        if mtime is None:
            continue   # no-op for files loaded via a __loader__
        try:
            stat = os.stat(fullname)
        except os.error:
            # File vanished (or became unreadable): drop its entry.
            del cache[filename]
            continue
        if size != stat.st_size or mtime != stat.st_mtime:
            del cache[filename]
[ "Discard", "cache", "entries", "that", "are", "out", "of", "date", ".", "(", "This", "is", "not", "checked", "upon", "each", "call!", ")" ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/linecache.py#L47-L69
[ "def", "checkcache", "(", "filename", "=", "None", ")", ":", "if", "filename", "is", "None", ":", "filenames", "=", "cache", ".", "keys", "(", ")", "else", ":", "if", "filename", "in", "cache", ":", "filenames", "=", "[", "filename", "]", "else", ":"...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
updatecache
Update a cache entry and return its list of lines. If something's wrong, print a message, discard the cache entry, and return an empty list.
third_party/stdlib/linecache.py
def updatecache(filename, module_globals=None):
    """Update a cache entry and return its list of lines.
    If something's wrong, print a message, discard the cache entry,
    and return an empty list."""

    if filename in cache:
        del cache[filename]
    if not filename or (filename.startswith('<') and filename.endswith('>')):
        # Pseudo-filenames like '<stdin>' never correspond to real files.
        return []

    fullname = filename
    try:
        stat = os.stat(fullname)
    except OSError:
        basename = filename

        # Try for a __loader__, if available
        if module_globals and '__loader__' in module_globals:
            name = module_globals.get('__name__')
            loader = module_globals['__loader__']
            get_source = getattr(loader, 'get_source', None)

            if name and get_source:
                try:
                    data = get_source(name)
                except (ImportError, IOError):
                    pass
                else:
                    if data is None:
                        # No luck, the PEP302 loader cannot find the source
                        # for this module.
                        return []
                    cache[filename] = (
                        len(data), None,
                        [line+'\n' for line in data.splitlines()], fullname)
                    return cache[filename][2]

        # Try looking through the module search path, which is only useful
        # when handling a relative filename.
        if os.path.isabs(filename):
            return []

        for dirname in sys.path:
            # When using imputil, sys.path may contain things other than
            # strings; ignore them when it happens.
            try:
                fullname = os.path.join(dirname, basename)
            except (TypeError, AttributeError):
                # Not sufficiently string-like to do anything useful with.
                continue
            try:
                stat = os.stat(fullname)
                break
            except os.error:
                pass
        else:
            return []
    try:
        with open(fullname, 'rU') as fp:
            lines = fp.readlines()
    except IOError:
        return []
    if lines and not lines[-1].endswith('\n'):
        # Guarantee every cached line is newline-terminated.
        lines[-1] += '\n'
    size, mtime = stat.st_size, stat.st_mtime
    cache[filename] = size, mtime, lines, fullname
    return lines
[ "Update", "a", "cache", "entry", "and", "return", "its", "list", "of", "lines", ".", "If", "something", "s", "wrong", "print", "a", "message", "discard", "the", "cache", "entry", "and", "return", "an", "empty", "list", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/linecache.py#L72-L139
[ "def", "updatecache", "(", "filename", ",", "module_globals", "=", "None", ")", ":", "if", "filename", "in", "cache", ":", "del", "cache", "[", "filename", "]", "if", "not", "filename", "or", "(", "filename", ".", "startswith", "(", "'<'", ")", "and", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
isfile
Test whether a path is a regular file
third_party/stdlib/genericpath.py
def isfile(path):
    """Test whether a path is a regular file.

    Returns False (rather than raising) when the path does not exist or
    cannot be stat'ed.
    """
    try:
        st = os.stat(path)
    except os.error:
        return False
    return stat.S_ISREG(st.st_mode)
[ "Test", "whether", "a", "path", "is", "a", "regular", "file" ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/genericpath.py#L34-L40
[ "def", "isfile", "(", "path", ")", ":", "try", ":", "st", "=", "os", ".", "stat", "(", "path", ")", "except", "os", ".", "error", ":", "return", "False", "return", "stat", ".", "S_ISREG", "(", "st", ".", "st_mode", ")" ]
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
isdir
Return true if the pathname refers to an existing directory.
third_party/stdlib/genericpath.py
def isdir(s):
    """Return true if the pathname refers to an existing directory.

    Returns False (rather than raising) when the path does not exist or
    cannot be stat'ed.
    """
    try:
        st = os.stat(s)
    except os.error:
        return False
    return stat.S_ISDIR(st.st_mode)
[ "Return", "true", "if", "the", "pathname", "refers", "to", "an", "existing", "directory", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/genericpath.py#L46-L52
[ "def", "isdir", "(", "s", ")", ":", "try", ":", "st", "=", "os", ".", "stat", "(", "s", ")", "except", "os", ".", "error", ":", "return", "False", "return", "stat", ".", "S_ISDIR", "(", "st", ".", "st_mode", ")" ]
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
commonprefix
Given a list of pathnames, returns the longest common leading component
third_party/stdlib/genericpath.py
def commonprefix(m): "Given a list of pathnames, returns the longest common leading component" if not m: return '' s1 = min(m) s2 = max(m) for i, c in enumerate(s1): if c != s2[i]: return s1[:i] return s1
def commonprefix(m): "Given a list of pathnames, returns the longest common leading component" if not m: return '' s1 = min(m) s2 = max(m) for i, c in enumerate(s1): if c != s2[i]: return s1[:i] return s1
[ "Given", "a", "list", "of", "pathnames", "returns", "the", "longest", "common", "leading", "component" ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/genericpath.py#L76-L84
[ "def", "commonprefix", "(", "m", ")", ":", "if", "not", "m", ":", "return", "''", "s1", "=", "min", "(", "m", ")", "s2", "=", "max", "(", "m", ")", "for", "i", ",", "c", "in", "enumerate", "(", "s1", ")", ":", "if", "c", "!=", "s2", "[", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
_splitext
Split the extension from a pathname. Extension is everything from the last dot to the end, ignoring leading dots. Returns "(root, ext)"; ext may be empty.
third_party/stdlib/genericpath.py
def _splitext(p, sep, altsep, extsep): """Split the extension from a pathname. Extension is everything from the last dot to the end, ignoring leading dots. Returns "(root, ext)"; ext may be empty.""" sepIndex = p.rfind(sep) if altsep: altsepIndex = p.rfind(altsep) sepIndex = max(sepIndex, altsepIndex) dotIndex = p.rfind(extsep) if dotIndex > sepIndex: # skip all leading dots filenameIndex = sepIndex + 1 while filenameIndex < dotIndex: if p[filenameIndex] != extsep: return p[:dotIndex], p[dotIndex:] filenameIndex += 1 return p, ''
def _splitext(p, sep, altsep, extsep): """Split the extension from a pathname. Extension is everything from the last dot to the end, ignoring leading dots. Returns "(root, ext)"; ext may be empty.""" sepIndex = p.rfind(sep) if altsep: altsepIndex = p.rfind(altsep) sepIndex = max(sepIndex, altsepIndex) dotIndex = p.rfind(extsep) if dotIndex > sepIndex: # skip all leading dots filenameIndex = sepIndex + 1 while filenameIndex < dotIndex: if p[filenameIndex] != extsep: return p[:dotIndex], p[dotIndex:] filenameIndex += 1 return p, ''
[ "Split", "the", "extension", "from", "a", "pathname", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/genericpath.py#L93-L113
[ "def", "_splitext", "(", "p", ",", "sep", ",", "altsep", ",", "extsep", ")", ":", "sepIndex", "=", "p", ".", "rfind", "(", "sep", ")", "if", "altsep", ":", "altsepIndex", "=", "p", ".", "rfind", "(", "altsep", ")", "sepIndex", "=", "max", "(", "s...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
wrap
Wrap a single paragraph of text, returning a list of wrapped lines. Reformat the single paragraph in 'text' so it fits in lines of no more than 'width' columns, and return a list of wrapped lines. By default, tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space. See TextWrapper class for available keyword args to customize wrapping behaviour.
third_party/stdlib/textwrap.py
def wrap(text, width=70, **kwargs): """Wrap a single paragraph of text, returning a list of wrapped lines. Reformat the single paragraph in 'text' so it fits in lines of no more than 'width' columns, and return a list of wrapped lines. By default, tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space. See TextWrapper class for available keyword args to customize wrapping behaviour. """ w = TextWrapper(width=width, **kwargs) return w.wrap(text)
def wrap(text, width=70, **kwargs): """Wrap a single paragraph of text, returning a list of wrapped lines. Reformat the single paragraph in 'text' so it fits in lines of no more than 'width' columns, and return a list of wrapped lines. By default, tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space. See TextWrapper class for available keyword args to customize wrapping behaviour. """ w = TextWrapper(width=width, **kwargs) return w.wrap(text)
[ "Wrap", "a", "single", "paragraph", "of", "text", "returning", "a", "list", "of", "wrapped", "lines", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/textwrap.py#L349-L360
[ "def", "wrap", "(", "text", ",", "width", "=", "70", ",", "*", "*", "kwargs", ")", ":", "w", "=", "TextWrapper", "(", "width", "=", "width", ",", "*", "*", "kwargs", ")", "return", "w", ".", "wrap", "(", "text", ")" ]
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
fill
Fill a single paragraph of text, returning a new string. Reformat the single paragraph in 'text' to fit in lines of no more than 'width' columns, and return a new string containing the entire wrapped paragraph. As with wrap(), tabs are expanded and other whitespace characters converted to space. See TextWrapper class for available keyword args to customize wrapping behaviour.
third_party/stdlib/textwrap.py
def fill(text, width=70, **kwargs): """Fill a single paragraph of text, returning a new string. Reformat the single paragraph in 'text' to fit in lines of no more than 'width' columns, and return a new string containing the entire wrapped paragraph. As with wrap(), tabs are expanded and other whitespace characters converted to space. See TextWrapper class for available keyword args to customize wrapping behaviour. """ w = TextWrapper(width=width, **kwargs) return w.fill(text)
def fill(text, width=70, **kwargs): """Fill a single paragraph of text, returning a new string. Reformat the single paragraph in 'text' to fit in lines of no more than 'width' columns, and return a new string containing the entire wrapped paragraph. As with wrap(), tabs are expanded and other whitespace characters converted to space. See TextWrapper class for available keyword args to customize wrapping behaviour. """ w = TextWrapper(width=width, **kwargs) return w.fill(text)
[ "Fill", "a", "single", "paragraph", "of", "text", "returning", "a", "new", "string", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/textwrap.py#L362-L372
[ "def", "fill", "(", "text", ",", "width", "=", "70", ",", "*", "*", "kwargs", ")", ":", "w", "=", "TextWrapper", "(", "width", "=", "width", ",", "*", "*", "kwargs", ")", "return", "w", ".", "fill", "(", "text", ")" ]
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
dedent
Remove any common leading whitespace from every line in `text`. This can be used to make triple-quoted strings line up with the left edge of the display, while still presenting them in the source code in indented form. Note that tabs and spaces are both treated as whitespace, but they are not equal: the lines " hello" and "\\thello" are considered to have no common leading whitespace. (This behaviour is new in Python 2.5; older versions of this module incorrectly expanded tabs before searching for common leading whitespace.)
third_party/stdlib/textwrap.py
def dedent(text): """Remove any common leading whitespace from every line in `text`. This can be used to make triple-quoted strings line up with the left edge of the display, while still presenting them in the source code in indented form. Note that tabs and spaces are both treated as whitespace, but they are not equal: the lines " hello" and "\\thello" are considered to have no common leading whitespace. (This behaviour is new in Python 2.5; older versions of this module incorrectly expanded tabs before searching for common leading whitespace.) """ # Look for the longest leading string of spaces and tabs common to # all lines. margin = None text = _whitespace_only_re.sub('', text) indents = _leading_whitespace_re.findall(text) for indent in indents: if margin is None: margin = indent # Current line more deeply indented than previous winner: # no change (previous winner is still on top). elif indent.startswith(margin): pass # Current line consistent with and no deeper than previous winner: # it's the new winner. elif margin.startswith(indent): margin = indent # Find the largest common whitespace between current line and previous # winner. else: for i, (x, y) in enumerate(zip(margin, indent)): if x != y: margin = margin[:i] break else: margin = margin[:len(indent)] # sanity check (testing/debugging only) if 0 and margin: for line in text.split("\n"): assert not line or line.startswith(margin), \ "line = %r, margin = %r" % (line, margin) if margin: text = re.sub(r'(?m)^' + margin, '', text) return text
def dedent(text): """Remove any common leading whitespace from every line in `text`. This can be used to make triple-quoted strings line up with the left edge of the display, while still presenting them in the source code in indented form. Note that tabs and spaces are both treated as whitespace, but they are not equal: the lines " hello" and "\\thello" are considered to have no common leading whitespace. (This behaviour is new in Python 2.5; older versions of this module incorrectly expanded tabs before searching for common leading whitespace.) """ # Look for the longest leading string of spaces and tabs common to # all lines. margin = None text = _whitespace_only_re.sub('', text) indents = _leading_whitespace_re.findall(text) for indent in indents: if margin is None: margin = indent # Current line more deeply indented than previous winner: # no change (previous winner is still on top). elif indent.startswith(margin): pass # Current line consistent with and no deeper than previous winner: # it's the new winner. elif margin.startswith(indent): margin = indent # Find the largest common whitespace between current line and previous # winner. else: for i, (x, y) in enumerate(zip(margin, indent)): if x != y: margin = margin[:i] break else: margin = margin[:len(indent)] # sanity check (testing/debugging only) if 0 and margin: for line in text.split("\n"): assert not line or line.startswith(margin), \ "line = %r, margin = %r" % (line, margin) if margin: text = re.sub(r'(?m)^' + margin, '', text) return text
[ "Remove", "any", "common", "leading", "whitespace", "from", "every", "line", "in", "text", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/textwrap.py#L380-L430
[ "def", "dedent", "(", "text", ")", ":", "# Look for the longest leading string of spaces and tabs common to", "# all lines.", "margin", "=", "None", "text", "=", "_whitespace_only_re", ".", "sub", "(", "''", ",", "text", ")", "indents", "=", "_leading_whitespace_re", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
TextWrapper._munge_whitespace
_munge_whitespace(text : string) -> string Munge whitespace in text: expand tabs and convert all other whitespace characters to spaces. Eg. " foo\\tbar\\n\\nbaz" becomes " foo bar baz".
third_party/stdlib/textwrap.py
def _munge_whitespace(self, text): """_munge_whitespace(text : string) -> string Munge whitespace in text: expand tabs and convert all other whitespace characters to spaces. Eg. " foo\\tbar\\n\\nbaz" becomes " foo bar baz". """ if self.expand_tabs: # text = text.expandtabs() text = ' '.join((' '.join(text.split('\n'))).split('\t')) if self.replace_whitespace: # if isinstance(text, str): # text = text.translate(self.whitespace_trans) # elif isinstance(text, _unicode): # text = text.translate(self.unicode_whitespace_trans) text = ' '.join(' '.join(text.split('\n')).split('\t')) return text
def _munge_whitespace(self, text): """_munge_whitespace(text : string) -> string Munge whitespace in text: expand tabs and convert all other whitespace characters to spaces. Eg. " foo\\tbar\\n\\nbaz" becomes " foo bar baz". """ if self.expand_tabs: # text = text.expandtabs() text = ' '.join((' '.join(text.split('\n'))).split('\t')) if self.replace_whitespace: # if isinstance(text, str): # text = text.translate(self.whitespace_trans) # elif isinstance(text, _unicode): # text = text.translate(self.unicode_whitespace_trans) text = ' '.join(' '.join(text.split('\n')).split('\t')) return text
[ "_munge_whitespace", "(", "text", ":", "string", ")", "-", ">", "string" ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/textwrap.py#L147-L163
[ "def", "_munge_whitespace", "(", "self", ",", "text", ")", ":", "if", "self", ".", "expand_tabs", ":", "# text = text.expandtabs()", "text", "=", "' '", ".", "join", "(", "(", "' '", ".", "join", "(", "text", ".", "split", "(", "'\\n'", ")", ")", ")", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
TextWrapper._split
_split(text : string) -> [string] Split the text to wrap into indivisible chunks. Chunks are not quite the same as words; see _wrap_chunks() for full details. As an example, the text Look, goof-ball -- use the -b option! breaks into the following chunks: 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ', 'use', ' ', 'the', ' ', '-b', ' ', 'option!' if break_on_hyphens is True, or in: 'Look,', ' ', 'goof-ball', ' ', '--', ' ', 'use', ' ', 'the', ' ', '-b', ' ', option!' otherwise.
third_party/stdlib/textwrap.py
def _split(self, text): """_split(text : string) -> [string] Split the text to wrap into indivisible chunks. Chunks are not quite the same as words; see _wrap_chunks() for full details. As an example, the text Look, goof-ball -- use the -b option! breaks into the following chunks: 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ', 'use', ' ', 'the', ' ', '-b', ' ', 'option!' if break_on_hyphens is True, or in: 'Look,', ' ', 'goof-ball', ' ', '--', ' ', 'use', ' ', 'the', ' ', '-b', ' ', option!' otherwise. """ if isinstance(text, _unicode): if self.break_on_hyphens: pat = self.wordsep_re_uni else: pat = self.wordsep_simple_re_uni else: if self.break_on_hyphens: pat = self.wordsep_re else: pat = self.wordsep_simple_re chunks = pat.split(text) # chunks = filter(None, chunks) # remove empty chunks chunks = [x for x in chunks if x is not None] return chunks
def _split(self, text): """_split(text : string) -> [string] Split the text to wrap into indivisible chunks. Chunks are not quite the same as words; see _wrap_chunks() for full details. As an example, the text Look, goof-ball -- use the -b option! breaks into the following chunks: 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ', 'use', ' ', 'the', ' ', '-b', ' ', 'option!' if break_on_hyphens is True, or in: 'Look,', ' ', 'goof-ball', ' ', '--', ' ', 'use', ' ', 'the', ' ', '-b', ' ', option!' otherwise. """ if isinstance(text, _unicode): if self.break_on_hyphens: pat = self.wordsep_re_uni else: pat = self.wordsep_simple_re_uni else: if self.break_on_hyphens: pat = self.wordsep_re else: pat = self.wordsep_simple_re chunks = pat.split(text) # chunks = filter(None, chunks) # remove empty chunks chunks = [x for x in chunks if x is not None] return chunks
[ "_split", "(", "text", ":", "string", ")", "-", ">", "[", "string", "]" ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/textwrap.py#L166-L194
[ "def", "_split", "(", "self", ",", "text", ")", ":", "if", "isinstance", "(", "text", ",", "_unicode", ")", ":", "if", "self", ".", "break_on_hyphens", ":", "pat", "=", "self", ".", "wordsep_re_uni", "else", ":", "pat", "=", "self", ".", "wordsep_simpl...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
TextWrapper._fix_sentence_endings
_fix_sentence_endings(chunks : [string]) Correct for sentence endings buried in 'chunks'. Eg. when the original text contains "... foo.\\nBar ...", munge_whitespace() and split() will convert that to [..., "foo.", " ", "Bar", ...] which has one too few spaces; this method simply changes the one space to two.
third_party/stdlib/textwrap.py
def _fix_sentence_endings(self, chunks): """_fix_sentence_endings(chunks : [string]) Correct for sentence endings buried in 'chunks'. Eg. when the original text contains "... foo.\\nBar ...", munge_whitespace() and split() will convert that to [..., "foo.", " ", "Bar", ...] which has one too few spaces; this method simply changes the one space to two. """ i = 0 patsearch = self.sentence_end_re.search while i < len(chunks)-1: if chunks[i+1] == " " and patsearch(chunks[i]): chunks[i+1] = " " i += 2 else: i += 1
def _fix_sentence_endings(self, chunks): """_fix_sentence_endings(chunks : [string]) Correct for sentence endings buried in 'chunks'. Eg. when the original text contains "... foo.\\nBar ...", munge_whitespace() and split() will convert that to [..., "foo.", " ", "Bar", ...] which has one too few spaces; this method simply changes the one space to two. """ i = 0 patsearch = self.sentence_end_re.search while i < len(chunks)-1: if chunks[i+1] == " " and patsearch(chunks[i]): chunks[i+1] = " " i += 2 else: i += 1
[ "_fix_sentence_endings", "(", "chunks", ":", "[", "string", "]", ")" ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/textwrap.py#L196-L212
[ "def", "_fix_sentence_endings", "(", "self", ",", "chunks", ")", ":", "i", "=", "0", "patsearch", "=", "self", ".", "sentence_end_re", ".", "search", "while", "i", "<", "len", "(", "chunks", ")", "-", "1", ":", "if", "chunks", "[", "i", "+", "1", "...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
TextWrapper._handle_long_word
_handle_long_word(chunks : [string], cur_line : [string], cur_len : int, width : int) Handle a chunk of text (most likely a word, not whitespace) that is too long to fit in any line.
third_party/stdlib/textwrap.py
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): """_handle_long_word(chunks : [string], cur_line : [string], cur_len : int, width : int) Handle a chunk of text (most likely a word, not whitespace) that is too long to fit in any line. """ # Figure out when indent is larger than the specified width, and make # sure at least one character is stripped off on every pass if width < 1: space_left = 1 else: space_left = width - cur_len # If we're allowed to break long words, then do so: put as much # of the next chunk onto the current line as will fit. if self.break_long_words: cur_line.append(reversed_chunks[-1][:space_left]) reversed_chunks[-1] = reversed_chunks[-1][space_left:] # Otherwise, we have to preserve the long word intact. Only add # it to the current line if there's nothing already there -- # that minimizes how much we violate the width constraint. elif not cur_line: cur_line.append(reversed_chunks.pop())
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): """_handle_long_word(chunks : [string], cur_line : [string], cur_len : int, width : int) Handle a chunk of text (most likely a word, not whitespace) that is too long to fit in any line. """ # Figure out when indent is larger than the specified width, and make # sure at least one character is stripped off on every pass if width < 1: space_left = 1 else: space_left = width - cur_len # If we're allowed to break long words, then do so: put as much # of the next chunk onto the current line as will fit. if self.break_long_words: cur_line.append(reversed_chunks[-1][:space_left]) reversed_chunks[-1] = reversed_chunks[-1][space_left:] # Otherwise, we have to preserve the long word intact. Only add # it to the current line if there's nothing already there -- # that minimizes how much we violate the width constraint. elif not cur_line: cur_line.append(reversed_chunks.pop())
[ "_handle_long_word", "(", "chunks", ":", "[", "string", "]", "cur_line", ":", "[", "string", "]", "cur_len", ":", "int", "width", ":", "int", ")" ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/textwrap.py#L214-L239
[ "def", "_handle_long_word", "(", "self", ",", "reversed_chunks", ",", "cur_line", ",", "cur_len", ",", "width", ")", ":", "# Figure out when indent is larger than the specified width, and make", "# sure at least one character is stripped off on every pass", "if", "width", "<", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
TextWrapper._wrap_chunks
_wrap_chunks(chunks : [string]) -> [string] Wrap a sequence of text chunks and return a list of lines of length 'self.width' or less. (If 'break_long_words' is false, some lines may be longer than this.) Chunks correspond roughly to words and the whitespace between them: each chunk is indivisible (modulo 'break_long_words'), but a line break can come between any two chunks. Chunks should not have internal whitespace; ie. a chunk is either all whitespace or a "word". Whitespace chunks will be removed from the beginning and end of lines, but apart from that whitespace is preserved.
third_party/stdlib/textwrap.py
def _wrap_chunks(self, chunks): """_wrap_chunks(chunks : [string]) -> [string] Wrap a sequence of text chunks and return a list of lines of length 'self.width' or less. (If 'break_long_words' is false, some lines may be longer than this.) Chunks correspond roughly to words and the whitespace between them: each chunk is indivisible (modulo 'break_long_words'), but a line break can come between any two chunks. Chunks should not have internal whitespace; ie. a chunk is either all whitespace or a "word". Whitespace chunks will be removed from the beginning and end of lines, but apart from that whitespace is preserved. """ lines = [] if self.width <= 0: raise ValueError("invalid width %r (must be > 0)" % self.width) # Arrange in reverse order so items can be efficiently popped # from a stack of chucks. chunks.reverse() while chunks: # Start the list of chunks that will make up the current line. # cur_len is just the length of all the chunks in cur_line. cur_line = [] cur_len = 0 # Figure out which static string will prefix this line. if lines: indent = self.subsequent_indent else: indent = self.initial_indent # Maximum width for this line. width = self.width - len(indent) # First chunk on line is whitespace -- drop it, unless this # is the very beginning of the text (ie. no lines started yet). if self.drop_whitespace and chunks[-1].strip() == '' and lines: # del chunks[-1] chunks.pop() while chunks: l = len(chunks[-1]) # Can at least squeeze this chunk onto the current line. if cur_len + l <= width: cur_line.append(chunks.pop()) cur_len += l # Nope, this line is full. else: break # The current line is full, and the next chunk is too big to # fit on *any* line (not just this one). if chunks and len(chunks[-1]) > width: self._handle_long_word(chunks, cur_line, cur_len, width) # If the last chunk on this line is all whitespace, drop it. 
if self.drop_whitespace and cur_line and cur_line[-1].strip() == '': # del cur_line[-1] cur_line.pop() # Convert current line back to a string and store it in list # of all lines (return value). if cur_line: lines.append(indent + ''.join(cur_line)) return lines
def _wrap_chunks(self, chunks): """_wrap_chunks(chunks : [string]) -> [string] Wrap a sequence of text chunks and return a list of lines of length 'self.width' or less. (If 'break_long_words' is false, some lines may be longer than this.) Chunks correspond roughly to words and the whitespace between them: each chunk is indivisible (modulo 'break_long_words'), but a line break can come between any two chunks. Chunks should not have internal whitespace; ie. a chunk is either all whitespace or a "word". Whitespace chunks will be removed from the beginning and end of lines, but apart from that whitespace is preserved. """ lines = [] if self.width <= 0: raise ValueError("invalid width %r (must be > 0)" % self.width) # Arrange in reverse order so items can be efficiently popped # from a stack of chucks. chunks.reverse() while chunks: # Start the list of chunks that will make up the current line. # cur_len is just the length of all the chunks in cur_line. cur_line = [] cur_len = 0 # Figure out which static string will prefix this line. if lines: indent = self.subsequent_indent else: indent = self.initial_indent # Maximum width for this line. width = self.width - len(indent) # First chunk on line is whitespace -- drop it, unless this # is the very beginning of the text (ie. no lines started yet). if self.drop_whitespace and chunks[-1].strip() == '' and lines: # del chunks[-1] chunks.pop() while chunks: l = len(chunks[-1]) # Can at least squeeze this chunk onto the current line. if cur_len + l <= width: cur_line.append(chunks.pop()) cur_len += l # Nope, this line is full. else: break # The current line is full, and the next chunk is too big to # fit on *any* line (not just this one). if chunks and len(chunks[-1]) > width: self._handle_long_word(chunks, cur_line, cur_len, width) # If the last chunk on this line is all whitespace, drop it. 
if self.drop_whitespace and cur_line and cur_line[-1].strip() == '': # del cur_line[-1] cur_line.pop() # Convert current line back to a string and store it in list # of all lines (return value). if cur_line: lines.append(indent + ''.join(cur_line)) return lines
[ "_wrap_chunks", "(", "chunks", ":", "[", "string", "]", ")", "-", ">", "[", "string", "]" ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/textwrap.py#L247-L317
[ "def", "_wrap_chunks", "(", "self", ",", "chunks", ")", ":", "lines", "=", "[", "]", "if", "self", ".", "width", "<=", "0", ":", "raise", "ValueError", "(", "\"invalid width %r (must be > 0)\"", "%", "self", ".", "width", ")", "# Arrange in reverse order so it...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
TextWrapper.wrap
wrap(text : string) -> [string] Reformat the single paragraph in 'text' so it fits in lines of no more than 'self.width' columns, and return a list of wrapped lines. Tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space.
third_party/stdlib/textwrap.py
def wrap(self, text): """wrap(text : string) -> [string] Reformat the single paragraph in 'text' so it fits in lines of no more than 'self.width' columns, and return a list of wrapped lines. Tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space. """ text = self._munge_whitespace(text) chunks = self._split(text) if self.fix_sentence_endings: self._fix_sentence_endings(chunks) return self._wrap_chunks(chunks)
def wrap(self, text): """wrap(text : string) -> [string] Reformat the single paragraph in 'text' so it fits in lines of no more than 'self.width' columns, and return a list of wrapped lines. Tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space. """ text = self._munge_whitespace(text) chunks = self._split(text) if self.fix_sentence_endings: self._fix_sentence_endings(chunks) return self._wrap_chunks(chunks)
[ "wrap", "(", "text", ":", "string", ")", "-", ">", "[", "string", "]" ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/textwrap.py#L322-L335
[ "def", "wrap", "(", "self", ",", "text", ")", ":", "text", "=", "self", ".", "_munge_whitespace", "(", "text", ")", "chunks", "=", "self", ".", "_split", "(", "text", ")", "if", "self", ".", "fix_sentence_endings", ":", "self", ".", "_fix_sentence_ending...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
_long2bytesBigEndian
Convert a long integer to a byte string. If optional blocksize is given and greater than zero, pad the front of the byte string with binary zeros so that the length is a multiple of blocksize.
third_party/pypy/_sha.py
def _long2bytesBigEndian(n, blocksize=0): """Convert a long integer to a byte string. If optional blocksize is given and greater than zero, pad the front of the byte string with binary zeros so that the length is a multiple of blocksize. """ # After much testing, this algorithm was deemed to be the fastest. s = b'' pack = struct.pack while n > 0: s = pack('>I', n & 0xffffffff) + s n = n >> 32 # Strip off leading zeros. for i in range(len(s)): if s[i] != '\000': break else: # Only happens when n == 0. s = '\000' i = 0 s = s[i:] # Add back some pad bytes. This could be done more efficiently # w.r.t. the de-padding being done above, but sigh... if blocksize > 0 and len(s) % blocksize: s = (blocksize - len(s) % blocksize) * '\000' + s return s
def _long2bytesBigEndian(n, blocksize=0): """Convert a long integer to a byte string. If optional blocksize is given and greater than zero, pad the front of the byte string with binary zeros so that the length is a multiple of blocksize. """ # After much testing, this algorithm was deemed to be the fastest. s = b'' pack = struct.pack while n > 0: s = pack('>I', n & 0xffffffff) + s n = n >> 32 # Strip off leading zeros. for i in range(len(s)): if s[i] != '\000': break else: # Only happens when n == 0. s = '\000' i = 0 s = s[i:] # Add back some pad bytes. This could be done more efficiently # w.r.t. the de-padding being done above, but sigh... if blocksize > 0 and len(s) % blocksize: s = (blocksize - len(s) % blocksize) * '\000' + s return s
[ "Convert", "a", "long", "integer", "to", "a", "byte", "string", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_sha.py#L30-L61
[ "def", "_long2bytesBigEndian", "(", "n", ",", "blocksize", "=", "0", ")", ":", "# After much testing, this algorithm was deemed to be the fastest.", "s", "=", "b''", "pack", "=", "struct", ".", "pack", "while", "n", ">", "0", ":", "s", "=", "pack", "(", "'>I'"...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
_bytelist2longBigEndian
Transform a list of characters into a list of longs.
third_party/pypy/_sha.py
def _bytelist2longBigEndian(list): "Transform a list of characters into a list of longs." imax = len(list) // 4 hl = [0] * imax j = 0 i = 0 while i < imax: b0 = ord(list[j]) << 24 b1 = ord(list[j+1]) << 16 b2 = ord(list[j+2]) << 8 b3 = ord(list[j+3]) hl[i] = b0 | b1 | b2 | b3 i = i+1 j = j+4 return hl
def _bytelist2longBigEndian(list): "Transform a list of characters into a list of longs." imax = len(list) // 4 hl = [0] * imax j = 0 i = 0 while i < imax: b0 = ord(list[j]) << 24 b1 = ord(list[j+1]) << 16 b2 = ord(list[j+2]) << 8 b3 = ord(list[j+3]) hl[i] = b0 | b1 | b2 | b3 i = i+1 j = j+4 return hl
[ "Transform", "a", "list", "of", "characters", "into", "a", "list", "of", "longs", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_sha.py#L64-L81
[ "def", "_bytelist2longBigEndian", "(", "list", ")", ":", "imax", "=", "len", "(", "list", ")", "//", "4", "hl", "=", "[", "0", "]", "*", "imax", "j", "=", "0", "i", "=", "0", "while", "i", "<", "imax", ":", "b0", "=", "ord", "(", "list", "[",...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
sha.init
Initialize the message-digest and set all fields to zero.
third_party/pypy/_sha.py
def init(self): "Initialize the message-digest and set all fields to zero." self.length = 0 self.input = [] # Initial 160 bit message digest (5 times 32 bit). self.H0 = 0x67452301 self.H1 = 0xEFCDAB89 self.H2 = 0x98BADCFE self.H3 = 0x10325476 self.H4 = 0xC3D2E1F0
def init(self): "Initialize the message-digest and set all fields to zero." self.length = 0 self.input = [] # Initial 160 bit message digest (5 times 32 bit). self.H0 = 0x67452301 self.H1 = 0xEFCDAB89 self.H2 = 0x98BADCFE self.H3 = 0x10325476 self.H4 = 0xC3D2E1F0
[ "Initialize", "the", "message", "-", "digest", "and", "set", "all", "fields", "to", "zero", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_sha.py#L139-L150
[ "def", "init", "(", "self", ")", ":", "self", ".", "length", "=", "0", "self", ".", "input", "=", "[", "]", "# Initial 160 bit message digest (5 times 32 bit).", "self", ".", "H0", "=", "0x67452301", "self", ".", "H1", "=", "0xEFCDAB89", "self", ".", "H2",...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
sha._transform
This loop was unrolled to gain about 10% in speed for t in range(0, 80): TEMP = _rotateLeft(A, 5) + f[t/20] + E + W[t] + K[t/20] E = D D = C C = _rotateLeft(B, 30) & 0xffffffff B = A A = TEMP & 0xffffffff
third_party/pypy/_sha.py
def _transform(self, W): for t in range(16, 80): W.append(_rotateLeft( W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1) & 0xffffffff) A = self.H0 B = self.H1 C = self.H2 D = self.H3 E = self.H4 """ This loop was unrolled to gain about 10% in speed for t in range(0, 80): TEMP = _rotateLeft(A, 5) + f[t/20] + E + W[t] + K[t/20] E = D D = C C = _rotateLeft(B, 30) & 0xffffffff B = A A = TEMP & 0xffffffff """ for t in range(0, 20): TEMP = _rotateLeft(A, 5) + ((B & C) | ((~ B) & D)) + E + W[t] + K[0] E = D D = C C = _rotateLeft(B, 30) & 0xffffffff B = A A = TEMP & 0xffffffff for t in range(20, 40): TEMP = _rotateLeft(A, 5) + (B ^ C ^ D) + E + W[t] + K[1] E = D D = C C = _rotateLeft(B, 30) & 0xffffffff B = A A = TEMP & 0xffffffff for t in range(40, 60): TEMP = _rotateLeft(A, 5) + ((B & C) | (B & D) | (C & D)) + E + W[t] + K[2] E = D D = C C = _rotateLeft(B, 30) & 0xffffffff B = A A = TEMP & 0xffffffff for t in range(60, 80): TEMP = _rotateLeft(A, 5) + (B ^ C ^ D) + E + W[t] + K[3] E = D D = C C = _rotateLeft(B, 30) & 0xffffffff B = A A = TEMP & 0xffffffff self.H0 = (self.H0 + A) & 0xffffffff self.H1 = (self.H1 + B) & 0xffffffff self.H2 = (self.H2 + C) & 0xffffffff self.H3 = (self.H3 + D) & 0xffffffff self.H4 = (self.H4 + E) & 0xffffffff
def _transform(self, W): for t in range(16, 80): W.append(_rotateLeft( W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1) & 0xffffffff) A = self.H0 B = self.H1 C = self.H2 D = self.H3 E = self.H4 """ This loop was unrolled to gain about 10% in speed for t in range(0, 80): TEMP = _rotateLeft(A, 5) + f[t/20] + E + W[t] + K[t/20] E = D D = C C = _rotateLeft(B, 30) & 0xffffffff B = A A = TEMP & 0xffffffff """ for t in range(0, 20): TEMP = _rotateLeft(A, 5) + ((B & C) | ((~ B) & D)) + E + W[t] + K[0] E = D D = C C = _rotateLeft(B, 30) & 0xffffffff B = A A = TEMP & 0xffffffff for t in range(20, 40): TEMP = _rotateLeft(A, 5) + (B ^ C ^ D) + E + W[t] + K[1] E = D D = C C = _rotateLeft(B, 30) & 0xffffffff B = A A = TEMP & 0xffffffff for t in range(40, 60): TEMP = _rotateLeft(A, 5) + ((B & C) | (B & D) | (C & D)) + E + W[t] + K[2] E = D D = C C = _rotateLeft(B, 30) & 0xffffffff B = A A = TEMP & 0xffffffff for t in range(60, 80): TEMP = _rotateLeft(A, 5) + (B ^ C ^ D) + E + W[t] + K[3] E = D D = C C = _rotateLeft(B, 30) & 0xffffffff B = A A = TEMP & 0xffffffff self.H0 = (self.H0 + A) & 0xffffffff self.H1 = (self.H1 + B) & 0xffffffff self.H2 = (self.H2 + C) & 0xffffffff self.H3 = (self.H3 + D) & 0xffffffff self.H4 = (self.H4 + E) & 0xffffffff
[ "This", "loop", "was", "unrolled", "to", "gain", "about", "10%", "in", "speed", "for", "t", "in", "range", "(", "0", "80", ")", ":", "TEMP", "=", "_rotateLeft", "(", "A", "5", ")", "+", "f", "[", "t", "/", "20", "]", "+", "E", "+", "W", "[", ...
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_sha.py#L152-L212
[ "def", "_transform", "(", "self", ",", "W", ")", ":", "for", "t", "in", "range", "(", "16", ",", "80", ")", ":", "W", ".", "append", "(", "_rotateLeft", "(", "W", "[", "t", "-", "3", "]", "^", "W", "[", "t", "-", "8", "]", "^", "W", "[", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
sha.digest
Terminate the message-digest computation and return digest. Return the digest of the strings passed to the update() method so far. This is a 16-byte string which may contain non-ASCII characters, including null bytes.
third_party/pypy/_sha.py
def digest(self): """Terminate the message-digest computation and return digest. Return the digest of the strings passed to the update() method so far. This is a 16-byte string which may contain non-ASCII characters, including null bytes. """ H0 = self.H0 H1 = self.H1 H2 = self.H2 H3 = self.H3 H4 = self.H4 input = [] + self.input count = [] + self.count index = (self.count[1] >> 3) & 0x3f if index < 56: padLen = 56 - index else: padLen = 120 - index padding = ['\200'] + ['\000'] * 63 self.update(padding[:padLen]) # Append length (before padding). bits = _bytelist2longBigEndian(self.input[:56]) + count self._transform(bits) # Store state in digest. digest = _long2bytesBigEndian(self.H0, 4) + \ _long2bytesBigEndian(self.H1, 4) + \ _long2bytesBigEndian(self.H2, 4) + \ _long2bytesBigEndian(self.H3, 4) + \ _long2bytesBigEndian(self.H4, 4) self.H0 = H0 self.H1 = H1 self.H2 = H2 self.H3 = H3 self.H4 = H4 self.input = input self.count = count return digest
def digest(self): """Terminate the message-digest computation and return digest. Return the digest of the strings passed to the update() method so far. This is a 16-byte string which may contain non-ASCII characters, including null bytes. """ H0 = self.H0 H1 = self.H1 H2 = self.H2 H3 = self.H3 H4 = self.H4 input = [] + self.input count = [] + self.count index = (self.count[1] >> 3) & 0x3f if index < 56: padLen = 56 - index else: padLen = 120 - index padding = ['\200'] + ['\000'] * 63 self.update(padding[:padLen]) # Append length (before padding). bits = _bytelist2longBigEndian(self.input[:56]) + count self._transform(bits) # Store state in digest. digest = _long2bytesBigEndian(self.H0, 4) + \ _long2bytesBigEndian(self.H1, 4) + \ _long2bytesBigEndian(self.H2, 4) + \ _long2bytesBigEndian(self.H3, 4) + \ _long2bytesBigEndian(self.H4, 4) self.H0 = H0 self.H1 = H1 self.H2 = H2 self.H3 = H3 self.H4 = H4 self.input = input self.count = count return digest
[ "Terminate", "the", "message", "-", "digest", "computation", "and", "return", "digest", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_sha.py#L261-L307
[ "def", "digest", "(", "self", ")", ":", "H0", "=", "self", ".", "H0", "H1", "=", "self", ".", "H1", "H2", "=", "self", ".", "H2", "H3", "=", "self", ".", "H3", "H4", "=", "self", ".", "H4", "input", "=", "[", "]", "+", "self", ".", "input",...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
Lexer.next
Returns token at ``offset`` as a :class:`Token` and advances ``offset`` to point past the end of the token, where the token has: - *range* which is a :class:`pythonparser.source.Range` that includes the token but not surrounding whitespace, - *kind* which is a string containing one of Python keywords or operators, ``newline``, ``float``, ``int``, ``complex``, ``strbegin``, ``strdata``, ``strend``, ``ident``, ``indent``, ``dedent`` or ``eof`` (if ``eof_token`` is True). - *value* which is the flags as lowercase string if *kind* is ``strbegin``, the string contents if *kind* is ``strdata``, the numeric value if *kind* is ``float``, ``int`` or ``complex``, the identifier if *kind* is ``ident`` and ``None`` in any other case. :param eof_token: if true, will return a token with kind ``eof`` when the input is exhausted; if false, will raise ``StopIteration``.
third_party/pythonparser/lexer.py
def next(self, eof_token=False): """ Returns token at ``offset`` as a :class:`Token` and advances ``offset`` to point past the end of the token, where the token has: - *range* which is a :class:`pythonparser.source.Range` that includes the token but not surrounding whitespace, - *kind* which is a string containing one of Python keywords or operators, ``newline``, ``float``, ``int``, ``complex``, ``strbegin``, ``strdata``, ``strend``, ``ident``, ``indent``, ``dedent`` or ``eof`` (if ``eof_token`` is True). - *value* which is the flags as lowercase string if *kind* is ``strbegin``, the string contents if *kind* is ``strdata``, the numeric value if *kind* is ``float``, ``int`` or ``complex``, the identifier if *kind* is ``ident`` and ``None`` in any other case. :param eof_token: if true, will return a token with kind ``eof`` when the input is exhausted; if false, will raise ``StopIteration``. """ if len(self.queue) == 0: self._refill(eof_token) return self.queue.pop(0)
def next(self, eof_token=False): """ Returns token at ``offset`` as a :class:`Token` and advances ``offset`` to point past the end of the token, where the token has: - *range* which is a :class:`pythonparser.source.Range` that includes the token but not surrounding whitespace, - *kind* which is a string containing one of Python keywords or operators, ``newline``, ``float``, ``int``, ``complex``, ``strbegin``, ``strdata``, ``strend``, ``ident``, ``indent``, ``dedent`` or ``eof`` (if ``eof_token`` is True). - *value* which is the flags as lowercase string if *kind* is ``strbegin``, the string contents if *kind* is ``strdata``, the numeric value if *kind* is ``float``, ``int`` or ``complex``, the identifier if *kind* is ``ident`` and ``None`` in any other case. :param eof_token: if true, will return a token with kind ``eof`` when the input is exhausted; if false, will raise ``StopIteration``. """ if len(self.queue) == 0: self._refill(eof_token) return self.queue.pop(0)
[ "Returns", "token", "at", "offset", "as", "a", ":", "class", ":", "Token", "and", "advances", "offset", "to", "point", "past", "the", "end", "of", "the", "token", "where", "the", "token", "has", ":" ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pythonparser/lexer.py#L208-L230
[ "def", "next", "(", "self", ",", "eof_token", "=", "False", ")", ":", "if", "len", "(", "self", ".", "queue", ")", "==", "0", ":", "self", ".", "_refill", "(", "eof_token", ")", "return", "self", ".", "queue", ".", "pop", "(", "0", ")" ]
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
Lexer.peek
Same as :meth:`next`, except the token is not dequeued.
third_party/pythonparser/lexer.py
def peek(self, eof_token=False): """Same as :meth:`next`, except the token is not dequeued.""" if len(self.queue) == 0: self._refill(eof_token) return self.queue[-1]
def peek(self, eof_token=False): """Same as :meth:`next`, except the token is not dequeued.""" if len(self.queue) == 0: self._refill(eof_token) return self.queue[-1]
[ "Same", "as", ":", "meth", ":", "next", "except", "the", "token", "is", "not", "dequeued", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pythonparser/lexer.py#L232-L237
[ "def", "peek", "(", "self", ",", "eof_token", "=", "False", ")", ":", "if", "len", "(", "self", ".", "queue", ")", "==", "0", ":", "self", ".", "_refill", "(", "eof_token", ")", "return", "self", ".", "queue", "[", "-", "1", "]" ]
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
scheduler.enterabs
Enter a new event in the queue at an absolute time. Returns an ID for the event which can be used to remove it, if necessary.
third_party/stdlib/sched.py
def enterabs(self, time, priority, action, argument): """Enter a new event in the queue at an absolute time. Returns an ID for the event which can be used to remove it, if necessary. """ event = Event(time, priority, action, argument) heapq.heappush(self._queue, event) return event
def enterabs(self, time, priority, action, argument): """Enter a new event in the queue at an absolute time. Returns an ID for the event which can be used to remove it, if necessary. """ event = Event(time, priority, action, argument) heapq.heappush(self._queue, event) return event
[ "Enter", "a", "new", "event", "in", "the", "queue", "at", "an", "absolute", "time", ".", "Returns", "an", "ID", "for", "the", "event", "which", "can", "be", "used", "to", "remove", "it", "if", "necessary", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/sched.py#L64-L71
[ "def", "enterabs", "(", "self", ",", "time", ",", "priority", ",", "action", ",", "argument", ")", ":", "event", "=", "Event", "(", "time", ",", "priority", ",", "action", ",", "argument", ")", "heapq", ".", "heappush", "(", "self", ".", "_queue", ",...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
scheduler.run
Execute events until the queue is empty. When there is a positive delay until the first event, the delay function is called and the event is left in the queue; otherwise, the event is removed from the queue and executed (its action function is called, passing it the argument). If the delay function returns prematurely, it is simply restarted. It is legal for both the delay function and the action function to modify the queue or to raise an exception; exceptions are not caught but the scheduler's state remains well-defined so run() may be called again. A questionable hack is added to allow other threads to run: just after an event is executed, a delay of 0 is executed, to avoid monopolizing the CPU when other threads are also runnable.
third_party/stdlib/sched.py
def run(self): """Execute events until the queue is empty. When there is a positive delay until the first event, the delay function is called and the event is left in the queue; otherwise, the event is removed from the queue and executed (its action function is called, passing it the argument). If the delay function returns prematurely, it is simply restarted. It is legal for both the delay function and the action function to modify the queue or to raise an exception; exceptions are not caught but the scheduler's state remains well-defined so run() may be called again. A questionable hack is added to allow other threads to run: just after an event is executed, a delay of 0 is executed, to avoid monopolizing the CPU when other threads are also runnable. """ # localize variable access to minimize overhead # and to improve thread safety q = self._queue delayfunc = self.delayfunc timefunc = self.timefunc pop = heapq.heappop while q: # TODO: modified part of grumpy version. checked_event = q[0] time, priority, action, argument = checked_event.get_fields() now = timefunc() if now < time: delayfunc(time - now) else: event = pop(q) # Verify that the event was not removed or altered # by another thread after we last looked at q[0]. if event is checked_event: action(*argument) delayfunc(0) # Let other threads run else: heapq.heappush(q, event)
def run(self): """Execute events until the queue is empty. When there is a positive delay until the first event, the delay function is called and the event is left in the queue; otherwise, the event is removed from the queue and executed (its action function is called, passing it the argument). If the delay function returns prematurely, it is simply restarted. It is legal for both the delay function and the action function to modify the queue or to raise an exception; exceptions are not caught but the scheduler's state remains well-defined so run() may be called again. A questionable hack is added to allow other threads to run: just after an event is executed, a delay of 0 is executed, to avoid monopolizing the CPU when other threads are also runnable. """ # localize variable access to minimize overhead # and to improve thread safety q = self._queue delayfunc = self.delayfunc timefunc = self.timefunc pop = heapq.heappop while q: # TODO: modified part of grumpy version. checked_event = q[0] time, priority, action, argument = checked_event.get_fields() now = timefunc() if now < time: delayfunc(time - now) else: event = pop(q) # Verify that the event was not removed or altered # by another thread after we last looked at q[0]. if event is checked_event: action(*argument) delayfunc(0) # Let other threads run else: heapq.heappush(q, event)
[ "Execute", "events", "until", "the", "queue", "is", "empty", ".", "When", "there", "is", "a", "positive", "delay", "until", "the", "first", "event", "the", "delay", "function", "is", "called", "and", "the", "event", "is", "left", "in", "the", "queue", ";...
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/sched.py#L92-L130
[ "def", "run", "(", "self", ")", ":", "# localize variable access to minimize overhead", "# and to improve thread safety", "q", "=", "self", ".", "_queue", "delayfunc", "=", "self", ".", "delayfunc", "timefunc", "=", "self", ".", "timefunc", "pop", "=", "heapq", "....
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
scheduler.queue
An ordered list of upcoming events. Events are named tuples with fields for: time, priority, action, arguments
third_party/stdlib/sched.py
def queue(self): """An ordered list of upcoming events. Events are named tuples with fields for: time, priority, action, arguments """ # Use heapq to sort the queue rather than using 'sorted(self._queue)'. # With heapq, two events scheduled at the same time will show in # the actual order they would be retrieved. events = self._queue[:] return map(heapq.heappop, [events]*len(events))
def queue(self): """An ordered list of upcoming events. Events are named tuples with fields for: time, priority, action, arguments """ # Use heapq to sort the queue rather than using 'sorted(self._queue)'. # With heapq, two events scheduled at the same time will show in # the actual order they would be retrieved. events = self._queue[:] return map(heapq.heappop, [events]*len(events))
[ "An", "ordered", "list", "of", "upcoming", "events", ".", "Events", "are", "named", "tuples", "with", "fields", "for", ":", "time", "priority", "action", "arguments" ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/sched.py#L133-L142
[ "def", "queue", "(", "self", ")", ":", "# Use heapq to sort the queue rather than using 'sorted(self._queue)'.", "# With heapq, two events scheduled at the same time will show in", "# the actual order they would be retrieved.", "events", "=", "self", ".", "_queue", "[", ":", "]", "...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
register_dialect
Create a mapping from a string name to a dialect class. dialect = csv.register_dialect(name, dialect)
third_party/pypy/_csv.py
def register_dialect(name, dialect=None, **kwargs): """Create a mapping from a string name to a dialect class. dialect = csv.register_dialect(name, dialect)""" if not isinstance(name, basestring): raise TypeError("dialect name must be a string or unicode") dialect = _call_dialect(dialect, kwargs) _dialects[name] = dialect
def register_dialect(name, dialect=None, **kwargs): """Create a mapping from a string name to a dialect class. dialect = csv.register_dialect(name, dialect)""" if not isinstance(name, basestring): raise TypeError("dialect name must be a string or unicode") dialect = _call_dialect(dialect, kwargs) _dialects[name] = dialect
[ "Create", "a", "mapping", "from", "a", "string", "name", "to", "a", "dialect", "class", ".", "dialect", "=", "csv", ".", "register_dialect", "(", "name", "dialect", ")" ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_csv.py#L176-L183
[ "def", "register_dialect", "(", "name", ",", "dialect", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "name", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"dialect name must be a string or unicode\"", ")", "dialec...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
field_size_limit
Sets an upper limit on parsed fields. csv.field_size_limit([limit]) Returns old limit. If limit is not given, no new limit is set and the old limit is returned
third_party/pypy/_csv.py
def field_size_limit(limit=undefined): """Sets an upper limit on parsed fields. csv.field_size_limit([limit]) Returns old limit. If limit is not given, no new limit is set and the old limit is returned""" global _field_limit old_limit = _field_limit if limit is not undefined: if not isinstance(limit, (int, long)): raise TypeError("int expected, got %s" % (limit.__class__.__name__,)) _field_limit = limit return old_limit
def field_size_limit(limit=undefined): """Sets an upper limit on parsed fields. csv.field_size_limit([limit]) Returns old limit. If limit is not given, no new limit is set and the old limit is returned""" global _field_limit old_limit = _field_limit if limit is not undefined: if not isinstance(limit, (int, long)): raise TypeError("int expected, got %s" % (limit.__class__.__name__,)) _field_limit = limit return old_limit
[ "Sets", "an", "upper", "limit", "on", "parsed", "fields", ".", "csv", ".", "field_size_limit", "(", "[", "limit", "]", ")" ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_csv.py#L566-L582
[ "def", "field_size_limit", "(", "limit", "=", "undefined", ")", ":", "global", "_field_limit", "old_limit", "=", "_field_limit", "if", "limit", "is", "not", "undefined", ":", "if", "not", "isinstance", "(", "limit", ",", "(", "int", ",", "long", ")", ")", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
copy
Shallow copy operation on arbitrary Python objects. See the module's __doc__ string for more info.
third_party/stdlib/copy.py
def copy(x): """Shallow copy operation on arbitrary Python objects. See the module's __doc__ string for more info. """ cls = type(x) copier = _copy_dispatch.get(cls) if copier: return copier(x) copier = getattr(cls, "__copy__", None) if copier: return copier(x) reductor = dispatch_table.get(cls) if reductor: rv = reductor(x) else: reductor = getattr(x, "__reduce_ex__", None) if reductor: rv = reductor(2) else: reductor = getattr(x, "__reduce__", None) if reductor: rv = reductor() else: raise Error("un(shallow)copyable object of type %s" % cls) return _reconstruct(x, rv, 0)
def copy(x): """Shallow copy operation on arbitrary Python objects. See the module's __doc__ string for more info. """ cls = type(x) copier = _copy_dispatch.get(cls) if copier: return copier(x) copier = getattr(cls, "__copy__", None) if copier: return copier(x) reductor = dispatch_table.get(cls) if reductor: rv = reductor(x) else: reductor = getattr(x, "__reduce_ex__", None) if reductor: rv = reductor(2) else: reductor = getattr(x, "__reduce__", None) if reductor: rv = reductor() else: raise Error("un(shallow)copyable object of type %s" % cls) return _reconstruct(x, rv, 0)
[ "Shallow", "copy", "operation", "on", "arbitrary", "Python", "objects", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/copy.py#L69-L99
[ "def", "copy", "(", "x", ")", ":", "cls", "=", "type", "(", "x", ")", "copier", "=", "_copy_dispatch", ".", "get", "(", "cls", ")", "if", "copier", ":", "return", "copier", "(", "x", ")", "copier", "=", "getattr", "(", "cls", ",", "\"__copy__\"", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
deepcopy
Deep copy operation on arbitrary Python objects. See the module's __doc__ string for more info.
third_party/stdlib/copy.py
def deepcopy(x, memo=None, _nil=[]): """Deep copy operation on arbitrary Python objects. See the module's __doc__ string for more info. """ if memo is None: memo = {} d = id(x) y = memo.get(d, _nil) if y is not _nil: return y cls = type(x) copier = _deepcopy_dispatch.get(cls) if copier: y = copier(x, memo) else: try: issc = issubclass(cls, type) except TypeError: # cls is not a class (old Boost; see SF #502085) issc = 0 if issc: y = _deepcopy_atomic(x, memo) else: copier = getattr(x, "__deepcopy__", None) if copier: y = copier(memo) else: reductor = dispatch_table.get(cls) if reductor: rv = reductor(x) else: reductor = getattr(x, "__reduce_ex__", None) if reductor: rv = reductor(2) else: reductor = getattr(x, "__reduce__", None) if reductor: rv = reductor() else: raise Error( "un(deep)copyable object of type %s" % cls) y = _reconstruct(x, rv, 1, memo) memo[d] = y _keep_alive(x, memo) # Make sure x lives at least as long as d return y
def deepcopy(x, memo=None, _nil=[]): """Deep copy operation on arbitrary Python objects. See the module's __doc__ string for more info. """ if memo is None: memo = {} d = id(x) y = memo.get(d, _nil) if y is not _nil: return y cls = type(x) copier = _deepcopy_dispatch.get(cls) if copier: y = copier(x, memo) else: try: issc = issubclass(cls, type) except TypeError: # cls is not a class (old Boost; see SF #502085) issc = 0 if issc: y = _deepcopy_atomic(x, memo) else: copier = getattr(x, "__deepcopy__", None) if copier: y = copier(memo) else: reductor = dispatch_table.get(cls) if reductor: rv = reductor(x) else: reductor = getattr(x, "__reduce_ex__", None) if reductor: rv = reductor(2) else: reductor = getattr(x, "__reduce__", None) if reductor: rv = reductor() else: raise Error( "un(deep)copyable object of type %s" % cls) y = _reconstruct(x, rv, 1, memo) memo[d] = y _keep_alive(x, memo) # Make sure x lives at least as long as d return y
[ "Deep", "copy", "operation", "on", "arbitrary", "Python", "objects", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/copy.py#L148-L197
[ "def", "deepcopy", "(", "x", ",", "memo", "=", "None", ",", "_nil", "=", "[", "]", ")", ":", "if", "memo", "is", "None", ":", "memo", "=", "{", "}", "d", "=", "id", "(", "x", ")", "y", "=", "memo", ".", "get", "(", "d", ",", "_nil", ")", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
_keep_alive
Keeps a reference to the object x in the memo. Because we remember objects by their id, we have to assure that possibly temporary objects are kept alive by referencing them. We store a reference at the id of the memo, which should normally not be used unless someone tries to deepcopy the memo itself...
third_party/stdlib/copy.py
def _keep_alive(x, memo): """Keeps a reference to the object x in the memo. Because we remember objects by their id, we have to assure that possibly temporary objects are kept alive by referencing them. We store a reference at the id of the memo, which should normally not be used unless someone tries to deepcopy the memo itself... """ try: memo[id(memo)].append(x) except KeyError: # aha, this is the first one :-) memo[id(memo)]=[x]
def _keep_alive(x, memo): """Keeps a reference to the object x in the memo. Because we remember objects by their id, we have to assure that possibly temporary objects are kept alive by referencing them. We store a reference at the id of the memo, which should normally not be used unless someone tries to deepcopy the memo itself... """ try: memo[id(memo)].append(x) except KeyError: # aha, this is the first one :-) memo[id(memo)]=[x]
[ "Keeps", "a", "reference", "to", "the", "object", "x", "in", "the", "memo", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/copy.py#L270-L284
[ "def", "_keep_alive", "(", "x", ",", "memo", ")", ":", "try", ":", "memo", "[", "id", "(", "memo", ")", "]", ".", "append", "(", "x", ")", "except", "KeyError", ":", "# aha, this is the first one :-)", "memo", "[", "id", "(", "memo", ")", "]", "=", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
warnpy3k
Issue a deprecation warning for Python 3.x related changes. Warnings are omitted unless Python is started with the -3 option.
third_party/stdlib/warnings.py
def warnpy3k(message, category=None, stacklevel=1): """Issue a deprecation warning for Python 3.x related changes. Warnings are omitted unless Python is started with the -3 option. """ if sys.py3kwarning: if category is None: category = DeprecationWarning warn(message, category, stacklevel+1)
def warnpy3k(message, category=None, stacklevel=1): """Issue a deprecation warning for Python 3.x related changes. Warnings are omitted unless Python is started with the -3 option. """ if sys.py3kwarning: if category is None: category = DeprecationWarning warn(message, category, stacklevel+1)
[ "Issue", "a", "deprecation", "warning", "for", "Python", "3", ".", "x", "related", "changes", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/warnings.py#L16-L24
[ "def", "warnpy3k", "(", "message", ",", "category", "=", "None", ",", "stacklevel", "=", "1", ")", ":", "if", "sys", ".", "py3kwarning", ":", "if", "category", "is", "None", ":", "category", "=", "DeprecationWarning", "warn", "(", "message", ",", "catego...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
_show_warning
Hook to write a warning to a file; replace if you like.
third_party/stdlib/warnings.py
def _show_warning(message, category, filename, lineno, file=None, line=None): """Hook to write a warning to a file; replace if you like.""" if file is None: file = sys.stderr if file is None: # sys.stderr is None - warnings get lost return try: file.write(formatwarning(message, category, filename, lineno, line)) except (IOError, UnicodeError): pass # the file (probably stderr) is invalid - this warning gets lost.
def _show_warning(message, category, filename, lineno, file=None, line=None): """Hook to write a warning to a file; replace if you like.""" if file is None: file = sys.stderr if file is None: # sys.stderr is None - warnings get lost return try: file.write(formatwarning(message, category, filename, lineno, line)) except (IOError, UnicodeError): pass # the file (probably stderr) is invalid - this warning gets lost.
[ "Hook", "to", "write", "a", "warning", "to", "a", "file", ";", "replace", "if", "you", "like", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/warnings.py#L26-L36
[ "def", "_show_warning", "(", "message", ",", "category", ",", "filename", ",", "lineno", ",", "file", "=", "None", ",", "line", "=", "None", ")", ":", "if", "file", "is", "None", ":", "file", "=", "sys", ".", "stderr", "if", "file", "is", "None", "...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
formatwarning
Function to format a warning the standard way.
third_party/stdlib/warnings.py
def formatwarning(message, category, filename, lineno, line=None): """Function to format a warning the standard way.""" try: unicodetype = unicode except NameError: unicodetype = () try: message = str(message) except UnicodeEncodeError: pass s = "%s: %s: %s\n" % (lineno, category.__name__, message) line = linecache.getline(filename, lineno) if line is None else line if line: line = line.strip() if isinstance(s, unicodetype) and isinstance(line, str): line = unicode(line, 'latin1') s += " %s\n" % line if isinstance(s, unicodetype) and isinstance(filename, str): enc = sys.getfilesystemencoding() if enc: try: filename = unicode(filename, enc) except UnicodeDecodeError: pass s = "%s:%s" % (filename, s) return s
def formatwarning(message, category, filename, lineno, line=None): """Function to format a warning the standard way.""" try: unicodetype = unicode except NameError: unicodetype = () try: message = str(message) except UnicodeEncodeError: pass s = "%s: %s: %s\n" % (lineno, category.__name__, message) line = linecache.getline(filename, lineno) if line is None else line if line: line = line.strip() if isinstance(s, unicodetype) and isinstance(line, str): line = unicode(line, 'latin1') s += " %s\n" % line if isinstance(s, unicodetype) and isinstance(filename, str): enc = sys.getfilesystemencoding() if enc: try: filename = unicode(filename, enc) except UnicodeDecodeError: pass s = "%s:%s" % (filename, s) return s
[ "Function", "to", "format", "a", "warning", "the", "standard", "way", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/warnings.py#L41-L66
[ "def", "formatwarning", "(", "message", ",", "category", ",", "filename", ",", "lineno", ",", "line", "=", "None", ")", ":", "try", ":", "unicodetype", "=", "unicode", "except", "NameError", ":", "unicodetype", "=", "(", ")", "try", ":", "message", "=", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
filterwarnings
Insert an entry into the list of warnings filters (at the front). 'action' -- one of "error", "ignore", "always", "default", "module", or "once" 'message' -- a regex that the warning message must match 'category' -- a class that the warning must be a subclass of 'module' -- a regex that the module name must match 'lineno' -- an integer line number, 0 matches all warnings 'append' -- if true, append to the list of filters
third_party/stdlib/warnings.py
def filterwarnings(action, message="", category=Warning, module="", lineno=0, append=0): """Insert an entry into the list of warnings filters (at the front). 'action' -- one of "error", "ignore", "always", "default", "module", or "once" 'message' -- a regex that the warning message must match 'category' -- a class that the warning must be a subclass of 'module' -- a regex that the module name must match 'lineno' -- an integer line number, 0 matches all warnings 'append' -- if true, append to the list of filters """ assert action in ("error", "ignore", "always", "default", "module", "once"), "invalid action: %r" % (action,) assert isinstance(message, basestring), "message must be a string" assert isinstance(category, type), "category must be a class" assert issubclass(category, Warning), "category must be a Warning subclass" assert isinstance(module, basestring), "module must be a string" assert isinstance(lineno, int) and lineno >= 0, \ "lineno must be an int >= 0" item = (action, re.compile(message, re.I), category, re.compile(module), lineno) if append: filters.append(item) else: filters.insert(0, item)
def filterwarnings(action, message="", category=Warning, module="", lineno=0, append=0): """Insert an entry into the list of warnings filters (at the front). 'action' -- one of "error", "ignore", "always", "default", "module", or "once" 'message' -- a regex that the warning message must match 'category' -- a class that the warning must be a subclass of 'module' -- a regex that the module name must match 'lineno' -- an integer line number, 0 matches all warnings 'append' -- if true, append to the list of filters """ assert action in ("error", "ignore", "always", "default", "module", "once"), "invalid action: %r" % (action,) assert isinstance(message, basestring), "message must be a string" assert isinstance(category, type), "category must be a class" assert issubclass(category, Warning), "category must be a Warning subclass" assert isinstance(module, basestring), "module must be a string" assert isinstance(lineno, int) and lineno >= 0, \ "lineno must be an int >= 0" item = (action, re.compile(message, re.I), category, re.compile(module), lineno) if append: filters.append(item) else: filters.insert(0, item)
[ "Insert", "an", "entry", "into", "the", "list", "of", "warnings", "filters", "(", "at", "the", "front", ")", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/warnings.py#L68-L93
[ "def", "filterwarnings", "(", "action", ",", "message", "=", "\"\"", ",", "category", "=", "Warning", ",", "module", "=", "\"\"", ",", "lineno", "=", "0", ",", "append", "=", "0", ")", ":", "assert", "action", "in", "(", "\"error\"", ",", "\"ignore\"",...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
simplefilter
Insert a simple entry into the list of warnings filters (at the front). A simple filter matches all modules and messages. 'action' -- one of "error", "ignore", "always", "default", "module", or "once" 'category' -- a class that the warning must be a subclass of 'lineno' -- an integer line number, 0 matches all warnings 'append' -- if true, append to the list of filters
third_party/stdlib/warnings.py
def simplefilter(action, category=Warning, lineno=0, append=0): """Insert a simple entry into the list of warnings filters (at the front). A simple filter matches all modules and messages. 'action' -- one of "error", "ignore", "always", "default", "module", or "once" 'category' -- a class that the warning must be a subclass of 'lineno' -- an integer line number, 0 matches all warnings 'append' -- if true, append to the list of filters """ assert action in ("error", "ignore", "always", "default", "module", "once"), "invalid action: %r" % (action,) assert isinstance(lineno, int) and lineno >= 0, \ "lineno must be an int >= 0" item = (action, None, category, None, lineno) if append: filters.append(item) else: filters.insert(0, item)
def simplefilter(action, category=Warning, lineno=0, append=0): """Insert a simple entry into the list of warnings filters (at the front). A simple filter matches all modules and messages. 'action' -- one of "error", "ignore", "always", "default", "module", or "once" 'category' -- a class that the warning must be a subclass of 'lineno' -- an integer line number, 0 matches all warnings 'append' -- if true, append to the list of filters """ assert action in ("error", "ignore", "always", "default", "module", "once"), "invalid action: %r" % (action,) assert isinstance(lineno, int) and lineno >= 0, \ "lineno must be an int >= 0" item = (action, None, category, None, lineno) if append: filters.append(item) else: filters.insert(0, item)
[ "Insert", "a", "simple", "entry", "into", "the", "list", "of", "warnings", "filters", "(", "at", "the", "front", ")", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/warnings.py#L95-L113
[ "def", "simplefilter", "(", "action", ",", "category", "=", "Warning", ",", "lineno", "=", "0", ",", "append", "=", "0", ")", ":", "assert", "action", "in", "(", "\"error\"", ",", "\"ignore\"", ",", "\"always\"", ",", "\"default\"", ",", "\"module\"", ",...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
warn
Issue a warning, or maybe ignore it or raise an exception.
third_party/stdlib/warnings.py
def warn(message, category=None, stacklevel=1): """Issue a warning, or maybe ignore it or raise an exception.""" # Check if message is already a Warning object if isinstance(message, Warning): category = message.__class__ # Check category argument if category is None: category = UserWarning assert issubclass(category, Warning) # Get context information try: caller = sys._getframe(stacklevel) except ValueError: globals = sys.__dict__ lineno = 1 else: globals = caller.f_globals lineno = caller.f_lineno if '__name__' in globals: module = globals['__name__'] else: module = "<string>" filename = globals.get('__file__') if filename: fnl = filename.lower() if fnl.endswith((".pyc", ".pyo")): filename = filename[:-1] else: if module == "__main__": try: filename = sys.argv[0] except AttributeError: # embedded interpreters don't have sys.argv, see bug #839151 filename = '__main__' if not filename: filename = module registry = globals.setdefault("__warningregistry__", {}) warn_explicit(message, category, filename, lineno, module, registry, globals)
def warn(message, category=None, stacklevel=1): """Issue a warning, or maybe ignore it or raise an exception.""" # Check if message is already a Warning object if isinstance(message, Warning): category = message.__class__ # Check category argument if category is None: category = UserWarning assert issubclass(category, Warning) # Get context information try: caller = sys._getframe(stacklevel) except ValueError: globals = sys.__dict__ lineno = 1 else: globals = caller.f_globals lineno = caller.f_lineno if '__name__' in globals: module = globals['__name__'] else: module = "<string>" filename = globals.get('__file__') if filename: fnl = filename.lower() if fnl.endswith((".pyc", ".pyo")): filename = filename[:-1] else: if module == "__main__": try: filename = sys.argv[0] except AttributeError: # embedded interpreters don't have sys.argv, see bug #839151 filename = '__main__' if not filename: filename = module registry = globals.setdefault("__warningregistry__", {}) warn_explicit(message, category, filename, lineno, module, registry, globals)
[ "Issue", "a", "warning", "or", "maybe", "ignore", "it", "or", "raise", "an", "exception", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/warnings.py#L194-L232
[ "def", "warn", "(", "message", ",", "category", "=", "None", ",", "stacklevel", "=", "1", ")", ":", "# Check if message is already a Warning object", "if", "isinstance", "(", "message", ",", "Warning", ")", ":", "category", "=", "message", ".", "__class__", "#...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
Set._hash
Compute the hash value of a set. Note that we don't define __hash__: not all sets are hashable. But if you define a hashable set type, its __hash__ should call this function. This must be compatible __eq__. All sets ought to compare equal if they contain the same elements, regardless of how they are implemented, and regardless of the order of the elements; so there's not much freedom for __eq__ or __hash__. We match the algorithm used by the built-in frozenset type.
third_party/stdlib/_abcoll.py
def _hash(self): """Compute the hash value of a set. Note that we don't define __hash__: not all sets are hashable. But if you define a hashable set type, its __hash__ should call this function. This must be compatible __eq__. All sets ought to compare equal if they contain the same elements, regardless of how they are implemented, and regardless of the order of the elements; so there's not much freedom for __eq__ or __hash__. We match the algorithm used by the built-in frozenset type. """ MAX = sys.maxint MASK = 2 * MAX + 1 n = len(self) h = 1927868237 * (n + 1) h &= MASK for x in self: hx = hash(x) h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167 h &= MASK h = h * 69069 + 907133923 h &= MASK if h > MAX: h -= MASK + 1 if h == -1: h = 590923713 return h
def _hash(self): """Compute the hash value of a set. Note that we don't define __hash__: not all sets are hashable. But if you define a hashable set type, its __hash__ should call this function. This must be compatible __eq__. All sets ought to compare equal if they contain the same elements, regardless of how they are implemented, and regardless of the order of the elements; so there's not much freedom for __eq__ or __hash__. We match the algorithm used by the built-in frozenset type. """ MAX = sys.maxint MASK = 2 * MAX + 1 n = len(self) h = 1927868237 * (n + 1) h &= MASK for x in self: hx = hash(x) h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167 h &= MASK h = h * 69069 + 907133923 h &= MASK if h > MAX: h -= MASK + 1 if h == -1: h = 590923713 return h
[ "Compute", "the", "hash", "value", "of", "a", "set", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/_abcoll.py#L249-L279
[ "def", "_hash", "(", "self", ")", ":", "MAX", "=", "sys", ".", "maxint", "MASK", "=", "2", "*", "MAX", "+", "1", "n", "=", "len", "(", "self", ")", "h", "=", "1927868237", "*", "(", "n", "+", "1", ")", "h", "&=", "MASK", "for", "x", "in", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
MutableSet.remove
Remove an element. If not a member, raise a KeyError.
third_party/stdlib/_abcoll.py
def remove(self, value): """Remove an element. If not a member, raise a KeyError.""" if value not in self: raise KeyError(value) self.discard(value)
def remove(self, value): """Remove an element. If not a member, raise a KeyError.""" if value not in self: raise KeyError(value) self.discard(value)
[ "Remove", "an", "element", ".", "If", "not", "a", "member", "raise", "a", "KeyError", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/_abcoll.py#L306-L310
[ "def", "remove", "(", "self", ",", "value", ")", ":", "if", "value", "not", "in", "self", ":", "raise", "KeyError", "(", "value", ")", "self", ".", "discard", "(", "value", ")" ]
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
MutableSet.pop
Return the popped value. Raise KeyError if empty.
third_party/stdlib/_abcoll.py
def pop(self): """Return the popped value. Raise KeyError if empty.""" it = iter(self) try: value = next(it) except StopIteration: raise KeyError self.discard(value) return value
def pop(self): """Return the popped value. Raise KeyError if empty.""" it = iter(self) try: value = next(it) except StopIteration: raise KeyError self.discard(value) return value
[ "Return", "the", "popped", "value", ".", "Raise", "KeyError", "if", "empty", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/_abcoll.py#L312-L320
[ "def", "pop", "(", "self", ")", ":", "it", "=", "iter", "(", "self", ")", "try", ":", "value", "=", "next", "(", "it", ")", "except", "StopIteration", ":", "raise", "KeyError", "self", ".", "discard", "(", "value", ")", "return", "value" ]
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
MutableMapping.update
D.update([E, ]**F) -> None. Update D from mapping/iterable E and F. If E present and has a .keys() method, does: for k in E: D[k] = E[k] If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v In either case, this is followed by: for k, v in F.items(): D[k] = v
third_party/stdlib/_abcoll.py
def update(*args, **kwds): ''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F. If E present and has a .keys() method, does: for k in E: D[k] = E[k] If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v In either case, this is followed by: for k, v in F.items(): D[k] = v ''' if not args: raise TypeError("descriptor 'update' of 'MutableMapping' object " "needs an argument") self = args[0] args = args[1:] if len(args) > 1: raise TypeError('update expected at most 1 arguments, got %d' % len(args)) if args: other = args[0] if isinstance(other, Mapping): for key in other: self[key] = other[key] elif hasattr(other, "keys"): for key in other.keys(): self[key] = other[key] else: for key, value in other: self[key] = value for key, value in kwds.items(): self[key] = value
def update(*args, **kwds): ''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F. If E present and has a .keys() method, does: for k in E: D[k] = E[k] If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v In either case, this is followed by: for k, v in F.items(): D[k] = v ''' if not args: raise TypeError("descriptor 'update' of 'MutableMapping' object " "needs an argument") self = args[0] args = args[1:] if len(args) > 1: raise TypeError('update expected at most 1 arguments, got %d' % len(args)) if args: other = args[0] if isinstance(other, Mapping): for key in other: self[key] = other[key] elif hasattr(other, "keys"): for key in other.keys(): self[key] = other[key] else: for key, value in other: self[key] = value for key, value in kwds.items(): self[key] = value
[ "D", ".", "update", "(", "[", "E", "]", "**", "F", ")", "-", ">", "None", ".", "Update", "D", "from", "mapping", "/", "iterable", "E", "and", "F", ".", "If", "E", "present", "and", "has", "a", ".", "keys", "()", "method", "does", ":", "for", ...
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/_abcoll.py#L551-L577
[ "def", "update", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "if", "not", "args", ":", "raise", "TypeError", "(", "\"descriptor 'update' of 'MutableMapping' object \"", "\"needs an argument\"", ")", "self", "=", "args", "[", "0", "]", "args", "=", "ar...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
Sequence.index
S.index(value) -> integer -- return first index of value. Raises ValueError if the value is not present.
third_party/stdlib/_abcoll.py
def index(self, value): '''S.index(value) -> integer -- return first index of value. Raises ValueError if the value is not present. ''' for i, v in enumerate(self): if v == value: return i raise ValueError
def index(self, value): '''S.index(value) -> integer -- return first index of value. Raises ValueError if the value is not present. ''' for i, v in enumerate(self): if v == value: return i raise ValueError
[ "S", ".", "index", "(", "value", ")", "-", ">", "integer", "--", "return", "first", "index", "of", "value", ".", "Raises", "ValueError", "if", "the", "value", "is", "not", "present", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/_abcoll.py#L624-L631
[ "def", "index", "(", "self", ",", "value", ")", ":", "for", "i", ",", "v", "in", "enumerate", "(", "self", ")", ":", "if", "v", "==", "value", ":", "return", "i", "raise", "ValueError" ]
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
MutableSequence.reverse
S.reverse() -- reverse *IN PLACE*
third_party/stdlib/_abcoll.py
def reverse(self): 'S.reverse() -- reverse *IN PLACE*' n = len(self) for i in range(n//2): self[i], self[n-i-1] = self[n-i-1], self[i]
def reverse(self): 'S.reverse() -- reverse *IN PLACE*' n = len(self) for i in range(n//2): self[i], self[n-i-1] = self[n-i-1], self[i]
[ "S", ".", "reverse", "()", "--", "reverse", "*", "IN", "PLACE", "*" ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/_abcoll.py#L669-L673
[ "def", "reverse", "(", "self", ")", ":", "n", "=", "len", "(", "self", ")", "for", "i", "in", "range", "(", "n", "//", "2", ")", ":", "self", "[", "i", "]", ",", "self", "[", "n", "-", "i", "-", "1", "]", "=", "self", "[", "n", "-", "i"...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
go_str
Returns value as a valid Go string literal.
compiler/util.py
def go_str(value): """Returns value as a valid Go string literal.""" io = StringIO.StringIO() io.write('"') for c in value: if c in _ESCAPES: io.write(_ESCAPES[c]) elif c in _SIMPLE_CHARS: io.write(c) else: io.write(r'\x{:02x}'.format(ord(c))) io.write('"') return io.getvalue()
def go_str(value): """Returns value as a valid Go string literal.""" io = StringIO.StringIO() io.write('"') for c in value: if c in _ESCAPES: io.write(_ESCAPES[c]) elif c in _SIMPLE_CHARS: io.write(c) else: io.write(r'\x{:02x}'.format(ord(c))) io.write('"') return io.getvalue()
[ "Returns", "value", "as", "a", "valid", "Go", "string", "literal", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/compiler/util.py#L137-L149
[ "def", "go_str", "(", "value", ")", ":", "io", "=", "StringIO", ".", "StringIO", "(", ")", "io", ".", "write", "(", "'\"'", ")", "for", "c", "in", "value", ":", "if", "c", "in", "_ESCAPES", ":", "io", ".", "write", "(", "_ESCAPES", "[", "c", "]...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
Writer.write_block
Outputs the boilerplate necessary for code blocks like functions. Args: block_: The Block object representing the code block. body: String containing Go code making up the body of the code block.
compiler/util.py
def write_block(self, block_, body): """Outputs the boilerplate necessary for code blocks like functions. Args: block_: The Block object representing the code block. body: String containing Go code making up the body of the code block. """ self.write('for ; πF.State() >= 0; πF.PopCheckpoint() {') with self.indent_block(): self.write('switch πF.State() {') self.write('case 0:') for checkpoint in block_.checkpoints: self.write_tmpl('case $state: goto Label$state', state=checkpoint) self.write('default: panic("unexpected function state")') self.write('}') # Assume that body is aligned with goto labels. with self.indent_block(-1): self.write(body) self.write('}')
def write_block(self, block_, body): """Outputs the boilerplate necessary for code blocks like functions. Args: block_: The Block object representing the code block. body: String containing Go code making up the body of the code block. """ self.write('for ; πF.State() >= 0; πF.PopCheckpoint() {') with self.indent_block(): self.write('switch πF.State() {') self.write('case 0:') for checkpoint in block_.checkpoints: self.write_tmpl('case $state: goto Label$state', state=checkpoint) self.write('default: panic("unexpected function state")') self.write('}') # Assume that body is aligned with goto labels. with self.indent_block(-1): self.write(body) self.write('}')
[ "Outputs", "the", "boilerplate", "necessary", "for", "code", "blocks", "like", "functions", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/compiler/util.py#L83-L101
[ "def", "write_block", "(", "self", ",", "block_", ",", "body", ")", ":", "self", ".", "write", "(", "'for ; πF.State() >= 0; πF.PopCheckpoint() {')", "", "with", "self", ".", "indent_block", "(", ")", ":", "self", ".", "write", "(", "'switch πF.State() {')", "...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
_RLock.acquire
Acquire a lock, blocking or non-blocking. When invoked without arguments: if this thread already owns the lock, increment the recursion level by one, and return immediately. Otherwise, if another thread owns the lock, block until the lock is unlocked. Once the lock is unlocked (not owned by any thread), then grab ownership, set the recursion level to one, and return. If more than one thread is blocked waiting until the lock is unlocked, only one at a time will be able to grab ownership of the lock. There is no return value in this case. When invoked with the blocking argument set to true, do the same thing as when called without arguments, and return true. When invoked with the blocking argument set to false, do not block. If a call without an argument would block, return false immediately; otherwise, do the same thing as when called without arguments, and return true.
third_party/stdlib/threading.py
def acquire(self, blocking=1): """Acquire a lock, blocking or non-blocking. When invoked without arguments: if this thread already owns the lock, increment the recursion level by one, and return immediately. Otherwise, if another thread owns the lock, block until the lock is unlocked. Once the lock is unlocked (not owned by any thread), then grab ownership, set the recursion level to one, and return. If more than one thread is blocked waiting until the lock is unlocked, only one at a time will be able to grab ownership of the lock. There is no return value in this case. When invoked with the blocking argument set to true, do the same thing as when called without arguments, and return true. When invoked with the blocking argument set to false, do not block. If a call without an argument would block, return false immediately; otherwise, do the same thing as when called without arguments, and return true. """ me = _get_ident() if self.__owner == me: self.__count = self.__count + 1 if __debug__: self._note("%s.acquire(%s): recursive success", self, blocking) return 1 rc = self.__block.acquire(blocking) if rc: self.__owner = me self.__count = 1 if __debug__: self._note("%s.acquire(%s): initial success", self, blocking) else: if __debug__: self._note("%s.acquire(%s): failure", self, blocking) return rc
def acquire(self, blocking=1): """Acquire a lock, blocking or non-blocking. When invoked without arguments: if this thread already owns the lock, increment the recursion level by one, and return immediately. Otherwise, if another thread owns the lock, block until the lock is unlocked. Once the lock is unlocked (not owned by any thread), then grab ownership, set the recursion level to one, and return. If more than one thread is blocked waiting until the lock is unlocked, only one at a time will be able to grab ownership of the lock. There is no return value in this case. When invoked with the blocking argument set to true, do the same thing as when called without arguments, and return true. When invoked with the blocking argument set to false, do not block. If a call without an argument would block, return false immediately; otherwise, do the same thing as when called without arguments, and return true. """ me = _get_ident() if self.__owner == me: self.__count = self.__count + 1 if __debug__: self._note("%s.acquire(%s): recursive success", self, blocking) return 1 rc = self.__block.acquire(blocking) if rc: self.__owner = me self.__count = 1 if __debug__: self._note("%s.acquire(%s): initial success", self, blocking) else: if __debug__: self._note("%s.acquire(%s): failure", self, blocking) return rc
[ "Acquire", "a", "lock", "blocking", "or", "non", "-", "blocking", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/threading.py#L147-L183
[ "def", "acquire", "(", "self", ",", "blocking", "=", "1", ")", ":", "me", "=", "_get_ident", "(", ")", "if", "self", ".", "__owner", "==", "me", ":", "self", ".", "__count", "=", "self", ".", "__count", "+", "1", "if", "__debug__", ":", "self", "...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
_RLock.release
Release a lock, decrementing the recursion level. If after the decrement it is zero, reset the lock to unlocked (not owned by any thread), and if any other threads are blocked waiting for the lock to become unlocked, allow exactly one of them to proceed. If after the decrement the recursion level is still nonzero, the lock remains locked and owned by the calling thread. Only call this method when the calling thread owns the lock. A RuntimeError is raised if this method is called when the lock is unlocked. There is no return value.
third_party/stdlib/threading.py
def release(self): """Release a lock, decrementing the recursion level. If after the decrement it is zero, reset the lock to unlocked (not owned by any thread), and if any other threads are blocked waiting for the lock to become unlocked, allow exactly one of them to proceed. If after the decrement the recursion level is still nonzero, the lock remains locked and owned by the calling thread. Only call this method when the calling thread owns the lock. A RuntimeError is raised if this method is called when the lock is unlocked. There is no return value. """ if self.__owner != _get_ident(): raise RuntimeError("cannot release un-acquired lock") self.__count = count = self.__count - 1 if not count: self.__owner = None self.__block.release() if __debug__: self._note("%s.release(): final release", self) else: if __debug__: self._note("%s.release(): non-final release", self)
def release(self): """Release a lock, decrementing the recursion level. If after the decrement it is zero, reset the lock to unlocked (not owned by any thread), and if any other threads are blocked waiting for the lock to become unlocked, allow exactly one of them to proceed. If after the decrement the recursion level is still nonzero, the lock remains locked and owned by the calling thread. Only call this method when the calling thread owns the lock. A RuntimeError is raised if this method is called when the lock is unlocked. There is no return value. """ if self.__owner != _get_ident(): raise RuntimeError("cannot release un-acquired lock") self.__count = count = self.__count - 1 if not count: self.__owner = None self.__block.release() if __debug__: self._note("%s.release(): final release", self) else: if __debug__: self._note("%s.release(): non-final release", self)
[ "Release", "a", "lock", "decrementing", "the", "recursion", "level", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/threading.py#L187-L213
[ "def", "release", "(", "self", ")", ":", "if", "self", ".", "__owner", "!=", "_get_ident", "(", ")", ":", "raise", "RuntimeError", "(", "\"cannot release un-acquired lock\"", ")", "self", ".", "__count", "=", "count", "=", "self", ".", "__count", "-", "1",...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
_Condition.wait
Wait until notified or until a timeout occurs. If the calling thread has not acquired the lock when this method is called, a RuntimeError is raised. This method releases the underlying lock, and then blocks until it is awakened by a notify() or notifyAll() call for the same condition variable in another thread, or until the optional timeout occurs. Once awakened or timed out, it re-acquires the lock and returns. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). When the underlying lock is an RLock, it is not released using its release() method, since this may not actually unlock the lock when it was acquired multiple times recursively. Instead, an internal interface of the RLock class is used, which really unlocks it even when it has been recursively acquired several times. Another internal interface is then used to restore the recursion level when the lock is reacquired.
third_party/stdlib/threading.py
def wait(self, timeout=None): """Wait until notified or until a timeout occurs. If the calling thread has not acquired the lock when this method is called, a RuntimeError is raised. This method releases the underlying lock, and then blocks until it is awakened by a notify() or notifyAll() call for the same condition variable in another thread, or until the optional timeout occurs. Once awakened or timed out, it re-acquires the lock and returns. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). When the underlying lock is an RLock, it is not released using its release() method, since this may not actually unlock the lock when it was acquired multiple times recursively. Instead, an internal interface of the RLock class is used, which really unlocks it even when it has been recursively acquired several times. Another internal interface is then used to restore the recursion level when the lock is reacquired. """ if not self._is_owned(): raise RuntimeError("cannot wait on un-acquired lock") waiter = _allocate_lock() waiter.acquire() self.__waiters.append(waiter) saved_state = self._release_save() try: # restore state no matter what (e.g., KeyboardInterrupt) if timeout is None: waiter.acquire() if __debug__: self._note("%s.wait(): got it", self) else: # Balancing act: We can't afford a pure busy loop, so we # have to sleep; but if we sleep the whole timeout time, # we'll be unresponsive. The scheme here sleeps very # little at first, longer as time goes on, but never longer # than 20 times per second (or the timeout time remaining). 
endtime = _time() + timeout delay = 0.0005 # 500 us -> initial delay of 1 ms while True: gotit = waiter.acquire(0) if gotit: break remaining = endtime - _time() if remaining <= 0: break delay = min(delay * 2, remaining, .05) _sleep(delay) if not gotit: if __debug__: self._note("%s.wait(%s): timed out", self, timeout) try: self.__waiters.remove(waiter) except ValueError: pass else: if __debug__: self._note("%s.wait(%s): got it", self, timeout) finally: self._acquire_restore(saved_state)
def wait(self, timeout=None): """Wait until notified or until a timeout occurs. If the calling thread has not acquired the lock when this method is called, a RuntimeError is raised. This method releases the underlying lock, and then blocks until it is awakened by a notify() or notifyAll() call for the same condition variable in another thread, or until the optional timeout occurs. Once awakened or timed out, it re-acquires the lock and returns. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). When the underlying lock is an RLock, it is not released using its release() method, since this may not actually unlock the lock when it was acquired multiple times recursively. Instead, an internal interface of the RLock class is used, which really unlocks it even when it has been recursively acquired several times. Another internal interface is then used to restore the recursion level when the lock is reacquired. """ if not self._is_owned(): raise RuntimeError("cannot wait on un-acquired lock") waiter = _allocate_lock() waiter.acquire() self.__waiters.append(waiter) saved_state = self._release_save() try: # restore state no matter what (e.g., KeyboardInterrupt) if timeout is None: waiter.acquire() if __debug__: self._note("%s.wait(): got it", self) else: # Balancing act: We can't afford a pure busy loop, so we # have to sleep; but if we sleep the whole timeout time, # we'll be unresponsive. The scheme here sleeps very # little at first, longer as time goes on, but never longer # than 20 times per second (or the timeout time remaining). 
endtime = _time() + timeout delay = 0.0005 # 500 us -> initial delay of 1 ms while True: gotit = waiter.acquire(0) if gotit: break remaining = endtime - _time() if remaining <= 0: break delay = min(delay * 2, remaining, .05) _sleep(delay) if not gotit: if __debug__: self._note("%s.wait(%s): timed out", self, timeout) try: self.__waiters.remove(waiter) except ValueError: pass else: if __debug__: self._note("%s.wait(%s): got it", self, timeout) finally: self._acquire_restore(saved_state)
[ "Wait", "until", "notified", "or", "until", "a", "timeout", "occurs", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/threading.py#L309-L371
[ "def", "wait", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "not", "self", ".", "_is_owned", "(", ")", ":", "raise", "RuntimeError", "(", "\"cannot wait on un-acquired lock\"", ")", "waiter", "=", "_allocate_lock", "(", ")", "waiter", ".", "ac...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
_Condition.notify
Wake up one or more threads waiting on this condition, if any. If the calling thread has not acquired the lock when this method is called, a RuntimeError is raised. This method wakes up at most n of the threads waiting for the condition variable; it is a no-op if no threads are waiting.
third_party/stdlib/threading.py
def notify(self, n=1): """Wake up one or more threads waiting on this condition, if any. If the calling thread has not acquired the lock when this method is called, a RuntimeError is raised. This method wakes up at most n of the threads waiting for the condition variable; it is a no-op if no threads are waiting. """ if not self._is_owned(): raise RuntimeError("cannot notify on un-acquired lock") __waiters = self.__waiters waiters = __waiters[:n] if not waiters: if __debug__: self._note("%s.notify(): no waiters", self) return self._note("%s.notify(): notifying %d waiter%s", self, n, n!=1 and "s" or "") for waiter in waiters: waiter.release() try: __waiters.remove(waiter) except ValueError: pass
def notify(self, n=1): """Wake up one or more threads waiting on this condition, if any. If the calling thread has not acquired the lock when this method is called, a RuntimeError is raised. This method wakes up at most n of the threads waiting for the condition variable; it is a no-op if no threads are waiting. """ if not self._is_owned(): raise RuntimeError("cannot notify on un-acquired lock") __waiters = self.__waiters waiters = __waiters[:n] if not waiters: if __debug__: self._note("%s.notify(): no waiters", self) return self._note("%s.notify(): notifying %d waiter%s", self, n, n!=1 and "s" or "") for waiter in waiters: waiter.release() try: __waiters.remove(waiter) except ValueError: pass
[ "Wake", "up", "one", "or", "more", "threads", "waiting", "on", "this", "condition", "if", "any", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/threading.py#L373-L398
[ "def", "notify", "(", "self", ",", "n", "=", "1", ")", ":", "if", "not", "self", ".", "_is_owned", "(", ")", ":", "raise", "RuntimeError", "(", "\"cannot notify on un-acquired lock\"", ")", "__waiters", "=", "self", ".", "__waiters", "waiters", "=", "__wai...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
_Semaphore.acquire
Acquire a semaphore, decrementing the internal counter by one. When invoked without arguments: if the internal counter is larger than zero on entry, decrement it by one and return immediately. If it is zero on entry, block, waiting until some other thread has called release() to make it larger than zero. This is done with proper interlocking so that if multiple acquire() calls are blocked, release() will wake exactly one of them up. The implementation may pick one at random, so the order in which blocked threads are awakened should not be relied on. There is no return value in this case. When invoked with blocking set to true, do the same thing as when called without arguments, and return true. When invoked with blocking set to false, do not block. If a call without an argument would block, return false immediately; otherwise, do the same thing as when called without arguments, and return true.
third_party/stdlib/threading.py
def acquire(self, blocking=1): """Acquire a semaphore, decrementing the internal counter by one. When invoked without arguments: if the internal counter is larger than zero on entry, decrement it by one and return immediately. If it is zero on entry, block, waiting until some other thread has called release() to make it larger than zero. This is done with proper interlocking so that if multiple acquire() calls are blocked, release() will wake exactly one of them up. The implementation may pick one at random, so the order in which blocked threads are awakened should not be relied on. There is no return value in this case. When invoked with blocking set to true, do the same thing as when called without arguments, and return true. When invoked with blocking set to false, do not block. If a call without an argument would block, return false immediately; otherwise, do the same thing as when called without arguments, and return true. """ rc = False with self.__cond: while self.__value == 0: if not blocking: break if __debug__: self._note("%s.acquire(%s): blocked waiting, value=%s", self, blocking, self.__value) self.__cond.wait() else: self.__value = self.__value - 1 if __debug__: self._note("%s.acquire: success, value=%s", self, self.__value) rc = True return rc
def acquire(self, blocking=1): """Acquire a semaphore, decrementing the internal counter by one. When invoked without arguments: if the internal counter is larger than zero on entry, decrement it by one and return immediately. If it is zero on entry, block, waiting until some other thread has called release() to make it larger than zero. This is done with proper interlocking so that if multiple acquire() calls are blocked, release() will wake exactly one of them up. The implementation may pick one at random, so the order in which blocked threads are awakened should not be relied on. There is no return value in this case. When invoked with blocking set to true, do the same thing as when called without arguments, and return true. When invoked with blocking set to false, do not block. If a call without an argument would block, return false immediately; otherwise, do the same thing as when called without arguments, and return true. """ rc = False with self.__cond: while self.__value == 0: if not blocking: break if __debug__: self._note("%s.acquire(%s): blocked waiting, value=%s", self, blocking, self.__value) self.__cond.wait() else: self.__value = self.__value - 1 if __debug__: self._note("%s.acquire: success, value=%s", self, self.__value) rc = True return rc
[ "Acquire", "a", "semaphore", "decrementing", "the", "internal", "counter", "by", "one", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/threading.py#L440-L475
[ "def", "acquire", "(", "self", ",", "blocking", "=", "1", ")", ":", "rc", "=", "False", "with", "self", ".", "__cond", ":", "while", "self", ".", "__value", "==", "0", ":", "if", "not", "blocking", ":", "break", "if", "__debug__", ":", "self", ".",...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
_Semaphore.release
Release a semaphore, incrementing the internal counter by one. When the counter is zero on entry and another thread is waiting for it to become larger than zero again, wake up that thread.
third_party/stdlib/threading.py
def release(self): """Release a semaphore, incrementing the internal counter by one. When the counter is zero on entry and another thread is waiting for it to become larger than zero again, wake up that thread. """ with self.__cond: self.__value = self.__value + 1 if __debug__: self._note("%s.release: success, value=%s", self, self.__value) self.__cond.notify()
def release(self): """Release a semaphore, incrementing the internal counter by one. When the counter is zero on entry and another thread is waiting for it to become larger than zero again, wake up that thread. """ with self.__cond: self.__value = self.__value + 1 if __debug__: self._note("%s.release: success, value=%s", self, self.__value) self.__cond.notify()
[ "Release", "a", "semaphore", "incrementing", "the", "internal", "counter", "by", "one", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/threading.py#L479-L491
[ "def", "release", "(", "self", ")", ":", "with", "self", ".", "__cond", ":", "self", ".", "__value", "=", "self", ".", "__value", "+", "1", "if", "__debug__", ":", "self", ".", "_note", "(", "\"%s.release: success, value=%s\"", ",", "self", ",", "self", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
_BoundedSemaphore.release
Release a semaphore, incrementing the internal counter by one. When the counter is zero on entry and another thread is waiting for it to become larger than zero again, wake up that thread. If the number of releases exceeds the number of acquires, raise a ValueError.
third_party/stdlib/threading.py
def release(self): """Release a semaphore, incrementing the internal counter by one. When the counter is zero on entry and another thread is waiting for it to become larger than zero again, wake up that thread. If the number of releases exceeds the number of acquires, raise a ValueError. """ with self.__cond: if self.__value >= self._initial_value: raise ValueError("Semaphore released too many times") self.__value += 1 self.__cond.notify()
def release(self): """Release a semaphore, incrementing the internal counter by one. When the counter is zero on entry and another thread is waiting for it to become larger than zero again, wake up that thread. If the number of releases exceeds the number of acquires, raise a ValueError. """ with self.__cond: if self.__value >= self._initial_value: raise ValueError("Semaphore released too many times") self.__value += 1 self.__cond.notify()
[ "Release", "a", "semaphore", "incrementing", "the", "internal", "counter", "by", "one", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/threading.py#L525-L539
[ "def", "release", "(", "self", ")", ":", "with", "self", ".", "__cond", ":", "if", "self", ".", "__value", ">=", "self", ".", "_initial_value", ":", "raise", "ValueError", "(", "\"Semaphore released too many times\"", ")", "self", ".", "__value", "+=", "1", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
_Event.set
Set the internal flag to true. All threads waiting for the flag to become true are awakened. Threads that call wait() once the flag is true will not block at all.
third_party/stdlib/threading.py
def set(self): """Set the internal flag to true. All threads waiting for the flag to become true are awakened. Threads that call wait() once the flag is true will not block at all. """ with self.__cond: self.__flag = True self.__cond.notify_all()
def set(self): """Set the internal flag to true. All threads waiting for the flag to become true are awakened. Threads that call wait() once the flag is true will not block at all. """ with self.__cond: self.__flag = True self.__cond.notify_all()
[ "Set", "the", "internal", "flag", "to", "true", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/threading.py#L576-L585
[ "def", "set", "(", "self", ")", ":", "with", "self", ".", "__cond", ":", "self", ".", "__flag", "=", "True", "self", ".", "__cond", ".", "notify_all", "(", ")" ]
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
_Event.wait
Block until the internal flag is true. If the internal flag is true on entry, return immediately. Otherwise, block until another thread calls set() to set the flag to true, or until the optional timeout occurs. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). This method returns the internal flag on exit, so it will always return True except if a timeout is given and the operation times out.
third_party/stdlib/threading.py
def wait(self, timeout=None): """Block until the internal flag is true. If the internal flag is true on entry, return immediately. Otherwise, block until another thread calls set() to set the flag to true, or until the optional timeout occurs. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). This method returns the internal flag on exit, so it will always return True except if a timeout is given and the operation times out. """ with self.__cond: if not self.__flag: self.__cond.wait(timeout) return self.__flag
def wait(self, timeout=None): """Block until the internal flag is true. If the internal flag is true on entry, return immediately. Otherwise, block until another thread calls set() to set the flag to true, or until the optional timeout occurs. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). This method returns the internal flag on exit, so it will always return True except if a timeout is given and the operation times out. """ with self.__cond: if not self.__flag: self.__cond.wait(timeout) return self.__flag
[ "Block", "until", "the", "internal", "flag", "is", "true", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/threading.py#L597-L615
[ "def", "wait", "(", "self", ",", "timeout", "=", "None", ")", ":", "with", "self", ".", "__cond", ":", "if", "not", "self", ".", "__flag", ":", "self", ".", "__cond", ".", "wait", "(", "timeout", ")", "return", "self", ".", "__flag" ]
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
Thread.start
Start the thread's activity. It must be called at most once per thread object. It arranges for the object's run() method to be invoked in a separate thread of control. This method will raise a RuntimeError if called more than once on the same thread object.
third_party/stdlib/threading.py
def start(self): """Start the thread's activity. It must be called at most once per thread object. It arranges for the object's run() method to be invoked in a separate thread of control. This method will raise a RuntimeError if called more than once on the same thread object. """ if not self.__initialized: raise RuntimeError("thread.__init__() not called") if self.__started.is_set(): raise RuntimeError("threads can only be started once") if __debug__: self._note("%s.start(): starting thread", self) with _active_limbo_lock: _limbo[self] = self try: _start_new_thread(self.__bootstrap, ()) except Exception: with _active_limbo_lock: del _limbo[self] raise self.__started.wait()
def start(self): """Start the thread's activity. It must be called at most once per thread object. It arranges for the object's run() method to be invoked in a separate thread of control. This method will raise a RuntimeError if called more than once on the same thread object. """ if not self.__initialized: raise RuntimeError("thread.__init__() not called") if self.__started.is_set(): raise RuntimeError("threads can only be started once") if __debug__: self._note("%s.start(): starting thread", self) with _active_limbo_lock: _limbo[self] = self try: _start_new_thread(self.__bootstrap, ()) except Exception: with _active_limbo_lock: del _limbo[self] raise self.__started.wait()
[ "Start", "the", "thread", "s", "activity", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/threading.py#L709-L733
[ "def", "start", "(", "self", ")", ":", "if", "not", "self", ".", "__initialized", ":", "raise", "RuntimeError", "(", "\"thread.__init__() not called\"", ")", "if", "self", ".", "__started", ".", "is_set", "(", ")", ":", "raise", "RuntimeError", "(", "\"threa...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
Thread.run
Method representing the thread's activity. You may override this method in a subclass. The standard run() method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the args and kwargs arguments, respectively.
third_party/stdlib/threading.py
def run(self): """Method representing the thread's activity. You may override this method in a subclass. The standard run() method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the args and kwargs arguments, respectively. """ try: if self.__target: self.__target(*self.__args, **self.__kwargs) finally: # Avoid a refcycle if the thread is running a function with # an argument that has a member that points to the thread. del self.__target, self.__args, self.__kwargs
def run(self): """Method representing the thread's activity. You may override this method in a subclass. The standard run() method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the args and kwargs arguments, respectively. """ try: if self.__target: self.__target(*self.__args, **self.__kwargs) finally: # Avoid a refcycle if the thread is running a function with # an argument that has a member that points to the thread. del self.__target, self.__args, self.__kwargs
[ "Method", "representing", "the", "thread", "s", "activity", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/threading.py#L735-L750
[ "def", "run", "(", "self", ")", ":", "try", ":", "if", "self", ".", "__target", ":", "self", ".", "__target", "(", "*", "self", ".", "__args", ",", "*", "*", "self", ".", "__kwargs", ")", "finally", ":", "# Avoid a refcycle if the thread is running a funct...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
Thread.join
Wait until the thread terminates. This blocks the calling thread until the thread whose join() method is called terminates -- either normally or through an unhandled exception or until the optional timeout occurs. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). As join() always returns None, you must call isAlive() after join() to decide whether a timeout happened -- if the thread is still alive, the join() call timed out. When the timeout argument is not present or None, the operation will block until the thread terminates. A thread can be join()ed many times. join() raises a RuntimeError if an attempt is made to join the current thread as that would cause a deadlock. It is also an error to join() a thread before it has been started and attempts to do so raises the same exception.
third_party/stdlib/threading.py
def join(self, timeout=None): """Wait until the thread terminates. This blocks the calling thread until the thread whose join() method is called terminates -- either normally or through an unhandled exception or until the optional timeout occurs. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). As join() always returns None, you must call isAlive() after join() to decide whether a timeout happened -- if the thread is still alive, the join() call timed out. When the timeout argument is not present or None, the operation will block until the thread terminates. A thread can be join()ed many times. join() raises a RuntimeError if an attempt is made to join the current thread as that would cause a deadlock. It is also an error to join() a thread before it has been started and attempts to do so raises the same exception. """ if not self.__initialized: raise RuntimeError("Thread.__init__() not called") if not self.__started.is_set(): raise RuntimeError("cannot join thread before it is started") if self is current_thread(): raise RuntimeError("cannot join current thread") if __debug__: if not self.__stopped: self._note("%s.join(): waiting until thread stops", self) self.__block.acquire() try: if timeout is None: while not self.__stopped: self.__block.wait() if __debug__: self._note("%s.join(): thread stopped", self) else: deadline = _time() + timeout while not self.__stopped: delay = deadline - _time() if delay <= 0: if __debug__: self._note("%s.join(): timed out", self) break self.__block.wait(delay) else: if __debug__: self._note("%s.join(): thread stopped", self) finally: self.__block.release()
def join(self, timeout=None): """Wait until the thread terminates. This blocks the calling thread until the thread whose join() method is called terminates -- either normally or through an unhandled exception or until the optional timeout occurs. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). As join() always returns None, you must call isAlive() after join() to decide whether a timeout happened -- if the thread is still alive, the join() call timed out. When the timeout argument is not present or None, the operation will block until the thread terminates. A thread can be join()ed many times. join() raises a RuntimeError if an attempt is made to join the current thread as that would cause a deadlock. It is also an error to join() a thread before it has been started and attempts to do so raises the same exception. """ if not self.__initialized: raise RuntimeError("Thread.__init__() not called") if not self.__started.is_set(): raise RuntimeError("cannot join thread before it is started") if self is current_thread(): raise RuntimeError("cannot join current thread") if __debug__: if not self.__stopped: self._note("%s.join(): waiting until thread stops", self) self.__block.acquire() try: if timeout is None: while not self.__stopped: self.__block.wait() if __debug__: self._note("%s.join(): thread stopped", self) else: deadline = _time() + timeout while not self.__stopped: delay = deadline - _time() if delay <= 0: if __debug__: self._note("%s.join(): timed out", self) break self.__block.wait(delay) else: if __debug__: self._note("%s.join(): thread stopped", self) finally: self.__block.release()
[ "Wait", "until", "the", "thread", "terminates", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/threading.py#L894-L948
[ "def", "join", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "not", "self", ".", "__initialized", ":", "raise", "RuntimeError", "(", "\"Thread.__init__() not called\"", ")", "if", "not", "self", ".", "__started", ".", "is_set", "(", ")", ":", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
ABCMeta._dump_registry
Debug helper to print the ABC registry.
third_party/stdlib/abc.py
def _dump_registry(cls, file=None): """Debug helper to print the ABC registry.""" print >> file, "Class: %s.%s" % (cls.__module__, cls.__name__) print >> file, "Inv.counter: %s" % ABCMeta._abc_invalidation_counter for name in sorted(cls.__dict__.keys()): if name.startswith("_abc_"): value = getattr(cls, name) print >> file, "%s: %r" % (name, value)
def _dump_registry(cls, file=None): """Debug helper to print the ABC registry.""" print >> file, "Class: %s.%s" % (cls.__module__, cls.__name__) print >> file, "Inv.counter: %s" % ABCMeta._abc_invalidation_counter for name in sorted(cls.__dict__.keys()): if name.startswith("_abc_"): value = getattr(cls, name) print >> file, "%s: %r" % (name, value)
[ "Debug", "helper", "to", "print", "the", "ABC", "registry", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/abc.py#L119-L126
[ "def", "_dump_registry", "(", "cls", ",", "file", "=", "None", ")", ":", "print", ">>", "file", ",", "\"Class: %s.%s\"", "%", "(", "cls", ".", "__module__", ",", "cls", ".", "__name__", ")", "print", ">>", "file", ",", "\"Inv.counter: %s\"", "%", "ABCMet...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
b2a_qp
quotetabs=True means that tab and space characters are always quoted. istext=False means that \r and \n are treated as regular characters header=True encodes space characters with '_' and requires real '_' characters to be quoted.
third_party/pypy/binascii.py
def b2a_qp(data, quotetabs=False, istext=True, header=False): """quotetabs=True means that tab and space characters are always quoted. istext=False means that \r and \n are treated as regular characters header=True encodes space characters with '_' and requires real '_' characters to be quoted. """ MAXLINESIZE = 76 # See if this string is using CRLF line ends lf = data.find('\n') crlf = lf > 0 and data[lf-1] == '\r' inp = 0 linelen = 0 odata = [] while inp < len(data): c = data[inp] if (c > '~' or c == '=' or (header and c == '_') or (c == '.' and linelen == 0 and (inp+1 == len(data) or data[inp+1] == '\n' or data[inp+1] == '\r')) or (not istext and (c == '\r' or c == '\n')) or ((c == '\t' or c == ' ') and (inp + 1 == len(data))) or (c <= ' ' and c != '\r' and c != '\n' and (quotetabs or (not quotetabs and (c != '\t' and c != ' '))))): linelen += 3 if linelen >= MAXLINESIZE: odata.append('=') if crlf: odata.append('\r') odata.append('\n') linelen = 3 odata.append('=' + two_hex_digits(ord(c))) inp += 1 else: if (istext and (c == '\n' or (inp+1 < len(data) and c == '\r' and data[inp+1] == '\n'))): linelen = 0 # Protect against whitespace on end of line if (len(odata) > 0 and (odata[-1] == ' ' or odata[-1] == '\t')): ch = ord(odata[-1]) odata[-1] = '=' odata.append(two_hex_digits(ch)) if crlf: odata.append('\r') odata.append('\n') if c == '\r': inp += 2 else: inp += 1 else: if (inp + 1 < len(data) and data[inp+1] != '\n' and (linelen + 1) >= MAXLINESIZE): odata.append('=') if crlf: odata.append('\r') odata.append('\n') linelen = 0 linelen += 1 if header and c == ' ': c = '_' odata.append(c) inp += 1 return ''.join(odata)
def b2a_qp(data, quotetabs=False, istext=True, header=False): """quotetabs=True means that tab and space characters are always quoted. istext=False means that \r and \n are treated as regular characters header=True encodes space characters with '_' and requires real '_' characters to be quoted. """ MAXLINESIZE = 76 # See if this string is using CRLF line ends lf = data.find('\n') crlf = lf > 0 and data[lf-1] == '\r' inp = 0 linelen = 0 odata = [] while inp < len(data): c = data[inp] if (c > '~' or c == '=' or (header and c == '_') or (c == '.' and linelen == 0 and (inp+1 == len(data) or data[inp+1] == '\n' or data[inp+1] == '\r')) or (not istext and (c == '\r' or c == '\n')) or ((c == '\t' or c == ' ') and (inp + 1 == len(data))) or (c <= ' ' and c != '\r' and c != '\n' and (quotetabs or (not quotetabs and (c != '\t' and c != ' '))))): linelen += 3 if linelen >= MAXLINESIZE: odata.append('=') if crlf: odata.append('\r') odata.append('\n') linelen = 3 odata.append('=' + two_hex_digits(ord(c))) inp += 1 else: if (istext and (c == '\n' or (inp+1 < len(data) and c == '\r' and data[inp+1] == '\n'))): linelen = 0 # Protect against whitespace on end of line if (len(odata) > 0 and (odata[-1] == ' ' or odata[-1] == '\t')): ch = ord(odata[-1]) odata[-1] = '=' odata.append(two_hex_digits(ch)) if crlf: odata.append('\r') odata.append('\n') if c == '\r': inp += 2 else: inp += 1 else: if (inp + 1 < len(data) and data[inp+1] != '\n' and (linelen + 1) >= MAXLINESIZE): odata.append('=') if crlf: odata.append('\r') odata.append('\n') linelen = 0 linelen += 1 if header and c == ' ': c = '_' odata.append(c) inp += 1 return ''.join(odata)
[ "quotetabs", "=", "True", "means", "that", "tab", "and", "space", "characters", "are", "always", "quoted", ".", "istext", "=", "False", "means", "that", "\\", "r", "and", "\\", "n", "are", "treated", "as", "regular", "characters", "header", "=", "True", ...
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/binascii.py#L265-L333
[ "def", "b2a_qp", "(", "data", ",", "quotetabs", "=", "False", ",", "istext", "=", "True", ",", "header", "=", "False", ")", ":", "MAXLINESIZE", "=", "76", "# See if this string is using CRLF line ends", "lf", "=", "data", ".", "find", "(", "'\\n'", ")", "c...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
rlecode_hqx
Run length encoding for binhex4. The CPython implementation does not do run length encoding of \x90 characters. This implementation does.
third_party/pypy/binascii.py
def rlecode_hqx(s): """ Run length encoding for binhex4. The CPython implementation does not do run length encoding of \x90 characters. This implementation does. """ if not s: return '' result = [] prev = s[0] count = 1 # Add a dummy character to get the loop to go one extra round. # The dummy must be different from the last character of s. # In the same step we remove the first character, which has # already been stored in prev. if s[-1] == '!': s = s[1:] + '?' else: s = s[1:] + '!' for c in s: if c == prev and count < 255: count += 1 else: if count == 1: if prev != '\x90': result.append(prev) else: result += ['\x90', '\x00'] elif count < 4: if prev != '\x90': result += [prev] * count else: result += ['\x90', '\x00'] * count else: if prev != '\x90': result += [prev, '\x90', chr(count)] else: result += ['\x90', '\x00', '\x90', chr(count)] count = 1 prev = c return ''.join(result)
def rlecode_hqx(s): """ Run length encoding for binhex4. The CPython implementation does not do run length encoding of \x90 characters. This implementation does. """ if not s: return '' result = [] prev = s[0] count = 1 # Add a dummy character to get the loop to go one extra round. # The dummy must be different from the last character of s. # In the same step we remove the first character, which has # already been stored in prev. if s[-1] == '!': s = s[1:] + '?' else: s = s[1:] + '!' for c in s: if c == prev and count < 255: count += 1 else: if count == 1: if prev != '\x90': result.append(prev) else: result += ['\x90', '\x00'] elif count < 4: if prev != '\x90': result += [prev] * count else: result += ['\x90', '\x00'] * count else: if prev != '\x90': result += [prev, '\x90', chr(count)] else: result += ['\x90', '\x00', '\x90', chr(count)] count = 1 prev = c return ''.join(result)
[ "Run", "length", "encoding", "for", "binhex4", ".", "The", "CPython", "implementation", "does", "not", "do", "run", "length", "encoding", "of", "\\", "x90", "characters", ".", "This", "implementation", "does", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/binascii.py#L540-L582
[ "def", "rlecode_hqx", "(", "s", ")", ":", "if", "not", "s", ":", "return", "''", "result", "=", "[", "]", "prev", "=", "s", "[", "0", "]", "count", "=", "1", "# Add a dummy character to get the loop to go one extra round.", "# The dummy must be different from the ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
_match_abbrev
_match_abbrev(s : string, wordmap : {string : Option}) -> string Return the string key in 'wordmap' for which 's' is an unambiguous abbreviation. If 's' is found to be ambiguous or doesn't match any of 'words', raise BadOptionError.
third_party/stdlib/optparse.py
def _match_abbrev(s, wordmap): """_match_abbrev(s : string, wordmap : {string : Option}) -> string Return the string key in 'wordmap' for which 's' is an unambiguous abbreviation. If 's' is found to be ambiguous or doesn't match any of 'words', raise BadOptionError. """ # Is there an exact match? if s in wordmap: return s else: # Isolate all words with s as a prefix. possibilities = [word for word in wordmap.keys() if word.startswith(s)] # No exact match, so there had better be just one possibility. if len(possibilities) == 1: return possibilities[0] elif not possibilities: raise BadOptionError(s) else: # More than one possible completion: ambiguous prefix. possibilities.sort() raise AmbiguousOptionError(s, possibilities)
def _match_abbrev(s, wordmap): """_match_abbrev(s : string, wordmap : {string : Option}) -> string Return the string key in 'wordmap' for which 's' is an unambiguous abbreviation. If 's' is found to be ambiguous or doesn't match any of 'words', raise BadOptionError. """ # Is there an exact match? if s in wordmap: return s else: # Isolate all words with s as a prefix. possibilities = [word for word in wordmap.keys() if word.startswith(s)] # No exact match, so there had better be just one possibility. if len(possibilities) == 1: return possibilities[0] elif not possibilities: raise BadOptionError(s) else: # More than one possible completion: ambiguous prefix. possibilities.sort() raise AmbiguousOptionError(s, possibilities)
[ "_match_abbrev", "(", "s", ":", "string", "wordmap", ":", "{", "string", ":", "Option", "}", ")", "-", ">", "string" ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/optparse.py#L1706-L1728
[ "def", "_match_abbrev", "(", "s", ",", "wordmap", ")", ":", "# Is there an exact match?", "if", "s", "in", "wordmap", ":", "return", "s", "else", ":", "# Isolate all words with s as a prefix.", "possibilities", "=", "[", "word", "for", "word", "in", "wordmap", "...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
HelpFormatter._format_text
Format a paragraph of free-form text for inclusion in the help output at the current indentation level.
third_party/stdlib/optparse.py
def _format_text(self, text): """ Format a paragraph of free-form text for inclusion in the help output at the current indentation level. """ text_width = max(self.width - self.current_indent, 11) indent = " "*self.current_indent return textwrap.fill(text, text_width, initial_indent=indent, subsequent_indent=indent)
def _format_text(self, text): """ Format a paragraph of free-form text for inclusion in the help output at the current indentation level. """ text_width = max(self.width - self.current_indent, 11) indent = " "*self.current_indent return textwrap.fill(text, text_width, initial_indent=indent, subsequent_indent=indent)
[ "Format", "a", "paragraph", "of", "free", "-", "form", "text", "for", "inclusion", "in", "the", "help", "output", "at", "the", "current", "indentation", "level", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/optparse.py#L261-L271
[ "def", "_format_text", "(", "self", ",", "text", ")", ":", "text_width", "=", "max", "(", "self", ".", "width", "-", "self", ".", "current_indent", ",", "11", ")", "indent", "=", "\" \"", "*", "self", ".", "current_indent", "return", "textwrap", ".", "...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
HelpFormatter.format_option_strings
Return a comma-separated list of option strings & metavariables.
third_party/stdlib/optparse.py
def format_option_strings(self, option): """Return a comma-separated list of option strings & metavariables.""" if option.takes_value(): metavar = option.metavar or option.dest.upper() short_opts = [self._short_opt_fmt % (sopt, metavar) for sopt in option._short_opts] long_opts = [self._long_opt_fmt % (lopt, metavar) for lopt in option._long_opts] else: short_opts = option._short_opts long_opts = option._long_opts if self.short_first: opts = short_opts + long_opts else: opts = long_opts + short_opts return ", ".join(opts)
def format_option_strings(self, option): """Return a comma-separated list of option strings & metavariables.""" if option.takes_value(): metavar = option.metavar or option.dest.upper() short_opts = [self._short_opt_fmt % (sopt, metavar) for sopt in option._short_opts] long_opts = [self._long_opt_fmt % (lopt, metavar) for lopt in option._long_opts] else: short_opts = option._short_opts long_opts = option._long_opts if self.short_first: opts = short_opts + long_opts else: opts = long_opts + short_opts return ", ".join(opts)
[ "Return", "a", "comma", "-", "separated", "list", "of", "option", "strings", "&", "metavariables", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/optparse.py#L356-L373
[ "def", "format_option_strings", "(", "self", ",", "option", ")", ":", "if", "option", ".", "takes_value", "(", ")", ":", "metavar", "=", "option", ".", "metavar", "or", "option", ".", "dest", ".", "upper", "(", ")", "short_opts", "=", "[", "self", ".",...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
Values._update_careful
Update the option values from an arbitrary dictionary, but only use keys from dict that already have a corresponding attribute in self. Any keys in dict without a corresponding attribute are silently ignored.
third_party/stdlib/optparse.py
def _update_careful(self, dict): """ Update the option values from an arbitrary dictionary, but only use keys from dict that already have a corresponding attribute in self. Any keys in dict without a corresponding attribute are silently ignored. """ for attr in dir(self): if attr in dict: dval = dict[attr] if dval is not None: setattr(self, attr, dval)
def _update_careful(self, dict): """ Update the option values from an arbitrary dictionary, but only use keys from dict that already have a corresponding attribute in self. Any keys in dict without a corresponding attribute are silently ignored. """ for attr in dir(self): if attr in dict: dval = dict[attr] if dval is not None: setattr(self, attr, dval)
[ "Update", "the", "option", "values", "from", "an", "arbitrary", "dictionary", "but", "only", "use", "keys", "from", "dict", "that", "already", "have", "a", "corresponding", "attribute", "in", "self", ".", "Any", "keys", "in", "dict", "without", "a", "corresp...
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/optparse.py#L872-L883
[ "def", "_update_careful", "(", "self", ",", "dict", ")", ":", "for", "attr", "in", "dir", "(", "self", ")", ":", "if", "attr", "in", "dict", ":", "dval", "=", "dict", "[", "attr", "]", "if", "dval", "is", "not", "None", ":", "setattr", "(", "self...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
OptionContainer.add_option
add_option(Option) add_option(opt_str, ..., kwarg=val, ...)
third_party/stdlib/optparse.py
def add_option(self, *args, **kwargs): """add_option(Option) add_option(opt_str, ..., kwarg=val, ...) """ if type(args[0]) in types.StringTypes: option = self.option_class(*args, **kwargs) elif len(args) == 1 and not kwargs: option = args[0] if not isinstance(option, Option): raise TypeError, "not an Option instance: %r" % option else: raise TypeError, "invalid arguments" self._check_conflict(option) self.option_list.append(option) option.container = self for opt in option._short_opts: self._short_opt[opt] = option for opt in option._long_opts: self._long_opt[opt] = option if option.dest is not None: # option has a dest, we need a default if option.default is not NO_DEFAULT: self.defaults[option.dest] = option.default elif option.dest not in self.defaults: self.defaults[option.dest] = None return option
def add_option(self, *args, **kwargs): """add_option(Option) add_option(opt_str, ..., kwarg=val, ...) """ if type(args[0]) in types.StringTypes: option = self.option_class(*args, **kwargs) elif len(args) == 1 and not kwargs: option = args[0] if not isinstance(option, Option): raise TypeError, "not an Option instance: %r" % option else: raise TypeError, "invalid arguments" self._check_conflict(option) self.option_list.append(option) option.container = self for opt in option._short_opts: self._short_opt[opt] = option for opt in option._long_opts: self._long_opt[opt] = option if option.dest is not None: # option has a dest, we need a default if option.default is not NO_DEFAULT: self.defaults[option.dest] = option.default elif option.dest not in self.defaults: self.defaults[option.dest] = None return option
[ "add_option", "(", "Option", ")", "add_option", "(", "opt_str", "...", "kwarg", "=", "val", "...", ")" ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/optparse.py#L1024-L1052
[ "def", "add_option", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "type", "(", "args", "[", "0", "]", ")", "in", "types", ".", "StringTypes", ":", "option", "=", "self", ".", "option_class", "(", "*", "args", ",", "*", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
OptionParser.destroy
Declare that you are done with this OptionParser. This cleans up reference cycles so the OptionParser (and all objects referenced by it) can be garbage-collected promptly. After calling destroy(), the OptionParser is unusable.
third_party/stdlib/optparse.py
def destroy(self): """ Declare that you are done with this OptionParser. This cleans up reference cycles so the OptionParser (and all objects referenced by it) can be garbage-collected promptly. After calling destroy(), the OptionParser is unusable. """ OptionContainer.destroy(self) for group in self.option_groups: group.destroy() del self.option_list del self.option_groups del self.formatter
def destroy(self): """ Declare that you are done with this OptionParser. This cleans up reference cycles so the OptionParser (and all objects referenced by it) can be garbage-collected promptly. After calling destroy(), the OptionParser is unusable. """ OptionContainer.destroy(self) for group in self.option_groups: group.destroy() del self.option_list del self.option_groups del self.formatter
[ "Declare", "that", "you", "are", "done", "with", "this", "OptionParser", ".", "This", "cleans", "up", "reference", "cycles", "so", "the", "OptionParser", "(", "and", "all", "objects", "referenced", "by", "it", ")", "can", "be", "garbage", "-", "collected", ...
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/optparse.py#L1241-L1253
[ "def", "destroy", "(", "self", ")", ":", "OptionContainer", ".", "destroy", "(", "self", ")", "for", "group", "in", "self", ".", "option_groups", ":", "group", ".", "destroy", "(", ")", "del", "self", ".", "option_list", "del", "self", ".", "option_group...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
OptionParser.parse_args
parse_args(args : [string] = sys.argv[1:], values : Values = None) -> (values : Values, args : [string]) Parse the command-line options found in 'args' (default: sys.argv[1:]). Any errors result in a call to 'error()', which by default prints the usage message to stderr and calls sys.exit() with an error message. On success returns a pair (values, args) where 'values' is a Values instance (with all your option values) and 'args' is the list of arguments left over after parsing options.
third_party/stdlib/optparse.py
def parse_args(self, args=None, values=None): """ parse_args(args : [string] = sys.argv[1:], values : Values = None) -> (values : Values, args : [string]) Parse the command-line options found in 'args' (default: sys.argv[1:]). Any errors result in a call to 'error()', which by default prints the usage message to stderr and calls sys.exit() with an error message. On success returns a pair (values, args) where 'values' is a Values instance (with all your option values) and 'args' is the list of arguments left over after parsing options. """ rargs = self._get_args(args) if values is None: values = self.get_default_values() # Store the halves of the argument list as attributes for the # convenience of callbacks: # rargs # the rest of the command-line (the "r" stands for # "remaining" or "right-hand") # largs # the leftover arguments -- ie. what's left after removing # options and their arguments (the "l" stands for "leftover" # or "left-hand") self.rargs = rargs self.largs = largs = [] self.values = values try: stop = self._process_args(largs, rargs, values) except (BadOptionError, OptionValueError), err: self.error(str(err)) args = largs + rargs return self.check_values(values, args)
def parse_args(self, args=None, values=None): """ parse_args(args : [string] = sys.argv[1:], values : Values = None) -> (values : Values, args : [string]) Parse the command-line options found in 'args' (default: sys.argv[1:]). Any errors result in a call to 'error()', which by default prints the usage message to stderr and calls sys.exit() with an error message. On success returns a pair (values, args) where 'values' is a Values instance (with all your option values) and 'args' is the list of arguments left over after parsing options. """ rargs = self._get_args(args) if values is None: values = self.get_default_values() # Store the halves of the argument list as attributes for the # convenience of callbacks: # rargs # the rest of the command-line (the "r" stands for # "remaining" or "right-hand") # largs # the leftover arguments -- ie. what's left after removing # options and their arguments (the "l" stands for "leftover" # or "left-hand") self.rargs = rargs self.largs = largs = [] self.values = values try: stop = self._process_args(largs, rargs, values) except (BadOptionError, OptionValueError), err: self.error(str(err)) args = largs + rargs return self.check_values(values, args)
[ "parse_args", "(", "args", ":", "[", "string", "]", "=", "sys", ".", "argv", "[", "1", ":", "]", "values", ":", "Values", "=", "None", ")", "-", ">", "(", "values", ":", "Values", "args", ":", "[", "string", "]", ")" ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/optparse.py#L1386-L1423
[ "def", "parse_args", "(", "self", ",", "args", "=", "None", ",", "values", "=", "None", ")", ":", "rargs", "=", "self", ".", "_get_args", "(", "args", ")", "if", "values", "is", "None", ":", "values", "=", "self", ".", "get_default_values", "(", ")",...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
OptionParser.print_help
print_help(file : file = stdout) Print an extended help message, listing all options and any help text provided with them, to 'file' (default stdout).
third_party/stdlib/optparse.py
def print_help(self, file=None): """print_help(file : file = stdout) Print an extended help message, listing all options and any help text provided with them, to 'file' (default stdout). """ if file is None: file = sys.stdout encoding = self._get_encoding(file) # file.write(self.format_help().encode(encoding, "replace")) file.write(self.format_help())
def print_help(self, file=None): """print_help(file : file = stdout) Print an extended help message, listing all options and any help text provided with them, to 'file' (default stdout). """ if file is None: file = sys.stdout encoding = self._get_encoding(file) # file.write(self.format_help().encode(encoding, "replace")) file.write(self.format_help())
[ "print_help", "(", "file", ":", "file", "=", "stdout", ")" ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/optparse.py#L1691-L1701
[ "def", "print_help", "(", "self", ",", "file", "=", "None", ")", ":", "if", "file", "is", "None", ":", "file", "=", "sys", ".", "stdout", "encoding", "=", "self", ".", "_get_encoding", "(", "file", ")", "# file.write(self.format_help().encode(encoding, \"repla...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
deque.reverse
reverse *IN PLACE*
third_party/pypy/_collections.py
def reverse(self): "reverse *IN PLACE*" leftblock = self.left rightblock = self.right leftindex = self.leftndx rightindex = self.rightndx for i in range(self.length // 2): # Validate that pointers haven't met in the middle assert leftblock != rightblock or leftindex < rightindex # Swap (rightblock[rightindex], leftblock[leftindex]) = ( leftblock[leftindex], rightblock[rightindex]) # Advance left block/index pair leftindex += 1 if leftindex == n: leftblock = leftblock[RGTLNK] assert leftblock is not None leftindex = 0 # Step backwards with the right block/index pair rightindex -= 1 if rightindex == -1: rightblock = rightblock[LFTLNK] assert rightblock is not None rightindex = n - 1
def reverse(self): "reverse *IN PLACE*" leftblock = self.left rightblock = self.right leftindex = self.leftndx rightindex = self.rightndx for i in range(self.length // 2): # Validate that pointers haven't met in the middle assert leftblock != rightblock or leftindex < rightindex # Swap (rightblock[rightindex], leftblock[leftindex]) = ( leftblock[leftindex], rightblock[rightindex]) # Advance left block/index pair leftindex += 1 if leftindex == n: leftblock = leftblock[RGTLNK] assert leftblock is not None leftindex = 0 # Step backwards with the right block/index pair rightindex -= 1 if rightindex == -1: rightblock = rightblock[LFTLNK] assert rightblock is not None rightindex = n - 1
[ "reverse", "*", "IN", "PLACE", "*" ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_collections.py#L181-L207
[ "def", "reverse", "(", "self", ")", ":", "leftblock", "=", "self", ".", "left", "rightblock", "=", "self", ".", "right", "leftindex", "=", "self", ".", "leftndx", "rightindex", "=", "self", ".", "rightndx", "for", "i", "in", "range", "(", "self", ".", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
insort_right
Insert item x in list a, and keep it sorted assuming a is sorted. If x is already in a, insert it to the right of the rightmost x. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched.
third_party/stdlib/bisect.py
def insort_right(a, x, lo=0, hi=None): """Insert item x in list a, and keep it sorted assuming a is sorted. If x is already in a, insert it to the right of the rightmost x. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. """ if lo < 0: raise ValueError('lo must be non-negative') if hi is None: hi = len(a) while lo < hi: mid = (lo+hi)//2 if x < a[mid]: hi = mid else: lo = mid+1 a.insert(lo, x)
def insort_right(a, x, lo=0, hi=None): """Insert item x in list a, and keep it sorted assuming a is sorted. If x is already in a, insert it to the right of the rightmost x. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. """ if lo < 0: raise ValueError('lo must be non-negative') if hi is None: hi = len(a) while lo < hi: mid = (lo+hi)//2 if x < a[mid]: hi = mid else: lo = mid+1 a.insert(lo, x)
[ "Insert", "item", "x", "in", "list", "a", "and", "keep", "it", "sorted", "assuming", "a", "is", "sorted", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/bisect.py#L3-L20
[ "def", "insort_right", "(", "a", ",", "x", ",", "lo", "=", "0", ",", "hi", "=", "None", ")", ":", "if", "lo", "<", "0", ":", "raise", "ValueError", "(", "'lo must be non-negative'", ")", "if", "hi", "is", "None", ":", "hi", "=", "len", "(", "a", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
bisect_right
Return the index where to insert item x in list a, assuming a is sorted. The return value i is such that all e in a[:i] have e <= x, and all e in a[i:] have e > x. So if x already appears in the list, a.insert(x) will insert just after the rightmost x already there. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched.
third_party/stdlib/bisect.py
def bisect_right(a, x, lo=0, hi=None): """Return the index where to insert item x in list a, assuming a is sorted. The return value i is such that all e in a[:i] have e <= x, and all e in a[i:] have e > x. So if x already appears in the list, a.insert(x) will insert just after the rightmost x already there. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. """ if lo < 0: raise ValueError('lo must be non-negative') if hi is None: hi = len(a) while lo < hi: mid = (lo+hi)//2 if x < a[mid]: hi = mid else: lo = mid+1 return lo
def bisect_right(a, x, lo=0, hi=None): """Return the index where to insert item x in list a, assuming a is sorted. The return value i is such that all e in a[:i] have e <= x, and all e in a[i:] have e > x. So if x already appears in the list, a.insert(x) will insert just after the rightmost x already there. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. """ if lo < 0: raise ValueError('lo must be non-negative') if hi is None: hi = len(a) while lo < hi: mid = (lo+hi)//2 if x < a[mid]: hi = mid else: lo = mid+1 return lo
[ "Return", "the", "index", "where", "to", "insert", "item", "x", "in", "list", "a", "assuming", "a", "is", "sorted", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/bisect.py#L24-L43
[ "def", "bisect_right", "(", "a", ",", "x", ",", "lo", "=", "0", ",", "hi", "=", "None", ")", ":", "if", "lo", "<", "0", ":", "raise", "ValueError", "(", "'lo must be non-negative'", ")", "if", "hi", "is", "None", ":", "hi", "=", "len", "(", "a", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
bisect_left
Return the index where to insert item x in list a, assuming a is sorted. The return value i is such that all e in a[:i] have e < x, and all e in a[i:] have e >= x. So if x already appears in the list, a.insert(x) will insert just before the leftmost x already there. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched.
third_party/stdlib/bisect.py
def bisect_left(a, x, lo=0, hi=None): """Return the index where to insert item x in list a, assuming a is sorted. The return value i is such that all e in a[:i] have e < x, and all e in a[i:] have e >= x. So if x already appears in the list, a.insert(x) will insert just before the leftmost x already there. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. """ if lo < 0: raise ValueError('lo must be non-negative') if hi is None: hi = len(a) while lo < hi: mid = (lo+hi)//2 if a[mid] < x: lo = mid+1 else: hi = mid return lo
def bisect_left(a, x, lo=0, hi=None): """Return the index where to insert item x in list a, assuming a is sorted. The return value i is such that all e in a[:i] have e < x, and all e in a[i:] have e >= x. So if x already appears in the list, a.insert(x) will insert just before the leftmost x already there. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. """ if lo < 0: raise ValueError('lo must be non-negative') if hi is None: hi = len(a) while lo < hi: mid = (lo+hi)//2 if a[mid] < x: lo = mid+1 else: hi = mid return lo
[ "Return", "the", "index", "where", "to", "insert", "item", "x", "in", "list", "a", "assuming", "a", "is", "sorted", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/bisect.py#L67-L86
[ "def", "bisect_left", "(", "a", ",", "x", ",", "lo", "=", "0", ",", "hi", "=", "None", ")", ":", "if", "lo", "<", "0", ":", "raise", "ValueError", "(", "'lo must be non-negative'", ")", "if", "hi", "is", "None", ":", "hi", "=", "len", "(", "a", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
mutex.lock
Lock a mutex, call the function with supplied argument when it is acquired. If the mutex is already locked, place function and argument in the queue.
third_party/stdlib/mutex.py
def lock(self, function, argument): """Lock a mutex, call the function with supplied argument when it is acquired. If the mutex is already locked, place function and argument in the queue.""" if self.testandset(): function(argument) else: self.queue.append((function, argument))
def lock(self, function, argument): """Lock a mutex, call the function with supplied argument when it is acquired. If the mutex is already locked, place function and argument in the queue.""" if self.testandset(): function(argument) else: self.queue.append((function, argument))
[ "Lock", "a", "mutex", "call", "the", "function", "with", "supplied", "argument", "when", "it", "is", "acquired", ".", "If", "the", "mutex", "is", "already", "locked", "place", "function", "and", "argument", "in", "the", "queue", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/mutex.py#L39-L46
[ "def", "lock", "(", "self", ",", "function", ",", "argument", ")", ":", "if", "self", ".", "testandset", "(", ")", ":", "function", "(", "argument", ")", "else", ":", "self", ".", "queue", ".", "append", "(", "(", "function", ",", "argument", ")", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
mutex.unlock
Unlock a mutex. If the queue is not empty, call the next function with its argument.
third_party/stdlib/mutex.py
def unlock(self): """Unlock a mutex. If the queue is not empty, call the next function with its argument.""" if self.queue: function, argument = self.queue.popleft() function(argument) else: self.locked = False
def unlock(self): """Unlock a mutex. If the queue is not empty, call the next function with its argument.""" if self.queue: function, argument = self.queue.popleft() function(argument) else: self.locked = False
[ "Unlock", "a", "mutex", ".", "If", "the", "queue", "is", "not", "empty", "call", "the", "next", "function", "with", "its", "argument", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/mutex.py#L48-L55
[ "def", "unlock", "(", "self", ")", ":", "if", "self", ".", "queue", ":", "function", ",", "argument", "=", "self", ".", "queue", ".", "popleft", "(", ")", "function", "(", "argument", ")", "else", ":", "self", ".", "locked", "=", "False" ]
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
OrderedDict.clear
od.clear() -> None. Remove all items from od.
third_party/stdlib/collections.py
def clear(self): 'od.clear() -> None. Remove all items from od.' root = self.__root root[:] = [root, root, None] self.__map.clear() dict.clear(self)
def clear(self): 'od.clear() -> None. Remove all items from od.' root = self.__root root[:] = [root, root, None] self.__map.clear() dict.clear(self)
[ "od", ".", "clear", "()", "-", ">", "None", ".", "Remove", "all", "items", "from", "od", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/collections.py#L118-L123
[ "def", "clear", "(", "self", ")", ":", "root", "=", "self", ".", "__root", "root", "[", ":", "]", "=", "[", "root", ",", "root", ",", "None", "]", "self", ".", "__map", ".", "clear", "(", ")", "dict", ".", "clear", "(", "self", ")" ]
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
OrderedDict.popitem
od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false.
third_party/stdlib/collections.py
def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. ''' if not self: raise KeyError('dictionary is empty') key = next(reversed(self) if last else iter(self)) value = self.pop(key) return key, value
def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. ''' if not self: raise KeyError('dictionary is empty') key = next(reversed(self) if last else iter(self)) value = self.pop(key) return key, value
[ "od", ".", "popitem", "()", "-", ">", "(", "k", "v", ")", "return", "and", "remove", "a", "(", "key", "value", ")", "pair", ".", "Pairs", "are", "returned", "in", "LIFO", "order", "if", "last", "is", "true", "or", "FIFO", "order", "if", "false", ...
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/collections.py#L180-L189
[ "def", "popitem", "(", "self", ",", "last", "=", "True", ")", ":", "if", "not", "self", ":", "raise", "KeyError", "(", "'dictionary is empty'", ")", "key", "=", "next", "(", "reversed", "(", "self", ")", "if", "last", "else", "iter", "(", "self", ")"...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
OrderedDict.fromkeys
OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. If not specified, the value defaults to None.
third_party/stdlib/collections.py
def fromkeys(cls, iterable, value=None): '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. If not specified, the value defaults to None. ''' self = cls() for key in iterable: self[key] = value return self
def fromkeys(cls, iterable, value=None): '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. If not specified, the value defaults to None. ''' self = cls() for key in iterable: self[key] = value return self
[ "OD", ".", "fromkeys", "(", "S", "[", "v", "]", ")", "-", ">", "New", "ordered", "dictionary", "with", "keys", "from", "S", ".", "If", "not", "specified", "the", "value", "defaults", "to", "None", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/collections.py#L219-L227
[ "def", "fromkeys", "(", "cls", ",", "iterable", ",", "value", "=", "None", ")", ":", "self", "=", "cls", "(", ")", "for", "key", "in", "iterable", ":", "self", "[", "key", "]", "=", "value", "return", "self" ]
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
Counter.update
Like dict.update() but add counts instead of replacing them. Source can be an iterable, a dictionary, or another Counter instance. >>> c = Counter('which') >>> c.update('witch') # add elements from another iterable >>> d = Counter('watch') >>> c.update(d) # add elements from another counter >>> c['h'] # four 'h' in which, witch, and watch 4
third_party/stdlib/collections.py
def update(*args, **kwds): '''Like dict.update() but add counts instead of replacing them. Source can be an iterable, a dictionary, or another Counter instance. >>> c = Counter('which') >>> c.update('witch') # add elements from another iterable >>> d = Counter('watch') >>> c.update(d) # add elements from another counter >>> c['h'] # four 'h' in which, witch, and watch 4 ''' # The regular dict.update() operation makes no sense here because the # replace behavior results in the some of original untouched counts # being mixed-in with all of the other counts for a mismash that # doesn't have a straight-forward interpretation in most counting # contexts. Instead, we implement straight-addition. Both the inputs # and outputs are allowed to contain zero and negative counts. if not args: raise TypeError("descriptor 'update' of 'Counter' object " "needs an argument") self = args[0] args = args[1:] if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) iterable = args[0] if args else None if iterable is not None: if isinstance(iterable, Mapping): if self: self_get = self.get for elem, count in iterable.iteritems(): self[elem] = self_get(elem, 0) + count else: super(Counter, self).update(iterable) # fast path when counter is empty else: self_get = self.get for elem in iterable: self[elem] = self_get(elem, 0) + 1 if kwds: self.update(kwds)
def update(*args, **kwds): '''Like dict.update() but add counts instead of replacing them. Source can be an iterable, a dictionary, or another Counter instance. >>> c = Counter('which') >>> c.update('witch') # add elements from another iterable >>> d = Counter('watch') >>> c.update(d) # add elements from another counter >>> c['h'] # four 'h' in which, witch, and watch 4 ''' # The regular dict.update() operation makes no sense here because the # replace behavior results in the some of original untouched counts # being mixed-in with all of the other counts for a mismash that # doesn't have a straight-forward interpretation in most counting # contexts. Instead, we implement straight-addition. Both the inputs # and outputs are allowed to contain zero and negative counts. if not args: raise TypeError("descriptor 'update' of 'Counter' object " "needs an argument") self = args[0] args = args[1:] if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) iterable = args[0] if args else None if iterable is not None: if isinstance(iterable, Mapping): if self: self_get = self.get for elem, count in iterable.iteritems(): self[elem] = self_get(elem, 0) + count else: super(Counter, self).update(iterable) # fast path when counter is empty else: self_get = self.get for elem in iterable: self[elem] = self_get(elem, 0) + 1 if kwds: self.update(kwds)
[ "Like", "dict", ".", "update", "()", "but", "add", "counts", "instead", "of", "replacing", "them", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/collections.py#L538-L579
[ "def", "update", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "# The regular dict.update() operation makes no sense here because the", "# replace behavior results in the some of original untouched counts", "# being mixed-in with all of the other counts for a mismash that", "# doesn...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
Counter.subtract
Like dict.update() but subtracts counts instead of replacing them. Counts can be reduced below zero. Both the inputs and outputs are allowed to contain zero and negative counts. Source can be an iterable, a dictionary, or another Counter instance. >>> c = Counter('which') >>> c.subtract('witch') # subtract elements from another iterable >>> c.subtract(Counter('watch')) # subtract elements from another counter >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch 0 >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch -1
third_party/stdlib/collections.py
def subtract(*args, **kwds): '''Like dict.update() but subtracts counts instead of replacing them. Counts can be reduced below zero. Both the inputs and outputs are allowed to contain zero and negative counts. Source can be an iterable, a dictionary, or another Counter instance. >>> c = Counter('which') >>> c.subtract('witch') # subtract elements from another iterable >>> c.subtract(Counter('watch')) # subtract elements from another counter >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch 0 >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch -1 ''' if not args: raise TypeError("descriptor 'subtract' of 'Counter' object " "needs an argument") self = args[0] args = args[1:] if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) iterable = args[0] if args else None if iterable is not None: self_get = self.get if isinstance(iterable, Mapping): for elem, count in iterable.items(): self[elem] = self_get(elem, 0) - count else: for elem in iterable: self[elem] = self_get(elem, 0) - 1 if kwds: self.subtract(kwds)
def subtract(*args, **kwds): '''Like dict.update() but subtracts counts instead of replacing them. Counts can be reduced below zero. Both the inputs and outputs are allowed to contain zero and negative counts. Source can be an iterable, a dictionary, or another Counter instance. >>> c = Counter('which') >>> c.subtract('witch') # subtract elements from another iterable >>> c.subtract(Counter('watch')) # subtract elements from another counter >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch 0 >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch -1 ''' if not args: raise TypeError("descriptor 'subtract' of 'Counter' object " "needs an argument") self = args[0] args = args[1:] if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) iterable = args[0] if args else None if iterable is not None: self_get = self.get if isinstance(iterable, Mapping): for elem, count in iterable.items(): self[elem] = self_get(elem, 0) - count else: for elem in iterable: self[elem] = self_get(elem, 0) - 1 if kwds: self.subtract(kwds)
[ "Like", "dict", ".", "update", "()", "but", "subtracts", "counts", "instead", "of", "replacing", "them", ".", "Counts", "can", "be", "reduced", "below", "zero", ".", "Both", "the", "inputs", "and", "outputs", "are", "allowed", "to", "contain", "zero", "and...
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/collections.py#L581-L614
[ "def", "subtract", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "if", "not", "args", ":", "raise", "TypeError", "(", "\"descriptor 'subtract' of 'Counter' object \"", "\"needs an argument\"", ")", "self", "=", "args", "[", "0", "]", "args", "=", "args"...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
MD5Type.digest
Terminate the message-digest computation and return digest. Return the digest of the strings passed to the update() method so far. This is a 16-byte string which may contain non-ASCII characters, including null bytes.
third_party/pypy/_md5.py
def digest(self): """Terminate the message-digest computation and return digest. Return the digest of the strings passed to the update() method so far. This is a 16-byte string which may contain non-ASCII characters, including null bytes. """ A = self.A B = self.B C = self.C D = self.D input = [] + self.input count = [] + self.count index = (self.count[0] >> 3) & 0x3f if index < 56: padLen = 56 - index else: padLen = 120 - index padding = [b'\200'] + [b'\000'] * 63 self.update(padding[:padLen]) # Append length (before padding). bits = _bytelist2long(self.input[:56]) + count self._transform(bits) # Store state in digest. digest = struct.pack("<IIII", self.A, self.B, self.C, self.D) self.A = A self.B = B self.C = C self.D = D self.input = input self.count = count return digest
def digest(self): """Terminate the message-digest computation and return digest. Return the digest of the strings passed to the update() method so far. This is a 16-byte string which may contain non-ASCII characters, including null bytes. """ A = self.A B = self.B C = self.C D = self.D input = [] + self.input count = [] + self.count index = (self.count[0] >> 3) & 0x3f if index < 56: padLen = 56 - index else: padLen = 120 - index padding = [b'\200'] + [b'\000'] * 63 self.update(padding[:padLen]) # Append length (before padding). bits = _bytelist2long(self.input[:56]) + count self._transform(bits) # Store state in digest. digest = struct.pack("<IIII", self.A, self.B, self.C, self.D) self.A = A self.B = B self.C = C self.D = D self.input = input self.count = count return digest
[ "Terminate", "the", "message", "-", "digest", "computation", "and", "return", "digest", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_md5.py#L297-L337
[ "def", "digest", "(", "self", ")", ":", "A", "=", "self", ".", "A", "B", "=", "self", ".", "B", "C", "=", "self", ".", "C", "D", "=", "self", ".", "D", "input", "=", "[", "]", "+", "self", ".", "input", "count", "=", "[", "]", "+", "self"...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
MD5Type.copy
Return a clone object. Return a copy ('clone') of the md5 object. This can be used to efficiently compute the digests of strings that share a common initial substring.
third_party/pypy/_md5.py
def copy(self): """Return a clone object. Return a copy ('clone') of the md5 object. This can be used to efficiently compute the digests of strings that share a common initial substring. """ if 0: # set this to 1 to make the flow space crash return copy.deepcopy(self) clone = self.__class__() clone.length = self.length clone.count = [] + self.count[:] clone.input = [] + self.input clone.A = self.A clone.B = self.B clone.C = self.C clone.D = self.D return clone
def copy(self): """Return a clone object. Return a copy ('clone') of the md5 object. This can be used to efficiently compute the digests of strings that share a common initial substring. """ if 0: # set this to 1 to make the flow space crash return copy.deepcopy(self) clone = self.__class__() clone.length = self.length clone.count = [] + self.count[:] clone.input = [] + self.input clone.A = self.A clone.B = self.B clone.C = self.C clone.D = self.D return clone
[ "Return", "a", "clone", "object", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_md5.py#L350-L367
[ "def", "copy", "(", "self", ")", ":", "if", "0", ":", "# set this to 1 to make the flow space crash", "return", "copy", ".", "deepcopy", "(", "self", ")", "clone", "=", "self", ".", "__class__", "(", ")", "clone", ".", "length", "=", "self", ".", "length",...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
compile
Compiles (or rather just converts) a pattern descriptor to a SRE_Pattern object. Actual compilation to opcodes happens in sre_compile.
third_party/pypy/_sre.py
def compile(pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]): """Compiles (or rather just converts) a pattern descriptor to a SRE_Pattern object. Actual compilation to opcodes happens in sre_compile.""" return SRE_Pattern(pattern, flags, code, groups, groupindex, indexgroup)
def compile(pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]): """Compiles (or rather just converts) a pattern descriptor to a SRE_Pattern object. Actual compilation to opcodes happens in sre_compile.""" return SRE_Pattern(pattern, flags, code, groups, groupindex, indexgroup)
[ "Compiles", "(", "or", "rather", "just", "converts", ")", "a", "pattern", "descriptor", "to", "a", "SRE_Pattern", "object", ".", "Actual", "compilation", "to", "opcodes", "happens", "in", "sre_compile", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_sre.py#L40-L43
[ "def", "compile", "(", "pattern", ",", "flags", ",", "code", ",", "groups", "=", "0", ",", "groupindex", "=", "{", "}", ",", "indexgroup", "=", "[", "None", "]", ")", ":", "return", "SRE_Pattern", "(", "pattern", ",", "flags", ",", "code", ",", "gr...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
SRE_Pattern.search
Scan through string looking for a location where this regular expression produces a match, and return a corresponding MatchObject instance. Return None if no position in the string matches the pattern.
third_party/pypy/_sre.py
def search(self, string, pos=0, endpos=sys.maxint): """Scan through string looking for a location where this regular expression produces a match, and return a corresponding MatchObject instance. Return None if no position in the string matches the pattern.""" state = _State(string, pos, endpos, self.flags) if state.search(self._code): return SRE_Match(self, state) else: return None
def search(self, string, pos=0, endpos=sys.maxint): """Scan through string looking for a location where this regular expression produces a match, and return a corresponding MatchObject instance. Return None if no position in the string matches the pattern.""" state = _State(string, pos, endpos, self.flags) if state.search(self._code): return SRE_Match(self, state) else: return None
[ "Scan", "through", "string", "looking", "for", "a", "location", "where", "this", "regular", "expression", "produces", "a", "match", "and", "return", "a", "corresponding", "MatchObject", "instance", ".", "Return", "None", "if", "no", "position", "in", "the", "s...
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_sre.py#L74-L83
[ "def", "search", "(", "self", ",", "string", ",", "pos", "=", "0", ",", "endpos", "=", "sys", ".", "maxint", ")", ":", "state", "=", "_State", "(", "string", ",", "pos", ",", "endpos", ",", "self", ".", "flags", ")", "if", "state", ".", "search",...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
SRE_Pattern.findall
Return a list of all non-overlapping matches of pattern in string.
third_party/pypy/_sre.py
def findall(self, string, pos=0, endpos=sys.maxint): """Return a list of all non-overlapping matches of pattern in string.""" matchlist = [] state = _State(string, pos, endpos, self.flags) while state.start <= state.end: state.reset() state.string_position = state.start if not state.search(self._code): break match = SRE_Match(self, state) if self.groups == 0 or self.groups == 1: item = match.group(self.groups) else: item = match.groups("") matchlist.append(item) if state.string_position == state.start: state.start += 1 else: state.start = state.string_position return matchlist
def findall(self, string, pos=0, endpos=sys.maxint): """Return a list of all non-overlapping matches of pattern in string.""" matchlist = [] state = _State(string, pos, endpos, self.flags) while state.start <= state.end: state.reset() state.string_position = state.start if not state.search(self._code): break match = SRE_Match(self, state) if self.groups == 0 or self.groups == 1: item = match.group(self.groups) else: item = match.groups("") matchlist.append(item) if state.string_position == state.start: state.start += 1 else: state.start = state.string_position return matchlist
[ "Return", "a", "list", "of", "all", "non", "-", "overlapping", "matches", "of", "pattern", "in", "string", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_sre.py#L85-L104
[ "def", "findall", "(", "self", ",", "string", ",", "pos", "=", "0", ",", "endpos", "=", "sys", ".", "maxint", ")", ":", "matchlist", "=", "[", "]", "state", "=", "_State", "(", "string", ",", "pos", ",", "endpos", ",", "self", ".", "flags", ")", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
SRE_Pattern.sub
Return the string obtained by replacing the leftmost non-overlapping occurrences of pattern in string by the replacement repl.
third_party/pypy/_sre.py
def sub(self, repl, string, count=0): """Return the string obtained by replacing the leftmost non-overlapping occurrences of pattern in string by the replacement repl.""" return self._subx(repl, string, count, False)
def sub(self, repl, string, count=0): """Return the string obtained by replacing the leftmost non-overlapping occurrences of pattern in string by the replacement repl.""" return self._subx(repl, string, count, False)
[ "Return", "the", "string", "obtained", "by", "replacing", "the", "leftmost", "non", "-", "overlapping", "occurrences", "of", "pattern", "in", "string", "by", "the", "replacement", "repl", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_sre.py#L144-L147
[ "def", "sub", "(", "self", ",", "repl", ",", "string", ",", "count", "=", "0", ")", ":", "return", "self", ".", "_subx", "(", "repl", ",", "string", ",", "count", ",", "False", ")" ]
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
SRE_Pattern.subn
Return the tuple (new_string, number_of_subs_made) found by replacing the leftmost non-overlapping occurrences of pattern with the replacement repl.
third_party/pypy/_sre.py
def subn(self, repl, string, count=0): """Return the tuple (new_string, number_of_subs_made) found by replacing the leftmost non-overlapping occurrences of pattern with the replacement repl.""" return self._subx(repl, string, count, True)
def subn(self, repl, string, count=0): """Return the tuple (new_string, number_of_subs_made) found by replacing the leftmost non-overlapping occurrences of pattern with the replacement repl.""" return self._subx(repl, string, count, True)
[ "Return", "the", "tuple", "(", "new_string", "number_of_subs_made", ")", "found", "by", "replacing", "the", "leftmost", "non", "-", "overlapping", "occurrences", "of", "pattern", "with", "the", "replacement", "repl", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_sre.py#L149-L153
[ "def", "subn", "(", "self", ",", "repl", ",", "string", ",", "count", "=", "0", ")", ":", "return", "self", ".", "_subx", "(", "repl", ",", "string", ",", "count", ",", "True", ")" ]
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
SRE_Pattern.split
Split string by the occurrences of pattern.
third_party/pypy/_sre.py
def split(self, string, maxsplit=0): """Split string by the occurrences of pattern.""" splitlist = [] state = _State(string, 0, sys.maxint, self.flags) n = 0 last = state.start while not maxsplit or n < maxsplit: state.reset() state.string_position = state.start if not state.search(self._code): break if state.start == state.string_position: # zero-width match if last == state.end: # or end of string break state.start += 1 continue splitlist.append(string[last:state.start]) # add groups (if any) if self.groups: match = SRE_Match(self, state) # TODO: Use .extend once it is implemented. # splitlist.extend(list(match.groups(None))) splitlist += (list(match.groups(None))) n += 1 last = state.start = state.string_position splitlist.append(string[last:state.end]) return splitlist
def split(self, string, maxsplit=0): """Split string by the occurrences of pattern.""" splitlist = [] state = _State(string, 0, sys.maxint, self.flags) n = 0 last = state.start while not maxsplit or n < maxsplit: state.reset() state.string_position = state.start if not state.search(self._code): break if state.start == state.string_position: # zero-width match if last == state.end: # or end of string break state.start += 1 continue splitlist.append(string[last:state.start]) # add groups (if any) if self.groups: match = SRE_Match(self, state) # TODO: Use .extend once it is implemented. # splitlist.extend(list(match.groups(None))) splitlist += (list(match.groups(None))) n += 1 last = state.start = state.string_position splitlist.append(string[last:state.end]) return splitlist
[ "Split", "string", "by", "the", "occurrences", "of", "pattern", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_sre.py#L155-L181
[ "def", "split", "(", "self", ",", "string", ",", "maxsplit", "=", "0", ")", ":", "splitlist", "=", "[", "]", "state", "=", "_State", "(", "string", ",", "0", ",", "sys", ".", "maxint", ",", "self", ".", "flags", ")", "n", "=", "0", "last", "=",...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
SRE_Pattern.finditer
Return a list of all non-overlapping matches of pattern in string.
third_party/pypy/_sre.py
def finditer(self, string, pos=0, endpos=sys.maxint): """Return a list of all non-overlapping matches of pattern in string.""" scanner = self.scanner(string, pos, endpos) return iter(scanner.search, None)
def finditer(self, string, pos=0, endpos=sys.maxint): """Return a list of all non-overlapping matches of pattern in string.""" scanner = self.scanner(string, pos, endpos) return iter(scanner.search, None)
[ "Return", "a", "list", "of", "all", "non", "-", "overlapping", "matches", "of", "pattern", "in", "string", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_sre.py#L183-L186
[ "def", "finditer", "(", "self", ",", "string", ",", "pos", "=", "0", ",", "endpos", "=", "sys", ".", "maxint", ")", ":", "scanner", "=", "self", ".", "scanner", "(", "string", ",", "pos", ",", "endpos", ")", "return", "iter", "(", "scanner", ".", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
SRE_Match._create_regs
Creates a tuple of index pairs representing matched groups.
third_party/pypy/_sre.py
def _create_regs(self, state): """Creates a tuple of index pairs representing matched groups.""" regs = [(state.start, state.string_position)] for group in range(self.re.groups): mark_index = 2 * group if mark_index + 1 < len(state.marks) \ and state.marks[mark_index] is not None \ and state.marks[mark_index + 1] is not None: regs.append((state.marks[mark_index], state.marks[mark_index + 1])) else: regs.append((-1, -1)) return tuple(regs)
def _create_regs(self, state): """Creates a tuple of index pairs representing matched groups.""" regs = [(state.start, state.string_position)] for group in range(self.re.groups): mark_index = 2 * group if mark_index + 1 < len(state.marks) \ and state.marks[mark_index] is not None \ and state.marks[mark_index + 1] is not None: regs.append((state.marks[mark_index], state.marks[mark_index + 1])) else: regs.append((-1, -1)) return tuple(regs)
[ "Creates", "a", "tuple", "of", "index", "pairs", "representing", "matched", "groups", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_sre.py#L246-L257
[ "def", "_create_regs", "(", "self", ",", "state", ")", ":", "regs", "=", "[", "(", "state", ".", "start", ",", "state", ".", "string_position", ")", "]", "for", "group", "in", "range", "(", "self", ".", "re", ".", "groups", ")", ":", "mark_index", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
SRE_Match.groups
Returns a tuple containing all the subgroups of the match. The default argument is used for groups that did not participate in the match (defaults to None).
third_party/pypy/_sre.py
def groups(self, default=None): """Returns a tuple containing all the subgroups of the match. The default argument is used for groups that did not participate in the match (defaults to None).""" groups = [] for indices in self.regs[1:]: if indices[0] >= 0: groups.append(self.string[indices[0]:indices[1]]) else: groups.append(default) return tuple(groups)
def groups(self, default=None): """Returns a tuple containing all the subgroups of the match. The default argument is used for groups that did not participate in the match (defaults to None).""" groups = [] for indices in self.regs[1:]: if indices[0] >= 0: groups.append(self.string[indices[0]:indices[1]]) else: groups.append(default) return tuple(groups)
[ "Returns", "a", "tuple", "containing", "all", "the", "subgroups", "of", "the", "match", ".", "The", "default", "argument", "is", "used", "for", "groups", "that", "did", "not", "participate", "in", "the", "match", "(", "defaults", "to", "None", ")", "." ]
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_sre.py#L296-L306
[ "def", "groups", "(", "self", ",", "default", "=", "None", ")", ":", "groups", "=", "[", "]", "for", "indices", "in", "self", ".", "regs", "[", "1", ":", "]", ":", "if", "indices", "[", "0", "]", ">=", "0", ":", "groups", ".", "append", "(", ...
3ec87959189cfcdeae82eb68a47648ac25ceb10b
valid
SRE_Match.groupdict
Return a dictionary containing all the named subgroups of the match. The default argument is used for groups that did not participate in the match (defaults to None).
third_party/pypy/_sre.py
def groupdict(self, default=None): """Return a dictionary containing all the named subgroups of the match. The default argument is used for groups that did not participate in the match (defaults to None).""" groupdict = {} for key, value in self.re.groupindex.items(): groupdict[key] = self._get_slice(value, default) return groupdict
def groupdict(self, default=None): """Return a dictionary containing all the named subgroups of the match. The default argument is used for groups that did not participate in the match (defaults to None).""" groupdict = {} for key, value in self.re.groupindex.items(): groupdict[key] = self._get_slice(value, default) return groupdict
[ "Return", "a", "dictionary", "containing", "all", "the", "named", "subgroups", "of", "the", "match", ".", "The", "default", "argument", "is", "used", "for", "groups", "that", "did", "not", "participate", "in", "the", "match", "(", "defaults", "to", "None", ...
google/grumpy
python
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_sre.py#L308-L315
[ "def", "groupdict", "(", "self", ",", "default", "=", "None", ")", ":", "groupdict", "=", "{", "}", "for", "key", ",", "value", "in", "self", ".", "re", ".", "groupindex", ".", "items", "(", ")", ":", "groupdict", "[", "key", "]", "=", "self", "....
3ec87959189cfcdeae82eb68a47648ac25ceb10b