Dataset fields (one record per function):

- id: int32 (0–252k)
- repo: string (7–55 chars)
- path: string (4–127 chars)
- func_name: string (1–88 chars)
- original_string: string (75–19.8k chars; identical to `code` in these records)
- language: stringclasses (1 value: python)
- code: string (75–19.8k chars)
- code_tokens: list (the code split into tokens)
- docstring: string (3–17.3k chars)
- docstring_tokens: list (the docstring split into tokens)
- sha: string (40 chars)
- url: string (87–242 chars)

---

id: 8,600
repo: wakatime/wakatime
path: wakatime/packages/pygments/lexers/data.py
func_name: YamlLexer.set_block_scalar_indent
language: python
code:

```python
def set_block_scalar_indent(token_class):
"""Set an explicit indentation level for a block scalar."""
def callback(lexer, match, context):
text = match.group()
context.block_scalar_indent = None
if not text:
return
increment = match.group(1)
if increment:
current_indent = max(context.indent, 0)
increment = int(increment)
context.block_scalar_indent = current_indent + increment
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
```
docstring: Set an explicit indentation level for a block scalar.
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/data.py#L103-L118

---

id: 8,601
repo: wakatime/wakatime
path: wakatime/packages/pygments/lexers/data.py
func_name: YamlLexer.parse_block_scalar_indent
language: python
code:

```python
def parse_block_scalar_indent(token_class):
"""Process indentation spaces in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if context.block_scalar_indent is None:
if len(text) <= max(context.indent, 0):
context.stack.pop()
context.stack.pop()
return
context.block_scalar_indent = len(text)
else:
if len(text) < context.block_scalar_indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
```
docstring: Process indentation spaces in a block scalar.
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/data.py#L137-L155
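
Both callbacks above are internal to Pygments' `YamlLexer` state machine. A minimal usage sketch (not part of the dataset), assuming Pygments is installed, that makes them fire by lexing a literal block scalar with an explicit indentation indicator:

```python
from pygments.lexers import YamlLexer

# "|2" is an explicit indentation indicator, so set_block_scalar_indent
# records context.block_scalar_indent before the scalar body is parsed.
yaml_source = "key: |2\n    an explicitly indented block scalar\n"

for token_type, value in YamlLexer().get_tokens(yaml_source):
    print(token_type, repr(value))
```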

---

id: 8,602
repo: wakatime/wakatime
path: wakatime/packages/requests/models.py
func_name: Response.content
language: python
code:

```python
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0 or self.raw is None:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
```
docstring: Content of the response, in bytes.
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/requests/models.py#L811-L828
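
A minimal sketch (not part of the dataset) of the property's observable behavior through the public `requests` API; the URL is a placeholder:

```python
import requests

resp = requests.get("https://example.com")
body = resp.content            # first access reads and caches the raw bytes
print(type(body), len(body))   # <class 'bytes'> and the payload size
print(resp.content is body)    # True: later accesses return the cached object
```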

---

id: 8,603
repo: wakatime/wakatime
path: wakatime/packages/simplejson/decoder.py
func_name: py_scanstring
language: python
code:

```python
def py_scanstring(s, end, encoding=None, strict=True,
_b=BACKSLASH, _m=STRINGCHUNK.match, _join=u('').join,
_PY3=PY3, _maxunicode=sys.maxunicode):
"""Scan the string s for a JSON string. End is the index of the
character in s after the quote that started the JSON string.
Unescapes all valid JSON string escape sequences and raises ValueError
on attempt to decode an invalid string. If strict is False then literal
control characters are allowed in the string.
Returns a tuple of the decoded string and the index of the character in s
after the end quote."""
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise JSONDecodeError(
"Unterminated string starting at", s, begin)
end = chunk.end()
content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
if content:
if not _PY3 and not isinstance(content, text_type):
content = text_type(content, encoding)
_append(content)
# Terminator is the end of string, a literal control character,
# or a backslash denoting that an escape sequence follows
if terminator == '"':
break
elif terminator != '\\':
if strict:
msg = "Invalid control character %r at"
raise JSONDecodeError(msg, s, end)
else:
_append(terminator)
continue
try:
esc = s[end]
except IndexError:
raise JSONDecodeError(
"Unterminated string starting at", s, begin)
# If not a unicode escape sequence, must be in the lookup table
if esc != 'u':
try:
char = _b[esc]
except KeyError:
msg = "Invalid \\X escape sequence %r"
raise JSONDecodeError(msg, s, end)
end += 1
else:
# Unicode escape sequence
msg = "Invalid \\uXXXX escape sequence"
esc = s[end + 1:end + 5]
escX = esc[1:2]
if len(esc) != 4 or escX == 'x' or escX == 'X':
raise JSONDecodeError(msg, s, end - 1)
try:
uni = int(esc, 16)
except ValueError:
raise JSONDecodeError(msg, s, end - 1)
end += 5
# Check for surrogate pair on UCS-4 systems
# Note that this will join high/low surrogate pairs
# but will also pass unpaired surrogates through
if (_maxunicode > 65535 and
uni & 0xfc00 == 0xd800 and
s[end:end + 2] == '\\u'):
esc2 = s[end + 2:end + 6]
escX = esc2[1:2]
if len(esc2) == 4 and not (escX == 'x' or escX == 'X'):
try:
uni2 = int(esc2, 16)
except ValueError:
raise JSONDecodeError(msg, s, end)
if uni2 & 0xfc00 == 0xdc00:
uni = 0x10000 + (((uni - 0xd800) << 10) |
(uni2 - 0xdc00))
end += 6
char = unichr(uni)
# Append the unescaped character
_append(char)
return _join(chunks), end
```
docstring: Scan the string s for a JSON string. End is the index of the
character in s after the quote that started the JSON string.
Unescapes all valid JSON string escape sequences and raises ValueError
on attempt to decode an invalid string. If strict is False then literal
control characters are allowed in the string.
Returns a tuple of the decoded string and the index of the character in s
after the end quote.
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/simplejson/decoder.py#L49-L133
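
A minimal sketch (not part of the dataset) of the calling convention, assuming `simplejson` is importable: `end` starts just past the opening quote, and the function returns the decoded text together with the index after the closing quote:

```python
import simplejson.decoder as decoder

s = '"hello\\u0020world"'                   # raw JSON string literal
decoded, end = decoder.py_scanstring(s, 1)  # 1 = index after the opening quote
print(decoded)  # hello world
print(end)      # 18, one past the closing quote
```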

---

id: 8,604
repo: wakatime/wakatime
path: wakatime/packages/pygments/formatters/html.py
func_name: HtmlFormatter._get_css_class
language: python
code:

```python
def _get_css_class(self, ttype):
"""Return the css class of this token type prefixed with
the classprefix option."""
ttypeclass = _get_ttype_class(ttype)
if ttypeclass:
return self.classprefix + ttypeclass
return ''
```
docstring: Return the css class of this token type prefixed with
the classprefix option.
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/html.py#L430-L436

---

id: 8,605
repo: wakatime/wakatime
path: wakatime/packages/pygments/formatters/html.py
func_name: HtmlFormatter._get_css_classes
language: python
code:

```python
def _get_css_classes(self, ttype):
"""Return the css classes of this token type prefixed with
the classprefix option."""
cls = self._get_css_class(ttype)
while ttype not in STANDARD_TYPES:
ttype = ttype.parent
cls = self._get_css_class(ttype) + ' ' + cls
return cls
```
docstring: Return the css classes of this token type prefixed with
the classprefix option.
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/html.py#L438-L445
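
A minimal sketch (not part of the dataset) of the `classprefix` option these two helpers honor; it calls the private helper directly, for illustration only:

```python
from pygments.formatters import HtmlFormatter
from pygments.token import Keyword

formatter = HtmlFormatter(classprefix="pyg-")
# Keyword is a standard type, so its short class 'k' gets the prefix.
print(formatter._get_css_classes(Keyword))  # pyg-k
```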

---

id: 8,606
repo: wakatime/wakatime
path: wakatime/packages/pygments/formatters/html.py
func_name: HtmlFormatter.get_style_defs
language: python
code:

```python
def get_style_defs(self, arg=None):
"""
Return CSS style definitions for the classes produced by the current
highlighting style. ``arg`` can be a string or list of selectors to
insert before the token type classes.
"""
if arg is None:
arg = ('cssclass' in self.options and '.'+self.cssclass or '')
if isinstance(arg, string_types):
args = [arg]
else:
args = list(arg)
def prefix(cls):
if cls:
cls = '.' + cls
tmp = []
for arg in args:
tmp.append((arg and arg + ' ' or '') + cls)
return ', '.join(tmp)
styles = [(level, ttype, cls, style)
for cls, (style, ttype, level) in iteritems(self.class2style)
if cls and style]
styles.sort()
lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
for (level, ttype, cls, style) in styles]
if arg and not self.nobackground and \
self.style.background_color is not None:
text_style = ''
if Text in self.ttype2class:
text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
lines.insert(0, '%s { background: %s;%s }' %
(prefix(''), self.style.background_color, text_style))
if self.style.highlight_color is not None:
lines.insert(0, '%s.hll { background-color: %s }' %
(prefix(''), self.style.highlight_color))
return '\n'.join(lines)
```
docstring: Return CSS style definitions for the classes produced by the current
highlighting style. ``arg`` can be a string or list of selectors to
insert before the token type classes.
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/html.py#L471-L508
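
A minimal sketch (not part of the dataset): scoping the generated stylesheet under a wrapper selector, the documented use of ``arg``:

```python
from pygments.formatters import HtmlFormatter

formatter = HtmlFormatter(style="default")
css = formatter.get_style_defs(".highlight")  # prefix every rule with .highlight
print(css.splitlines()[0])  # e.g. the .hll highlight rule, scoped under .highlight
```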

---

id: 8,607
repo: wakatime/wakatime
path: wakatime/packages/pygments/formatters/html.py
func_name: HtmlFormatter._format_lines
language: python
code:

```python
def _format_lines(self, tokensource):
"""
Just format the tokens, without any wrapping tags.
Yield individual lines.
"""
nocls = self.noclasses
lsep = self.lineseparator
# for <span style=""> lookup only
getcls = self.ttype2class.get
c2s = self.class2style
escape_table = _escape_html_table
tagsfile = self.tagsfile
lspan = ''
line = []
for ttype, value in tokensource:
if nocls:
cclass = getcls(ttype)
while cclass is None:
ttype = ttype.parent
cclass = getcls(ttype)
cspan = cclass and '<span style="%s">' % c2s[cclass][0] or ''
else:
cls = self._get_css_classes(ttype)
cspan = cls and '<span class="%s">' % cls or ''
parts = value.translate(escape_table).split('\n')
if tagsfile and ttype in Token.Name:
filename, linenumber = self._lookup_ctag(value)
if linenumber:
base, filename = os.path.split(filename)
if base:
base += '/'
filename, extension = os.path.splitext(filename)
url = self.tagurlformat % {'path': base, 'fname': filename,
'fext': extension}
parts[0] = "<a href=\"%s#%s-%d\">%s" % \
(url, self.lineanchors, linenumber, parts[0])
parts[-1] = parts[-1] + "</a>"
# for all but the last line
for part in parts[:-1]:
if line:
if lspan != cspan:
line.extend(((lspan and '</span>'), cspan, part,
(cspan and '</span>'), lsep))
else: # both are the same
line.extend((part, (lspan and '</span>'), lsep))
yield 1, ''.join(line)
line = []
elif part:
yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep))
else:
yield 1, lsep
# for the last line
if line and parts[-1]:
if lspan != cspan:
line.extend(((lspan and '</span>'), cspan, parts[-1]))
lspan = cspan
else:
line.append(parts[-1])
elif parts[-1]:
line = [cspan, parts[-1]]
lspan = cspan
# else we neither have to open a new span nor set lspan
if line:
line.extend(((lspan and '</span>'), lsep))
yield 1, ''.join(line)
```
docstring: Just format the tokens, without any wrapping tags.
Yield individual lines.
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/html.py#L712-L781
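
`_format_lines` is internal, but its line-by-line output is visible through the public API. A minimal sketch (not part of the dataset) using the real `nowrap` option to skip the outer wrappers:

```python
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

# One <span>-wrapped line per source line, with no outer <div>/<pre> markup.
print(highlight("x = 1\ny = 2\n", PythonLexer(), HtmlFormatter(nowrap=True)))
```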

---

id: 8,608
repo: wakatime/wakatime
path: wakatime/packages/pygments/formatters/html.py
func_name: HtmlFormatter._highlight_lines
language: python
code:

```python
def _highlight_lines(self, tokensource):
"""
        Highlight the lines specified in the `hl_lines` option by
post-processing the token stream coming from `_format_lines`.
"""
hls = self.hl_lines
for i, (t, value) in enumerate(tokensource):
if t != 1:
yield t, value
if i + 1 in hls: # i + 1 because Python indexes start at 0
if self.noclasses:
style = ''
if self.style.highlight_color is not None:
style = (' style="background-color: %s"' %
(self.style.highlight_color,))
yield 1, '<span%s>%s</span>' % (style, value)
else:
yield 1, '<span class="hll">%s</span>' % value
else:
yield 1, value
```
docstring: Highlight the lines specified in the `hl_lines` option by
post-processing the token stream coming from `_format_lines`.
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/html.py#L790-L810
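
A minimal sketch (not part of the dataset) of the `hl_lines` option that drives this post-processing step:

```python
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

code = "a = 1\nb = 2\nc = 3\n"
html = highlight(code, PythonLexer(), HtmlFormatter(hl_lines=[2]))
print('<span class="hll">' in html)  # True: line 2 got the hll wrapper
```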

---

id: 8,609
repo: wakatime/wakatime
path: wakatime/packages/pygments/formatters/html.py
func_name: HtmlFormatter.format_unencoded
language: python
code:

```python
def format_unencoded(self, tokensource, outfile):
"""
The formatting process uses several nested generators; which of
them are used is determined by the user's options.
Each generator should take at least one argument, ``inner``,
and wrap the pieces of text generated by this.
Always yield 2-tuples: (code, text). If "code" is 1, the text
is part of the original tokensource being highlighted, if it's
0, the text is some piece of wrapping. This makes it possible to
use several different wrappers that process the original source
linewise, e.g. line number generators.
"""
source = self._format_lines(tokensource)
if self.hl_lines:
source = self._highlight_lines(source)
if not self.nowrap:
if self.linenos == 2:
source = self._wrap_inlinelinenos(source)
if self.lineanchors:
source = self._wrap_lineanchors(source)
if self.linespans:
source = self._wrap_linespans(source)
source = self.wrap(source, outfile)
if self.linenos == 1:
source = self._wrap_tablelinenos(source)
if self.full:
source = self._wrap_full(source, outfile)
for t, piece in source:
outfile.write(piece)
```
docstring: The formatting process uses several nested generators; which of
them are used is determined by the user's options.
Each generator should take at least one argument, ``inner``,
and wrap the pieces of text generated by this.
Always yield 2-tuples: (code, text). If "code" is 1, the text
is part of the original tokensource being highlighted, if it's
0, the text is some piece of wrapping. This makes it possible to
use several different wrappers that process the original source
linewise, e.g. line number generators.
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/html.py#L820-L851
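
A minimal sketch (not part of the dataset) that switches on two of the wrappers chosen here (table line numbers and line anchors) via the public `format()` entry point:

```python
import io

from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

buf = io.StringIO()
tokens = PythonLexer().get_tokens("x = 1\ny = 2\n")
# linenos='table' selects the _wrap_tablelinenos branch (self.linenos == 1).
HtmlFormatter(linenos="table", lineanchors="line").format(tokens, buf)
print(buf.getvalue()[:120])
```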

---

id: 8,610
repo: wakatime/wakatime
path: wakatime/packages/pygments/lexer.py
func_name: bygroups
language: python
code:

```python
def bygroups(*args):
"""
Callback that yields multiple actions for each group in the match.
"""
def callback(lexer, match, ctx=None):
for i, action in enumerate(args):
if action is None:
continue
elif type(action) is _TokenType:
data = match.group(i + 1)
if data:
yield match.start(i + 1), action, data
else:
data = match.group(i + 1)
if data is not None:
if ctx:
ctx.pos = match.start(i + 1)
for item in action(lexer,
_PseudoMatch(match.start(i + 1), data), ctx):
if item:
yield item
if ctx:
ctx.pos = match.end()
return callback
```
docstring: Callback that yields multiple actions for each group in the match.
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexer.py#L305-L328
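
A minimal sketch (not part of the dataset): a toy `RegexLexer` whose rule assigns each regex group its own token type via `bygroups`:

```python
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Name, Number, Operator, Whitespace

class AssignLexer(RegexLexer):
    """Toy lexer for lines like 'x = 42'."""
    tokens = {
        "root": [
            (r"(\w+)(\s*=\s*)(\d+)", bygroups(Name, Operator, Number)),
            (r"\s+", Whitespace),
        ],
    }

for token_type, value in AssignLexer().get_tokens("x = 42"):
    print(token_type, repr(value))
```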

---

id: 8,611
repo: wakatime/wakatime
path: wakatime/packages/pygments/lexer.py
func_name: using
language: python
code:

```python
def using(_other, **kwargs):
"""
Callback that processes the match with a different lexer.
The keyword arguments are forwarded to the lexer, except `state` which
is handled separately.
`state` specifies the state that the new lexer will start in, and can
be an enumerable such as ('root', 'inline', 'string') or a simple
string which is assumed to be on top of the root state.
Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
"""
gt_kwargs = {}
if 'state' in kwargs:
s = kwargs.pop('state')
if isinstance(s, (list, tuple)):
gt_kwargs['stack'] = s
else:
gt_kwargs['stack'] = ('root', s)
if _other is this:
def callback(lexer, match, ctx=None):
# if keyword arguments are given the callback
# function has to create a new lexer instance
if kwargs:
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = lexer.__class__(**kwargs)
else:
lx = lexer
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
else:
def callback(lexer, match, ctx=None):
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = _other(**kwargs)
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
return callback
```
docstring: Callback that processes the match with a different lexer.
The keyword arguments are forwarded to the lexer, except `state` which
is handled separately.
`state` specifies the state that the new lexer will start in, and can
be an enumerable such as ('root', 'inline', 'string') or a simple
string which is assumed to be on top of the root state.
Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexer.py#L339-L386
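
A minimal sketch (not part of the dataset): combining `using` with `bygroups`, so the tail of a matched line is delegated to `PythonLexer`:

```python
from pygments.lexer import RegexLexer, bygroups, using
from pygments.lexers import PythonLexer
from pygments.token import Generic, Text

class PromptLexer(RegexLexer):
    """Toy lexer: 'py> ' lines get their remainder lexed as Python."""
    tokens = {
        "root": [
            (r"(py> )(.*\n)", bygroups(Generic.Prompt, using(PythonLexer))),
            (r".*\n", Text),
        ],
    }

for token_type, value in PromptLexer().get_tokens("py> x = 1\nplain text\n"):
    print(token_type, repr(value))
```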

---

id: 8,612
repo: wakatime/wakatime
path: wakatime/packages/pygments/lexer.py
func_name: do_insertions
language: python
code:

```python
def do_insertions(insertions, tokens):
"""
Helper for lexers which must combine the results of several
sublexers.
``insertions`` is a list of ``(index, itokens)`` pairs.
Each ``itokens`` iterable should be inserted at position
``index`` into the token stream given by the ``tokens``
argument.
The result is a combined token stream.
TODO: clean up the code here.
"""
insertions = iter(insertions)
try:
index, itokens = next(insertions)
except StopIteration:
# no insertions
for item in tokens:
yield item
return
realpos = None
insleft = True
# iterate over the token stream where we want to insert
# the tokens from the insertion list.
for i, t, v in tokens:
        # first iteration: store the position of the first item
if realpos is None:
realpos = i
oldi = 0
while insleft and i + len(v) >= index:
tmpval = v[oldi:index - i]
yield realpos, t, tmpval
realpos += len(tmpval)
for it_index, it_token, it_value in itokens:
yield realpos, it_token, it_value
realpos += len(it_value)
oldi = index - i
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
yield realpos, t, v[oldi:]
realpos += len(v) - oldi
# leftover tokens
while insleft:
# no normal tokens, set realpos to zero
realpos = realpos or 0
for p, t, v in itokens:
yield realpos, t, v
realpos += len(v)
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break
```
docstring: Helper for lexers which must combine the results of several
sublexers.
``insertions`` is a list of ``(index, itokens)`` pairs.
Each ``itokens`` iterable should be inserted at position
``index`` into the token stream given by the ``tokens``
argument.
The result is a combined token stream.
TODO: clean up the code here.
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexer.py#L758-L818
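
A minimal sketch (not part of the dataset): splicing a prompt token in front of a lexed line, the way Pygments' console lexers use `do_insertions`:

```python
from pygments.lexer import do_insertions
from pygments.lexers import PythonLexer
from pygments.token import Generic

code_tokens = PythonLexer().get_tokens_unprocessed("print(1)\n")
# One insertion: a ">>> " prompt token at position 0 of the combined stream.
insertions = [(0, [(0, Generic.Prompt, ">>> ")])]

for pos, token_type, value in do_insertions(insertions, code_tokens):
    print(pos, token_type, repr(value))
```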

---

id: 8,613
repo: wakatime/wakatime
path: wakatime/packages/pygments/lexer.py
func_name: RegexLexerMeta._process_regex
language: python
code:

```python
def _process_regex(cls, regex, rflags, state):
"""Preprocess the regular expression component of a token definition."""
if isinstance(regex, Future):
regex = regex.get()
return re.compile(regex, rflags).match
```
docstring: Preprocess the regular expression component of a token definition.
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexer.py#L423-L427
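
In mainline Pygments, `words()` is the stock `Future` subclass that `_process_regex` resolves with `.get()`. A minimal sketch (not part of the dataset), assuming that helper is available in this vendored copy:

```python
from pygments.lexer import words

future = words(("if", "elif", "else"), suffix=r"\b")
print(future.get())  # the combined regex source that gets compiled
```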

---

id: 8,614
repo: wakatime/wakatime
path: wakatime/packages/pygments/lexer.py
func_name: RegexLexerMeta._process_token
language: python
code:

```python
def _process_token(cls, token):
"""Preprocess the token component of a token definition."""
assert type(token) is _TokenType or callable(token), \
'token type must be simple type or callable, not %r' % (token,)
return token
```
docstring: Preprocess the token component of a token definition.
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexer.py#L429-L433

---

id: 8,615
repo: wakatime/wakatime
path: wakatime/packages/pygments/lexer.py
func_name: RegexLexerMeta._process_new_state
language: python
code:

```python
def _process_new_state(cls, new_state, unprocessed, processed):
"""Preprocess the state transition action of a token definition."""
if isinstance(new_state, str):
# an existing state
if new_state == '#pop':
return -1
elif new_state in unprocessed:
return (new_state,)
elif new_state == '#push':
return new_state
elif new_state[:5] == '#pop:':
return -int(new_state[5:])
else:
assert False, 'unknown new state %r' % new_state
elif isinstance(new_state, combined):
# combine a new state from existing ones
tmp_state = '_tmp_%d' % cls._tmpname
cls._tmpname += 1
itokens = []
for istate in new_state:
assert istate != new_state, 'circular state ref %r' % istate
itokens.extend(cls._process_state(unprocessed,
processed, istate))
processed[tmp_state] = itokens
return (tmp_state,)
elif isinstance(new_state, tuple):
# push more than one state
for istate in new_state:
assert (istate in unprocessed or
istate in ('#pop', '#push')), \
'unknown new state ' + istate
return new_state
else:
assert False, 'unknown new state def %r' % new_state
```
Preprocess the state transition action of a token definition.
|
[
"Preprocess",
"the",
"state",
"transition",
"action",
"of",
"a",
"token",
"definition",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexer.py#L435-L468
|
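For context on the new_state values this method handles, here is a minimal illustrative lexer (not from this repository) exercising each form it accepts: a plain state name, '#pop', '#pop:n', '#push', a tuple of states, and a combined() state.

from pygments.lexer import RegexLexer, combined
from pygments.token import Text, String, Name

class DemoLexer(RegexLexer):
    # Illustrative only: one rule per new_state form _process_new_state accepts.
    tokens = {
        'root': [
            (r'"', String, 'string'),                 # push one state by name
            (r'\(', Text, ('paren', 'expr')),         # tuple: push several states
            (r'<', Text, combined('expr', 'paren')),  # anonymous combined state
        ],
        'string': [
            (r'[^"]+', String),
            (r'"', String, '#pop'),                   # pop one state
        ],
        'expr': [
            (r'!', Text, '#push'),                    # re-push the current state
            (r'\w+', Name, '#pop:2'),                 # pop two states at once
        ],
        'paren': [
            (r'\)', Text, '#pop'),
        ],
    }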
8,616
|
wakatime/wakatime
|
wakatime/packages/pygments/lexer.py
|
RegexLexerMeta._process_state
|
def _process_state(cls, unprocessed, processed, state):
"""Preprocess a single state definition."""
assert type(state) is str, "wrong state name %r" % state
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
tokens = processed[state] = []
rflags = cls.flags
for tdef in unprocessed[state]:
if isinstance(tdef, include):
# it's a state reference
assert tdef != state, "circular state reference %r" % state
tokens.extend(cls._process_state(unprocessed, processed,
str(tdef)))
continue
if isinstance(tdef, _inherit):
# should be processed already, but may not in the case of:
# 1. the state has no counterpart in any parent
# 2. the state includes more than one 'inherit'
continue
if isinstance(tdef, default):
new_state = cls._process_new_state(tdef.state, unprocessed, processed)
tokens.append((re.compile('').match, None, new_state))
continue
assert type(tdef) is tuple, "wrong rule def %r" % tdef
try:
rex = cls._process_regex(tdef[0], rflags, state)
except Exception as err:
raise ValueError("uncompilable regex %r in state %r of %r: %s" %
(tdef[0], state, cls, err))
token = cls._process_token(tdef[1])
if len(tdef) == 2:
new_state = None
else:
new_state = cls._process_new_state(tdef[2],
unprocessed, processed)
tokens.append((rex, token, new_state))
return tokens
|
python
|
def _process_state(cls, unprocessed, processed, state):
"""Preprocess a single state definition."""
assert type(state) is str, "wrong state name %r" % state
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
tokens = processed[state] = []
rflags = cls.flags
for tdef in unprocessed[state]:
if isinstance(tdef, include):
# it's a state reference
assert tdef != state, "circular state reference %r" % state
tokens.extend(cls._process_state(unprocessed, processed,
str(tdef)))
continue
if isinstance(tdef, _inherit):
# should be processed already, but may not in the case of:
# 1. the state has no counterpart in any parent
# 2. the state includes more than one 'inherit'
continue
if isinstance(tdef, default):
new_state = cls._process_new_state(tdef.state, unprocessed, processed)
tokens.append((re.compile('').match, None, new_state))
continue
assert type(tdef) is tuple, "wrong rule def %r" % tdef
try:
rex = cls._process_regex(tdef[0], rflags, state)
except Exception as err:
raise ValueError("uncompilable regex %r in state %r of %r: %s" %
(tdef[0], state, cls, err))
token = cls._process_token(tdef[1])
if len(tdef) == 2:
new_state = None
else:
new_state = cls._process_new_state(tdef[2],
unprocessed, processed)
tokens.append((rex, token, new_state))
return tokens
|
[
"def",
"_process_state",
"(",
"cls",
",",
"unprocessed",
",",
"processed",
",",
"state",
")",
":",
"assert",
"type",
"(",
"state",
")",
"is",
"str",
",",
"\"wrong state name %r\"",
"%",
"state",
"assert",
"state",
"[",
"0",
"]",
"!=",
"'#'",
",",
"\"invalid state name %r\"",
"%",
"state",
"if",
"state",
"in",
"processed",
":",
"return",
"processed",
"[",
"state",
"]",
"tokens",
"=",
"processed",
"[",
"state",
"]",
"=",
"[",
"]",
"rflags",
"=",
"cls",
".",
"flags",
"for",
"tdef",
"in",
"unprocessed",
"[",
"state",
"]",
":",
"if",
"isinstance",
"(",
"tdef",
",",
"include",
")",
":",
"# it's a state reference",
"assert",
"tdef",
"!=",
"state",
",",
"\"circular state reference %r\"",
"%",
"state",
"tokens",
".",
"extend",
"(",
"cls",
".",
"_process_state",
"(",
"unprocessed",
",",
"processed",
",",
"str",
"(",
"tdef",
")",
")",
")",
"continue",
"if",
"isinstance",
"(",
"tdef",
",",
"_inherit",
")",
":",
"# should be processed already, but may not in the case of:",
"# 1. the state has no counterpart in any parent",
"# 2. the state includes more than one 'inherit'",
"continue",
"if",
"isinstance",
"(",
"tdef",
",",
"default",
")",
":",
"new_state",
"=",
"cls",
".",
"_process_new_state",
"(",
"tdef",
".",
"state",
",",
"unprocessed",
",",
"processed",
")",
"tokens",
".",
"append",
"(",
"(",
"re",
".",
"compile",
"(",
"''",
")",
".",
"match",
",",
"None",
",",
"new_state",
")",
")",
"continue",
"assert",
"type",
"(",
"tdef",
")",
"is",
"tuple",
",",
"\"wrong rule def %r\"",
"%",
"tdef",
"try",
":",
"rex",
"=",
"cls",
".",
"_process_regex",
"(",
"tdef",
"[",
"0",
"]",
",",
"rflags",
",",
"state",
")",
"except",
"Exception",
"as",
"err",
":",
"raise",
"ValueError",
"(",
"\"uncompilable regex %r in state %r of %r: %s\"",
"%",
"(",
"tdef",
"[",
"0",
"]",
",",
"state",
",",
"cls",
",",
"err",
")",
")",
"token",
"=",
"cls",
".",
"_process_token",
"(",
"tdef",
"[",
"1",
"]",
")",
"if",
"len",
"(",
"tdef",
")",
"==",
"2",
":",
"new_state",
"=",
"None",
"else",
":",
"new_state",
"=",
"cls",
".",
"_process_new_state",
"(",
"tdef",
"[",
"2",
"]",
",",
"unprocessed",
",",
"processed",
")",
"tokens",
".",
"append",
"(",
"(",
"rex",
",",
"token",
",",
"new_state",
")",
")",
"return",
"tokens"
] |
Preprocess a single state definition.
|
[
"Preprocess",
"a",
"single",
"state",
"definition",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexer.py#L470-L512
|
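The include and default branches handled above are easiest to see in a token table. A small sketch (illustrative classes, not from this repository):

from pygments.lexer import RegexLexer, include, default
from pygments.token import Comment, Text, Whitespace

class IncludeDemoLexer(RegexLexer):
    tokens = {
        'whitespace': [
            (r'\s+', Whitespace),
        ],
        'root': [
            include('whitespace'),   # expanded inline by _process_state
            (r'#.*$', Comment),
            default('fallback'),     # becomes (re.compile('').match, None, state)
        ],
        'fallback': [
            (r'.', Text, '#pop'),
        ],
    }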
8,617
|
wakatime/wakatime
|
wakatime/packages/pygments/lexer.py
|
RegexLexerMeta.process_tokendef
|
def process_tokendef(cls, name, tokendefs=None):
"""Preprocess a dictionary of token definitions."""
processed = cls._all_tokens[name] = {}
tokendefs = tokendefs or cls.tokens[name]
for state in list(tokendefs):
cls._process_state(tokendefs, processed, state)
return processed
|
python
|
def process_tokendef(cls, name, tokendefs=None):
"""Preprocess a dictionary of token definitions."""
processed = cls._all_tokens[name] = {}
tokendefs = tokendefs or cls.tokens[name]
for state in list(tokendefs):
cls._process_state(tokendefs, processed, state)
return processed
|
[
"def",
"process_tokendef",
"(",
"cls",
",",
"name",
",",
"tokendefs",
"=",
"None",
")",
":",
"processed",
"=",
"cls",
".",
"_all_tokens",
"[",
"name",
"]",
"=",
"{",
"}",
"tokendefs",
"=",
"tokendefs",
"or",
"cls",
".",
"tokens",
"[",
"name",
"]",
"for",
"state",
"in",
"list",
"(",
"tokendefs",
")",
":",
"cls",
".",
"_process_state",
"(",
"tokendefs",
",",
"processed",
",",
"state",
")",
"return",
"processed"
] |
Preprocess a dictionary of token definitions.
|
[
"Preprocess",
"a",
"dictionary",
"of",
"token",
"definitions",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexer.py#L514-L520
|
8,618
|
wakatime/wakatime
|
wakatime/packages/pygments/lexer.py
|
RegexLexerMeta.get_tokendefs
|
def get_tokendefs(cls):
"""
Merge tokens from superclasses in MRO order, returning a single tokendef
dictionary.
Any state that is not defined by a subclass will be inherited
automatically. States that *are* defined by subclasses will, by
default, override that state in the superclass. If a subclass wishes to
inherit definitions from a superclass, it can use the special value
"inherit", which will cause the superclass' state definition to be
included at that point in the state.
"""
tokens = {}
inheritable = {}
for c in cls.__mro__:
toks = c.__dict__.get('tokens', {})
for state, items in iteritems(toks):
curitems = tokens.get(state)
if curitems is None:
# N.b. because this is assigned by reference, sufficiently
# deep hierarchies are processed incrementally (e.g. for
# A(B), B(C), C(RegexLexer), B will be premodified so X(B)
# will not see any inherits in B).
tokens[state] = items
try:
inherit_ndx = items.index(inherit)
except ValueError:
continue
inheritable[state] = inherit_ndx
continue
inherit_ndx = inheritable.pop(state, None)
if inherit_ndx is None:
continue
# Replace the "inherit" value with the items
curitems[inherit_ndx:inherit_ndx+1] = items
try:
# N.b. this is the index in items (that is, the superclass
# copy), so offset required when storing below.
new_inh_ndx = items.index(inherit)
except ValueError:
pass
else:
inheritable[state] = inherit_ndx + new_inh_ndx
return tokens
|
python
|
def get_tokendefs(cls):
"""
Merge tokens from superclasses in MRO order, returning a single tokendef
dictionary.
Any state that is not defined by a subclass will be inherited
automatically. States that *are* defined by subclasses will, by
default, override that state in the superclass. If a subclass wishes to
inherit definitions from a superclass, it can use the special value
"inherit", which will cause the superclass' state definition to be
included at that point in the state.
"""
tokens = {}
inheritable = {}
for c in cls.__mro__:
toks = c.__dict__.get('tokens', {})
for state, items in iteritems(toks):
curitems = tokens.get(state)
if curitems is None:
# N.b. because this is assigned by reference, sufficiently
# deep hierarchies are processed incrementally (e.g. for
# A(B), B(C), C(RegexLexer), B will be premodified so X(B)
# will not see any inherits in B).
tokens[state] = items
try:
inherit_ndx = items.index(inherit)
except ValueError:
continue
inheritable[state] = inherit_ndx
continue
inherit_ndx = inheritable.pop(state, None)
if inherit_ndx is None:
continue
# Replace the "inherit" value with the items
curitems[inherit_ndx:inherit_ndx+1] = items
try:
# N.b. this is the index in items (that is, the superclass
# copy), so offset required when storing below.
new_inh_ndx = items.index(inherit)
except ValueError:
pass
else:
inheritable[state] = inherit_ndx + new_inh_ndx
return tokens
|
[
"def",
"get_tokendefs",
"(",
"cls",
")",
":",
"tokens",
"=",
"{",
"}",
"inheritable",
"=",
"{",
"}",
"for",
"c",
"in",
"cls",
".",
"__mro__",
":",
"toks",
"=",
"c",
".",
"__dict__",
".",
"get",
"(",
"'tokens'",
",",
"{",
"}",
")",
"for",
"state",
",",
"items",
"in",
"iteritems",
"(",
"toks",
")",
":",
"curitems",
"=",
"tokens",
".",
"get",
"(",
"state",
")",
"if",
"curitems",
"is",
"None",
":",
"# N.b. because this is assigned by reference, sufficiently",
"# deep hierarchies are processed incrementally (e.g. for",
"# A(B), B(C), C(RegexLexer), B will be premodified so X(B)",
"# will not see any inherits in B).",
"tokens",
"[",
"state",
"]",
"=",
"items",
"try",
":",
"inherit_ndx",
"=",
"items",
".",
"index",
"(",
"inherit",
")",
"except",
"ValueError",
":",
"continue",
"inheritable",
"[",
"state",
"]",
"=",
"inherit_ndx",
"continue",
"inherit_ndx",
"=",
"inheritable",
".",
"pop",
"(",
"state",
",",
"None",
")",
"if",
"inherit_ndx",
"is",
"None",
":",
"continue",
"# Replace the \"inherit\" value with the items",
"curitems",
"[",
"inherit_ndx",
":",
"inherit_ndx",
"+",
"1",
"]",
"=",
"items",
"try",
":",
"# N.b. this is the index in items (that is, the superclass",
"# copy), so offset required when storing below.",
"new_inh_ndx",
"=",
"items",
".",
"index",
"(",
"inherit",
")",
"except",
"ValueError",
":",
"pass",
"else",
":",
"inheritable",
"[",
"state",
"]",
"=",
"inherit_ndx",
"+",
"new_inh_ndx",
"return",
"tokens"
] |
Merge tokens from superclasses in MRO order, returning a single tokendef
dictionary.
Any state that is not defined by a subclass will be inherited
automatically. States that *are* defined by subclasses will, by
default, override that state in the superclass. If a subclass wishes to
inherit definitions from a superclass, it can use the special value
"inherit", which will cause the superclass' state definition to be
included at that point in the state.
|
[
"Merge",
"tokens",
"from",
"superclasses",
"in",
"MRO",
"order",
"returning",
"a",
"single",
"tokendef",
"dictionary",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexer.py#L522-L569
|
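A sketch of the inherit mechanism the docstring describes (illustrative classes, not from this repository):

from pygments.lexer import RegexLexer, inherit
from pygments.token import Keyword, Name

class BaseDemoLexer(RegexLexer):
    tokens = {
        'root': [
            (r'\bif\b', Keyword),
        ],
    }

class ChildDemoLexer(BaseDemoLexer):
    tokens = {
        'root': [
            (r'\bunless\b', Keyword),
            inherit,        # get_tokendefs() splices BaseDemoLexer's rules here
            (r'\w+', Name),
        ],
    }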
8,619
|
wakatime/wakatime
|
wakatime/packages/pytz/tzinfo.py
|
memorized_timedelta
|
def memorized_timedelta(seconds):
'''Create only one instance of each distinct timedelta'''
try:
return _timedelta_cache[seconds]
except KeyError:
delta = timedelta(seconds=seconds)
_timedelta_cache[seconds] = delta
return delta
|
python
|
def memorized_timedelta(seconds):
'''Create only one instance of each distinct timedelta'''
try:
return _timedelta_cache[seconds]
except KeyError:
delta = timedelta(seconds=seconds)
_timedelta_cache[seconds] = delta
return delta
|
[
"def",
"memorized_timedelta",
"(",
"seconds",
")",
":",
"try",
":",
"return",
"_timedelta_cache",
"[",
"seconds",
"]",
"except",
"KeyError",
":",
"delta",
"=",
"timedelta",
"(",
"seconds",
"=",
"seconds",
")",
"_timedelta_cache",
"[",
"seconds",
"]",
"=",
"delta",
"return",
"delta"
] |
Create only one instance of each distinct timedelta
|
[
"Create",
"only",
"one",
"instance",
"of",
"each",
"distinct",
"timedelta"
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pytz/tzinfo.py#L16-L23
|
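The function relies on a module-level cache dict. A self-contained sketch of the same interning pattern (the cache name matches the code above; the assertion is illustrative):

from datetime import timedelta

_timedelta_cache = {}

def memorized_timedelta(seconds):
    '''Create only one instance of each distinct timedelta'''
    try:
        return _timedelta_cache[seconds]
    except KeyError:
        delta = timedelta(seconds=seconds)
        _timedelta_cache[seconds] = delta
        return delta

# Interning makes identity checks cheap: repeated calls return the same object.
assert memorized_timedelta(3600) is memorized_timedelta(3600)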
8,620
|
wakatime/wakatime
|
wakatime/packages/pytz/tzinfo.py
|
memorized_datetime
|
def memorized_datetime(seconds):
'''Create only one instance of each distinct datetime'''
try:
return _datetime_cache[seconds]
except KeyError:
# NB. We can't just do datetime.utcfromtimestamp(seconds) as this
# fails with negative values under Windows (Bug #90096)
dt = _epoch + timedelta(seconds=seconds)
_datetime_cache[seconds] = dt
return dt
|
python
|
def memorized_datetime(seconds):
'''Create only one instance of each distinct datetime'''
try:
return _datetime_cache[seconds]
except KeyError:
# NB. We can't just do datetime.utcfromtimestamp(seconds) as this
# fails with negative values under Windows (Bug #90096)
dt = _epoch + timedelta(seconds=seconds)
_datetime_cache[seconds] = dt
return dt
|
[
"def",
"memorized_datetime",
"(",
"seconds",
")",
":",
"try",
":",
"return",
"_datetime_cache",
"[",
"seconds",
"]",
"except",
"KeyError",
":",
"# NB. We can't just do datetime.utcfromtimestamp(seconds) as this",
"# fails with negative values under Windows (Bug #90096)",
"dt",
"=",
"_epoch",
"+",
"timedelta",
"(",
"seconds",
"=",
"seconds",
")",
"_datetime_cache",
"[",
"seconds",
"]",
"=",
"dt",
"return",
"dt"
] |
Create only one instance of each distinct datetime
|
[
"Create",
"only",
"one",
"instance",
"of",
"each",
"distinct",
"datetime"
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pytz/tzinfo.py#L27-L36
|
8,621
|
wakatime/wakatime
|
wakatime/packages/pytz/tzinfo.py
|
memorized_ttinfo
|
def memorized_ttinfo(*args):
'''Create only one instance of each distinct tuple'''
try:
return _ttinfo_cache[args]
except KeyError:
ttinfo = (
memorized_timedelta(args[0]),
memorized_timedelta(args[1]),
args[2]
)
_ttinfo_cache[args] = ttinfo
return ttinfo
|
python
|
def memorized_ttinfo(*args):
'''Create only one instance of each distinct tuple'''
try:
return _ttinfo_cache[args]
except KeyError:
ttinfo = (
memorized_timedelta(args[0]),
memorized_timedelta(args[1]),
args[2]
)
_ttinfo_cache[args] = ttinfo
return ttinfo
|
[
"def",
"memorized_ttinfo",
"(",
"*",
"args",
")",
":",
"try",
":",
"return",
"_ttinfo_cache",
"[",
"args",
"]",
"except",
"KeyError",
":",
"ttinfo",
"=",
"(",
"memorized_timedelta",
"(",
"args",
"[",
"0",
"]",
")",
",",
"memorized_timedelta",
"(",
"args",
"[",
"1",
"]",
")",
",",
"args",
"[",
"2",
"]",
")",
"_ttinfo_cache",
"[",
"args",
"]",
"=",
"ttinfo",
"return",
"ttinfo"
] |
Create only one instance of each distinct tuple
|
[
"Create",
"only",
"one",
"instance",
"of",
"each",
"distinct",
"tuple"
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pytz/tzinfo.py#L39-L50
|
8,622
|
wakatime/wakatime
|
wakatime/packages/pytz/tzinfo.py
|
unpickler
|
def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
"""Factory function for unpickling pytz tzinfo instances.
This is shared for both StaticTzInfo and DstTzInfo instances, because
    database changes could cause a zone's implementation to switch between
these two base classes and we can't break pickles on a pytz version
upgrade.
"""
# Raises a KeyError if zone no longer exists, which should never happen
# and would be a bug.
tz = pytz.timezone(zone)
# A StaticTzInfo - just return it
if utcoffset is None:
return tz
# This pickle was created from a DstTzInfo. We need to
# determine which of the list of tzinfo instances for this zone
# to use in order to restore the state of any datetime instances using
# it correctly.
utcoffset = memorized_timedelta(utcoffset)
dstoffset = memorized_timedelta(dstoffset)
try:
return tz._tzinfos[(utcoffset, dstoffset, tzname)]
except KeyError:
# The particular state requested in this timezone no longer exists.
# This indicates a corrupt pickle, or the timezone database has been
# corrected violently enough to make this particular
# (utcoffset,dstoffset) no longer exist in the zone, or the
# abbreviation has been changed.
pass
# See if we can find an entry differing only by tzname. Abbreviations
# get changed from the initial guess by the database maintainers to
# match reality when this information is discovered.
for localized_tz in tz._tzinfos.values():
if (localized_tz._utcoffset == utcoffset
and localized_tz._dst == dstoffset):
return localized_tz
# This (utcoffset, dstoffset) information has been removed from the
# zone. Add it back. This might occur when the database maintainers have
# corrected incorrect information. datetime instances using this
# incorrect information will continue to do so, exactly as they were
# before being pickled. This is purely an overly paranoid safety net - I
    # doubt this will ever be needed in real life.
inf = (utcoffset, dstoffset, tzname)
tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos)
return tz._tzinfos[inf]
|
python
|
def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
"""Factory function for unpickling pytz tzinfo instances.
This is shared for both StaticTzInfo and DstTzInfo instances, because
    database changes could cause a zone's implementation to switch between
these two base classes and we can't break pickles on a pytz version
upgrade.
"""
# Raises a KeyError if zone no longer exists, which should never happen
# and would be a bug.
tz = pytz.timezone(zone)
# A StaticTzInfo - just return it
if utcoffset is None:
return tz
# This pickle was created from a DstTzInfo. We need to
# determine which of the list of tzinfo instances for this zone
# to use in order to restore the state of any datetime instances using
# it correctly.
utcoffset = memorized_timedelta(utcoffset)
dstoffset = memorized_timedelta(dstoffset)
try:
return tz._tzinfos[(utcoffset, dstoffset, tzname)]
except KeyError:
# The particular state requested in this timezone no longer exists.
# This indicates a corrupt pickle, or the timezone database has been
# corrected violently enough to make this particular
# (utcoffset,dstoffset) no longer exist in the zone, or the
# abbreviation has been changed.
pass
# See if we can find an entry differing only by tzname. Abbreviations
# get changed from the initial guess by the database maintainers to
# match reality when this information is discovered.
for localized_tz in tz._tzinfos.values():
if (localized_tz._utcoffset == utcoffset
and localized_tz._dst == dstoffset):
return localized_tz
# This (utcoffset, dstoffset) information has been removed from the
# zone. Add it back. This might occur when the database maintainers have
# corrected incorrect information. datetime instances using this
# incorrect information will continue to do so, exactly as they were
# before being pickled. This is purely an overly paranoid safety net - I
    # doubt this will ever be needed in real life.
inf = (utcoffset, dstoffset, tzname)
tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos)
return tz._tzinfos[inf]
|
[
"def",
"unpickler",
"(",
"zone",
",",
"utcoffset",
"=",
"None",
",",
"dstoffset",
"=",
"None",
",",
"tzname",
"=",
"None",
")",
":",
"# Raises a KeyError if zone no longer exists, which should never happen",
"# and would be a bug.",
"tz",
"=",
"pytz",
".",
"timezone",
"(",
"zone",
")",
"# A StaticTzInfo - just return it",
"if",
"utcoffset",
"is",
"None",
":",
"return",
"tz",
"# This pickle was created from a DstTzInfo. We need to",
"# determine which of the list of tzinfo instances for this zone",
"# to use in order to restore the state of any datetime instances using",
"# it correctly.",
"utcoffset",
"=",
"memorized_timedelta",
"(",
"utcoffset",
")",
"dstoffset",
"=",
"memorized_timedelta",
"(",
"dstoffset",
")",
"try",
":",
"return",
"tz",
".",
"_tzinfos",
"[",
"(",
"utcoffset",
",",
"dstoffset",
",",
"tzname",
")",
"]",
"except",
"KeyError",
":",
"# The particular state requested in this timezone no longer exists.",
"# This indicates a corrupt pickle, or the timezone database has been",
"# corrected violently enough to make this particular",
"# (utcoffset,dstoffset) no longer exist in the zone, or the",
"# abbreviation has been changed.",
"pass",
"# See if we can find an entry differing only by tzname. Abbreviations",
"# get changed from the initial guess by the database maintainers to",
"# match reality when this information is discovered.",
"for",
"localized_tz",
"in",
"tz",
".",
"_tzinfos",
".",
"values",
"(",
")",
":",
"if",
"(",
"localized_tz",
".",
"_utcoffset",
"==",
"utcoffset",
"and",
"localized_tz",
".",
"_dst",
"==",
"dstoffset",
")",
":",
"return",
"localized_tz",
"# This (utcoffset, dstoffset) information has been removed from the",
"# zone. Add it back. This might occur when the database maintainers have",
"# corrected incorrect information. datetime instances using this",
"# incorrect information will continue to do so, exactly as they were",
"# before being pickled. This is purely an overly paranoid safety net - I",
"# doubt this will ever been needed in real life.",
"inf",
"=",
"(",
"utcoffset",
",",
"dstoffset",
",",
"tzname",
")",
"tz",
".",
"_tzinfos",
"[",
"inf",
"]",
"=",
"tz",
".",
"__class__",
"(",
"inf",
",",
"tz",
".",
"_tzinfos",
")",
"return",
"tz",
".",
"_tzinfos",
"[",
"inf",
"]"
] |
Factory function for unpickling pytz tzinfo instances.
This is shared for both StaticTzInfo and DstTzInfo instances, because
database changes could cause a zone's implementation to switch between
these two base classes and we can't break pickles on a pytz version
upgrade.
|
[
"Factory",
"function",
"for",
"unpickling",
"pytz",
"tzinfo",
"instances",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pytz/tzinfo.py#L516-L564
|
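A short round-trip showing why unpickler() exists; the zone choice is arbitrary, and the identity assertion assumes the interning behavior described in the code above:

import pickle
from datetime import datetime
import pytz

eastern = pytz.timezone('US/Eastern')
dt = eastern.localize(datetime(2009, 6, 1, 12, 0))
# Unpickling routes through unpickler(), which returns the interned tzinfo.
restored = pickle.loads(pickle.dumps(dt))
assert restored.tzinfo is dt.tzinfo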
8,623
|
wakatime/wakatime
|
wakatime/packages/pytz/tzinfo.py
|
DstTzInfo.utcoffset
|
def utcoffset(self, dt, is_dst=None):
'''See datetime.tzinfo.utcoffset
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.utcoffset(ambiguous, is_dst=False)
datetime.timedelta(-1, 73800)
>>> tz.utcoffset(ambiguous, is_dst=True)
datetime.timedelta(-1, 77400)
>>> try:
... tz.utcoffset(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._utcoffset
else:
return self._utcoffset
|
python
|
def utcoffset(self, dt, is_dst=None):
'''See datetime.tzinfo.utcoffset
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.utcoffset(ambiguous, is_dst=False)
datetime.timedelta(-1, 73800)
>>> tz.utcoffset(ambiguous, is_dst=True)
datetime.timedelta(-1, 77400)
>>> try:
... tz.utcoffset(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._utcoffset
else:
return self._utcoffset
|
[
"def",
"utcoffset",
"(",
"self",
",",
"dt",
",",
"is_dst",
"=",
"None",
")",
":",
"if",
"dt",
"is",
"None",
":",
"return",
"None",
"elif",
"dt",
".",
"tzinfo",
"is",
"not",
"self",
":",
"dt",
"=",
"self",
".",
"localize",
"(",
"dt",
",",
"is_dst",
")",
"return",
"dt",
".",
"tzinfo",
".",
"_utcoffset",
"else",
":",
"return",
"self",
".",
"_utcoffset"
] |
See datetime.tzinfo.utcoffset
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.utcoffset(ambiguous, is_dst=False)
datetime.timedelta(-1, 73800)
>>> tz.utcoffset(ambiguous, is_dst=True)
datetime.timedelta(-1, 77400)
>>> try:
... tz.utcoffset(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
|
[
"See",
"datetime",
".",
"tzinfo",
".",
"utcoffset"
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pytz/tzinfo.py#L382-L411
|
8,624
|
wakatime/wakatime
|
wakatime/packages/pytz/tzinfo.py
|
DstTzInfo.dst
|
def dst(self, dt, is_dst=None):
'''See datetime.tzinfo.dst
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.dst(normal)
datetime.timedelta(0, 3600)
>>> tz.dst(normal, is_dst=False)
datetime.timedelta(0, 3600)
>>> tz.dst(normal, is_dst=True)
datetime.timedelta(0, 3600)
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.dst(ambiguous, is_dst=False)
datetime.timedelta(0)
>>> tz.dst(ambiguous, is_dst=True)
datetime.timedelta(0, 3600)
>>> try:
... tz.dst(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._dst
else:
return self._dst
|
python
|
def dst(self, dt, is_dst=None):
'''See datetime.tzinfo.dst
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.dst(normal)
datetime.timedelta(0, 3600)
>>> tz.dst(normal, is_dst=False)
datetime.timedelta(0, 3600)
>>> tz.dst(normal, is_dst=True)
datetime.timedelta(0, 3600)
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.dst(ambiguous, is_dst=False)
datetime.timedelta(0)
>>> tz.dst(ambiguous, is_dst=True)
datetime.timedelta(0, 3600)
>>> try:
... tz.dst(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._dst
else:
return self._dst
|
[
"def",
"dst",
"(",
"self",
",",
"dt",
",",
"is_dst",
"=",
"None",
")",
":",
"if",
"dt",
"is",
"None",
":",
"return",
"None",
"elif",
"dt",
".",
"tzinfo",
"is",
"not",
"self",
":",
"dt",
"=",
"self",
".",
"localize",
"(",
"dt",
",",
"is_dst",
")",
"return",
"dt",
".",
"tzinfo",
".",
"_dst",
"else",
":",
"return",
"self",
".",
"_dst"
] |
See datetime.tzinfo.dst
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.dst(normal)
datetime.timedelta(0, 3600)
>>> tz.dst(normal, is_dst=False)
datetime.timedelta(0, 3600)
>>> tz.dst(normal, is_dst=True)
datetime.timedelta(0, 3600)
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.dst(ambiguous, is_dst=False)
datetime.timedelta(0)
>>> tz.dst(ambiguous, is_dst=True)
datetime.timedelta(0, 3600)
>>> try:
... tz.dst(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
|
[
"See",
"datetime",
".",
"tzinfo",
".",
"dst"
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pytz/tzinfo.py#L413-L450
|
8,625
|
wakatime/wakatime
|
wakatime/packages/pytz/tzinfo.py
|
DstTzInfo.tzname
|
def tzname(self, dt, is_dst=None):
'''See datetime.tzinfo.tzname
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.tzname(normal)
'NDT'
>>> tz.tzname(normal, is_dst=False)
'NDT'
>>> tz.tzname(normal, is_dst=True)
'NDT'
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.tzname(ambiguous, is_dst=False)
'NST'
>>> tz.tzname(ambiguous, is_dst=True)
'NDT'
>>> try:
... tz.tzname(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return self.zone
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._tzname
else:
return self._tzname
|
python
|
def tzname(self, dt, is_dst=None):
'''See datetime.tzinfo.tzname
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.tzname(normal)
'NDT'
>>> tz.tzname(normal, is_dst=False)
'NDT'
>>> tz.tzname(normal, is_dst=True)
'NDT'
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.tzname(ambiguous, is_dst=False)
'NST'
>>> tz.tzname(ambiguous, is_dst=True)
'NDT'
>>> try:
... tz.tzname(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return self.zone
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._tzname
else:
return self._tzname
|
[
"def",
"tzname",
"(",
"self",
",",
"dt",
",",
"is_dst",
"=",
"None",
")",
":",
"if",
"dt",
"is",
"None",
":",
"return",
"self",
".",
"zone",
"elif",
"dt",
".",
"tzinfo",
"is",
"not",
"self",
":",
"dt",
"=",
"self",
".",
"localize",
"(",
"dt",
",",
"is_dst",
")",
"return",
"dt",
".",
"tzinfo",
".",
"_tzname",
"else",
":",
"return",
"self",
".",
"_tzname"
] |
See datetime.tzinfo.tzname
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.tzname(normal)
'NDT'
>>> tz.tzname(normal, is_dst=False)
'NDT'
>>> tz.tzname(normal, is_dst=True)
'NDT'
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.tzname(ambiguous, is_dst=False)
'NST'
>>> tz.tzname(ambiguous, is_dst=True)
'NDT'
>>> try:
... tz.tzname(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
|
[
"See",
"datetime",
".",
"tzinfo",
".",
"tzname"
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pytz/tzinfo.py#L452-L488
|
8,626
|
slhck/ffmpeg-normalize
|
ffmpeg_normalize/_ffmpeg_normalize.py
|
check_range
|
def check_range(number, min_r, max_r, name=""):
"""
Check if a number is within a given range
"""
try:
number = float(number)
if number < min_r or number > max_r:
raise FFmpegNormalizeError(
"{} must be within [{},{}]".format(
name, min_r, max_r
)
)
return number
except Exception as e:
raise e
|
python
|
def check_range(number, min_r, max_r, name=""):
"""
Check if a number is within a given range
"""
try:
number = float(number)
if number < min_r or number > max_r:
raise FFmpegNormalizeError(
"{} must be within [{},{}]".format(
name, min_r, max_r
)
)
return number
except Exception as e:
raise e
|
[
"def",
"check_range",
"(",
"number",
",",
"min_r",
",",
"max_r",
",",
"name",
"=",
"\"\"",
")",
":",
"try",
":",
"number",
"=",
"float",
"(",
"number",
")",
"if",
"number",
"<",
"min_r",
"or",
"number",
">",
"max_r",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"{} must be within [{},{}]\"",
".",
"format",
"(",
"name",
",",
"min_r",
",",
"max_r",
")",
")",
"return",
"number",
"pass",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e"
] |
Check if a number is within a given range
|
[
"Check",
"if",
"a",
"number",
"is",
"within",
"a",
"given",
"range"
] |
18477a7f2d092777ee238340be40c04ecb45c132
|
https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_ffmpeg_normalize.py#L16-L31
|
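Illustrative use of check_range, assuming it is imported from the module above; the numeric values are made up:

from ffmpeg_normalize._ffmpeg_normalize import check_range

level = check_range(-23, -70, 5, name="target_level")  # returns -23.0
try:
    check_range(99, -70, 5, name="target_level")       # out of range
except Exception as err:
    print(err)  # "target_level must be within [-70,5]"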
8,627
|
slhck/ffmpeg-normalize
|
ffmpeg_normalize/_ffmpeg_normalize.py
|
FFmpegNormalize.add_media_file
|
def add_media_file(self, input_file, output_file):
"""
Add a media file to normalize
Arguments:
input_file {str} -- Path to input file
output_file {str} -- Path to output file
"""
if not os.path.exists(input_file):
raise FFmpegNormalizeError("file " + input_file + " does not exist")
ext = os.path.splitext(output_file)[1][1:]
if (self.audio_codec is None or 'pcm' in self.audio_codec) and ext in PCM_INCOMPATIBLE_EXTS:
raise FFmpegNormalizeError(
"Output extension {} does not support PCM audio. Please choose a suitable audio codec with the -c:a option.".format(ext)
)
mf = MediaFile(self, input_file, output_file)
self.media_files.append(mf)
self.file_count += 1
|
python
|
def add_media_file(self, input_file, output_file):
"""
Add a media file to normalize
Arguments:
input_file {str} -- Path to input file
output_file {str} -- Path to output file
"""
if not os.path.exists(input_file):
raise FFmpegNormalizeError("file " + input_file + " does not exist")
ext = os.path.splitext(output_file)[1][1:]
if (self.audio_codec is None or 'pcm' in self.audio_codec) and ext in PCM_INCOMPATIBLE_EXTS:
raise FFmpegNormalizeError(
"Output extension {} does not support PCM audio. Please choose a suitable audio codec with the -c:a option.".format(ext)
)
mf = MediaFile(self, input_file, output_file)
self.media_files.append(mf)
self.file_count += 1
|
[
"def",
"add_media_file",
"(",
"self",
",",
"input_file",
",",
"output_file",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"input_file",
")",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"file \"",
"+",
"input_file",
"+",
"\" does not exist\"",
")",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"output_file",
")",
"[",
"1",
"]",
"[",
"1",
":",
"]",
"if",
"(",
"self",
".",
"audio_codec",
"is",
"None",
"or",
"'pcm'",
"in",
"self",
".",
"audio_codec",
")",
"and",
"ext",
"in",
"PCM_INCOMPATIBLE_EXTS",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"Output extension {} does not support PCM audio. Please choose a suitable audio codec with the -c:a option.\"",
".",
"format",
"(",
"ext",
")",
")",
"mf",
"=",
"MediaFile",
"(",
"self",
",",
"input_file",
",",
"output_file",
")",
"self",
".",
"media_files",
".",
"append",
"(",
"mf",
")",
"self",
".",
"file_count",
"+=",
"1"
] |
Add a media file to normalize
Arguments:
input_file {str} -- Path to input file
output_file {str} -- Path to output file
|
[
"Add",
"a",
"media",
"file",
"to",
"normalize"
] |
18477a7f2d092777ee238340be40c04ecb45c132
|
https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_ffmpeg_normalize.py#L141-L161
|
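A hedged usage sketch tying add_media_file to run_normalization (the next entry); constructor defaults and file names are assumed:

from ffmpeg_normalize import FFmpegNormalize

norm = FFmpegNormalize()                           # defaults assumed
norm.add_media_file('input.mp4', 'out/input.mkv')  # hypothetical paths
norm.run_normalization()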
8,628
|
slhck/ffmpeg-normalize
|
ffmpeg_normalize/_ffmpeg_normalize.py
|
FFmpegNormalize.run_normalization
|
def run_normalization(self):
"""
Run the normalization procedures
"""
for index, media_file in enumerate(
tqdm(
self.media_files,
desc="File",
disable=not self.progress,
position=0
)):
logger.info("Normalizing file {} ({} of {})".format(media_file, index + 1, self.file_count))
media_file.run_normalization()
logger.info("Normalized file written to {}".format(media_file.output_file))
|
python
|
def run_normalization(self):
"""
Run the normalization procedures
"""
for index, media_file in enumerate(
tqdm(
self.media_files,
desc="File",
disable=not self.progress,
position=0
)):
logger.info("Normalizing file {} ({} of {})".format(media_file, index + 1, self.file_count))
media_file.run_normalization()
logger.info("Normalized file written to {}".format(media_file.output_file))
|
[
"def",
"run_normalization",
"(",
"self",
")",
":",
"for",
"index",
",",
"media_file",
"in",
"enumerate",
"(",
"tqdm",
"(",
"self",
".",
"media_files",
",",
"desc",
"=",
"\"File\"",
",",
"disable",
"=",
"not",
"self",
".",
"progress",
",",
"position",
"=",
"0",
")",
")",
":",
"logger",
".",
"info",
"(",
"\"Normalizing file {} ({} of {})\"",
".",
"format",
"(",
"media_file",
",",
"index",
"+",
"1",
",",
"self",
".",
"file_count",
")",
")",
"media_file",
".",
"run_normalization",
"(",
")",
"logger",
".",
"info",
"(",
"\"Normalized file written to {}\"",
".",
"format",
"(",
"media_file",
".",
"output_file",
")",
")"
] |
Run the normalization procedures
|
[
"Run",
"the",
"normalization",
"procedures"
] |
18477a7f2d092777ee238340be40c04ecb45c132
|
https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_ffmpeg_normalize.py#L163-L178
|
8,629
|
slhck/ffmpeg-normalize
|
ffmpeg_normalize/_cmd_utils.py
|
get_ffmpeg_exe
|
def get_ffmpeg_exe():
"""
Return path to ffmpeg executable
"""
if 'FFMPEG_PATH' in os.environ:
ffmpeg_exe = os.environ['FFMPEG_PATH']
else:
ffmpeg_exe = which('ffmpeg')
if not ffmpeg_exe:
if which('avconv'):
raise FFmpegNormalizeError(
"avconv is not supported. "
"Please install ffmpeg from http://ffmpeg.org instead."
)
else:
raise FFmpegNormalizeError(
"Could not find ffmpeg in your $PATH or $FFMPEG_PATH. "
"Please install ffmpeg from http://ffmpeg.org"
)
return ffmpeg_exe
|
python
|
def get_ffmpeg_exe():
"""
Return path to ffmpeg executable
"""
if 'FFMPEG_PATH' in os.environ:
ffmpeg_exe = os.environ['FFMPEG_PATH']
else:
ffmpeg_exe = which('ffmpeg')
if not ffmpeg_exe:
if which('avconv'):
raise FFmpegNormalizeError(
"avconv is not supported. "
"Please install ffmpeg from http://ffmpeg.org instead."
)
else:
raise FFmpegNormalizeError(
"Could not find ffmpeg in your $PATH or $FFMPEG_PATH. "
"Please install ffmpeg from http://ffmpeg.org"
)
return ffmpeg_exe
|
[
"def",
"get_ffmpeg_exe",
"(",
")",
":",
"if",
"'FFMPEG_PATH'",
"in",
"os",
".",
"environ",
":",
"ffmpeg_exe",
"=",
"os",
".",
"environ",
"[",
"'FFMPEG_PATH'",
"]",
"else",
":",
"ffmpeg_exe",
"=",
"which",
"(",
"'ffmpeg'",
")",
"if",
"not",
"ffmpeg_exe",
":",
"if",
"which",
"(",
"'avconv'",
")",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"avconv is not supported. \"",
"\"Please install ffmpeg from http://ffmpeg.org instead.\"",
")",
"else",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"Could not find ffmpeg in your $PATH or $FFMPEG_PATH. \"",
"\"Please install ffmpeg from http://ffmpeg.org\"",
")",
"return",
"ffmpeg_exe"
] |
Return path to ffmpeg executable
|
[
"Return",
"path",
"to",
"ffmpeg",
"executable"
] |
18477a7f2d092777ee238340be40c04ecb45c132
|
https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_cmd_utils.py#L153-L174
|
8,630
|
slhck/ffmpeg-normalize
|
ffmpeg_normalize/_cmd_utils.py
|
ffmpeg_has_loudnorm
|
def ffmpeg_has_loudnorm():
"""
Run feature detection on ffmpeg, returns True if ffmpeg supports
the loudnorm filter
"""
cmd_runner = CommandRunner([get_ffmpeg_exe(), '-filters'])
cmd_runner.run_command()
output = cmd_runner.get_output()
if 'loudnorm' in output:
return True
else:
logger.warning(
"Your ffmpeg version does not support the 'loudnorm' filter. "
"Please make sure you are running ffmpeg v3.1 or above."
)
return False
|
python
|
def ffmpeg_has_loudnorm():
"""
Run feature detection on ffmpeg, returns True if ffmpeg supports
the loudnorm filter
"""
cmd_runner = CommandRunner([get_ffmpeg_exe(), '-filters'])
cmd_runner.run_command()
output = cmd_runner.get_output()
if 'loudnorm' in output:
return True
else:
logger.warning(
"Your ffmpeg version does not support the 'loudnorm' filter. "
"Please make sure you are running ffmpeg v3.1 or above."
)
return False
|
[
"def",
"ffmpeg_has_loudnorm",
"(",
")",
":",
"cmd_runner",
"=",
"CommandRunner",
"(",
"[",
"get_ffmpeg_exe",
"(",
")",
",",
"'-filters'",
"]",
")",
"cmd_runner",
".",
"run_command",
"(",
")",
"output",
"=",
"cmd_runner",
".",
"get_output",
"(",
")",
"if",
"'loudnorm'",
"in",
"output",
":",
"return",
"True",
"else",
":",
"logger",
".",
"warning",
"(",
"\"Your ffmpeg version does not support the 'loudnorm' filter. \"",
"\"Please make sure you are running ffmpeg v3.1 or above.\"",
")",
"return",
"False"
] |
Run feature detection on ffmpeg, returns True if ffmpeg supports
the loudnorm filter
|
[
"Run",
"feature",
"detection",
"on",
"ffmpeg",
"returns",
"True",
"if",
"ffmpeg",
"supports",
"the",
"loudnorm",
"filter"
] |
18477a7f2d092777ee238340be40c04ecb45c132
|
https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_cmd_utils.py#L176-L191
|
8,631
|
slhck/ffmpeg-normalize
|
ffmpeg_normalize/_media_file.py
|
MediaFile.parse_streams
|
def parse_streams(self):
"""
Try to parse all input streams from file
"""
logger.debug("Parsing streams of {}".format(self.input_file))
cmd = [
self.ffmpeg_normalize.ffmpeg_exe, '-i', self.input_file,
'-c', 'copy', '-t', '0', '-map', '0',
'-f', 'null', NUL
]
cmd_runner = CommandRunner(cmd)
cmd_runner.run_command()
output = cmd_runner.get_output()
logger.debug("Stream parsing command output:")
logger.debug(output)
output_lines = [line.strip() for line in output.split('\n')]
for line in output_lines:
if not line.startswith('Stream'):
continue
stream_id_match = re.search(r'#0:([\d]+)', line)
if stream_id_match:
stream_id = int(stream_id_match.group(1))
if stream_id in self._stream_ids():
continue
else:
continue
if 'Audio' in line:
logger.debug("Found audio stream at index {}".format(stream_id))
sample_rate_match = re.search(r'(\d+) Hz', line)
sample_rate = int(sample_rate_match.group(1)) if sample_rate_match else None
bit_depth_match = re.search(r's(\d+)p?,', line)
bit_depth = int(bit_depth_match.group(1)) if bit_depth_match else None
self.streams['audio'][stream_id] = AudioStream(self, stream_id, sample_rate, bit_depth)
elif 'Video' in line:
logger.debug("Found video stream at index {}".format(stream_id))
self.streams['video'][stream_id] = VideoStream(self, stream_id)
elif 'Subtitle' in line:
logger.debug("Found subtitle stream at index {}".format(stream_id))
self.streams['subtitle'][stream_id] = SubtitleStream(self, stream_id)
if not self.streams['audio']:
raise FFmpegNormalizeError(
"Input file {} does not contain any audio streams"
.format(self.input_file))
if os.path.splitext(self.output_file)[1].lower() in ['.wav', '.mp3', '.aac']:
logger.warning(
"Output file only supports one stream. "
"Keeping only first audio stream."
)
first_stream = list(self.streams['audio'].values())[0]
self.streams['audio'] = {first_stream.stream_id: first_stream}
self.streams['video'] = {}
self.streams['subtitle'] = {}
|
python
|
def parse_streams(self):
"""
Try to parse all input streams from file
"""
logger.debug("Parsing streams of {}".format(self.input_file))
cmd = [
self.ffmpeg_normalize.ffmpeg_exe, '-i', self.input_file,
'-c', 'copy', '-t', '0', '-map', '0',
'-f', 'null', NUL
]
cmd_runner = CommandRunner(cmd)
cmd_runner.run_command()
output = cmd_runner.get_output()
logger.debug("Stream parsing command output:")
logger.debug(output)
output_lines = [line.strip() for line in output.split('\n')]
for line in output_lines:
if not line.startswith('Stream'):
continue
stream_id_match = re.search(r'#0:([\d]+)', line)
if stream_id_match:
stream_id = int(stream_id_match.group(1))
if stream_id in self._stream_ids():
continue
else:
continue
if 'Audio' in line:
logger.debug("Found audio stream at index {}".format(stream_id))
sample_rate_match = re.search(r'(\d+) Hz', line)
sample_rate = int(sample_rate_match.group(1)) if sample_rate_match else None
bit_depth_match = re.search(r's(\d+)p?,', line)
bit_depth = int(bit_depth_match.group(1)) if bit_depth_match else None
self.streams['audio'][stream_id] = AudioStream(self, stream_id, sample_rate, bit_depth)
elif 'Video' in line:
logger.debug("Found video stream at index {}".format(stream_id))
self.streams['video'][stream_id] = VideoStream(self, stream_id)
elif 'Subtitle' in line:
logger.debug("Found subtitle stream at index {}".format(stream_id))
self.streams['subtitle'][stream_id] = SubtitleStream(self, stream_id)
if not self.streams['audio']:
raise FFmpegNormalizeError(
"Input file {} does not contain any audio streams"
.format(self.input_file))
if os.path.splitext(self.output_file)[1].lower() in ['.wav', '.mp3', '.aac']:
logger.warning(
"Output file only supports one stream. "
"Keeping only first audio stream."
)
first_stream = list(self.streams['audio'].values())[0]
self.streams['audio'] = {first_stream.stream_id: first_stream}
self.streams['video'] = {}
self.streams['subtitle'] = {}
|
[
"def",
"parse_streams",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\"Parsing streams of {}\"",
".",
"format",
"(",
"self",
".",
"input_file",
")",
")",
"cmd",
"=",
"[",
"self",
".",
"ffmpeg_normalize",
".",
"ffmpeg_exe",
",",
"'-i'",
",",
"self",
".",
"input_file",
",",
"'-c'",
",",
"'copy'",
",",
"'-t'",
",",
"'0'",
",",
"'-map'",
",",
"'0'",
",",
"'-f'",
",",
"'null'",
",",
"NUL",
"]",
"cmd_runner",
"=",
"CommandRunner",
"(",
"cmd",
")",
"cmd_runner",
".",
"run_command",
"(",
")",
"output",
"=",
"cmd_runner",
".",
"get_output",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Stream parsing command output:\"",
")",
"logger",
".",
"debug",
"(",
"output",
")",
"output_lines",
"=",
"[",
"line",
".",
"strip",
"(",
")",
"for",
"line",
"in",
"output",
".",
"split",
"(",
"'\\n'",
")",
"]",
"for",
"line",
"in",
"output_lines",
":",
"if",
"not",
"line",
".",
"startswith",
"(",
"'Stream'",
")",
":",
"continue",
"stream_id_match",
"=",
"re",
".",
"search",
"(",
"r'#0:([\\d]+)'",
",",
"line",
")",
"if",
"stream_id_match",
":",
"stream_id",
"=",
"int",
"(",
"stream_id_match",
".",
"group",
"(",
"1",
")",
")",
"if",
"stream_id",
"in",
"self",
".",
"_stream_ids",
"(",
")",
":",
"continue",
"else",
":",
"continue",
"if",
"'Audio'",
"in",
"line",
":",
"logger",
".",
"debug",
"(",
"\"Found audio stream at index {}\"",
".",
"format",
"(",
"stream_id",
")",
")",
"sample_rate_match",
"=",
"re",
".",
"search",
"(",
"r'(\\d+) Hz'",
",",
"line",
")",
"sample_rate",
"=",
"int",
"(",
"sample_rate_match",
".",
"group",
"(",
"1",
")",
")",
"if",
"sample_rate_match",
"else",
"None",
"bit_depth_match",
"=",
"re",
".",
"search",
"(",
"r's(\\d+)p?,'",
",",
"line",
")",
"bit_depth",
"=",
"int",
"(",
"bit_depth_match",
".",
"group",
"(",
"1",
")",
")",
"if",
"bit_depth_match",
"else",
"None",
"self",
".",
"streams",
"[",
"'audio'",
"]",
"[",
"stream_id",
"]",
"=",
"AudioStream",
"(",
"self",
",",
"stream_id",
",",
"sample_rate",
",",
"bit_depth",
")",
"elif",
"'Video'",
"in",
"line",
":",
"logger",
".",
"debug",
"(",
"\"Found video stream at index {}\"",
".",
"format",
"(",
"stream_id",
")",
")",
"self",
".",
"streams",
"[",
"'video'",
"]",
"[",
"stream_id",
"]",
"=",
"VideoStream",
"(",
"self",
",",
"stream_id",
")",
"elif",
"'Subtitle'",
"in",
"line",
":",
"logger",
".",
"debug",
"(",
"\"Found subtitle stream at index {}\"",
".",
"format",
"(",
"stream_id",
")",
")",
"self",
".",
"streams",
"[",
"'subtitle'",
"]",
"[",
"stream_id",
"]",
"=",
"SubtitleStream",
"(",
"self",
",",
"stream_id",
")",
"if",
"not",
"self",
".",
"streams",
"[",
"'audio'",
"]",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"Input file {} does not contain any audio streams\"",
".",
"format",
"(",
"self",
".",
"input_file",
")",
")",
"if",
"os",
".",
"path",
".",
"splitext",
"(",
"self",
".",
"output_file",
")",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"in",
"[",
"'.wav'",
",",
"'.mp3'",
",",
"'.aac'",
"]",
":",
"logger",
".",
"warning",
"(",
"\"Output file only supports one stream. \"",
"\"Keeping only first audio stream.\"",
")",
"first_stream",
"=",
"list",
"(",
"self",
".",
"streams",
"[",
"'audio'",
"]",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"self",
".",
"streams",
"[",
"'audio'",
"]",
"=",
"{",
"first_stream",
".",
"stream_id",
":",
"first_stream",
"}",
"self",
".",
"streams",
"[",
"'video'",
"]",
"=",
"{",
"}",
"self",
".",
"streams",
"[",
"'subtitle'",
"]",
"=",
"{",
"}"
] |
Try to parse all input streams from file
|
[
"Try",
"to",
"parse",
"all",
"input",
"streams",
"from",
"file"
] |
18477a7f2d092777ee238340be40c04ecb45c132
|
https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_media_file.py#L51-L114
|
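The stream-ID and sample-rate regexes above, applied to one assumed (typical) ffmpeg stderr line:

import re

line = "Stream #0:1(und): Audio: aac (LC), 44100 Hz, stereo, fltp, 128 kb/s"
stream_id = int(re.search(r'#0:([\d]+)', line).group(1))  # 1
sample_rate_match = re.search(r'(\d+) Hz', line)
sample_rate = int(sample_rate_match.group(1))             # 44100
print(stream_id, sample_rate)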
8,632
|
slhck/ffmpeg-normalize
|
ffmpeg_normalize/_media_file.py
|
MediaFile._get_audio_filter_cmd
|
def _get_audio_filter_cmd(self):
"""
Return filter_complex command and output labels needed
"""
all_filters = []
output_labels = []
for audio_stream in self.streams['audio'].values():
if self.ffmpeg_normalize.normalization_type == 'ebu':
stream_filter = audio_stream.get_second_pass_opts_ebu()
else:
stream_filter = audio_stream.get_second_pass_opts_peakrms()
input_label = '[0:{}]'.format(audio_stream.stream_id)
output_label = '[norm{}]'.format(audio_stream.stream_id)
output_labels.append(output_label)
all_filters.append(input_label + stream_filter + output_label)
filter_complex_cmd = ';'.join(all_filters)
return filter_complex_cmd, output_labels
|
python
|
def _get_audio_filter_cmd(self):
"""
Return filter_complex command and output labels needed
"""
all_filters = []
output_labels = []
for audio_stream in self.streams['audio'].values():
if self.ffmpeg_normalize.normalization_type == 'ebu':
stream_filter = audio_stream.get_second_pass_opts_ebu()
else:
stream_filter = audio_stream.get_second_pass_opts_peakrms()
input_label = '[0:{}]'.format(audio_stream.stream_id)
output_label = '[norm{}]'.format(audio_stream.stream_id)
output_labels.append(output_label)
all_filters.append(input_label + stream_filter + output_label)
filter_complex_cmd = ';'.join(all_filters)
return filter_complex_cmd, output_labels
|
[
"def",
"_get_audio_filter_cmd",
"(",
"self",
")",
":",
"all_filters",
"=",
"[",
"]",
"output_labels",
"=",
"[",
"]",
"for",
"audio_stream",
"in",
"self",
".",
"streams",
"[",
"'audio'",
"]",
".",
"values",
"(",
")",
":",
"if",
"self",
".",
"ffmpeg_normalize",
".",
"normalization_type",
"==",
"'ebu'",
":",
"stream_filter",
"=",
"audio_stream",
".",
"get_second_pass_opts_ebu",
"(",
")",
"else",
":",
"stream_filter",
"=",
"audio_stream",
".",
"get_second_pass_opts_peakrms",
"(",
")",
"input_label",
"=",
"'[0:{}]'",
".",
"format",
"(",
"audio_stream",
".",
"stream_id",
")",
"output_label",
"=",
"'[norm{}]'",
".",
"format",
"(",
"audio_stream",
".",
"stream_id",
")",
"output_labels",
".",
"append",
"(",
"output_label",
")",
"all_filters",
".",
"append",
"(",
"input_label",
"+",
"stream_filter",
"+",
"output_label",
")",
"filter_complex_cmd",
"=",
"';'",
".",
"join",
"(",
"all_filters",
")",
"return",
"filter_complex_cmd",
",",
"output_labels"
] |
Return filter_complex command and output labels needed
|
[
"Return",
"filter_complex",
"command",
"and",
"output",
"labels",
"needed"
] |
18477a7f2d092777ee238340be40c04ecb45c132
|
https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_media_file.py#L160-L179
|
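A toy reconstruction of the string this method builds, using a simplified loudnorm option in place of the real per-stream filter:

stream_ids = [1, 2]
filters = ['[0:{}]loudnorm=i=-23[norm{}]'.format(s, s) for s in stream_ids]
filter_complex_cmd = ';'.join(filters)
print(filter_complex_cmd)
# [0:1]loudnorm=i=-23[norm1];[0:2]loudnorm=i=-23[norm2]
print(['[norm{}]'.format(s) for s in stream_ids])  # the output_labels list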
8,633
|
slhck/ffmpeg-normalize
|
ffmpeg_normalize/_streams.py
|
AudioStream.parse_volumedetect_stats
|
def parse_volumedetect_stats(self):
"""
Use ffmpeg with volumedetect filter to get the mean volume of the input file.
"""
logger.info(
"Running first pass volumedetect filter for stream {}".format(self.stream_id)
)
filter_str = '[0:{}]volumedetect'.format(self.stream_id)
cmd = [
self.media_file.ffmpeg_normalize.ffmpeg_exe, '-nostdin', '-y',
'-i', self.media_file.input_file,
'-filter_complex', filter_str,
'-vn', '-sn', '-f', 'null', NUL
]
cmd_runner = CommandRunner(cmd)
for progress in cmd_runner.run_ffmpeg_command():
yield progress
output = cmd_runner.get_output()
logger.debug("Volumedetect command output:")
logger.debug(output)
mean_volume_matches = re.findall(r"mean_volume: ([\-\d\.]+) dB", output)
if mean_volume_matches:
self.loudness_statistics['mean'] = float(mean_volume_matches[0])
else:
raise FFmpegNormalizeError(
"Could not get mean volume for {}".format(self.media_file.input_file)
)
max_volume_matches = re.findall(r"max_volume: ([\-\d\.]+) dB", output)
if max_volume_matches:
self.loudness_statistics['max'] = float(max_volume_matches[0])
else:
raise FFmpegNormalizeError(
"Could not get max volume for {}".format(self.media_file.input_file)
)
|
python
|
def parse_volumedetect_stats(self):
"""
Use ffmpeg with volumedetect filter to get the mean volume of the input file.
"""
logger.info(
"Running first pass volumedetect filter for stream {}".format(self.stream_id)
)
filter_str = '[0:{}]volumedetect'.format(self.stream_id)
cmd = [
self.media_file.ffmpeg_normalize.ffmpeg_exe, '-nostdin', '-y',
'-i', self.media_file.input_file,
'-filter_complex', filter_str,
'-vn', '-sn', '-f', 'null', NUL
]
cmd_runner = CommandRunner(cmd)
for progress in cmd_runner.run_ffmpeg_command():
yield progress
output = cmd_runner.get_output()
logger.debug("Volumedetect command output:")
logger.debug(output)
mean_volume_matches = re.findall(r"mean_volume: ([\-\d\.]+) dB", output)
if mean_volume_matches:
self.loudness_statistics['mean'] = float(mean_volume_matches[0])
else:
raise FFmpegNormalizeError(
"Could not get mean volume for {}".format(self.media_file.input_file)
)
max_volume_matches = re.findall(r"max_volume: ([\-\d\.]+) dB", output)
if max_volume_matches:
self.loudness_statistics['max'] = float(max_volume_matches[0])
else:
raise FFmpegNormalizeError(
"Could not get max volume for {}".format(self.media_file.input_file)
)
|
[
"def",
"parse_volumedetect_stats",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"\"Running first pass volumedetect filter for stream {}\"",
".",
"format",
"(",
"self",
".",
"stream_id",
")",
")",
"filter_str",
"=",
"'[0:{}]volumedetect'",
".",
"format",
"(",
"self",
".",
"stream_id",
")",
"cmd",
"=",
"[",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"ffmpeg_exe",
",",
"'-nostdin'",
",",
"'-y'",
",",
"'-i'",
",",
"self",
".",
"media_file",
".",
"input_file",
",",
"'-filter_complex'",
",",
"filter_str",
",",
"'-vn'",
",",
"'-sn'",
",",
"'-f'",
",",
"'null'",
",",
"NUL",
"]",
"cmd_runner",
"=",
"CommandRunner",
"(",
"cmd",
")",
"for",
"progress",
"in",
"cmd_runner",
".",
"run_ffmpeg_command",
"(",
")",
":",
"yield",
"progress",
"output",
"=",
"cmd_runner",
".",
"get_output",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Volumedetect command output:\"",
")",
"logger",
".",
"debug",
"(",
"output",
")",
"mean_volume_matches",
"=",
"re",
".",
"findall",
"(",
"r\"mean_volume: ([\\-\\d\\.]+) dB\"",
",",
"output",
")",
"if",
"mean_volume_matches",
":",
"self",
".",
"loudness_statistics",
"[",
"'mean'",
"]",
"=",
"float",
"(",
"mean_volume_matches",
"[",
"0",
"]",
")",
"else",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"Could not get mean volume for {}\"",
".",
"format",
"(",
"self",
".",
"media_file",
".",
"input_file",
")",
")",
"max_volume_matches",
"=",
"re",
".",
"findall",
"(",
"r\"max_volume: ([\\-\\d\\.]+) dB\"",
",",
"output",
")",
"if",
"max_volume_matches",
":",
"self",
".",
"loudness_statistics",
"[",
"'max'",
"]",
"=",
"float",
"(",
"max_volume_matches",
"[",
"0",
"]",
")",
"else",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"Could not get max volume for {}\"",
".",
"format",
"(",
"self",
".",
"media_file",
".",
"input_file",
")",
")"
] |
Use ffmpeg with volumedetect filter to get the mean volume of the input file.
|
[
"Use",
"ffmpeg",
"with",
"volumedetect",
"filter",
"to",
"get",
"the",
"mean",
"volume",
"of",
"the",
"input",
"file",
"."
] |
18477a7f2d092777ee238340be40c04ecb45c132
|
https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_streams.py#L78-L117
|
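The mean/max extraction above, run against two assumed sample lines of volumedetect output:

import re

output = ("[Parsed_volumedetect_0 @ 0x1] mean_volume: -21.5 dB\n"
          "[Parsed_volumedetect_0 @ 0x1] max_volume: -3.2 dB")
mean = float(re.findall(r"mean_volume: ([\-\d\.]+) dB", output)[0])  # -21.5
peak = float(re.findall(r"max_volume: ([\-\d\.]+) dB", output)[0])   # -3.2
print(mean, peak)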
8,634
|
slhck/ffmpeg-normalize
|
ffmpeg_normalize/_streams.py
|
AudioStream.parse_loudnorm_stats
|
def parse_loudnorm_stats(self):
"""
Run a first pass loudnorm filter to get measured data.
"""
logger.info(
"Running first pass loudnorm filter for stream {}".format(self.stream_id)
)
opts = {
'i': self.media_file.ffmpeg_normalize.target_level,
'lra': self.media_file.ffmpeg_normalize.loudness_range_target,
'tp': self.media_file.ffmpeg_normalize.true_peak,
'offset': self.media_file.ffmpeg_normalize.offset,
'print_format': 'json'
}
if self.media_file.ffmpeg_normalize.dual_mono:
opts['dual_mono'] = 'true'
filter_str = '[0:{}]'.format(self.stream_id) + \
'loudnorm=' + dict_to_filter_opts(opts)
cmd = [
self.media_file.ffmpeg_normalize.ffmpeg_exe, '-nostdin', '-y',
'-i', self.media_file.input_file,
'-filter_complex', filter_str,
'-vn', '-sn', '-f', 'null', NUL
]
cmd_runner = CommandRunner(cmd)
for progress in cmd_runner.run_ffmpeg_command():
yield progress
output = cmd_runner.get_output()
logger.debug("Loudnorm first pass command output:")
logger.debug(output)
output_lines = [line.strip() for line in output.split('\n')]
loudnorm_start = False
loudnorm_end = False
for index, line in enumerate(output_lines):
if line.startswith('[Parsed_loudnorm'):
loudnorm_start = index + 1
continue
if loudnorm_start and line.startswith('}'):
loudnorm_end = index + 1
break
if not (loudnorm_start and loudnorm_end):
raise FFmpegNormalizeError("Could not parse loudnorm stats; no loudnorm-related output found")
try:
loudnorm_stats = json.loads('\n'.join(output_lines[loudnorm_start:loudnorm_end]))
except Exception as e:
raise FFmpegNormalizeError("Could not parse loudnorm stats; wrong JSON format in string: {}".format(e))
logger.debug("Loudnorm stats parsed: {}".format(json.dumps(loudnorm_stats)))
self.loudness_statistics['ebu'] = loudnorm_stats
for key, val in self.loudness_statistics['ebu'].items():
if key == 'normalization_type':
continue
# FIXME: drop Python 2 support and just use math.inf
if float(val) == -float("inf"):
self.loudness_statistics['ebu'][key] = -99
elif float(val) == float("inf"):
self.loudness_statistics['ebu'][key] = 0
|
python
|
def parse_loudnorm_stats(self):
"""
Run a first pass loudnorm filter to get measured data.
"""
logger.info(
"Running first pass loudnorm filter for stream {}".format(self.stream_id)
)
opts = {
'i': self.media_file.ffmpeg_normalize.target_level,
'lra': self.media_file.ffmpeg_normalize.loudness_range_target,
'tp': self.media_file.ffmpeg_normalize.true_peak,
'offset': self.media_file.ffmpeg_normalize.offset,
'print_format': 'json'
}
if self.media_file.ffmpeg_normalize.dual_mono:
opts['dual_mono'] = 'true'
filter_str = '[0:{}]'.format(self.stream_id) + \
'loudnorm=' + dict_to_filter_opts(opts)
cmd = [
self.media_file.ffmpeg_normalize.ffmpeg_exe, '-nostdin', '-y',
'-i', self.media_file.input_file,
'-filter_complex', filter_str,
'-vn', '-sn', '-f', 'null', NUL
]
cmd_runner = CommandRunner(cmd)
for progress in cmd_runner.run_ffmpeg_command():
yield progress
output = cmd_runner.get_output()
logger.debug("Loudnorm first pass command output:")
logger.debug(output)
output_lines = [line.strip() for line in output.split('\n')]
loudnorm_start = False
loudnorm_end = False
for index, line in enumerate(output_lines):
if line.startswith('[Parsed_loudnorm'):
loudnorm_start = index + 1
continue
if loudnorm_start and line.startswith('}'):
loudnorm_end = index + 1
break
if not (loudnorm_start and loudnorm_end):
raise FFmpegNormalizeError("Could not parse loudnorm stats; no loudnorm-related output found")
try:
loudnorm_stats = json.loads('\n'.join(output_lines[loudnorm_start:loudnorm_end]))
except Exception as e:
raise FFmpegNormalizeError("Could not parse loudnorm stats; wrong JSON format in string: {}".format(e))
logger.debug("Loudnorm stats parsed: {}".format(json.dumps(loudnorm_stats)))
self.loudness_statistics['ebu'] = loudnorm_stats
for key, val in self.loudness_statistics['ebu'].items():
if key == 'normalization_type':
continue
# FIXME: drop Python 2 support and just use math.inf
if float(val) == -float("inf"):
self.loudness_statistics['ebu'][key] = -99
elif float(val) == float("inf"):
self.loudness_statistics['ebu'][key] = 0
|
[
"def",
"parse_loudnorm_stats",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"\"Running first pass loudnorm filter for stream {}\"",
".",
"format",
"(",
"self",
".",
"stream_id",
")",
")",
"opts",
"=",
"{",
"'i'",
":",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"target_level",
",",
"'lra'",
":",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"loudness_range_target",
",",
"'tp'",
":",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"true_peak",
",",
"'offset'",
":",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"offset",
",",
"'print_format'",
":",
"'json'",
"}",
"if",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"dual_mono",
":",
"opts",
"[",
"'dual_mono'",
"]",
"=",
"'true'",
"filter_str",
"=",
"'[0:{}]'",
".",
"format",
"(",
"self",
".",
"stream_id",
")",
"+",
"'loudnorm='",
"+",
"dict_to_filter_opts",
"(",
"opts",
")",
"cmd",
"=",
"[",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"ffmpeg_exe",
",",
"'-nostdin'",
",",
"'-y'",
",",
"'-i'",
",",
"self",
".",
"media_file",
".",
"input_file",
",",
"'-filter_complex'",
",",
"filter_str",
",",
"'-vn'",
",",
"'-sn'",
",",
"'-f'",
",",
"'null'",
",",
"NUL",
"]",
"cmd_runner",
"=",
"CommandRunner",
"(",
"cmd",
")",
"for",
"progress",
"in",
"cmd_runner",
".",
"run_ffmpeg_command",
"(",
")",
":",
"yield",
"progress",
"output",
"=",
"cmd_runner",
".",
"get_output",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Loudnorm first pass command output:\"",
")",
"logger",
".",
"debug",
"(",
"output",
")",
"output_lines",
"=",
"[",
"line",
".",
"strip",
"(",
")",
"for",
"line",
"in",
"output",
".",
"split",
"(",
"'\\n'",
")",
"]",
"loudnorm_start",
"=",
"False",
"loudnorm_end",
"=",
"False",
"for",
"index",
",",
"line",
"in",
"enumerate",
"(",
"output_lines",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"'[Parsed_loudnorm'",
")",
":",
"loudnorm_start",
"=",
"index",
"+",
"1",
"continue",
"if",
"loudnorm_start",
"and",
"line",
".",
"startswith",
"(",
"'}'",
")",
":",
"loudnorm_end",
"=",
"index",
"+",
"1",
"break",
"if",
"not",
"(",
"loudnorm_start",
"and",
"loudnorm_end",
")",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"Could not parse loudnorm stats; no loudnorm-related output found\"",
")",
"try",
":",
"loudnorm_stats",
"=",
"json",
".",
"loads",
"(",
"'\\n'",
".",
"join",
"(",
"output_lines",
"[",
"loudnorm_start",
":",
"loudnorm_end",
"]",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"Could not parse loudnorm stats; wrong JSON format in string: {}\"",
".",
"format",
"(",
"e",
")",
")",
"logger",
".",
"debug",
"(",
"\"Loudnorm stats parsed: {}\"",
".",
"format",
"(",
"json",
".",
"dumps",
"(",
"loudnorm_stats",
")",
")",
")",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
"=",
"loudnorm_stats",
"for",
"key",
",",
"val",
"in",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
".",
"items",
"(",
")",
":",
"if",
"key",
"==",
"'normalization_type'",
":",
"continue",
"# FIXME: drop Python 2 support and just use math.inf",
"if",
"float",
"(",
"val",
")",
"==",
"-",
"float",
"(",
"\"inf\"",
")",
":",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
"[",
"key",
"]",
"=",
"-",
"99",
"elif",
"float",
"(",
"val",
")",
"==",
"float",
"(",
"\"inf\"",
")",
":",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
"[",
"key",
"]",
"=",
"0"
] |
Run a first pass loudnorm filter to get measured data.
|
[
"Run",
"a",
"first",
"pass",
"loudnorm",
"filter",
"to",
"get",
"measured",
"data",
"."
] |
18477a7f2d092777ee238340be40c04ecb45c132
|
https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_streams.py#L119-L185
|
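The line-scanning logic above can be exercised on its own; this sketch feeds it an invented stderr tail shaped like loudnorm's print_format=json output (all values are made up):

import json

output = (
    "[Parsed_loudnorm_0 @ 0x55d]\n"
    "{\n"
    '    "input_i" : "-27.61",\n'
    '    "input_tp" : "-4.47",\n'
    '    "input_lra" : "18.06",\n'
    '    "input_thresh" : "-39.20",\n'
    '    "normalization_type" : "dynamic"\n'
    "}"
)

output_lines = [line.strip() for line in output.split('\n')]
start = end = None
for index, line in enumerate(output_lines):
    if line.startswith('[Parsed_loudnorm'):
        start = index + 1
        continue
    if start is not None and line.startswith('}'):
        end = index + 1
        break

stats = json.loads('\n'.join(output_lines[start:end]))
print(stats['input_i'])  # -27.61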
8,635
|
slhck/ffmpeg-normalize
|
ffmpeg_normalize/_streams.py
|
AudioStream.get_second_pass_opts_ebu
|
def get_second_pass_opts_ebu(self):
"""
Return second pass loudnorm filter options string for ffmpeg
"""
if not self.loudness_statistics['ebu']:
raise FFmpegNormalizeError(
"First pass not run, you must call parse_loudnorm_stats first"
)
input_i = float(self.loudness_statistics['ebu']["input_i"])
if input_i > 0:
logger.warn("Input file had measured input loudness greater than zero ({}), capping at 0".format("input_i"))
self.loudness_statistics['ebu']['input_i'] = 0
opts = {
'i': self.media_file.ffmpeg_normalize.target_level,
'lra': self.media_file.ffmpeg_normalize.loudness_range_target,
'tp': self.media_file.ffmpeg_normalize.true_peak,
'offset': self.media_file.ffmpeg_normalize.offset,
'measured_i': float(self.loudness_statistics['ebu']['input_i']),
'measured_lra': float(self.loudness_statistics['ebu']['input_lra']),
'measured_tp': float(self.loudness_statistics['ebu']['input_tp']),
'measured_thresh': float(self.loudness_statistics['ebu']['input_thresh']),
'linear': 'true',
'print_format': 'json'
}
if self.media_file.ffmpeg_normalize.dual_mono:
opts['dual_mono'] = 'true'
return 'loudnorm=' + dict_to_filter_opts(opts)
|
python
|
def get_second_pass_opts_ebu(self):
"""
Return second pass loudnorm filter options string for ffmpeg
"""
if not self.loudness_statistics['ebu']:
raise FFmpegNormalizeError(
"First pass not run, you must call parse_loudnorm_stats first"
)
input_i = float(self.loudness_statistics['ebu']["input_i"])
if input_i > 0:
logger.warn("Input file had measured input loudness greater than zero ({}), capping at 0".format("input_i"))
self.loudness_statistics['ebu']['input_i'] = 0
opts = {
'i': self.media_file.ffmpeg_normalize.target_level,
'lra': self.media_file.ffmpeg_normalize.loudness_range_target,
'tp': self.media_file.ffmpeg_normalize.true_peak,
'offset': self.media_file.ffmpeg_normalize.offset,
'measured_i': float(self.loudness_statistics['ebu']['input_i']),
'measured_lra': float(self.loudness_statistics['ebu']['input_lra']),
'measured_tp': float(self.loudness_statistics['ebu']['input_tp']),
'measured_thresh': float(self.loudness_statistics['ebu']['input_thresh']),
'linear': 'true',
'print_format': 'json'
}
if self.media_file.ffmpeg_normalize.dual_mono:
opts['dual_mono'] = 'true'
return 'loudnorm=' + dict_to_filter_opts(opts)
|
[
"def",
"get_second_pass_opts_ebu",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"First pass not run, you must call parse_loudnorm_stats first\"",
")",
"input_i",
"=",
"float",
"(",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
"[",
"\"input_i\"",
"]",
")",
"if",
"input_i",
">",
"0",
":",
"logger",
".",
"warn",
"(",
"\"Input file had measured input loudness greater than zero ({}), capping at 0\"",
".",
"format",
"(",
"\"input_i\"",
")",
")",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
"[",
"'input_i'",
"]",
"=",
"0",
"opts",
"=",
"{",
"'i'",
":",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"target_level",
",",
"'lra'",
":",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"loudness_range_target",
",",
"'tp'",
":",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"true_peak",
",",
"'offset'",
":",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"offset",
",",
"'measured_i'",
":",
"float",
"(",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
"[",
"'input_i'",
"]",
")",
",",
"'measured_lra'",
":",
"float",
"(",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
"[",
"'input_lra'",
"]",
")",
",",
"'measured_tp'",
":",
"float",
"(",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
"[",
"'input_tp'",
"]",
")",
",",
"'measured_thresh'",
":",
"float",
"(",
"self",
".",
"loudness_statistics",
"[",
"'ebu'",
"]",
"[",
"'input_thresh'",
"]",
")",
",",
"'linear'",
":",
"'true'",
",",
"'print_format'",
":",
"'json'",
"}",
"if",
"self",
".",
"media_file",
".",
"ffmpeg_normalize",
".",
"dual_mono",
":",
"opts",
"[",
"'dual_mono'",
"]",
"=",
"'true'",
"return",
"'loudnorm='",
"+",
"dict_to_filter_opts",
"(",
"opts",
")"
] |
Return second pass loudnorm filter options string for ffmpeg
|
[
"Return",
"second",
"pass",
"loudnorm",
"filter",
"options",
"string",
"for",
"ffmpeg"
] |
18477a7f2d092777ee238340be40c04ecb45c132
|
https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_streams.py#L187-L218
|
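dict_to_filter_opts is a project helper that is not shown in this record; a plausible stand-in that joins key=value pairs with ':' (ffmpeg's filter-option separator) is enough to see what the returned string looks like:

def dict_to_filter_opts(opts):
    # assumed behaviour of the real helper: serialize options in filter syntax
    return ':'.join('{}={}'.format(key, value) for key, value in opts.items())

opts = {'i': -23.0, 'lra': 7.0, 'tp': -2.0, 'linear': 'true'}
print('loudnorm=' + dict_to_filter_opts(opts))
# loudnorm=i=-23.0:lra=7.0:tp=-2.0:linear=true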
8,636
|
slhck/ffmpeg-normalize
|
ffmpeg_normalize/_logger.py
|
setup_custom_logger
|
def setup_custom_logger(name):
"""
Create a logger with a certain name and level
"""
global loggers
if loggers.get(name):
return loggers.get(name)
formatter = logging.Formatter(
fmt='%(levelname)s: %(message)s'
)
# handler = logging.StreamHandler()
handler = TqdmLoggingHandler()
handler.setFormatter(formatter)
# \033[1;30m - black
# \033[1;31m - red
# \033[1;32m - green
# \033[1;33m - yellow
# \033[1;34m - blue
# \033[1;35m - magenta
# \033[1;36m - cyan
# \033[1;37m - white
if system() not in ['Windows', 'cli']:
logging.addLevelName(logging.ERROR, "\033[1;31m%s\033[1;0m" % logging.getLevelName(logging.ERROR))
logging.addLevelName(logging.WARNING, "\033[1;33m%s\033[1;0m" % logging.getLevelName(logging.WARNING))
logging.addLevelName(logging.INFO, "\033[1;34m%s\033[1;0m" % logging.getLevelName(logging.INFO))
logging.addLevelName(logging.DEBUG, "\033[1;35m%s\033[1;0m" % logging.getLevelName(logging.DEBUG))
logger = logging.getLogger(name)
logger.setLevel(logging.WARNING)
# if (logger.hasHandlers()):
# logger.handlers.clear()
if logger.handlers:
logger.handlers = []
logger.addHandler(handler)
    loggers[name] = logger
return logger
|
python
|
def setup_custom_logger(name):
"""
Create a logger with a certain name and level
"""
global loggers
if loggers.get(name):
return loggers.get(name)
formatter = logging.Formatter(
fmt='%(levelname)s: %(message)s'
)
# handler = logging.StreamHandler()
handler = TqdmLoggingHandler()
handler.setFormatter(formatter)
# \033[1;30m - black
# \033[1;31m - red
# \033[1;32m - green
# \033[1;33m - yellow
# \033[1;34m - blue
# \033[1;35m - magenta
# \033[1;36m - cyan
# \033[1;37m - white
if system() not in ['Windows', 'cli']:
logging.addLevelName(logging.ERROR, "\033[1;31m%s\033[1;0m" % logging.getLevelName(logging.ERROR))
logging.addLevelName(logging.WARNING, "\033[1;33m%s\033[1;0m" % logging.getLevelName(logging.WARNING))
logging.addLevelName(logging.INFO, "\033[1;34m%s\033[1;0m" % logging.getLevelName(logging.INFO))
logging.addLevelName(logging.DEBUG, "\033[1;35m%s\033[1;0m" % logging.getLevelName(logging.DEBUG))
logger = logging.getLogger(name)
logger.setLevel(logging.WARNING)
# if (logger.hasHandlers()):
# logger.handlers.clear()
if logger.handlers:
logger.handlers = []
logger.addHandler(handler)
    loggers[name] = logger
return logger
|
[
"def",
"setup_custom_logger",
"(",
"name",
")",
":",
"global",
"loggers",
"if",
"loggers",
".",
"get",
"(",
"name",
")",
":",
"return",
"loggers",
".",
"get",
"(",
"name",
")",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"fmt",
"=",
"'%(levelname)s: %(message)s'",
")",
"# handler = logging.StreamHandler()",
"handler",
"=",
"TqdmLoggingHandler",
"(",
")",
"handler",
".",
"setFormatter",
"(",
"formatter",
")",
"# \\033[1;30m - black",
"# \\033[1;31m - red",
"# \\033[1;32m - green",
"# \\033[1;33m - yellow",
"# \\033[1;34m - blue",
"# \\033[1;35m - magenta",
"# \\033[1;36m - cyan",
"# \\033[1;37m - white",
"if",
"system",
"(",
")",
"not",
"in",
"[",
"'Windows'",
",",
"'cli'",
"]",
":",
"logging",
".",
"addLevelName",
"(",
"logging",
".",
"ERROR",
",",
"\"\\033[1;31m%s\\033[1;0m\"",
"%",
"logging",
".",
"getLevelName",
"(",
"logging",
".",
"ERROR",
")",
")",
"logging",
".",
"addLevelName",
"(",
"logging",
".",
"WARNING",
",",
"\"\\033[1;33m%s\\033[1;0m\"",
"%",
"logging",
".",
"getLevelName",
"(",
"logging",
".",
"WARNING",
")",
")",
"logging",
".",
"addLevelName",
"(",
"logging",
".",
"INFO",
",",
"\"\\033[1;34m%s\\033[1;0m\"",
"%",
"logging",
".",
"getLevelName",
"(",
"logging",
".",
"INFO",
")",
")",
"logging",
".",
"addLevelName",
"(",
"logging",
".",
"DEBUG",
",",
"\"\\033[1;35m%s\\033[1;0m\"",
"%",
"logging",
".",
"getLevelName",
"(",
"logging",
".",
"DEBUG",
")",
")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"name",
")",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"WARNING",
")",
"# if (logger.hasHandlers()):",
"# logger.handlers.clear()",
"if",
"logger",
".",
"handlers",
":",
"logger",
".",
"handlers",
"=",
"[",
"]",
"logger",
".",
"addHandler",
"(",
"handler",
")",
"loggers",
".",
"update",
"(",
"dict",
"(",
"name",
"=",
"logger",
")",
")",
"return",
"logger"
] |
Create a logger with a certain name and level
|
[
"Create",
"a",
"logger",
"with",
"a",
"certain",
"name",
"and",
"level"
] |
18477a7f2d092777ee238340be40c04ecb45c132
|
https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_logger.py#L26-L68
|
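The coloring trick above is plain stdlib logging; a self-contained snippet showing just the colored level name:

import logging

# Rewrite the WARNING level name with ANSI yellow, as the function above does
logging.addLevelName(
    logging.WARNING,
    "\033[1;33m%s\033[1;0m" % logging.getLevelName(logging.WARNING)
)
logging.basicConfig(format='%(levelname)s: %(message)s')
logging.warning("level name prints in yellow on ANSI-capable terminals")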
8,637
|
PyCQA/pylint-django
|
pylint_django/checkers/__init__.py
|
register_checkers
|
def register_checkers(linter):
"""Register checkers."""
linter.register_checker(ModelChecker(linter))
linter.register_checker(DjangoInstalledChecker(linter))
linter.register_checker(JsonResponseChecker(linter))
linter.register_checker(FormChecker(linter))
|
python
|
def register_checkers(linter):
"""Register checkers."""
linter.register_checker(ModelChecker(linter))
linter.register_checker(DjangoInstalledChecker(linter))
linter.register_checker(JsonResponseChecker(linter))
linter.register_checker(FormChecker(linter))
|
[
"def",
"register_checkers",
"(",
"linter",
")",
":",
"linter",
".",
"register_checker",
"(",
"ModelChecker",
"(",
"linter",
")",
")",
"linter",
".",
"register_checker",
"(",
"DjangoInstalledChecker",
"(",
"linter",
")",
")",
"linter",
".",
"register_checker",
"(",
"JsonResponseChecker",
"(",
"linter",
")",
")",
"linter",
".",
"register_checker",
"(",
"FormChecker",
"(",
"linter",
")",
")"
] |
Register checkers.
|
[
"Register",
"checkers",
"."
] |
0bbee433519f48134df4a797341c4196546a454e
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/checkers/__init__.py#L8-L13
|
8,638
|
PyCQA/pylint-django
|
pylint_django/checkers/db_performance.py
|
register
|
def register(linter):
"""Required method to auto register this checker."""
linter.register_checker(NewDbFieldWithDefaultChecker(linter))
if not compat.LOAD_CONFIGURATION_SUPPORTED:
load_configuration(linter)
|
python
|
def register(linter):
"""Required method to auto register this checker."""
linter.register_checker(NewDbFieldWithDefaultChecker(linter))
if not compat.LOAD_CONFIGURATION_SUPPORTED:
load_configuration(linter)
|
[
"def",
"register",
"(",
"linter",
")",
":",
"linter",
".",
"register_checker",
"(",
"NewDbFieldWithDefaultChecker",
"(",
"linter",
")",
")",
"if",
"not",
"compat",
".",
"LOAD_CONFIGURATION_SUPPORTED",
":",
"load_configuration",
"(",
"linter",
")"
] |
Required method to auto register this checker.
|
[
"Required",
"method",
"to",
"auto",
"register",
"this",
"checker",
"."
] |
0bbee433519f48134df4a797341c4196546a454e
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/checkers/db_performance.py#L125-L129
|
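register() is the standard pylint plugin entry point: pylint imports the module named via --load-plugins and calls its register(linter). A minimal plugin skeleton under that assumption (the checker name, message id, and symbol below are invented, and exact BaseChecker requirements vary across pylint versions):

from pylint.checkers import BaseChecker

class ExampleChecker(BaseChecker):
    # invented checker; it reports nothing real and only shows the shape
    name = 'example-checker'
    msgs = {
        'W9901': ('Example message', 'example-symbol', 'Example description.'),
    }

def register(linter):
    """Entry point called by pylint to register this plugin's checkers."""
    linter.register_checker(ExampleChecker(linter))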
8,639
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
ignore_import_warnings_for_related_fields
|
def ignore_import_warnings_for_related_fields(orig_method, self, node):
"""
Replaces the leave_module method on the VariablesChecker class to
prevent unused-import warnings which are caused by the ForeignKey
and OneToOneField transformations. By replacing the nodes in the
AST with their type rather than the django field, imports of the
form 'from django.db.models import OneToOneField' raise an unused-import
warning
"""
consumer = self._to_consume[0] # pylint: disable=W0212
# we can disable this warning ('Access to a protected member _to_consume of a client class')
# as it's not actually a client class, but rather, this method is being monkey patched
# onto the class and so the access is valid
new_things = {}
iterat = consumer.to_consume.items if PY3 else consumer.to_consume.iteritems
for name, stmts in iterat():
if isinstance(stmts[0], ImportFrom):
if any([n[0] in ('ForeignKey', 'OneToOneField') for n in stmts[0].names]):
continue
new_things[name] = stmts
consumer._atomic = ScopeConsumer(new_things, consumer.consumed, consumer.scope_type) # pylint: disable=W0212
self._to_consume = [consumer] # pylint: disable=W0212
return orig_method(self, node)
|
python
|
def ignore_import_warnings_for_related_fields(orig_method, self, node):
"""
Replaces the leave_module method on the VariablesChecker class to
prevent unused-import warnings which are caused by the ForeignKey
and OneToOneField transformations. By replacing the nodes in the
AST with their type rather than the django field, imports of the
form 'from django.db.models import OneToOneField' raise an unused-import
warning
"""
consumer = self._to_consume[0] # pylint: disable=W0212
# we can disable this warning ('Access to a protected member _to_consume of a client class')
# as it's not actually a client class, but rather, this method is being monkey patched
# onto the class and so the access is valid
new_things = {}
iterat = consumer.to_consume.items if PY3 else consumer.to_consume.iteritems
for name, stmts in iterat():
if isinstance(stmts[0], ImportFrom):
if any([n[0] in ('ForeignKey', 'OneToOneField') for n in stmts[0].names]):
continue
new_things[name] = stmts
consumer._atomic = ScopeConsumer(new_things, consumer.consumed, consumer.scope_type) # pylint: disable=W0212
self._to_consume = [consumer] # pylint: disable=W0212
return orig_method(self, node)
|
[
"def",
"ignore_import_warnings_for_related_fields",
"(",
"orig_method",
",",
"self",
",",
"node",
")",
":",
"consumer",
"=",
"self",
".",
"_to_consume",
"[",
"0",
"]",
"# pylint: disable=W0212",
"# we can disable this warning ('Access to a protected member _to_consume of a client class')",
"# as it's not actually a client class, but rather, this method is being monkey patched",
"# onto the class and so the access is valid",
"new_things",
"=",
"{",
"}",
"iterat",
"=",
"consumer",
".",
"to_consume",
".",
"items",
"if",
"PY3",
"else",
"consumer",
".",
"to_consume",
".",
"iteritems",
"for",
"name",
",",
"stmts",
"in",
"iterat",
"(",
")",
":",
"if",
"isinstance",
"(",
"stmts",
"[",
"0",
"]",
",",
"ImportFrom",
")",
":",
"if",
"any",
"(",
"[",
"n",
"[",
"0",
"]",
"in",
"(",
"'ForeignKey'",
",",
"'OneToOneField'",
")",
"for",
"n",
"in",
"stmts",
"[",
"0",
"]",
".",
"names",
"]",
")",
":",
"continue",
"new_things",
"[",
"name",
"]",
"=",
"stmts",
"consumer",
".",
"_atomic",
"=",
"ScopeConsumer",
"(",
"new_things",
",",
"consumer",
".",
"consumed",
",",
"consumer",
".",
"scope_type",
")",
"# pylint: disable=W0212",
"self",
".",
"_to_consume",
"=",
"[",
"consumer",
"]",
"# pylint: disable=W0212",
"return",
"orig_method",
"(",
"self",
",",
"node",
")"
] |
Replaces the leave_module method on the VariablesChecker class to
prevent unused-import warnings which are caused by the ForeignKey
and OneToOneField transformations. By replacing the nodes in the
AST with their type rather than the django field, imports of the
form 'from django.db.models import OneToOneField' raise an unused-import
warning
|
[
"Replaces",
"the",
"leave_module",
"method",
"on",
"the",
"VariablesChecker",
"class",
"to",
"prevent",
"unused",
"-",
"import",
"warnings",
"which",
"are",
"caused",
"by",
"the",
"ForeignKey",
"and",
"OneToOneField",
"transformations",
".",
"By",
"replacing",
"the",
"nodes",
"in",
"the",
"AST",
"with",
"their",
"type",
"rather",
"than",
"the",
"django",
"field",
"imports",
"of",
"the",
"form",
"from",
"django",
".",
"db",
".",
"models",
"import",
"OneToOneField",
"raise",
"an",
"unused",
"-",
"import",
"warning"
] |
0bbee433519f48134df4a797341c4196546a454e
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L291-L317
|
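The monkey-patching shape used above: capture the original function in a closure, install a wrapper in its place, and delegate after adjusting state. A generic sketch of the pattern (class and names invented):

class Checker:
    def leave_module(self, node):
        return 'original({})'.format(node)

def make_wrapper(orig_method):
    def wrapped(self, node):
        # filter or rewrite state here, then delegate to the original
        return 'wrapped ' + orig_method(self, node)
    return wrapped

Checker.leave_module = make_wrapper(Checker.leave_module)
print(Checker().leave_module('mymodule'))  # wrapped original(mymodule)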
8,640
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
is_model_admin_subclass
|
def is_model_admin_subclass(node):
"""Checks that node is derivative of ModelAdmin class."""
if node.name[-5:] != 'Admin' or isinstance(node.parent, ClassDef):
return False
return node_is_subclass(node, 'django.contrib.admin.options.ModelAdmin')
|
python
|
def is_model_admin_subclass(node):
"""Checks that node is derivative of ModelAdmin class."""
if node.name[-5:] != 'Admin' or isinstance(node.parent, ClassDef):
return False
return node_is_subclass(node, 'django.contrib.admin.options.ModelAdmin')
|
[
"def",
"is_model_admin_subclass",
"(",
"node",
")",
":",
"if",
"node",
".",
"name",
"[",
"-",
"5",
":",
"]",
"!=",
"'Admin'",
"or",
"isinstance",
"(",
"node",
".",
"parent",
",",
"ClassDef",
")",
":",
"return",
"False",
"return",
"node_is_subclass",
"(",
"node",
",",
"'django.contrib.admin.options.ModelAdmin'",
")"
] |
Checks that node is derivative of ModelAdmin class.
|
[
"Checks",
"that",
"node",
"is",
"derivative",
"of",
"ModelAdmin",
"class",
"."
] |
0bbee433519f48134df4a797341c4196546a454e
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L382-L387
|
8,641
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
is_model_factory
|
def is_model_factory(node):
"""Checks that node is derivative of DjangoModelFactory or SubFactory class."""
try:
parent_classes = node.expr.inferred()
except: # noqa: E722, pylint: disable=bare-except
return False
parents = ('factory.declarations.LazyFunction',
'factory.declarations.SubFactory',
'factory.django.DjangoModelFactory')
for parent_class in parent_classes:
try:
if parent_class.qname() in parents:
return True
if node_is_subclass(parent_class, *parents):
return True
except AttributeError:
continue
return False
|
python
|
def is_model_factory(node):
"""Checks that node is derivative of DjangoModelFactory or SubFactory class."""
try:
parent_classes = node.expr.inferred()
except: # noqa: E722, pylint: disable=bare-except
return False
parents = ('factory.declarations.LazyFunction',
'factory.declarations.SubFactory',
'factory.django.DjangoModelFactory')
for parent_class in parent_classes:
try:
if parent_class.qname() in parents:
return True
if node_is_subclass(parent_class, *parents):
return True
except AttributeError:
continue
return False
|
[
"def",
"is_model_factory",
"(",
"node",
")",
":",
"try",
":",
"parent_classes",
"=",
"node",
".",
"expr",
".",
"inferred",
"(",
")",
"except",
":",
"# noqa: E722, pylint: disable=bare-except",
"return",
"False",
"parents",
"=",
"(",
"'factory.declarations.LazyFunction'",
",",
"'factory.declarations.SubFactory'",
",",
"'factory.django.DjangoModelFactory'",
")",
"for",
"parent_class",
"in",
"parent_classes",
":",
"try",
":",
"if",
"parent_class",
".",
"qname",
"(",
")",
"in",
"parents",
":",
"return",
"True",
"if",
"node_is_subclass",
"(",
"parent_class",
",",
"*",
"parents",
")",
":",
"return",
"True",
"except",
"AttributeError",
":",
"continue",
"return",
"False"
] |
Checks that node is derivative of DjangoModelFactory or SubFactory class.
|
[
"Checks",
"that",
"node",
"is",
"derivative",
"of",
"DjangoModelFactory",
"or",
"SubFactory",
"class",
"."
] |
0bbee433519f48134df4a797341c4196546a454e
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L428-L449
|
8,642
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
is_model_mpttmeta_subclass
|
def is_model_mpttmeta_subclass(node):
"""Checks that node is derivative of MPTTMeta class."""
if node.name != 'MPTTMeta' or not isinstance(node.parent, ClassDef):
return False
parents = ('django.db.models.base.Model',
'.Model', # for the transformed version used in this plugin
'django.forms.forms.Form',
'.Form',
'django.forms.models.ModelForm',
'.ModelForm')
return node_is_subclass(node.parent, *parents)
|
python
|
def is_model_mpttmeta_subclass(node):
"""Checks that node is derivative of MPTTMeta class."""
if node.name != 'MPTTMeta' or not isinstance(node.parent, ClassDef):
return False
parents = ('django.db.models.base.Model',
'.Model', # for the transformed version used in this plugin
'django.forms.forms.Form',
'.Form',
'django.forms.models.ModelForm',
'.ModelForm')
return node_is_subclass(node.parent, *parents)
|
[
"def",
"is_model_mpttmeta_subclass",
"(",
"node",
")",
":",
"if",
"node",
".",
"name",
"!=",
"'MPTTMeta'",
"or",
"not",
"isinstance",
"(",
"node",
".",
"parent",
",",
"ClassDef",
")",
":",
"return",
"False",
"parents",
"=",
"(",
"'django.db.models.base.Model'",
",",
"'.Model'",
",",
"# for the transformed version used in this plugin",
"'django.forms.forms.Form'",
",",
"'.Form'",
",",
"'django.forms.models.ModelForm'",
",",
"'.ModelForm'",
")",
"return",
"node_is_subclass",
"(",
"node",
".",
"parent",
",",
"*",
"parents",
")"
] |
Checks that node is derivative of MPTTMeta class.
|
[
"Checks",
"that",
"node",
"is",
"derivative",
"of",
"MPTTMeta",
"class",
"."
] |
0bbee433519f48134df4a797341c4196546a454e
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L469-L480
|
8,643
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
_attribute_is_magic
|
def _attribute_is_magic(node, attrs, parents):
"""Checks that node is an attribute used inside one of allowed parents"""
if node.attrname not in attrs:
return False
if not node.last_child():
return False
try:
for cls in node.last_child().inferred():
if isinstance(cls, Super):
cls = cls._self_class # pylint: disable=protected-access
if node_is_subclass(cls, *parents) or cls.qname() in parents:
return True
except InferenceError:
pass
return False
|
python
|
def _attribute_is_magic(node, attrs, parents):
"""Checks that node is an attribute used inside one of allowed parents"""
if node.attrname not in attrs:
return False
if not node.last_child():
return False
try:
for cls in node.last_child().inferred():
if isinstance(cls, Super):
cls = cls._self_class # pylint: disable=protected-access
if node_is_subclass(cls, *parents) or cls.qname() in parents:
return True
except InferenceError:
pass
return False
|
[
"def",
"_attribute_is_magic",
"(",
"node",
",",
"attrs",
",",
"parents",
")",
":",
"if",
"node",
".",
"attrname",
"not",
"in",
"attrs",
":",
"return",
"False",
"if",
"not",
"node",
".",
"last_child",
"(",
")",
":",
"return",
"False",
"try",
":",
"for",
"cls",
"in",
"node",
".",
"last_child",
"(",
")",
".",
"inferred",
"(",
")",
":",
"if",
"isinstance",
"(",
"cls",
",",
"Super",
")",
":",
"cls",
"=",
"cls",
".",
"_self_class",
"# pylint: disable=protected-access",
"if",
"node_is_subclass",
"(",
"cls",
",",
"*",
"parents",
")",
"or",
"cls",
".",
"qname",
"(",
")",
"in",
"parents",
":",
"return",
"True",
"except",
"InferenceError",
":",
"pass",
"return",
"False"
] |
Checks that node is an attribute used inside one of allowed parents
|
[
"Checks",
"that",
"node",
"is",
"an",
"attribute",
"used",
"inside",
"one",
"of",
"allowed",
"parents"
] |
0bbee433519f48134df4a797341c4196546a454e
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L483-L498
|
8,644
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
generic_is_view_attribute
|
def generic_is_view_attribute(parents, attrs):
"""Generates is_X_attribute function for given parents and attrs."""
def is_attribute(node):
return _attribute_is_magic(node, attrs, parents)
return is_attribute
|
python
|
def generic_is_view_attribute(parents, attrs):
"""Generates is_X_attribute function for given parents and attrs."""
def is_attribute(node):
return _attribute_is_magic(node, attrs, parents)
return is_attribute
|
[
"def",
"generic_is_view_attribute",
"(",
"parents",
",",
"attrs",
")",
":",
"def",
"is_attribute",
"(",
"node",
")",
":",
"return",
"_attribute_is_magic",
"(",
"node",
",",
"attrs",
",",
"parents",
")",
"return",
"is_attribute"
] |
Generates is_X_attribute function for given parents and attrs.
|
[
"Generates",
"is_X_attribute",
"function",
"for",
"given",
"parents",
"and",
"attrs",
"."
] |
0bbee433519f48134df4a797341c4196546a454e
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L621-L625
|
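The factory above just bakes parents and attrs into a closure and returns the predicate. The same pattern in miniature (checker and inputs invented):

def make_suffix_checker(suffixes):
    def is_match(name):
        return name.endswith(tuple(suffixes))
    return is_match

is_view_name = make_suffix_checker(['View', 'ViewSet'])
print(is_view_name('UserListView'))  # True
print(is_view_name('UserManager'))   # False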
8,645
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
is_model_view_subclass_method_shouldnt_be_function
|
def is_model_view_subclass_method_shouldnt_be_function(node):
"""Checks that node is get or post method of the View class."""
if node.name not in ('get', 'post'):
return False
parent = node.parent
while parent and not isinstance(parent, ScopedClass):
parent = parent.parent
subclass = ('django.views.View',
'django.views.generic.View',
'django.views.generic.base.View',)
return parent is not None and node_is_subclass(parent, *subclass)
|
python
|
def is_model_view_subclass_method_shouldnt_be_function(node):
"""Checks that node is get or post method of the View class."""
if node.name not in ('get', 'post'):
return False
parent = node.parent
while parent and not isinstance(parent, ScopedClass):
parent = parent.parent
subclass = ('django.views.View',
'django.views.generic.View',
'django.views.generic.base.View',)
return parent is not None and node_is_subclass(parent, *subclass)
|
[
"def",
"is_model_view_subclass_method_shouldnt_be_function",
"(",
"node",
")",
":",
"if",
"node",
".",
"name",
"not",
"in",
"(",
"'get'",
",",
"'post'",
")",
":",
"return",
"False",
"parent",
"=",
"node",
".",
"parent",
"while",
"parent",
"and",
"not",
"isinstance",
"(",
"parent",
",",
"ScopedClass",
")",
":",
"parent",
"=",
"parent",
".",
"parent",
"subclass",
"=",
"(",
"'django.views.View'",
",",
"'django.views.generic.View'",
",",
"'django.views.generic.base.View'",
",",
")",
"return",
"parent",
"is",
"not",
"None",
"and",
"node_is_subclass",
"(",
"parent",
",",
"*",
"subclass",
")"
] |
Checks that node is get or post method of the View class.
|
[
"Checks",
"that",
"node",
"is",
"get",
"or",
"post",
"method",
"of",
"the",
"View",
"class",
"."
] |
0bbee433519f48134df4a797341c4196546a454e
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L628-L641
|
8,646
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
is_model_media_valid_attributes
|
def is_model_media_valid_attributes(node):
"""Suppress warnings for valid attributes of Media class."""
if node.name not in ('js', ):
return False
parent = node.parent
while parent and not isinstance(parent, ScopedClass):
parent = parent.parent
if parent is None or parent.name != "Media":
return False
return True
|
python
|
def is_model_media_valid_attributes(node):
"""Suppress warnings for valid attributes of Media class."""
if node.name not in ('js', ):
return False
parent = node.parent
while parent and not isinstance(parent, ScopedClass):
parent = parent.parent
if parent is None or parent.name != "Media":
return False
return True
|
[
"def",
"is_model_media_valid_attributes",
"(",
"node",
")",
":",
"if",
"node",
".",
"name",
"not",
"in",
"(",
"'js'",
",",
")",
":",
"return",
"False",
"parent",
"=",
"node",
".",
"parent",
"while",
"parent",
"and",
"not",
"isinstance",
"(",
"parent",
",",
"ScopedClass",
")",
":",
"parent",
"=",
"parent",
".",
"parent",
"if",
"parent",
"is",
"None",
"or",
"parent",
".",
"name",
"!=",
"\"Media\"",
":",
"return",
"False",
"return",
"True"
] |
Suppress warnings for valid attributes of Media class.
|
[
"Suppress",
"warnings",
"for",
"valid",
"attributes",
"of",
"Media",
"class",
"."
] |
0bbee433519f48134df4a797341c4196546a454e
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L682-L694
|
8,647
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
is_templatetags_module_valid_constant
|
def is_templatetags_module_valid_constant(node):
"""Suppress warnings for valid constants in templatetags module."""
if node.name not in ('register', ):
return False
parent = node.parent
while not isinstance(parent, Module):
parent = parent.parent
if "templatetags." not in parent.name:
return False
return True
|
python
|
def is_templatetags_module_valid_constant(node):
"""Suppress warnings for valid constants in templatetags module."""
if node.name not in ('register', ):
return False
parent = node.parent
while not isinstance(parent, Module):
parent = parent.parent
if "templatetags." not in parent.name:
return False
return True
|
[
"def",
"is_templatetags_module_valid_constant",
"(",
"node",
")",
":",
"if",
"node",
".",
"name",
"not",
"in",
"(",
"'register'",
",",
")",
":",
"return",
"False",
"parent",
"=",
"node",
".",
"parent",
"while",
"not",
"isinstance",
"(",
"parent",
",",
"Module",
")",
":",
"parent",
"=",
"parent",
".",
"parent",
"if",
"\"templatetags.\"",
"not",
"in",
"parent",
".",
"name",
":",
"return",
"False",
"return",
"True"
] |
Suppress warnings for valid constants in templatetags module.
|
[
"Suppress",
"warnings",
"for",
"valid",
"constants",
"in",
"templatetags",
"module",
"."
] |
0bbee433519f48134df4a797341c4196546a454e
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L697-L709
|
8,648
|
PyCQA/pylint-django
|
pylint_django/augmentations/__init__.py
|
is_urls_module_valid_constant
|
def is_urls_module_valid_constant(node):
"""Suppress warnings for valid constants in urls module."""
if node.name not in ('urlpatterns', 'app_name'):
return False
parent = node.parent
while not isinstance(parent, Module):
parent = parent.parent
if not parent.name.endswith('urls'):
return False
return True
|
python
|
def is_urls_module_valid_constant(node):
"""Suppress warnings for valid constants in urls module."""
if node.name not in ('urlpatterns', 'app_name'):
return False
parent = node.parent
while not isinstance(parent, Module):
parent = parent.parent
if not parent.name.endswith('urls'):
return False
return True
|
[
"def",
"is_urls_module_valid_constant",
"(",
"node",
")",
":",
"if",
"node",
".",
"name",
"not",
"in",
"(",
"'urlpatterns'",
",",
"'app_name'",
")",
":",
"return",
"False",
"parent",
"=",
"node",
".",
"parent",
"while",
"not",
"isinstance",
"(",
"parent",
",",
"Module",
")",
":",
"parent",
"=",
"parent",
".",
"parent",
"if",
"not",
"parent",
".",
"name",
".",
"endswith",
"(",
"'urls'",
")",
":",
"return",
"False",
"return",
"True"
] |
Suppress warnings for valid constants in urls module.
|
[
"Suppress",
"warnings",
"for",
"valid",
"constants",
"in",
"urls",
"module",
"."
] |
0bbee433519f48134df4a797341c4196546a454e
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L712-L724
|
8,649
|
PyCQA/pylint-django
|
pylint_django/plugin.py
|
load_configuration
|
def load_configuration(linter):
"""
Amend existing checker config.
"""
name_checker = get_checker(linter, NameChecker)
name_checker.config.good_names += ('qs', 'urlpatterns', 'register', 'app_name', 'handler500')
# we don't care about South migrations
linter.config.black_list += ('migrations', 'south_migrations')
|
python
|
def load_configuration(linter):
"""
Amend existing checker config.
"""
name_checker = get_checker(linter, NameChecker)
name_checker.config.good_names += ('qs', 'urlpatterns', 'register', 'app_name', 'handler500')
# we don't care about South migrations
linter.config.black_list += ('migrations', 'south_migrations')
|
[
"def",
"load_configuration",
"(",
"linter",
")",
":",
"name_checker",
"=",
"get_checker",
"(",
"linter",
",",
"NameChecker",
")",
"name_checker",
".",
"config",
".",
"good_names",
"+=",
"(",
"'qs'",
",",
"'urlpatterns'",
",",
"'register'",
",",
"'app_name'",
",",
"'handler500'",
")",
"# we don't care about South migrations",
"linter",
".",
"config",
".",
"black_list",
"+=",
"(",
"'migrations'",
",",
"'south_migrations'",
")"
] |
Amend existing checker config.
|
[
"Amend",
"existing",
"checker",
"config",
"."
] |
0bbee433519f48134df4a797341c4196546a454e
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/plugin.py#L13-L21
|
8,650
|
PyCQA/pylint-django
|
pylint_django/plugin.py
|
register
|
def register(linter):
"""
Registering additional checkers.
"""
# add all of the checkers
register_checkers(linter)
# register any checking fiddlers
try:
from pylint_django.augmentations import apply_augmentations
apply_augmentations(linter)
except ImportError:
# probably trying to execute pylint_django when Django isn't installed
# in this case the django-not-installed checker will kick-in
pass
if not compat.LOAD_CONFIGURATION_SUPPORTED:
load_configuration(linter)
|
python
|
def register(linter):
"""
Registering additional checkers.
"""
# add all of the checkers
register_checkers(linter)
# register any checking fiddlers
try:
from pylint_django.augmentations import apply_augmentations
apply_augmentations(linter)
except ImportError:
# probably trying to execute pylint_django when Django isn't installed
# in this case the django-not-installed checker will kick-in
pass
if not compat.LOAD_CONFIGURATION_SUPPORTED:
load_configuration(linter)
|
[
"def",
"register",
"(",
"linter",
")",
":",
"# add all of the checkers",
"register_checkers",
"(",
"linter",
")",
"# register any checking fiddlers",
"try",
":",
"from",
"pylint_django",
".",
"augmentations",
"import",
"apply_augmentations",
"apply_augmentations",
"(",
"linter",
")",
"except",
"ImportError",
":",
"# probably trying to execute pylint_django when Django isn't installed",
"# in this case the django-not-installed checker will kick-in",
"pass",
"if",
"not",
"compat",
".",
"LOAD_CONFIGURATION_SUPPORTED",
":",
"load_configuration",
"(",
"linter",
")"
] |
Registering additional checkers.
|
[
"Registering",
"additional",
"checkers",
"."
] |
0bbee433519f48134df4a797341c4196546a454e
|
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/plugin.py#L24-L41
|
8,651
|
05bit/peewee-async
|
peewee_async.py
|
create_object
|
async def create_object(model, **data):
"""Create object asynchronously.
:param model: model class
:param data: data for initializing object
:return: new object saved to database
"""
# NOTE! Here are internals involved:
#
# - obj._data
# - obj._get_pk_value()
# - obj._set_pk_value()
# - obj._prepare_instance()
#
warnings.warn("create_object() is deprecated, Manager.create() "
"should be used instead",
DeprecationWarning)
obj = model(**data)
pk = await insert(model.insert(**dict(obj.__data__)))
if obj._pk is None:
obj._pk = pk
return obj
|
python
|
async def create_object(model, **data):
"""Create object asynchronously.
:param model: model class
:param data: data for initializing object
:return: new object saved to database
"""
# NOTE! Here are internals involved:
#
# - obj._data
# - obj._get_pk_value()
# - obj._set_pk_value()
# - obj._prepare_instance()
#
warnings.warn("create_object() is deprecated, Manager.create() "
"should be used instead",
DeprecationWarning)
obj = model(**data)
pk = await insert(model.insert(**dict(obj.__data__)))
if obj._pk is None:
obj._pk = pk
return obj
|
[
"async",
"def",
"create_object",
"(",
"model",
",",
"*",
"*",
"data",
")",
":",
"# NOTE! Here are internals involved:",
"#",
"# - obj._data",
"# - obj._get_pk_value()",
"# - obj._set_pk_value()",
"# - obj._prepare_instance()",
"#",
"warnings",
".",
"warn",
"(",
"\"create_object() is deprecated, Manager.create() \"",
"\"should be used instead\"",
",",
"DeprecationWarning",
")",
"obj",
"=",
"model",
"(",
"*",
"*",
"data",
")",
"pk",
"=",
"await",
"insert",
"(",
"model",
".",
"insert",
"(",
"*",
"*",
"dict",
"(",
"obj",
".",
"__data__",
")",
")",
")",
"if",
"obj",
".",
"_pk",
"is",
"None",
":",
"obj",
".",
"_pk",
"=",
"pk",
"return",
"obj"
] |
Create object asynchronously.
:param model: model class
:param data: data for initializing object
:return: new object saved to database
|
[
"Create",
"object",
"asynchronously",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L430-L454
|
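A hedged usage sketch for the deprecated helper and its recommended replacement; the database name and model are invented, and nothing runs without a reachable PostgreSQL server:

import asyncio
import peewee
import peewee_async

database = peewee_async.PostgresqlDatabase('test_db')  # assumed reachable
objects = peewee_async.Manager(database)

class User(peewee.Model):
    name = peewee.CharField()

    class Meta:
        database = database

async def demo():
    user = await peewee_async.create_object(User, name='alice')  # deprecated helper
    other = await objects.create(User, name='bob')               # preferred Manager API
    return user, other

asyncio.get_event_loop().run_until_complete(demo())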
8,652
|
05bit/peewee-async
|
peewee_async.py
|
get_object
|
async def get_object(source, *args):
"""Get object asynchronously.
:param source: model class or query to get object from
:param args: lookup parameters
:return: model instance or raises ``peewee.DoesNotExist`` if object not
found
"""
warnings.warn("get_object() is deprecated, Manager.get() "
"should be used instead",
DeprecationWarning)
if isinstance(source, peewee.Query):
query = source
model = query.model
else:
query = source.select()
model = source
# Return first object from query
for obj in (await select(query.where(*args))):
return obj
# No objects found
raise model.DoesNotExist
|
python
|
async def get_object(source, *args):
"""Get object asynchronously.
:param source: model class or query to get object from
:param args: lookup parameters
:return: model instance or raises ``peewee.DoesNotExist`` if object not
found
"""
warnings.warn("get_object() is deprecated, Manager.get() "
"should be used instead",
DeprecationWarning)
if isinstance(source, peewee.Query):
query = source
model = query.model
else:
query = source.select()
model = source
# Return first object from query
for obj in (await select(query.where(*args))):
return obj
# No objects found
raise model.DoesNotExist
|
[
"async",
"def",
"get_object",
"(",
"source",
",",
"*",
"args",
")",
":",
"warnings",
".",
"warn",
"(",
"\"get_object() is deprecated, Manager.get() \"",
"\"should be used instead\"",
",",
"DeprecationWarning",
")",
"if",
"isinstance",
"(",
"source",
",",
"peewee",
".",
"Query",
")",
":",
"query",
"=",
"source",
"model",
"=",
"query",
".",
"model",
"else",
":",
"query",
"=",
"source",
".",
"select",
"(",
")",
"model",
"=",
"source",
"# Return first object from query",
"for",
"obj",
"in",
"(",
"await",
"select",
"(",
"query",
".",
"where",
"(",
"*",
"args",
")",
")",
")",
":",
"return",
"obj",
"# No objects found",
"raise",
"model",
".",
"DoesNotExist"
] |
Get object asynchronously.
:param source: model class or query to get object from
:param args: lookup parameters
:return: model instance or raises ``peewee.DoesNotExist`` if object not
found
|
[
"Get",
"object",
"asynchronously",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L457-L481
|
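Usage sketch, reusing the invented User model and setup from the create_object sketch above; both the model-class and prepared-query forms are accepted:

async def fetch_alice():
    # model-class form: source.select() is built internally
    user = await peewee_async.get_object(User, User.name == 'alice')
    # query form: a prepared SelectQuery is passed through as-is
    user = await peewee_async.get_object(User.select(), User.name == 'alice')
    return user  # User.DoesNotExist propagates if nothing matches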
8,653
|
05bit/peewee-async
|
peewee_async.py
|
delete_object
|
async def delete_object(obj, recursive=False, delete_nullable=False):
"""Delete object asynchronously.
:param obj: object to delete
:param recursive: if ``True`` also delete all other objects that depend on
the object
:param delete_nullable: if ``True`` and the delete is recursive, delete even
'nullable' dependencies
For details please check out `Model.delete_instance()`_ in peewee docs.
.. _Model.delete_instance(): http://peewee.readthedocs.io/en/latest/peewee/
api.html#Model.delete_instance
"""
warnings.warn("delete_object() is deprecated, Manager.delete() "
"should be used instead",
DeprecationWarning)
# Here are private calls involved:
# - obj._pk_expr()
if recursive:
dependencies = obj.dependencies(delete_nullable)
for query, fk in reversed(list(dependencies)):
model = fk.model
if fk.null and not delete_nullable:
await update(model.update(**{fk.name: None}).where(query))
else:
await delete(model.delete().where(query))
result = await delete(obj.delete().where(obj._pk_expr()))
return result
|
python
|
async def delete_object(obj, recursive=False, delete_nullable=False):
"""Delete object asynchronously.
:param obj: object to delete
:param recursive: if ``True`` also delete all other objects that depend on
the object
:param delete_nullable: if ``True`` and the delete is recursive, delete even
'nullable' dependencies
For details please check out `Model.delete_instance()`_ in peewee docs.
.. _Model.delete_instance(): http://peewee.readthedocs.io/en/latest/peewee/
api.html#Model.delete_instance
"""
warnings.warn("delete_object() is deprecated, Manager.delete() "
"should be used instead",
DeprecationWarning)
# Here are private calls involved:
# - obj._pk_expr()
if recursive:
dependencies = obj.dependencies(delete_nullable)
for query, fk in reversed(list(dependencies)):
model = fk.model
if fk.null and not delete_nullable:
await update(model.update(**{fk.name: None}).where(query))
else:
await delete(model.delete().where(query))
result = await delete(obj.delete().where(obj._pk_expr()))
return result
|
[
"async",
"def",
"delete_object",
"(",
"obj",
",",
"recursive",
"=",
"False",
",",
"delete_nullable",
"=",
"False",
")",
":",
"warnings",
".",
"warn",
"(",
"\"delete_object() is deprecated, Manager.delete() \"",
"\"should be used instead\"",
",",
"DeprecationWarning",
")",
"# Here are private calls involved:",
"# - obj._pk_expr()",
"if",
"recursive",
":",
"dependencies",
"=",
"obj",
".",
"dependencies",
"(",
"delete_nullable",
")",
"for",
"query",
",",
"fk",
"in",
"reversed",
"(",
"list",
"(",
"dependencies",
")",
")",
":",
"model",
"=",
"fk",
".",
"model",
"if",
"fk",
".",
"null",
"and",
"not",
"delete_nullable",
":",
"await",
"update",
"(",
"model",
".",
"update",
"(",
"*",
"*",
"{",
"fk",
".",
"name",
":",
"None",
"}",
")",
".",
"where",
"(",
"query",
")",
")",
"else",
":",
"await",
"delete",
"(",
"model",
".",
"delete",
"(",
")",
".",
"where",
"(",
"query",
")",
")",
"result",
"=",
"await",
"delete",
"(",
"obj",
".",
"delete",
"(",
")",
".",
"where",
"(",
"obj",
".",
"_pk_expr",
"(",
")",
")",
")",
"return",
"result"
] |
Delete object asynchronously.
:param obj: object to delete
:param recursive: if ``True`` also delete all other objects that depend on
the object
:param delete_nullable: if ``True`` and the delete is recursive, delete even
'nullable' dependencies
For details please check out `Model.delete_instance()`_ in peewee docs.
.. _Model.delete_instance(): http://peewee.readthedocs.io/en/latest/peewee/
api.html#Model.delete_instance
|
[
"Delete",
"object",
"asynchronously",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L484-L513
|
8,654
|
05bit/peewee-async
|
peewee_async.py
|
update_object
|
async def update_object(obj, only=None):
"""Update object asynchronously.
:param obj: object to update
:param only: list or tuple of fields to update; if `None` then all fields
are updated
This function does the same as `Model.save()`_ for an already saved object,
but it doesn't invoke the ``save()`` method on the model class. That is
important to know if you overrode the save method for your model.
.. _Model.save(): http://peewee.readthedocs.io/en/latest/peewee/
api.html#Model.save
"""
# Here are private calls involved:
#
# - obj._data
# - obj._meta
# - obj._prune_fields()
# - obj._pk_expr()
# - obj._dirty.clear()
#
warnings.warn("update_object() is deprecated, Manager.update() "
"should be used instead",
DeprecationWarning)
field_dict = dict(obj.__data__)
pk_field = obj._meta.primary_key
if only:
field_dict = obj._prune_fields(field_dict, only)
if not isinstance(pk_field, peewee.CompositeKey):
field_dict.pop(pk_field.name, None)
else:
field_dict = obj._prune_fields(field_dict, obj.dirty_fields)
rows = await update(obj.update(**field_dict).where(obj._pk_expr()))
obj._dirty.clear()
return rows
|
python
|
async def update_object(obj, only=None):
"""Update object asynchronously.
:param obj: object to update
:param only: list or tuple of fields to update; if `None` then all fields
are updated
This function does the same as `Model.save()`_ for an already saved object,
but it doesn't invoke the ``save()`` method on the model class. That is
important to know if you overrode the save method for your model.
.. _Model.save(): http://peewee.readthedocs.io/en/latest/peewee/
api.html#Model.save
"""
# Here are private calls involved:
#
# - obj._data
# - obj._meta
# - obj._prune_fields()
# - obj._pk_expr()
# - obj._dirty.clear()
#
warnings.warn("update_object() is deprecated, Manager.update() "
"should be used instead",
DeprecationWarning)
field_dict = dict(obj.__data__)
pk_field = obj._meta.primary_key
if only:
field_dict = obj._prune_fields(field_dict, only)
if not isinstance(pk_field, peewee.CompositeKey):
field_dict.pop(pk_field.name, None)
else:
field_dict = obj._prune_fields(field_dict, obj.dirty_fields)
rows = await update(obj.update(**field_dict).where(obj._pk_expr()))
obj._dirty.clear()
return rows
|
[
"async",
"def",
"update_object",
"(",
"obj",
",",
"only",
"=",
"None",
")",
":",
"# Here are private calls involved:",
"#",
"# - obj._data",
"# - obj._meta",
"# - obj._prune_fields()",
"# - obj._pk_expr()",
"# - obj._dirty.clear()",
"#",
"warnings",
".",
"warn",
"(",
"\"update_object() is deprecated, Manager.update() \"",
"\"should be used instead\"",
",",
"DeprecationWarning",
")",
"field_dict",
"=",
"dict",
"(",
"obj",
".",
"__data__",
")",
"pk_field",
"=",
"obj",
".",
"_meta",
".",
"primary_key",
"if",
"only",
":",
"field_dict",
"=",
"obj",
".",
"_prune_fields",
"(",
"field_dict",
",",
"only",
")",
"if",
"not",
"isinstance",
"(",
"pk_field",
",",
"peewee",
".",
"CompositeKey",
")",
":",
"field_dict",
".",
"pop",
"(",
"pk_field",
".",
"name",
",",
"None",
")",
"else",
":",
"field_dict",
"=",
"obj",
".",
"_prune_fields",
"(",
"field_dict",
",",
"obj",
".",
"dirty_fields",
")",
"rows",
"=",
"await",
"update",
"(",
"obj",
".",
"update",
"(",
"*",
"*",
"field_dict",
")",
".",
"where",
"(",
"obj",
".",
"_pk_expr",
"(",
")",
")",
")",
"obj",
".",
"_dirty",
".",
"clear",
"(",
")",
"return",
"rows"
] |
Update object asynchronously.
:param obj: object to update
:param only: list or tuple of fields to update; if `None` then all fields
are updated
This function does the same as `Model.save()`_ for an already saved object,
but it doesn't invoke the ``save()`` method on the model class. That is
important to know if you overrode the save method for your model.
.. _Model.save(): http://peewee.readthedocs.io/en/latest/peewee/
api.html#Model.save
|
[
"Update",
"object",
"asynchronously",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L516-L555
|
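Usage sketch (same invented User model as above); only= limits the written columns, and the return value is the number of rows updated:

async def rename(user):
    user.name = 'carol'
    # write only the changed column; the primary key is popped from the field dict
    return await peewee_async.update_object(user, only=[User.name])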
8,655
|
05bit/peewee-async
|
peewee_async.py
|
select
|
async def select(query):
"""Perform SELECT query asynchronously.
"""
assert isinstance(query, peewee.SelectQuery),\
("Error, trying to run select coroutine"
"with wrong query class %s" % str(query))
cursor = await _execute_query_async(query)
result = AsyncQueryWrapper(cursor=cursor, query=query)
try:
while True:
await result.fetchone()
except GeneratorExit:
pass
finally:
await cursor.release()
return result
|
python
|
async def select(query):
"""Perform SELECT query asynchronously.
"""
assert isinstance(query, peewee.SelectQuery),\
("Error, trying to run select coroutine"
"with wrong query class %s" % str(query))
cursor = await _execute_query_async(query)
result = AsyncQueryWrapper(cursor=cursor, query=query)
try:
while True:
await result.fetchone()
except GeneratorExit:
pass
finally:
await cursor.release()
return result
|
[
"async",
"def",
"select",
"(",
"query",
")",
":",
"assert",
"isinstance",
"(",
"query",
",",
"peewee",
".",
"SelectQuery",
")",
",",
"(",
"\"Error, trying to run select coroutine\"",
"\"with wrong query class %s\"",
"%",
"str",
"(",
"query",
")",
")",
"cursor",
"=",
"await",
"_execute_query_async",
"(",
"query",
")",
"result",
"=",
"AsyncQueryWrapper",
"(",
"cursor",
"=",
"cursor",
",",
"query",
"=",
"query",
")",
"try",
":",
"while",
"True",
":",
"await",
"result",
".",
"fetchone",
"(",
")",
"except",
"GeneratorExit",
":",
"pass",
"finally",
":",
"await",
"cursor",
".",
"release",
"(",
")",
"return",
"result"
] |
Perform SELECT query asynchronously.
|
[
"Perform",
"SELECT",
"query",
"asynchronously",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L558-L577
|
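Usage sketch: the wrapper takes a peewee SelectQuery, fully fetches it, and returns an iterable result (User is the invented model from the earlier sketches):

async def list_names():
    rows = await peewee_async.select(User.select().where(User.name != ''))
    return [row.name for row in rows]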
8,656
|
05bit/peewee-async
|
peewee_async.py
|
insert
|
async def insert(query):
"""Perform INSERT query asynchronously. Returns last insert ID.
This function is called by object.create for single objects only.
"""
assert isinstance(query, peewee.Insert),\
("Error, trying to run insert coroutine"
"with wrong query class %s" % str(query))
cursor = await _execute_query_async(query)
try:
if query._returning:
row = await cursor.fetchone()
result = row[0]
else:
database = _query_db(query)
last_id = await database.last_insert_id_async(cursor)
result = last_id
finally:
await cursor.release()
return result
|
python
|
async def insert(query):
"""Perform INSERT query asynchronously. Returns last insert ID.
This function is called by object.create for single objects only.
"""
assert isinstance(query, peewee.Insert),\
("Error, trying to run insert coroutine"
"with wrong query class %s" % str(query))
cursor = await _execute_query_async(query)
try:
if query._returning:
row = await cursor.fetchone()
result = row[0]
else:
database = _query_db(query)
last_id = await database.last_insert_id_async(cursor)
result = last_id
finally:
await cursor.release()
return result
|
[
"async",
"def",
"insert",
"(",
"query",
")",
":",
"assert",
"isinstance",
"(",
"query",
",",
"peewee",
".",
"Insert",
")",
",",
"(",
"\"Error, trying to run insert coroutine\"",
"\"with wrong query class %s\"",
"%",
"str",
"(",
"query",
")",
")",
"cursor",
"=",
"await",
"_execute_query_async",
"(",
"query",
")",
"try",
":",
"if",
"query",
".",
"_returning",
":",
"row",
"=",
"await",
"cursor",
".",
"fetchone",
"(",
")",
"result",
"=",
"row",
"[",
"0",
"]",
"else",
":",
"database",
"=",
"_query_db",
"(",
"query",
")",
"last_id",
"=",
"await",
"database",
".",
"last_insert_id_async",
"(",
"cursor",
")",
"result",
"=",
"last_id",
"finally",
":",
"await",
"cursor",
".",
"release",
"(",
")",
"return",
"result"
] |
Perform INSERT query asynchronously. Returns last insert ID.
This function is called by object.create for single objects only.
|
[
"Perform",
"INSERT",
"query",
"asynchronously",
".",
"Returns",
"last",
"insert",
"ID",
".",
"This",
"function",
"is",
"called",
"by",
"object",
".",
"create",
"for",
"single",
"objects",
"only",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L580-L601
|
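Continuing the hypothetical User model from the previous sketch: insert() resolves to the new primary key, or to the first RETURNING value when the query declares one.

async def add_user(name):
    user_id = await peewee_async.insert(User.insert(name=name))
    return user_id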
8,657
|
05bit/peewee-async
|
peewee_async.py
|
update
|
async def update(query):
"""Perform UPDATE query asynchronously. Returns number of rows updated.
"""
assert isinstance(query, peewee.Update),\
("Error, trying to run update coroutine"
"with wrong query class %s" % str(query))
cursor = await _execute_query_async(query)
rowcount = cursor.rowcount
await cursor.release()
return rowcount
|
python
|
async def update(query):
"""Perform UPDATE query asynchronously. Returns number of rows updated.
"""
assert isinstance(query, peewee.Update),\
("Error, trying to run update coroutine"
"with wrong query class %s" % str(query))
cursor = await _execute_query_async(query)
rowcount = cursor.rowcount
await cursor.release()
return rowcount
|
[
"async",
"def",
"update",
"(",
"query",
")",
":",
"assert",
"isinstance",
"(",
"query",
",",
"peewee",
".",
"Update",
")",
",",
"(",
"\"Error, trying to run update coroutine\"",
"\"with wrong query class %s\"",
"%",
"str",
"(",
"query",
")",
")",
"cursor",
"=",
"await",
"_execute_query_async",
"(",
"query",
")",
"rowcount",
"=",
"cursor",
".",
"rowcount",
"await",
"cursor",
".",
"release",
"(",
")",
"return",
"rowcount"
] |
Perform UPDATE query asynchronously. Returns number of rows updated.
|
[
"Perform",
"UPDATE",
"query",
"asynchronously",
".",
"Returns",
"number",
"of",
"rows",
"updated",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L604-L615
|
8,658
|
05bit/peewee-async
|
peewee_async.py
|
delete
|
async def delete(query):
"""Perform DELETE query asynchronously. Returns number of rows deleted.
"""
assert isinstance(query, peewee.Delete),\
("Error, trying to run delete coroutine"
"with wrong query class %s" % str(query))
cursor = await _execute_query_async(query)
rowcount = cursor.rowcount
await cursor.release()
return rowcount
|
python
|
async def delete(query):
"""Perform DELETE query asynchronously. Returns number of rows deleted.
"""
assert isinstance(query, peewee.Delete),\
("Error, trying to run delete coroutine"
"with wrong query class %s" % str(query))
cursor = await _execute_query_async(query)
rowcount = cursor.rowcount
await cursor.release()
return rowcount
|
[
"async",
"def",
"delete",
"(",
"query",
")",
":",
"assert",
"isinstance",
"(",
"query",
",",
"peewee",
".",
"Delete",
")",
",",
"(",
"\"Error, trying to run delete coroutine\"",
"\"with wrong query class %s\"",
"%",
"str",
"(",
"query",
")",
")",
"cursor",
"=",
"await",
"_execute_query_async",
"(",
"query",
")",
"rowcount",
"=",
"cursor",
".",
"rowcount",
"await",
"cursor",
".",
"release",
"(",
")",
"return",
"rowcount"
] |
Perform DELETE query asynchronously. Returns number of rows deleted.
|
[
"Perform",
"DELETE",
"query",
"asynchronously",
".",
"Returns",
"number",
"of",
"rows",
"deleted",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L618-L629
|
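update() and delete() share one shape and both resolve to cursor.rowcount; a short sketch, again reusing the hypothetical User model:

async def rename_then_purge():
    updated = await peewee_async.update(
        User.update(name='bob').where(User.name == 'alice'))
    deleted = await peewee_async.delete(
        User.delete().where(User.name == 'bob'))
    return updated, deleted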
8,659
|
05bit/peewee-async
|
peewee_async.py
|
sync_unwanted
|
def sync_unwanted(database):
"""Context manager for preventing unwanted sync queries.
    `UnwantedSyncQueryError` exception will be raised on such a query.
NOTE: sync_unwanted() context manager is **deprecated**, use
database's `.allow_sync()` context manager or `Manager.allow_sync()`
context manager.
"""
warnings.warn("sync_unwanted() context manager is deprecated, "
"use database's `.allow_sync()` context manager or "
"`Manager.allow_sync()` context manager. ",
DeprecationWarning)
old_allow_sync = database._allow_sync
database._allow_sync = False
yield
database._allow_sync = old_allow_sync
|
python
|
def sync_unwanted(database):
"""Context manager for preventing unwanted sync queries.
    `UnwantedSyncQueryError` exception will be raised on such a query.
NOTE: sync_unwanted() context manager is **deprecated**, use
database's `.allow_sync()` context manager or `Manager.allow_sync()`
context manager.
"""
warnings.warn("sync_unwanted() context manager is deprecated, "
"use database's `.allow_sync()` context manager or "
"`Manager.allow_sync()` context manager. ",
DeprecationWarning)
old_allow_sync = database._allow_sync
database._allow_sync = False
yield
database._allow_sync = old_allow_sync
|
[
"def",
"sync_unwanted",
"(",
"database",
")",
":",
"warnings",
".",
"warn",
"(",
"\"sync_unwanted() context manager is deprecated, \"",
"\"use database's `.allow_sync()` context manager or \"",
"\"`Manager.allow_sync()` context manager. \"",
",",
"DeprecationWarning",
")",
"old_allow_sync",
"=",
"database",
".",
"_allow_sync",
"database",
".",
"_allow_sync",
"=",
"False",
"yield",
"database",
".",
"_allow_sync",
"=",
"old_allow_sync"
] |
Context manager for preventing unwanted sync queries.
`UnwantedSyncQueryError` exception will be raised on such a query.
NOTE: sync_unwanted() context manager is **deprecated**, use
database's `.allow_sync()` context manager or `Manager.allow_sync()`
context manager.
|
[
"Context",
"manager",
"for",
"preventing",
"unwanted",
"sync",
"queries",
".",
"UnwantedSyncQueryError",
"exception",
"will",
"raise",
"on",
"such",
"query",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L1287-L1302
|
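Only the generator body is captured here; in the full source it is presumably wrapped with contextlib.contextmanager. A sketch of the deprecated call next to the replacement its own warning recommends:

# Deprecated style:
with peewee_async.sync_unwanted(database):
    pass  # sync queries in this block fail, since _allow_sync is False

# Recommended replacement:
with database.allow_sync():
    User.create_table(True)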
8,660
|
05bit/peewee-async
|
peewee_async.py
|
Manager.get
|
async def get(self, source_, *args, **kwargs):
"""Get the model instance.
:param source_: model or base query for lookup
Example::
async def my_async_func():
obj1 = await objects.get(MyModel, id=1)
obj2 = await objects.get(MyModel, MyModel.id==1)
obj3 = await objects.get(MyModel.select().where(MyModel.id==1))
All will return `MyModel` instance with `id = 1`
"""
await self.connect()
if isinstance(source_, peewee.Query):
query = source_
model = query.model
else:
query = source_.select()
model = source_
conditions = list(args) + [(getattr(model, k) == v)
for k, v in kwargs.items()]
if conditions:
query = query.where(*conditions)
try:
result = await self.execute(query)
return list(result)[0]
except IndexError:
raise model.DoesNotExist
|
python
|
async def get(self, source_, *args, **kwargs):
"""Get the model instance.
:param source_: model or base query for lookup
Example::
async def my_async_func():
obj1 = await objects.get(MyModel, id=1)
obj2 = await objects.get(MyModel, MyModel.id==1)
obj3 = await objects.get(MyModel.select().where(MyModel.id==1))
All will return `MyModel` instance with `id = 1`
"""
await self.connect()
if isinstance(source_, peewee.Query):
query = source_
model = query.model
else:
query = source_.select()
model = source_
conditions = list(args) + [(getattr(model, k) == v)
for k, v in kwargs.items()]
if conditions:
query = query.where(*conditions)
try:
result = await self.execute(query)
return list(result)[0]
except IndexError:
raise model.DoesNotExist
|
[
"async",
"def",
"get",
"(",
"self",
",",
"source_",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"await",
"self",
".",
"connect",
"(",
")",
"if",
"isinstance",
"(",
"source_",
",",
"peewee",
".",
"Query",
")",
":",
"query",
"=",
"source_",
"model",
"=",
"query",
".",
"model",
"else",
":",
"query",
"=",
"source_",
".",
"select",
"(",
")",
"model",
"=",
"source_",
"conditions",
"=",
"list",
"(",
"args",
")",
"+",
"[",
"(",
"getattr",
"(",
"model",
",",
"k",
")",
"==",
"v",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
"]",
"if",
"conditions",
":",
"query",
"=",
"query",
".",
"where",
"(",
"*",
"conditions",
")",
"try",
":",
"result",
"=",
"await",
"self",
".",
"execute",
"(",
"query",
")",
"return",
"list",
"(",
"result",
")",
"[",
"0",
"]",
"except",
"IndexError",
":",
"raise",
"model",
".",
"DoesNotExist"
] |
Get the model instance.
:param source_: model or base query for lookup
Example::
async def my_async_func():
obj1 = await objects.get(MyModel, id=1)
obj2 = await objects.get(MyModel, MyModel.id==1)
obj3 = await objects.get(MyModel.select().where(MyModel.id==1))
All will return `MyModel` instance with `id = 1`
|
[
"Get",
"the",
"model",
"instance",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L147-L180
|
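A usage sketch for Manager.get(), reusing the hypothetical database and User model from earlier. The IndexError-to-DoesNotExist translation at the end means callers catch the model's own exception:

objects = peewee_async.Manager(database)

async def fetch_user(user_id):
    try:
        return await objects.get(User, id=user_id)
    except User.DoesNotExist:
        return None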
8,661
|
05bit/peewee-async
|
peewee_async.py
|
Manager.create
|
async def create(self, model_, **data):
"""Create a new object saved to database.
"""
inst = model_(**data)
query = model_.insert(**dict(inst.__data__))
pk = await self.execute(query)
if inst._pk is None:
inst._pk = pk
return inst
|
python
|
async def create(self, model_, **data):
"""Create a new object saved to database.
"""
inst = model_(**data)
query = model_.insert(**dict(inst.__data__))
pk = await self.execute(query)
if inst._pk is None:
inst._pk = pk
return inst
|
[
"async",
"def",
"create",
"(",
"self",
",",
"model_",
",",
"*",
"*",
"data",
")",
":",
"inst",
"=",
"model_",
"(",
"*",
"*",
"data",
")",
"query",
"=",
"model_",
".",
"insert",
"(",
"*",
"*",
"dict",
"(",
"inst",
".",
"__data__",
")",
")",
"pk",
"=",
"await",
"self",
".",
"execute",
"(",
"query",
")",
"if",
"inst",
".",
"_pk",
"is",
"None",
":",
"inst",
".",
"_pk",
"=",
"pk",
"return",
"inst"
] |
Create a new object saved to database.
|
[
"Create",
"a",
"new",
"object",
"saved",
"to",
"database",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L182-L191
|
8,662
|
05bit/peewee-async
|
peewee_async.py
|
Manager.get_or_create
|
async def get_or_create(self, model_, defaults=None, **kwargs):
"""Try to get an object or create it with the specified defaults.
Return 2-tuple containing the model instance and a boolean
indicating whether the instance was created.
"""
try:
return (await self.get(model_, **kwargs)), False
except model_.DoesNotExist:
data = defaults or {}
data.update({k: v for k, v in kwargs.items() if '__' not in k})
return (await self.create(model_, **data)), True
|
python
|
async def get_or_create(self, model_, defaults=None, **kwargs):
"""Try to get an object or create it with the specified defaults.
Return 2-tuple containing the model instance and a boolean
indicating whether the instance was created.
"""
try:
return (await self.get(model_, **kwargs)), False
except model_.DoesNotExist:
data = defaults or {}
data.update({k: v for k, v in kwargs.items() if '__' not in k})
return (await self.create(model_, **data)), True
|
[
"async",
"def",
"get_or_create",
"(",
"self",
",",
"model_",
",",
"defaults",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"(",
"await",
"self",
".",
"get",
"(",
"model_",
",",
"*",
"*",
"kwargs",
")",
")",
",",
"False",
"except",
"model_",
".",
"DoesNotExist",
":",
"data",
"=",
"defaults",
"or",
"{",
"}",
"data",
".",
"update",
"(",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
"if",
"'__'",
"not",
"in",
"k",
"}",
")",
"return",
"(",
"await",
"self",
".",
"create",
"(",
"model_",
",",
"*",
"*",
"data",
")",
")",
",",
"True"
] |
Try to get an object or create it with the specified defaults.
Return 2-tuple containing the model instance and a boolean
indicating whether the instance was created.
|
[
"Try",
"to",
"get",
"an",
"object",
"or",
"create",
"it",
"with",
"the",
"specified",
"defaults",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L193-L204
|
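A sketch of the lookup-then-create round trip with the hypothetical objects manager. Note how kwargs double as lookup filters and row data, while keys containing '__' are treated as lookups only and excluded from the created row:

async def ensure_user(name):
    user, created = await objects.get_or_create(User, name=name)
    return user, created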
8,663
|
05bit/peewee-async
|
peewee_async.py
|
Manager.create_or_get
|
async def create_or_get(self, model_, **kwargs):
"""Try to create new object with specified data. If object already
exists, then try to get it by unique fields.
"""
try:
return (await self.create(model_, **kwargs)), True
except IntegrityErrors:
query = []
for field_name, value in kwargs.items():
field = getattr(model_, field_name)
if field.unique or field.primary_key:
query.append(field == value)
return (await self.get(model_, *query)), False
|
python
|
async def create_or_get(self, model_, **kwargs):
"""Try to create new object with specified data. If object already
exists, then try to get it by unique fields.
"""
try:
return (await self.create(model_, **kwargs)), True
except IntegrityErrors:
query = []
for field_name, value in kwargs.items():
field = getattr(model_, field_name)
if field.unique or field.primary_key:
query.append(field == value)
return (await self.get(model_, *query)), False
|
[
"async",
"def",
"create_or_get",
"(",
"self",
",",
"model_",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"(",
"await",
"self",
".",
"create",
"(",
"model_",
",",
"*",
"*",
"kwargs",
")",
")",
",",
"True",
"except",
"IntegrityErrors",
":",
"query",
"=",
"[",
"]",
"for",
"field_name",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"field",
"=",
"getattr",
"(",
"model_",
",",
"field_name",
")",
"if",
"field",
".",
"unique",
"or",
"field",
".",
"primary_key",
":",
"query",
".",
"append",
"(",
"field",
"==",
"value",
")",
"return",
"(",
"await",
"self",
".",
"get",
"(",
"model_",
",",
"*",
"query",
")",
")",
",",
"False"
] |
Try to create new object with specified data. If object already
exists, then try to get it by unique fields.
|
[
"Try",
"to",
"create",
"new",
"object",
"with",
"specified",
"data",
".",
"If",
"object",
"already",
"exists",
"then",
"try",
"to",
"get",
"it",
"by",
"unique",
"fields",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L249-L261
|
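The mirror-image helper: it attempts the INSERT first and, on an integrity error (IntegrityErrors is evidently a module-level tuple of backend exception classes), falls back to fetching by whichever keyword arguments map to unique or primary-key fields. A hedged sketch, assuming User.name were declared unique:

async def register(name):
    user, created = await objects.create_or_get(User, name=name)
    return user, created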
8,664
|
05bit/peewee-async
|
peewee_async.py
|
Manager._subclassed
|
def _subclassed(base, *classes):
"""Check if all classes are subclassed from base.
"""
return all(map(lambda obj: isinstance(obj, base), classes))
|
python
|
def _subclassed(base, *classes):
"""Check if all classes are subclassed from base.
"""
return all(map(lambda obj: isinstance(obj, base), classes))
|
[
"def",
"_subclassed",
"(",
"base",
",",
"*",
"classes",
")",
":",
"return",
"all",
"(",
"map",
"(",
"lambda",
"obj",
":",
"isinstance",
"(",
"obj",
",",
"base",
")",
",",
"classes",
")",
")"
] |
Check if all objects are instances of base.
|
[
"Check",
"if",
"all",
"classes",
"are",
"subclassed",
"from",
"base",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L382-L385
|
8,665
|
05bit/peewee-async
|
peewee_async.py
|
AsyncQueryWrapper._get_result_wrapper
|
def _get_result_wrapper(self, query):
"""Get result wrapper class.
"""
cursor = RowsCursor(self._rows, self._cursor.description)
return query._get_cursor_wrapper(cursor)
|
python
|
def _get_result_wrapper(self, query):
"""Get result wrapper class.
"""
cursor = RowsCursor(self._rows, self._cursor.description)
return query._get_cursor_wrapper(cursor)
|
[
"def",
"_get_result_wrapper",
"(",
"self",
",",
"query",
")",
":",
"cursor",
"=",
"RowsCursor",
"(",
"self",
".",
"_rows",
",",
"self",
".",
"_cursor",
".",
"description",
")",
"return",
"query",
".",
"_get_cursor_wrapper",
"(",
"cursor",
")"
] |
Get result wrapper class.
|
[
"Get",
"result",
"wrapper",
"class",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L775-L779
|
8,666
|
05bit/peewee-async
|
peewee_async.py
|
AsyncQueryWrapper.fetchone
|
async def fetchone(self):
"""Fetch single row from the cursor.
"""
row = await self._cursor.fetchone()
if not row:
raise GeneratorExit
self._rows.append(row)
|
python
|
async def fetchone(self):
"""Fetch single row from the cursor.
"""
row = await self._cursor.fetchone()
if not row:
raise GeneratorExit
self._rows.append(row)
|
[
"async",
"def",
"fetchone",
"(",
"self",
")",
":",
"row",
"=",
"await",
"self",
".",
"_cursor",
".",
"fetchone",
"(",
")",
"if",
"not",
"row",
":",
"raise",
"GeneratorExit",
"self",
".",
"_rows",
".",
"append",
"(",
"row",
")"
] |
Fetch single row from the cursor.
|
[
"Fetch",
"single",
"row",
"from",
"the",
"cursor",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L781-L787
|
8,667
|
05bit/peewee-async
|
peewee_async.py
|
AsyncDatabase.connect_async
|
async def connect_async(self, loop=None, timeout=None):
"""Set up async connection on specified event loop or
on default event loop.
"""
if self.deferred:
raise Exception("Error, database not properly initialized "
"before opening connection")
if self._async_conn:
return
elif self._async_wait:
await self._async_wait
else:
self._loop = loop
self._async_wait = asyncio.Future(loop=self._loop)
conn = self._async_conn_cls(
database=self.database,
loop=self._loop,
timeout=timeout,
**self.connect_params_async)
try:
await conn.connect()
except Exception as e:
if not self._async_wait.done():
self._async_wait.set_exception(e)
self._async_wait = None
raise
else:
self._task_data = TaskLocals(loop=self._loop)
self._async_conn = conn
self._async_wait.set_result(True)
|
python
|
async def connect_async(self, loop=None, timeout=None):
"""Set up async connection on specified event loop or
on default event loop.
"""
if self.deferred:
raise Exception("Error, database not properly initialized "
"before opening connection")
if self._async_conn:
return
elif self._async_wait:
await self._async_wait
else:
self._loop = loop
self._async_wait = asyncio.Future(loop=self._loop)
conn = self._async_conn_cls(
database=self.database,
loop=self._loop,
timeout=timeout,
**self.connect_params_async)
try:
await conn.connect()
except Exception as e:
if not self._async_wait.done():
self._async_wait.set_exception(e)
self._async_wait = None
raise
else:
self._task_data = TaskLocals(loop=self._loop)
self._async_conn = conn
self._async_wait.set_result(True)
|
[
"async",
"def",
"connect_async",
"(",
"self",
",",
"loop",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"self",
".",
"deferred",
":",
"raise",
"Exception",
"(",
"\"Error, database not properly initialized \"",
"\"before opening connection\"",
")",
"if",
"self",
".",
"_async_conn",
":",
"return",
"elif",
"self",
".",
"_async_wait",
":",
"await",
"self",
".",
"_async_wait",
"else",
":",
"self",
".",
"_loop",
"=",
"loop",
"self",
".",
"_async_wait",
"=",
"asyncio",
".",
"Future",
"(",
"loop",
"=",
"self",
".",
"_loop",
")",
"conn",
"=",
"self",
".",
"_async_conn_cls",
"(",
"database",
"=",
"self",
".",
"database",
",",
"loop",
"=",
"self",
".",
"_loop",
",",
"timeout",
"=",
"timeout",
",",
"*",
"*",
"self",
".",
"connect_params_async",
")",
"try",
":",
"await",
"conn",
".",
"connect",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"not",
"self",
".",
"_async_wait",
".",
"done",
"(",
")",
":",
"self",
".",
"_async_wait",
".",
"set_exception",
"(",
"e",
")",
"self",
".",
"_async_wait",
"=",
"None",
"raise",
"else",
":",
"self",
".",
"_task_data",
"=",
"TaskLocals",
"(",
"loop",
"=",
"self",
".",
"_loop",
")",
"self",
".",
"_async_conn",
"=",
"conn",
"self",
".",
"_async_wait",
".",
"set_result",
"(",
"True",
")"
] |
Set up async connection on specified event loop or
on default event loop.
|
[
"Set",
"up",
"async",
"connection",
"on",
"specified",
"event",
"loop",
"or",
"on",
"default",
"event",
"loop",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L820-L852
|
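A bootstrap sketch for connect_async(), reusing the hypothetical database from earlier. The _async_wait future means concurrent callers await the same in-flight connect rather than opening duplicates; the explicit loop argument follows the pre-3.10 asyncio style used throughout this module:

import asyncio

loop = asyncio.get_event_loop()
loop.run_until_complete(database.connect_async(loop=loop))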
8,668
|
05bit/peewee-async
|
peewee_async.py
|
AsyncDatabase.cursor_async
|
async def cursor_async(self):
"""Acquire async cursor.
"""
await self.connect_async(loop=self._loop)
if self.transaction_depth_async() > 0:
conn = self.transaction_conn_async()
else:
conn = None
try:
return (await self._async_conn.cursor(conn=conn))
except:
await self.close_async()
raise
|
python
|
async def cursor_async(self):
"""Acquire async cursor.
"""
await self.connect_async(loop=self._loop)
if self.transaction_depth_async() > 0:
conn = self.transaction_conn_async()
else:
conn = None
try:
return (await self._async_conn.cursor(conn=conn))
except:
await self.close_async()
raise
|
[
"async",
"def",
"cursor_async",
"(",
"self",
")",
":",
"await",
"self",
".",
"connect_async",
"(",
"loop",
"=",
"self",
".",
"_loop",
")",
"if",
"self",
".",
"transaction_depth_async",
"(",
")",
">",
"0",
":",
"conn",
"=",
"self",
".",
"transaction_conn_async",
"(",
")",
"else",
":",
"conn",
"=",
"None",
"try",
":",
"return",
"(",
"await",
"self",
".",
"_async_conn",
".",
"cursor",
"(",
"conn",
"=",
"conn",
")",
")",
"except",
":",
"await",
"self",
".",
"close_async",
"(",
")",
"raise"
] |
Acquire async cursor.
|
[
"Acquire",
"async",
"cursor",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L854-L868
|
8,669
|
05bit/peewee-async
|
peewee_async.py
|
AsyncDatabase.close_async
|
async def close_async(self):
"""Close async connection.
"""
if self._async_wait:
await self._async_wait
if self._async_conn:
conn = self._async_conn
self._async_conn = None
self._async_wait = None
self._task_data = None
await conn.close()
|
python
|
async def close_async(self):
"""Close async connection.
"""
if self._async_wait:
await self._async_wait
if self._async_conn:
conn = self._async_conn
self._async_conn = None
self._async_wait = None
self._task_data = None
await conn.close()
|
[
"async",
"def",
"close_async",
"(",
"self",
")",
":",
"if",
"self",
".",
"_async_wait",
":",
"await",
"self",
".",
"_async_wait",
"if",
"self",
".",
"_async_conn",
":",
"conn",
"=",
"self",
".",
"_async_conn",
"self",
".",
"_async_conn",
"=",
"None",
"self",
".",
"_async_wait",
"=",
"None",
"self",
".",
"_task_data",
"=",
"None",
"await",
"conn",
".",
"close",
"(",
")"
] |
Close async connection.
|
[
"Close",
"async",
"connection",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L870-L880
|
8,670
|
05bit/peewee-async
|
peewee_async.py
|
AsyncDatabase.push_transaction_async
|
async def push_transaction_async(self):
"""Increment async transaction depth.
"""
await self.connect_async(loop=self.loop)
depth = self.transaction_depth_async()
if not depth:
conn = await self._async_conn.acquire()
self._task_data.set('conn', conn)
self._task_data.set('depth', depth + 1)
|
python
|
async def push_transaction_async(self):
"""Increment async transaction depth.
"""
await self.connect_async(loop=self.loop)
depth = self.transaction_depth_async()
if not depth:
conn = await self._async_conn.acquire()
self._task_data.set('conn', conn)
self._task_data.set('depth', depth + 1)
|
[
"async",
"def",
"push_transaction_async",
"(",
"self",
")",
":",
"await",
"self",
".",
"connect_async",
"(",
"loop",
"=",
"self",
".",
"loop",
")",
"depth",
"=",
"self",
".",
"transaction_depth_async",
"(",
")",
"if",
"not",
"depth",
":",
"conn",
"=",
"await",
"self",
".",
"_async_conn",
".",
"acquire",
"(",
")",
"self",
".",
"_task_data",
".",
"set",
"(",
"'conn'",
",",
"conn",
")",
"self",
".",
"_task_data",
".",
"set",
"(",
"'depth'",
",",
"depth",
"+",
"1",
")"
] |
Increment async transaction depth.
|
[
"Increment",
"async",
"transaction",
"depth",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L882-L890
|
8,671
|
05bit/peewee-async
|
peewee_async.py
|
AsyncDatabase.pop_transaction_async
|
async def pop_transaction_async(self):
"""Decrement async transaction depth.
"""
depth = self.transaction_depth_async()
if depth > 0:
depth -= 1
self._task_data.set('depth', depth)
if depth == 0:
conn = self._task_data.get('conn')
self._async_conn.release(conn)
else:
raise ValueError("Invalid async transaction depth value")
|
python
|
async def pop_transaction_async(self):
"""Decrement async transaction depth.
"""
depth = self.transaction_depth_async()
if depth > 0:
depth -= 1
self._task_data.set('depth', depth)
if depth == 0:
conn = self._task_data.get('conn')
self._async_conn.release(conn)
else:
raise ValueError("Invalid async transaction depth value")
|
[
"async",
"def",
"pop_transaction_async",
"(",
"self",
")",
":",
"depth",
"=",
"self",
".",
"transaction_depth_async",
"(",
")",
"if",
"depth",
">",
"0",
":",
"depth",
"-=",
"1",
"self",
".",
"_task_data",
".",
"set",
"(",
"'depth'",
",",
"depth",
")",
"if",
"depth",
"==",
"0",
":",
"conn",
"=",
"self",
".",
"_task_data",
".",
"get",
"(",
"'conn'",
")",
"self",
".",
"_async_conn",
".",
"release",
"(",
"conn",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid async transaction depth value\"",
")"
] |
Decrement async transaction depth.
|
[
"Decrement",
"async",
"transaction",
"depth",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L892-L903
|
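push/pop keep a per-task depth counter so nested transaction blocks share a single pool connection: only the outermost push acquires it and only the matching pop releases it. A sketch:

async def nested_transactions():
    await database.push_transaction_async()  # depth 0 -> 1, acquires a connection
    await database.push_transaction_async()  # depth 1 -> 2, reuses it
    await database.pop_transaction_async()   # depth 2 -> 1
    await database.pop_transaction_async()   # depth 1 -> 0, releases the connection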
8,672
|
05bit/peewee-async
|
peewee_async.py
|
AsyncDatabase.allow_sync
|
def allow_sync(self):
"""Allow sync queries within context. Close sync
connection on exit if connected.
Example::
with database.allow_sync():
PageBlock.create_table(True)
"""
old_allow_sync = self._allow_sync
self._allow_sync = True
try:
yield
except:
raise
finally:
try:
self.close()
except self.Error:
pass # already closed
self._allow_sync = old_allow_sync
|
python
|
def allow_sync(self):
"""Allow sync queries within context. Close sync
connection on exit if connected.
Example::
with database.allow_sync():
PageBlock.create_table(True)
"""
old_allow_sync = self._allow_sync
self._allow_sync = True
try:
yield
except:
raise
finally:
try:
self.close()
except self.Error:
pass # already closed
self._allow_sync = old_allow_sync
|
[
"def",
"allow_sync",
"(",
"self",
")",
":",
"old_allow_sync",
"=",
"self",
".",
"_allow_sync",
"self",
".",
"_allow_sync",
"=",
"True",
"try",
":",
"yield",
"except",
":",
"raise",
"finally",
":",
"try",
":",
"self",
".",
"close",
"(",
")",
"except",
"self",
".",
"Error",
":",
"pass",
"# already closed",
"self",
".",
"_allow_sync",
"=",
"old_allow_sync"
] |
Allow sync queries within context. Close sync
connection on exit if connected.
Example::
with database.allow_sync():
PageBlock.create_table(True)
|
[
"Allow",
"sync",
"queries",
"within",
"context",
".",
"Close",
"sync",
"connection",
"on",
"exit",
"if",
"connected",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L940-L962
|
8,673
|
05bit/peewee-async
|
peewee_async.py
|
AsyncDatabase.execute_sql
|
def execute_sql(self, *args, **kwargs):
"""Sync execute SQL query, `allow_sync` must be set to True.
"""
assert self._allow_sync, (
"Error, sync query is not allowed! Call the `.set_allow_sync()` "
"or use the `.allow_sync()` context manager.")
if self._allow_sync in (logging.ERROR, logging.WARNING):
logging.log(self._allow_sync,
"Error, sync query is not allowed: %s %s" %
(str(args), str(kwargs)))
return super().execute_sql(*args, **kwargs)
|
python
|
def execute_sql(self, *args, **kwargs):
"""Sync execute SQL query, `allow_sync` must be set to True.
"""
assert self._allow_sync, (
"Error, sync query is not allowed! Call the `.set_allow_sync()` "
"or use the `.allow_sync()` context manager.")
if self._allow_sync in (logging.ERROR, logging.WARNING):
logging.log(self._allow_sync,
"Error, sync query is not allowed: %s %s" %
(str(args), str(kwargs)))
return super().execute_sql(*args, **kwargs)
|
[
"def",
"execute_sql",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"self",
".",
"_allow_sync",
",",
"(",
"\"Error, sync query is not allowed! Call the `.set_allow_sync()` \"",
"\"or use the `.allow_sync()` context manager.\"",
")",
"if",
"self",
".",
"_allow_sync",
"in",
"(",
"logging",
".",
"ERROR",
",",
"logging",
".",
"WARNING",
")",
":",
"logging",
".",
"log",
"(",
"self",
".",
"_allow_sync",
",",
"\"Error, sync query is not allowed: %s %s\"",
"%",
"(",
"str",
"(",
"args",
")",
",",
"str",
"(",
"kwargs",
")",
")",
")",
"return",
"super",
"(",
")",
".",
"execute_sql",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Sync execute SQL query, `allow_sync` must be set to True.
|
[
"Sync",
"execute",
"SQL",
"query",
"allow_sync",
"must",
"be",
"set",
"to",
"True",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L964-L974
|
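Because logging.ERROR and logging.WARNING are truthy integers, storing one of them in _allow_sync passes the assert yet takes the logging branch, downgrading the hard failure to a logged message. A sketch, assuming the set_allow_sync() setter named in the assertion text:

import logging

database.set_allow_sync(False)            # any sync query trips the assert
database.set_allow_sync(logging.WARNING)  # sync queries run but are logged
database.set_allow_sync(True)             # sync queries run silently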
8,674
|
05bit/peewee-async
|
peewee_async.py
|
AsyncPostgresqlConnection.cursor
|
async def cursor(self, conn=None, *args, **kwargs):
"""Get a cursor for the specified transaction connection
or acquire from the pool.
"""
in_transaction = conn is not None
if not conn:
conn = await self.acquire()
cursor = await conn.cursor(*args, **kwargs)
cursor.release = functools.partial(
self.release_cursor, cursor,
in_transaction=in_transaction)
return cursor
|
python
|
async def cursor(self, conn=None, *args, **kwargs):
"""Get a cursor for the specified transaction connection
or acquire from the pool.
"""
in_transaction = conn is not None
if not conn:
conn = await self.acquire()
cursor = await conn.cursor(*args, **kwargs)
cursor.release = functools.partial(
self.release_cursor, cursor,
in_transaction=in_transaction)
return cursor
|
[
"async",
"def",
"cursor",
"(",
"self",
",",
"conn",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"in_transaction",
"=",
"conn",
"is",
"not",
"None",
"if",
"not",
"conn",
":",
"conn",
"=",
"await",
"self",
".",
"acquire",
"(",
")",
"cursor",
"=",
"await",
"conn",
".",
"cursor",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"cursor",
".",
"release",
"=",
"functools",
".",
"partial",
"(",
"self",
".",
"release_cursor",
",",
"cursor",
",",
"in_transaction",
"=",
"in_transaction",
")",
"return",
"cursor"
] |
Get a cursor for the specified transaction connection
or acquire from the pool.
|
[
"Get",
"a",
"cursor",
"for",
"the",
"specified",
"transaction",
"connection",
"or",
"acquire",
"from",
"the",
"pool",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L1017-L1028
|
8,675
|
05bit/peewee-async
|
peewee_async.py
|
AsyncPostgresqlMixin.connect_params_async
|
def connect_params_async(self):
"""Connection parameters for `aiopg.Connection`
"""
kwargs = self.connect_params.copy()
kwargs.update({
'minsize': self.min_connections,
'maxsize': self.max_connections,
'enable_json': self._enable_json,
'enable_hstore': self._enable_hstore,
})
return kwargs
|
python
|
def connect_params_async(self):
"""Connection parameters for `aiopg.Connection`
"""
kwargs = self.connect_params.copy()
kwargs.update({
'minsize': self.min_connections,
'maxsize': self.max_connections,
'enable_json': self._enable_json,
'enable_hstore': self._enable_hstore,
})
return kwargs
|
[
"def",
"connect_params_async",
"(",
"self",
")",
":",
"kwargs",
"=",
"self",
".",
"connect_params",
".",
"copy",
"(",
")",
"kwargs",
".",
"update",
"(",
"{",
"'minsize'",
":",
"self",
".",
"min_connections",
",",
"'maxsize'",
":",
"self",
".",
"max_connections",
",",
"'enable_json'",
":",
"self",
".",
"_enable_json",
",",
"'enable_hstore'",
":",
"self",
".",
"_enable_hstore",
",",
"}",
")",
"return",
"kwargs"
] |
Connection parameters for `aiopg.Connection`
|
[
"Connection",
"parameters",
"for",
"aiopg",
".",
"Connection"
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L1057-L1067
|
8,676
|
05bit/peewee-async
|
peewee_async.py
|
AsyncMySQLConnection.release_cursor
|
async def release_cursor(self, cursor, in_transaction=False):
"""Release cursor coroutine. Unless in transaction,
the connection is also released back to the pool.
"""
conn = cursor.connection
await cursor.close()
if not in_transaction:
self.release(conn)
|
python
|
async def release_cursor(self, cursor, in_transaction=False):
"""Release cursor coroutine. Unless in transaction,
the connection is also released back to the pool.
"""
conn = cursor.connection
await cursor.close()
if not in_transaction:
self.release(conn)
|
[
"async",
"def",
"release_cursor",
"(",
"self",
",",
"cursor",
",",
"in_transaction",
"=",
"False",
")",
":",
"conn",
"=",
"cursor",
".",
"connection",
"await",
"cursor",
".",
"close",
"(",
")",
"if",
"not",
"in_transaction",
":",
"self",
".",
"release",
"(",
"conn",
")"
] |
Release cursor coroutine. Unless in transaction,
the connection is also released back to the pool.
|
[
"Release",
"cursor",
"coroutine",
".",
"Unless",
"in",
"transaction",
"the",
"connection",
"is",
"also",
"released",
"back",
"to",
"the",
"pool",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L1195-L1202
|
8,677
|
05bit/peewee-async
|
peewee_async.py
|
MySQLDatabase.connect_params_async
|
def connect_params_async(self):
"""Connection parameters for `aiomysql.Connection`
"""
kwargs = self.connect_params.copy()
kwargs.update({
'minsize': self.min_connections,
'maxsize': self.max_connections,
'autocommit': True,
})
return kwargs
|
python
|
def connect_params_async(self):
"""Connection parameters for `aiomysql.Connection`
"""
kwargs = self.connect_params.copy()
kwargs.update({
'minsize': self.min_connections,
'maxsize': self.max_connections,
'autocommit': True,
})
return kwargs
|
[
"def",
"connect_params_async",
"(",
"self",
")",
":",
"kwargs",
"=",
"self",
".",
"connect_params",
".",
"copy",
"(",
")",
"kwargs",
".",
"update",
"(",
"{",
"'minsize'",
":",
"self",
".",
"min_connections",
",",
"'maxsize'",
":",
"self",
".",
"max_connections",
",",
"'autocommit'",
":",
"True",
",",
"}",
")",
"return",
"kwargs"
] |
Connection parameters for `aiomysql.Connection`
|
[
"Connection",
"parameters",
"for",
"aiomysql",
".",
"Connection"
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L1229-L1238
|
8,678
|
05bit/peewee-async
|
peewee_async.py
|
TaskLocals.get
|
def get(self, key, *val):
"""Get value stored for current running task. Optionally
you may provide the default value. Raises `KeyError` when
        it can't get the value and no default one is provided.
"""
data = self.get_data()
if data is not None:
return data.get(key, *val)
if val:
return val[0]
raise KeyError(key)
|
python
|
def get(self, key, *val):
"""Get value stored for current running task. Optionally
you may provide the default value. Raises `KeyError` when
        it can't get the value and no default one is provided.
"""
data = self.get_data()
if data is not None:
return data.get(key, *val)
if val:
return val[0]
raise KeyError(key)
|
[
"def",
"get",
"(",
"self",
",",
"key",
",",
"*",
"val",
")",
":",
"data",
"=",
"self",
".",
"get_data",
"(",
")",
"if",
"data",
"is",
"not",
"None",
":",
"return",
"data",
".",
"get",
"(",
"key",
",",
"*",
"val",
")",
"if",
"val",
":",
"return",
"val",
"[",
"0",
"]",
"raise",
"KeyError",
"(",
"key",
")"
] |
Get value stored for current running task. Optionally
you may provide the default value. Raises `KeyError` when
it can't get the value and no default one is provided.
|
[
"Get",
"value",
"stored",
"for",
"current",
"running",
"task",
".",
"Optionally",
"you",
"may",
"provide",
"the",
"default",
"value",
".",
"Raises",
"KeyError",
"when",
"can",
"t",
"get",
"the",
"value",
"and",
"no",
"default",
"one",
"is",
"provided",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L1475-L1485
|
8,679
|
05bit/peewee-async
|
peewee_async.py
|
TaskLocals.set
|
def set(self, key, val):
"""Set value stored for current running task.
"""
data = self.get_data(True)
if data is not None:
data[key] = val
else:
raise RuntimeError("No task is currently running")
|
python
|
def set(self, key, val):
"""Set value stored for current running task.
"""
data = self.get_data(True)
if data is not None:
data[key] = val
else:
raise RuntimeError("No task is currently running")
|
[
"def",
"set",
"(",
"self",
",",
"key",
",",
"val",
")",
":",
"data",
"=",
"self",
".",
"get_data",
"(",
"True",
")",
"if",
"data",
"is",
"not",
"None",
":",
"data",
"[",
"key",
"]",
"=",
"val",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"No task is currently running\"",
")"
] |
Set value stored for current running task.
|
[
"Set",
"value",
"stored",
"for",
"current",
"running",
"task",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L1487-L1494
|
8,680
|
05bit/peewee-async
|
peewee_async.py
|
TaskLocals.get_data
|
def get_data(self, create=False):
"""Get dict stored for current running task. Return `None`
or an empty dict if no data was found depending on the
`create` argument value.
:param create: if argument is `True`, create empty dict
for task, default: `False`
"""
task = asyncio_current_task(loop=self.loop)
if task:
task_id = id(task)
if create and task_id not in self.data:
self.data[task_id] = {}
task.add_done_callback(self.del_data)
return self.data.get(task_id)
return None
|
python
|
def get_data(self, create=False):
"""Get dict stored for current running task. Return `None`
or an empty dict if no data was found depending on the
`create` argument value.
:param create: if argument is `True`, create empty dict
for task, default: `False`
"""
task = asyncio_current_task(loop=self.loop)
if task:
task_id = id(task)
if create and task_id not in self.data:
self.data[task_id] = {}
task.add_done_callback(self.del_data)
return self.data.get(task_id)
return None
|
[
"def",
"get_data",
"(",
"self",
",",
"create",
"=",
"False",
")",
":",
"task",
"=",
"asyncio_current_task",
"(",
"loop",
"=",
"self",
".",
"loop",
")",
"if",
"task",
":",
"task_id",
"=",
"id",
"(",
"task",
")",
"if",
"create",
"and",
"task_id",
"not",
"in",
"self",
".",
"data",
":",
"self",
".",
"data",
"[",
"task_id",
"]",
"=",
"{",
"}",
"task",
".",
"add_done_callback",
"(",
"self",
".",
"del_data",
")",
"return",
"self",
".",
"data",
".",
"get",
"(",
"task_id",
")",
"return",
"None"
] |
Get dict stored for current running task. Return `None`
or an empty dict if no data was found depending on the
`create` argument value.
:param create: if argument is `True`, create empty dict
for task, default: `False`
|
[
"Get",
"dict",
"stored",
"for",
"current",
"running",
"task",
".",
"Return",
"None",
"or",
"an",
"empty",
"dict",
"if",
"no",
"data",
"was",
"found",
"depending",
"on",
"the",
"create",
"argument",
"value",
"."
] |
d15f4629da1d9975da4ec37306188e68d288c862
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L1496-L1511
|
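TaskLocals keys its storage by id() of the current asyncio task and registers a done callback to evict the entry, giving each task isolated state. A usage sketch of this module-internal class:

import asyncio

loop = asyncio.get_event_loop()
task_data = TaskLocals(loop=loop)

async def handler():
    task_data.set('depth', 1)         # stored under the running task's id
    return task_data.get('depth', 0)  # the extra positional arg acts as a default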
8,681
|
jpype-project/jpype
|
jpype/_linux.py
|
LinuxJVMFinder._get_from_bin
|
def _get_from_bin(self):
"""
Retrieves the Java library path according to the real installation of
the java executable
:return: The path to the JVM library, or None
"""
# Find the real interpreter installation path
java_bin = os.path.realpath(self._java)
if os.path.exists(java_bin):
# Get to the home directory
java_home = os.path.abspath(os.path.join(os.path.dirname(java_bin),
'..'))
# Look for the JVM library
return self.find_libjvm(java_home)
|
python
|
def _get_from_bin(self):
"""
Retrieves the Java library path according to the real installation of
the java executable
:return: The path to the JVM library, or None
"""
# Find the real interpreter installation path
java_bin = os.path.realpath(self._java)
if os.path.exists(java_bin):
# Get to the home directory
java_home = os.path.abspath(os.path.join(os.path.dirname(java_bin),
'..'))
# Look for the JVM library
return self.find_libjvm(java_home)
|
[
"def",
"_get_from_bin",
"(",
"self",
")",
":",
"# Find the real interpreter installation path",
"java_bin",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"self",
".",
"_java",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"java_bin",
")",
":",
"# Get to the home directory",
"java_home",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"java_bin",
")",
",",
"'..'",
")",
")",
"# Look for the JVM library",
"return",
"self",
".",
"find_libjvm",
"(",
"java_home",
")"
] |
Retrieves the Java library path according to the real installation of
the java executable
:return: The path to the JVM library, or None
|
[
"Retrieves",
"the",
"Java",
"library",
"path",
"according",
"to",
"the",
"real",
"installation",
"of",
"the",
"java",
"executable"
] |
3ce953ae7b35244077249ce650b9acd0a7010d17
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_linux.py#L49-L64
|
8,682
|
jpype-project/jpype
|
setupext/build_ext.py
|
BuildExtCommand.initialize_options
|
def initialize_options(self, *args):
"""omit -Wstrict-prototypes from CFLAGS since its only valid for C code."""
import distutils.sysconfig
cfg_vars = distutils.sysconfig.get_config_vars()
# if 'CFLAGS' in cfg_vars:
# cfg_vars['CFLAGS'] = cfg_vars['CFLAGS'].replace('-Wstrict-prototypes', '')
for k,v in cfg_vars.items():
if isinstance(v,str) and v.find("-Wstrict-prototypes"):
v=v.replace('-Wstrict-prototypes', '')
cfg_vars[k]=v
if isinstance(v,str) and v.find("-Wimplicit-function-declaration"):
v=v.replace('-Wimplicit-function-declaration', '')
cfg_vars[k]=v
build_ext.initialize_options(self)
|
python
|
def initialize_options(self, *args):
"""omit -Wstrict-prototypes from CFLAGS since its only valid for C code."""
import distutils.sysconfig
cfg_vars = distutils.sysconfig.get_config_vars()
# if 'CFLAGS' in cfg_vars:
# cfg_vars['CFLAGS'] = cfg_vars['CFLAGS'].replace('-Wstrict-prototypes', '')
for k,v in cfg_vars.items():
if isinstance(v,str) and v.find("-Wstrict-prototypes"):
v=v.replace('-Wstrict-prototypes', '')
cfg_vars[k]=v
if isinstance(v,str) and v.find("-Wimplicit-function-declaration"):
v=v.replace('-Wimplicit-function-declaration', '')
cfg_vars[k]=v
build_ext.initialize_options(self)
|
[
"def",
"initialize_options",
"(",
"self",
",",
"*",
"args",
")",
":",
"import",
"distutils",
".",
"sysconfig",
"cfg_vars",
"=",
"distutils",
".",
"sysconfig",
".",
"get_config_vars",
"(",
")",
"# if 'CFLAGS' in cfg_vars:",
"# cfg_vars['CFLAGS'] = cfg_vars['CFLAGS'].replace('-Wstrict-prototypes', '')",
"for",
"k",
",",
"v",
"in",
"cfg_vars",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"str",
")",
"and",
"v",
".",
"find",
"(",
"\"-Wstrict-prototypes\"",
")",
":",
"v",
"=",
"v",
".",
"replace",
"(",
"'-Wstrict-prototypes'",
",",
"''",
")",
"cfg_vars",
"[",
"k",
"]",
"=",
"v",
"if",
"isinstance",
"(",
"v",
",",
"str",
")",
"and",
"v",
".",
"find",
"(",
"\"-Wimplicit-function-declaration\"",
")",
":",
"v",
"=",
"v",
".",
"replace",
"(",
"'-Wimplicit-function-declaration'",
",",
"''",
")",
"cfg_vars",
"[",
"k",
"]",
"=",
"v",
"build_ext",
".",
"initialize_options",
"(",
"self",
")"
] |
omit -Wstrict-prototypes from CFLAGS since it's only valid for C code.
|
[
"omit",
"-",
"Wstrict",
"-",
"prototypes",
"from",
"CFLAGS",
"since",
"its",
"only",
"valid",
"for",
"C",
"code",
"."
] |
3ce953ae7b35244077249ce650b9acd0a7010d17
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/setupext/build_ext.py#L37-L51
|
8,683
|
jpype-project/jpype
|
jpype/_classpath.py
|
addClassPath
|
def addClassPath(path1):
""" Add a path to the java class path"""
global _CLASSPATHS
path1=_os.path.abspath(path1)
if _sys.platform=='cygwin':
path1=_posix2win(path1)
_CLASSPATHS.add(str(path1))
|
python
|
def addClassPath(path1):
""" Add a path to the java class path"""
global _CLASSPATHS
path1=_os.path.abspath(path1)
if _sys.platform=='cygwin':
path1=_posix2win(path1)
_CLASSPATHS.add(str(path1))
|
[
"def",
"addClassPath",
"(",
"path1",
")",
":",
"global",
"_CLASSPATHS",
"path1",
"=",
"_os",
".",
"path",
".",
"abspath",
"(",
"path1",
")",
"if",
"_sys",
".",
"platform",
"==",
"'cygwin'",
":",
"path1",
"=",
"_posix2win",
"(",
"path1",
")",
"_CLASSPATHS",
".",
"add",
"(",
"str",
"(",
"path1",
")",
")"
] |
Add a path to the java class path
|
[
"Add",
"a",
"path",
"to",
"the",
"java",
"class",
"path"
] |
3ce953ae7b35244077249ce650b9acd0a7010d17
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_classpath.py#L57-L63
|
8,684
|
jpype-project/jpype
|
jpype/_classpath.py
|
getClassPath
|
def getClassPath():
""" Get the full java class path.
Includes user added paths and the environment CLASSPATH.
"""
global _CLASSPATHS
global _SEP
out=[]
for path in _CLASSPATHS:
if path=='':
continue
if path.endswith('*'):
paths=_glob.glob(path+".jar")
            if len(paths)==0:
continue
out.extend(paths)
else:
out.append(path)
return _SEP.join(out)
|
python
|
def getClassPath():
""" Get the full java class path.
Includes user added paths and the environment CLASSPATH.
"""
global _CLASSPATHS
global _SEP
out=[]
for path in _CLASSPATHS:
if path=='':
continue
if path.endswith('*'):
paths=_glob.glob(path+".jar")
            if len(paths)==0:
continue
out.extend(paths)
else:
out.append(path)
return _SEP.join(out)
|
[
"def",
"getClassPath",
"(",
")",
":",
"global",
"_CLASSPATHS",
"global",
"_SEP",
"out",
"=",
"[",
"]",
"for",
"path",
"in",
"_CLASSPATHS",
":",
"if",
"path",
"==",
"''",
":",
"continue",
"if",
"path",
".",
"endswith",
"(",
"'*'",
")",
":",
"paths",
"=",
"_glob",
".",
"glob",
"(",
"path",
"+",
"\".jar\"",
")",
"if",
"len",
"(",
"path",
")",
"==",
"0",
":",
"continue",
"out",
".",
"extend",
"(",
"paths",
")",
"else",
":",
"out",
".",
"append",
"(",
"path",
")",
"return",
"_SEP",
".",
"join",
"(",
"out",
")"
] |
Get the full java class path.
Includes user added paths and the environment CLASSPATH.
|
[
"Get",
"the",
"full",
"java",
"class",
"path",
"."
] |
3ce953ae7b35244077249ce650b9acd0a7010d17
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_classpath.py#L65-L83
|
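A usage sketch for the two classpath helpers, assuming both are re-exported on the jpype package as in this era of the codebase; the jar paths are hypothetical:

import jpype

jpype.addClassPath('/opt/java/libs/mylib.jar')  # stored as an absolute path
jpype.addClassPath('/opt/java/libs/*')          # wildcard, expanded to *.jar entries
print(jpype.getClassPath())                     # entries joined with the platform separator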
8,685
|
jpype-project/jpype
|
jpype/_jvmfinder.py
|
JVMFinder.find_libjvm
|
def find_libjvm(self, java_home):
"""
        Recursively looks for the JVM library file
        :param java_home: A Java home folder
        :return: The first matching library path (raises when none is found)
"""
found_jamvm = False
non_supported_jvm = ('cacao', 'jamvm')
found_non_supported_jvm = False
# Look for the file
for root, _, names in os.walk(java_home):
if self._libfile in names:
# Found it, but check for non supported jvms
candidate = os.path.split(root)[1]
if candidate in non_supported_jvm:
found_non_supported_jvm = True
continue # maybe we will find another one?
return os.path.join(root, self._libfile)
else:
if found_non_supported_jvm:
raise JVMNotSupportedException("Sorry '{0}' is known to be "
"broken. Please ensure your "
"JAVA_HOME contains at least "
"another JVM implementation "
"(eg. server)"
.format(candidate))
# File not found
raise JVMNotFoundException("Sorry no JVM could be found. "
"Please ensure your JAVA_HOME "
"environment variable is pointing "
"to correct installation.")
|
python
|
def find_libjvm(self, java_home):
"""
        Recursively looks for the JVM library file
        :param java_home: A Java home folder
        :return: The first matching library path (raises when none is found)
"""
found_jamvm = False
non_supported_jvm = ('cacao', 'jamvm')
found_non_supported_jvm = False
# Look for the file
for root, _, names in os.walk(java_home):
if self._libfile in names:
# Found it, but check for non supported jvms
candidate = os.path.split(root)[1]
if candidate in non_supported_jvm:
found_non_supported_jvm = True
continue # maybe we will find another one?
return os.path.join(root, self._libfile)
else:
if found_non_supported_jvm:
raise JVMNotSupportedException("Sorry '{0}' is known to be "
"broken. Please ensure your "
"JAVA_HOME contains at least "
"another JVM implementation "
"(eg. server)"
.format(candidate))
# File not found
raise JVMNotFoundException("Sorry no JVM could be found. "
"Please ensure your JAVA_HOME "
"environment variable is pointing "
"to correct installation.")
|
[
"def",
"find_libjvm",
"(",
"self",
",",
"java_home",
")",
":",
"found_jamvm",
"=",
"False",
"non_supported_jvm",
"=",
"(",
"'cacao'",
",",
"'jamvm'",
")",
"found_non_supported_jvm",
"=",
"False",
"# Look for the file",
"for",
"root",
",",
"_",
",",
"names",
"in",
"os",
".",
"walk",
"(",
"java_home",
")",
":",
"if",
"self",
".",
"_libfile",
"in",
"names",
":",
"# Found it, but check for non supported jvms",
"candidate",
"=",
"os",
".",
"path",
".",
"split",
"(",
"root",
")",
"[",
"1",
"]",
"if",
"candidate",
"in",
"non_supported_jvm",
":",
"found_non_supported_jvm",
"=",
"True",
"continue",
"# maybe we will find another one?",
"return",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"self",
".",
"_libfile",
")",
"else",
":",
"if",
"found_non_supported_jvm",
":",
"raise",
"JVMNotSupportedException",
"(",
"\"Sorry '{0}' is known to be \"",
"\"broken. Please ensure your \"",
"\"JAVA_HOME contains at least \"",
"\"another JVM implementation \"",
"\"(eg. server)\"",
".",
"format",
"(",
"candidate",
")",
")",
"# File not found",
"raise",
"JVMNotFoundException",
"(",
"\"Sorry no JVM could be found. \"",
"\"Please ensure your JAVA_HOME \"",
"\"environment variable is pointing \"",
"\"to correct installation.\"",
")"
] |
Recursively looks for the JVM library file
:param java_home: A Java home folder
:return: The first matching library path (raises when none is found)
|
[
"Recursively",
"looks",
"for",
"the",
"given",
"file"
] |
3ce953ae7b35244077249ce650b9acd0a7010d17
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_jvmfinder.py#L48-L82
|
8,686
|
jpype-project/jpype
|
jpype/_jvmfinder.py
|
JVMFinder.find_possible_homes
|
def find_possible_homes(self, parents):
"""
Generator that looks for the first-level children folders that could be
Java installations, according to their name
:param parents: A list of parent directories
:return: The possible JVM installation folders
"""
homes = []
java_names = ('jre', 'jdk', 'java')
for parent in parents:
for childname in sorted(os.listdir(parent)):
# Compute the real path
path = os.path.realpath(os.path.join(parent, childname))
if path in homes or not os.path.isdir(path):
# Already known path, or not a directory -> ignore
continue
# Check if the path seems OK
real_name = os.path.basename(path).lower()
for java_name in java_names:
if java_name in real_name:
# Correct JVM folder name
homes.append(path)
yield path
break
|
python
|
def find_possible_homes(self, parents):
"""
Generator that looks for the first-level children folders that could be
Java installations, according to their name
:param parents: A list of parent directories
:return: The possible JVM installation folders
"""
homes = []
java_names = ('jre', 'jdk', 'java')
for parent in parents:
for childname in sorted(os.listdir(parent)):
# Compute the real path
path = os.path.realpath(os.path.join(parent, childname))
if path in homes or not os.path.isdir(path):
# Already known path, or not a directory -> ignore
continue
# Check if the path seems OK
real_name = os.path.basename(path).lower()
for java_name in java_names:
if java_name in real_name:
# Correct JVM folder name
homes.append(path)
yield path
break
|
[
"def",
"find_possible_homes",
"(",
"self",
",",
"parents",
")",
":",
"homes",
"=",
"[",
"]",
"java_names",
"=",
"(",
"'jre'",
",",
"'jdk'",
",",
"'java'",
")",
"for",
"parent",
"in",
"parents",
":",
"for",
"childname",
"in",
"sorted",
"(",
"os",
".",
"listdir",
"(",
"parent",
")",
")",
":",
"# Compute the real path",
"path",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"parent",
",",
"childname",
")",
")",
"if",
"path",
"in",
"homes",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"# Already known path, or not a directory -> ignore",
"continue",
"# Check if the path seems OK",
"real_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
".",
"lower",
"(",
")",
"for",
"java_name",
"in",
"java_names",
":",
"if",
"java_name",
"in",
"real_name",
":",
"# Correct JVM folder name",
"homes",
".",
"append",
"(",
"path",
")",
"yield",
"path",
"break"
] |
Generator that looks for the first-level children folders that could be
Java installations, according to their name
:param parents: A list of parent directories
:return: The possible JVM installation folders
|
[
"Generator",
"that",
"looks",
"for",
"the",
"first",
"-",
"level",
"children",
"folders",
"that",
"could",
"be",
"Java",
"installations",
"according",
"to",
"their",
"name"
] |
3ce953ae7b35244077249ce650b9acd0a7010d17
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_jvmfinder.py#L85-L111
|
8,687
|
jpype-project/jpype
|
jpype/_jvmfinder.py
|
JVMFinder._get_from_java_home
|
def _get_from_java_home(self):
"""
Retrieves the Java library path according to the JAVA_HOME environment
variable
:return: The path to the JVM library, or None
"""
# Get the environment variable
java_home = os.getenv("JAVA_HOME")
if java_home and os.path.exists(java_home):
# Get the real installation path
java_home = os.path.realpath(java_home)
# Cygwin has a bug in realpath
if not os.path.exists(java_home):
java_home = os.getenv("JAVA_HOME")
# Look for the library file
return self.find_libjvm(java_home)
|
python
|
def _get_from_java_home(self):
"""
Retrieves the Java library path according to the JAVA_HOME environment
variable
:return: The path to the JVM library, or None
"""
# Get the environment variable
java_home = os.getenv("JAVA_HOME")
if java_home and os.path.exists(java_home):
# Get the real installation path
java_home = os.path.realpath(java_home)
# Cygwin has a bug in realpath
if not os.path.exists(java_home):
java_home = os.getenv("JAVA_HOME")
# Look for the library file
return self.find_libjvm(java_home)
|
[
"def",
"_get_from_java_home",
"(",
"self",
")",
":",
"# Get the environment variable",
"java_home",
"=",
"os",
".",
"getenv",
"(",
"\"JAVA_HOME\"",
")",
"if",
"java_home",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"java_home",
")",
":",
"# Get the real installation path",
"java_home",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"java_home",
")",
"# Cygwin has a bug in realpath",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"java_home",
")",
":",
"java_home",
"=",
"os",
".",
"getenv",
"(",
"\"JAVA_HOME\"",
")",
"# Look for the library file",
"return",
"self",
".",
"find_libjvm",
"(",
"java_home",
")"
] |
Retrieves the Java library path according to the JAVA_HOME environment
variable
:return: The path to the JVM library, or None
|
[
"Retrieves",
"the",
"Java",
"library",
"path",
"according",
"to",
"the",
"JAVA_HOME",
"environment",
"variable"
] |
3ce953ae7b35244077249ce650b9acd0a7010d17
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_jvmfinder.py#L156-L174
|
8,688
|
jpype-project/jpype
|
jpype/_jvmfinder.py
|
JVMFinder._get_from_known_locations
|
def _get_from_known_locations(self):
"""
Retrieves the first existing Java library path in the predefined known
locations
:return: The path to the JVM library, or None
"""
for home in self.find_possible_homes(self._locations):
jvm = self.find_libjvm(home)
if jvm is not None:
return jvm
|
python
|
def _get_from_known_locations(self):
"""
Retrieves the first existing Java library path in the predefined known
locations
:return: The path to the JVM library, or None
"""
for home in self.find_possible_homes(self._locations):
jvm = self.find_libjvm(home)
if jvm is not None:
return jvm
|
[
"def",
"_get_from_known_locations",
"(",
"self",
")",
":",
"for",
"home",
"in",
"self",
".",
"find_possible_homes",
"(",
"self",
".",
"_locations",
")",
":",
"jvm",
"=",
"self",
".",
"find_libjvm",
"(",
"home",
")",
"if",
"jvm",
"is",
"not",
"None",
":",
"return",
"jvm"
] |
Retrieves the first existing Java library path in the predefined known
locations
:return: The path to the JVM library, or None
|
[
"Retrieves",
"the",
"first",
"existing",
"Java",
"library",
"path",
"in",
"the",
"predefined",
"known",
"locations"
] |
3ce953ae7b35244077249ce650b9acd0a7010d17
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_jvmfinder.py#L177-L187
|
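A hedged standalone sketch of the same first-match scan; first_existing_lib and the directory walk are illustrative stand-ins for find_possible_homes and find_libjvm, whose internals are not shown in this row:

import os

def first_existing_lib(homes, lib_name="libjvm.so"):
    # Return the first library found under any candidate home, or None.
    for home in homes:
        for root, _dirs, files in os.walk(home):
            if lib_name in files:
                return os.path.join(root, lib_name)
    return None

print(first_existing_lib(["/usr/lib/jvm", "/usr/java"]))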
8,689
|
graphql-python/gql
|
gql-checker/gql_checker/__init__.py
|
ImportVisitor.node_query
|
def node_query(self, node):
"""
Return the query for the gql call node
"""
if isinstance(node, ast.Call):
assert node.args
arg = node.args[0]
if not isinstance(arg, ast.Str):
return
else:
raise TypeError(type(node))
return arg.s
|
python
|
def node_query(self, node):
"""
Return the query for the gql call node
"""
if isinstance(node, ast.Call):
assert node.args
arg = node.args[0]
if not isinstance(arg, ast.Str):
return
else:
raise TypeError(type(node))
return arg.s
|
[
"def",
"node_query",
"(",
"self",
",",
"node",
")",
":",
"if",
"isinstance",
"(",
"node",
",",
"ast",
".",
"Call",
")",
":",
"assert",
"node",
".",
"args",
"arg",
"=",
"node",
".",
"args",
"[",
"0",
"]",
"if",
"not",
"isinstance",
"(",
"arg",
",",
"ast",
".",
"Str",
")",
":",
"return",
"else",
":",
"raise",
"TypeError",
"(",
"type",
"(",
"node",
")",
")",
"return",
"arg",
".",
"s"
] |
Return the query for the gql call node
|
[
"Return",
"the",
"query",
"for",
"the",
"gql",
"call",
"node"
] |
3653bb5260b60a6c72d0bb0137874fb40969a826
|
https://github.com/graphql-python/gql/blob/3653bb5260b60a6c72d0bb0137874fb40969a826/gql-checker/gql_checker/__init__.py#L36-L49
|
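A self-contained re-run of the extraction logic on a throwaway snippet; note that ast.Str is deprecated on recent Pythons, a limitation this example inherits from the row above:

import ast

tree = ast.parse("gql('{ hero { name } }')")
call = tree.body[0].value
assert isinstance(call, ast.Call) and call.args
arg = call.args[0]
print(arg.s if isinstance(arg, ast.Str) else None)  # { hero { name } }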
8,690
|
SmileyChris/easy-thumbnails
|
easy_thumbnails/namers.py
|
default
|
def default(thumbnailer, prepared_options, source_filename,
thumbnail_extension, **kwargs):
"""
Easy-thumbnails' default name processor.
For example: ``source.jpg.100x100_q80_crop_upscale.jpg``
"""
filename_parts = [source_filename]
if ('%(opts)s' in thumbnailer.thumbnail_basedir or
'%(opts)s' in thumbnailer.thumbnail_subdir):
if thumbnail_extension != os.path.splitext(source_filename)[1][1:]:
filename_parts.append(thumbnail_extension)
else:
filename_parts += ['_'.join(prepared_options), thumbnail_extension]
return '.'.join(filename_parts)
|
python
|
def default(thumbnailer, prepared_options, source_filename,
thumbnail_extension, **kwargs):
"""
Easy-thumbnails' default name processor.
For example: ``source.jpg.100x100_q80_crop_upscale.jpg``
"""
filename_parts = [source_filename]
if ('%(opts)s' in thumbnailer.thumbnail_basedir or
'%(opts)s' in thumbnailer.thumbnail_subdir):
if thumbnail_extension != os.path.splitext(source_filename)[1][1:]:
filename_parts.append(thumbnail_extension)
else:
filename_parts += ['_'.join(prepared_options), thumbnail_extension]
return '.'.join(filename_parts)
|
[
"def",
"default",
"(",
"thumbnailer",
",",
"prepared_options",
",",
"source_filename",
",",
"thumbnail_extension",
",",
"*",
"*",
"kwargs",
")",
":",
"filename_parts",
"=",
"[",
"source_filename",
"]",
"if",
"(",
"'%(opts)s'",
"in",
"thumbnailer",
".",
"thumbnail_basedir",
"or",
"'%(opts)s'",
"in",
"thumbnailer",
".",
"thumbnail_subdir",
")",
":",
"if",
"thumbnail_extension",
"!=",
"os",
".",
"path",
".",
"splitext",
"(",
"source_filename",
")",
"[",
"1",
"]",
"[",
"1",
":",
"]",
":",
"filename_parts",
".",
"append",
"(",
"thumbnail_extension",
")",
"else",
":",
"filename_parts",
"+=",
"[",
"'_'",
".",
"join",
"(",
"prepared_options",
")",
",",
"thumbnail_extension",
"]",
"return",
"'.'",
".",
"join",
"(",
"filename_parts",
")"
] |
Easy-thumbnails' default name processor.
For example: ``source.jpg.100x100_q80_crop_upscale.jpg``
|
[
"Easy",
"-",
"thumbnails",
"default",
"name",
"processor",
"."
] |
b08ab44883bf7b221a98dadb9b589cb95d35b0bf
|
https://github.com/SmileyChris/easy-thumbnails/blob/b08ab44883bf7b221a98dadb9b589cb95d35b0bf/easy_thumbnails/namers.py#L7-L21
|
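The naming rule can be reproduced without Django; these inputs are made up, and the common branch (no %(opts)s placeholder in the thumbnail directories) is assumed:

source_filename = 'source.jpg'
prepared_options = ['100x100', 'q80', 'crop', 'upscale']
thumbnail_extension = 'jpg'
filename_parts = [source_filename, '_'.join(prepared_options), thumbnail_extension]
print('.'.join(filename_parts))  # source.jpg.100x100_q80_crop_upscale.jpg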
8,691
|
SmileyChris/easy-thumbnails
|
easy_thumbnails/namers.py
|
hashed
|
def hashed(source_filename, prepared_options, thumbnail_extension, **kwargs):
"""
Generate a short hashed thumbnail filename.
Creates a 12 character url-safe base64 sha1 filename (plus the extension),
for example: ``6qW1buHgLaZ9.jpg``.
"""
parts = ':'.join([source_filename] + prepared_options)
short_sha = hashlib.sha1(parts.encode('utf-8')).digest()
short_hash = base64.urlsafe_b64encode(short_sha[:9]).decode('utf-8')
return '.'.join([short_hash, thumbnail_extension])
|
python
|
def hashed(source_filename, prepared_options, thumbnail_extension, **kwargs):
"""
Generate a short hashed thumbnail filename.
Creates a 12 character url-safe base64 sha1 filename (plus the extension),
for example: ``6qW1buHgLaZ9.jpg``.
"""
parts = ':'.join([source_filename] + prepared_options)
short_sha = hashlib.sha1(parts.encode('utf-8')).digest()
short_hash = base64.urlsafe_b64encode(short_sha[:9]).decode('utf-8')
return '.'.join([short_hash, thumbnail_extension])
|
[
"def",
"hashed",
"(",
"source_filename",
",",
"prepared_options",
",",
"thumbnail_extension",
",",
"*",
"*",
"kwargs",
")",
":",
"parts",
"=",
"':'",
".",
"join",
"(",
"[",
"source_filename",
"]",
"+",
"prepared_options",
")",
"short_sha",
"=",
"hashlib",
".",
"sha1",
"(",
"parts",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"digest",
"(",
")",
"short_hash",
"=",
"base64",
".",
"urlsafe_b64encode",
"(",
"short_sha",
"[",
":",
"9",
"]",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"return",
"'.'",
".",
"join",
"(",
"[",
"short_hash",
",",
"thumbnail_extension",
"]",
")"
] |
Generate a short hashed thumbnail filename.
Creates a 12 character url-safe base64 sha1 filename (plus the extension),
for example: ``6qW1buHgLaZ9.jpg``.
|
[
"Generate",
"a",
"short",
"hashed",
"thumbnail",
"filename",
"."
] |
b08ab44883bf7b221a98dadb9b589cb95d35b0bf
|
https://github.com/SmileyChris/easy-thumbnails/blob/b08ab44883bf7b221a98dadb9b589cb95d35b0bf/easy_thumbnails/namers.py#L34-L44
|
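The recipe is pure stdlib and can be reproduced directly with illustrative inputs: sha1 over "source:option:option", first 9 bytes, url-safe base64 gives the 12-character stem:

import base64
import hashlib

parts = ':'.join(['source.jpg', '100x100', 'q80'])
short_sha = hashlib.sha1(parts.encode('utf-8')).digest()
print(base64.urlsafe_b64encode(short_sha[:9]).decode('utf-8') + '.jpg')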
8,692
|
SmileyChris/easy-thumbnails
|
easy_thumbnails/namers.py
|
source_hashed
|
def source_hashed(source_filename, prepared_options, thumbnail_extension,
**kwargs):
"""
Generate a thumbnail filename of the source filename and options separately
hashed, along with the size.
The format of the filename is a 12 character base64 sha1 hash of the source
filename, the size surrounded by underscores, and an 8 character options
base64 sha1 hash of the thumbnail options. For example:
``1xedFtqllFo9_100x100_QHCa6G1l.jpg``.
"""
source_sha = hashlib.sha1(source_filename.encode('utf-8')).digest()
source_hash = base64.urlsafe_b64encode(source_sha[:9]).decode('utf-8')
parts = ':'.join(prepared_options[1:])
parts_sha = hashlib.sha1(parts.encode('utf-8')).digest()
options_hash = base64.urlsafe_b64encode(parts_sha[:6]).decode('utf-8')
return '%s_%s_%s.%s' % (
source_hash, prepared_options[0], options_hash, thumbnail_extension)
|
python
|
def source_hashed(source_filename, prepared_options, thumbnail_extension,
**kwargs):
"""
Generate a thumbnail filename of the source filename and options separately
hashed, along with the size.
The format of the filename is a 12 character base64 sha1 hash of the source
filename, the size surrounded by underscores, and an 8 character options
base64 sha1 hash of the thumbnail options. For example:
``1xedFtqllFo9_100x100_QHCa6G1l.jpg``.
"""
source_sha = hashlib.sha1(source_filename.encode('utf-8')).digest()
source_hash = base64.urlsafe_b64encode(source_sha[:9]).decode('utf-8')
parts = ':'.join(prepared_options[1:])
parts_sha = hashlib.sha1(parts.encode('utf-8')).digest()
options_hash = base64.urlsafe_b64encode(parts_sha[:6]).decode('utf-8')
return '%s_%s_%s.%s' % (
source_hash, prepared_options[0], options_hash, thumbnail_extension)
|
[
"def",
"source_hashed",
"(",
"source_filename",
",",
"prepared_options",
",",
"thumbnail_extension",
",",
"*",
"*",
"kwargs",
")",
":",
"source_sha",
"=",
"hashlib",
".",
"sha1",
"(",
"source_filename",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"digest",
"(",
")",
"source_hash",
"=",
"base64",
".",
"urlsafe_b64encode",
"(",
"source_sha",
"[",
":",
"9",
"]",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"parts",
"=",
"':'",
".",
"join",
"(",
"prepared_options",
"[",
"1",
":",
"]",
")",
"parts_sha",
"=",
"hashlib",
".",
"sha1",
"(",
"parts",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"digest",
"(",
")",
"options_hash",
"=",
"base64",
".",
"urlsafe_b64encode",
"(",
"parts_sha",
"[",
":",
"6",
"]",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"return",
"'%s_%s_%s.%s'",
"%",
"(",
"source_hash",
",",
"prepared_options",
"[",
"0",
"]",
",",
"options_hash",
",",
"thumbnail_extension",
")"
] |
Generate a thumbnail filename of the source filename and options separately
hashed, along with the size.
The format of the filename is a 12 character base64 sha1 hash of the source
filename, the size surrounded by underscores, and an 8 character options
base64 sha1 hash of the thumbnail options. For example:
``1xedFtqllFo9_100x100_QHCa6G1l.jpg``.
|
[
"Generate",
"a",
"thumbnail",
"filename",
"of",
"the",
"source",
"filename",
"and",
"options",
"separately",
"hashed",
"along",
"with",
"the",
"size",
"."
] |
b08ab44883bf7b221a98dadb9b589cb95d35b0bf
|
https://github.com/SmileyChris/easy-thumbnails/blob/b08ab44883bf7b221a98dadb9b589cb95d35b0bf/easy_thumbnails/namers.py#L47-L64
|
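The same layout reproduced standalone with made-up inputs; prepared_options[0] is the size by the convention stated in the docstring, and 6 hash bytes encode to the 8-character options part:

import base64
import hashlib

prepared_options = ['100x100', 'q80', 'crop']
source_sha = hashlib.sha1('source.jpg'.encode('utf-8')).digest()
source_hash = base64.urlsafe_b64encode(source_sha[:9]).decode('utf-8')
parts_sha = hashlib.sha1(':'.join(prepared_options[1:]).encode('utf-8')).digest()
options_hash = base64.urlsafe_b64encode(parts_sha[:6]).decode('utf-8')
print('%s_%s_%s.jpg' % (source_hash, prepared_options[0], options_hash))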
8,693
|
SmileyChris/easy-thumbnails
|
easy_thumbnails/engine.py
|
save_image
|
def save_image(image, destination=None, filename=None, **options):
"""
Save a PIL image.
"""
if destination is None:
destination = BytesIO()
filename = filename or ''
# Ensure plugins are fully loaded so that Image.EXTENSION is populated.
Image.init()
format = Image.EXTENSION.get(os.path.splitext(filename)[1].lower(), 'JPEG')
if format in ('JPEG', 'WEBP'):
options.setdefault('quality', 85)
saved = False
if format == 'JPEG':
if image.mode.endswith('A'):
# From PIL 4.2, saving an image with a transparency layer raises an
# IOError, so explicitly remove it.
image = image.convert(image.mode[:-1])
if settings.THUMBNAIL_PROGRESSIVE and (
max(image.size) >= settings.THUMBNAIL_PROGRESSIVE):
options['progressive'] = True
try:
image.save(destination, format=format, optimize=1, **options)
saved = True
except IOError:
# Try again, without optimization (PIL can't optimize an image
# larger than ImageFile.MAXBLOCK, which is 64k by default). This
# shouldn't be triggered very often these days, as recent versions
# of pillow avoid the MAXBLOCK limitation.
pass
if not saved:
image.save(destination, format=format, **options)
if hasattr(destination, 'seek'):
destination.seek(0)
return destination
|
python
|
def save_image(image, destination=None, filename=None, **options):
"""
Save a PIL image.
"""
if destination is None:
destination = BytesIO()
filename = filename or ''
# Ensure plugins are fully loaded so that Image.EXTENSION is populated.
Image.init()
format = Image.EXTENSION.get(os.path.splitext(filename)[1].lower(), 'JPEG')
if format in ('JPEG', 'WEBP'):
options.setdefault('quality', 85)
saved = False
if format == 'JPEG':
if image.mode.endswith('A'):
# From PIL 4.2, saving an image with a transparency layer raises an
# IOError, so explicitly remove it.
image = image.convert(image.mode[:-1])
if settings.THUMBNAIL_PROGRESSIVE and (
max(image.size) >= settings.THUMBNAIL_PROGRESSIVE):
options['progressive'] = True
try:
image.save(destination, format=format, optimize=1, **options)
saved = True
except IOError:
# Try again, without optimization (PIL can't optimize an image
# larger than ImageFile.MAXBLOCK, which is 64k by default). This
# shouldn't be triggered very often these days, as recent versions
# of pillow avoid the MAXBLOCK limitation.
pass
if not saved:
image.save(destination, format=format, **options)
if hasattr(destination, 'seek'):
destination.seek(0)
return destination
|
[
"def",
"save_image",
"(",
"image",
",",
"destination",
"=",
"None",
",",
"filename",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"if",
"destination",
"is",
"None",
":",
"destination",
"=",
"BytesIO",
"(",
")",
"filename",
"=",
"filename",
"or",
"''",
"# Ensure plugins are fully loaded so that Image.EXTENSION is populated.",
"Image",
".",
"init",
"(",
")",
"format",
"=",
"Image",
".",
"EXTENSION",
".",
"get",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"1",
"]",
".",
"lower",
"(",
")",
",",
"'JPEG'",
")",
"if",
"format",
"in",
"(",
"'JPEG'",
",",
"'WEBP'",
")",
":",
"options",
".",
"setdefault",
"(",
"'quality'",
",",
"85",
")",
"saved",
"=",
"False",
"if",
"format",
"==",
"'JPEG'",
":",
"if",
"image",
".",
"mode",
".",
"endswith",
"(",
"'A'",
")",
":",
"# From PIL 4.2, saving an image with a transparency layer raises an",
"# IOError, so explicitly remove it.",
"image",
"=",
"image",
".",
"convert",
"(",
"image",
".",
"mode",
"[",
":",
"-",
"1",
"]",
")",
"if",
"settings",
".",
"THUMBNAIL_PROGRESSIVE",
"and",
"(",
"max",
"(",
"image",
".",
"size",
")",
">=",
"settings",
".",
"THUMBNAIL_PROGRESSIVE",
")",
":",
"options",
"[",
"'progressive'",
"]",
"=",
"True",
"try",
":",
"image",
".",
"save",
"(",
"destination",
",",
"format",
"=",
"format",
",",
"optimize",
"=",
"1",
",",
"*",
"*",
"options",
")",
"saved",
"=",
"True",
"except",
"IOError",
":",
"# Try again, without optimization (PIL can't optimize an image",
"# larger than ImageFile.MAXBLOCK, which is 64k by default). This",
"# shouldn't be triggered very often these days, as recent versions",
"# of pillow avoid the MAXBLOCK limitation.",
"pass",
"if",
"not",
"saved",
":",
"image",
".",
"save",
"(",
"destination",
",",
"format",
"=",
"format",
",",
"*",
"*",
"options",
")",
"if",
"hasattr",
"(",
"destination",
",",
"'seek'",
")",
":",
"destination",
".",
"seek",
"(",
"0",
")",
"return",
"destination"
] |
Save a PIL image.
|
[
"Save",
"a",
"PIL",
"image",
"."
] |
b08ab44883bf7b221a98dadb9b589cb95d35b0bf
|
https://github.com/SmileyChris/easy-thumbnails/blob/b08ab44883bf7b221a98dadb9b589cb95d35b0bf/easy_thumbnails/engine.py#L44-L78
|
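A minimal sketch of the optimize-then-retry shape, assuming Pillow is installed; the settings-driven progressive and extension logic from the row above is dropped:

from io import BytesIO
from PIL import Image

image = Image.new('RGB', (32, 32), 'red')
destination = BytesIO()
try:
    image.save(destination, format='JPEG', optimize=1, quality=85)
except IOError:
    # Retry without optimization, mirroring the MAXBLOCK fallback above.
    destination.seek(0)
    destination.truncate()
    image.save(destination, format='JPEG', quality=85)
destination.seek(0)
print(len(destination.getvalue()), 'bytes')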
8,694
|
SmileyChris/easy-thumbnails
|
easy_thumbnails/engine.py
|
generate_source_image
|
def generate_source_image(source_file, processor_options, generators=None,
fail_silently=True):
"""
Processes a source ``File`` through a series of source generators, stopping
once a generator returns an image.
The return value is this image instance or ``None`` if no generators
return an image.
If the source file cannot be opened, it will be set to ``None`` and still
passed to the generators.
"""
processor_options = ThumbnailOptions(processor_options)
# Keep record of whether the source file was originally closed. Not all
# file-like objects provide this attribute, so just fall back to False.
was_closed = getattr(source_file, 'closed', False)
if generators is None:
generators = [
utils.dynamic_import(name)
for name in settings.THUMBNAIL_SOURCE_GENERATORS]
exceptions = []
try:
for generator in generators:
source = source_file
# First try to open the file.
try:
source.open()
except Exception:
# If that failed, maybe the file-like object doesn't support
# reopening so just try seeking back to the start of the file.
try:
source.seek(0)
except Exception:
source = None
try:
image = generator(source, **processor_options)
except Exception as e:
if not fail_silently:
if len(generators) == 1:
raise
exceptions.append(e)
image = None
if image:
return image
finally:
# Attempt to close the file if it was closed originally (but fail
# silently).
if was_closed:
try:
source_file.close()
except Exception:
pass
if exceptions and not fail_silently:
raise NoSourceGenerator(*exceptions)
|
python
|
def generate_source_image(source_file, processor_options, generators=None,
fail_silently=True):
"""
Processes a source ``File`` through a series of source generators, stopping
once a generator returns an image.
The return value is this image instance or ``None`` if no generators
return an image.
If the source file cannot be opened, it will be set to ``None`` and still
passed to the generators.
"""
processor_options = ThumbnailOptions(processor_options)
# Keep record of whether the source file was originally closed. Not all
# file-like objects provide this attribute, so just fall back to False.
was_closed = getattr(source_file, 'closed', False)
if generators is None:
generators = [
utils.dynamic_import(name)
for name in settings.THUMBNAIL_SOURCE_GENERATORS]
exceptions = []
try:
for generator in generators:
source = source_file
# First try to open the file.
try:
source.open()
except Exception:
# If that failed, maybe the file-like object doesn't support
# reopening so just try seeking back to the start of the file.
try:
source.seek(0)
except Exception:
source = None
try:
image = generator(source, **processor_options)
except Exception as e:
if not fail_silently:
if len(generators) == 1:
raise
exceptions.append(e)
image = None
if image:
return image
finally:
# Attempt to close the file if it was closed originally (but fail
# silently).
if was_closed:
try:
source_file.close()
except Exception:
pass
if exceptions and not fail_silently:
raise NoSourceGenerator(*exceptions)
|
[
"def",
"generate_source_image",
"(",
"source_file",
",",
"processor_options",
",",
"generators",
"=",
"None",
",",
"fail_silently",
"=",
"True",
")",
":",
"processor_options",
"=",
"ThumbnailOptions",
"(",
"processor_options",
")",
"# Keep record of whether the source file was originally closed. Not all",
"# file-like objects provide this attribute, so just fall back to False.",
"was_closed",
"=",
"getattr",
"(",
"source_file",
",",
"'closed'",
",",
"False",
")",
"if",
"generators",
"is",
"None",
":",
"generators",
"=",
"[",
"utils",
".",
"dynamic_import",
"(",
"name",
")",
"for",
"name",
"in",
"settings",
".",
"THUMBNAIL_SOURCE_GENERATORS",
"]",
"exceptions",
"=",
"[",
"]",
"try",
":",
"for",
"generator",
"in",
"generators",
":",
"source",
"=",
"source_file",
"# First try to open the file.",
"try",
":",
"source",
".",
"open",
"(",
")",
"except",
"Exception",
":",
"# If that failed, maybe the file-like object doesn't support",
"# reopening so just try seeking back to the start of the file.",
"try",
":",
"source",
".",
"seek",
"(",
"0",
")",
"except",
"Exception",
":",
"source",
"=",
"None",
"try",
":",
"image",
"=",
"generator",
"(",
"source",
",",
"*",
"*",
"processor_options",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"not",
"fail_silently",
":",
"if",
"len",
"(",
"generators",
")",
"==",
"1",
":",
"raise",
"exceptions",
".",
"append",
"(",
"e",
")",
"image",
"=",
"None",
"if",
"image",
":",
"return",
"image",
"finally",
":",
"# Attempt to close the file if it was closed originally (but fail",
"# silently).",
"if",
"was_closed",
":",
"try",
":",
"source_file",
".",
"close",
"(",
")",
"except",
"Exception",
":",
"pass",
"if",
"exceptions",
"and",
"not",
"fail_silently",
":",
"raise",
"NoSourceGenerator",
"(",
"*",
"exceptions",
")"
] |
Processes a source ``File`` through a series of source generators, stopping
once a generator returns an image.
The return value is this image instance or ``None`` if no generators
return an image.
If the source file cannot be opened, it will be set to ``None`` and still
passed to the generators.
|
[
"Processes",
"a",
"source",
"File",
"through",
"a",
"series",
"of",
"source",
"generators",
"stopping",
"once",
"a",
"generator",
"returns",
"an",
"image",
"."
] |
b08ab44883bf7b221a98dadb9b589cb95d35b0bf
|
https://github.com/SmileyChris/easy-thumbnails/blob/b08ab44883bf7b221a98dadb9b589cb95d35b0bf/easy_thumbnails/engine.py#L81-L134
|
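first_image below is a hypothetical reduction of the loop above, keeping only its core shape: try each generator in turn, remember failures, and return the first truthy image.

def first_image(source, generators, **options):
    errors = []
    for generator in generators:
        try:
            image = generator(source, **options)
        except Exception as exc:
            errors.append(exc)
            image = None
        if image:
            return image
    raise RuntimeError(errors)

print(first_image('x', [lambda s: None, lambda s: s.upper()]))  # X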
8,695
|
SmileyChris/easy-thumbnails
|
easy_thumbnails/conf.py
|
AppSettings.revert
|
def revert(self):
"""
Revert any changes made to settings.
"""
for attr, value in self._changed.items():
setattr(django_settings, attr, value)
for attr in self._added:
delattr(django_settings, attr)
self._changed = {}
self._added = []
if self.isolated:
self._isolated_overrides = BaseSettings()
|
python
|
def revert(self):
"""
Revert any changes made to settings.
"""
for attr, value in self._changed.items():
setattr(django_settings, attr, value)
for attr in self._added:
delattr(django_settings, attr)
self._changed = {}
self._added = []
if self.isolated:
self._isolated_overrides = BaseSettings()
|
[
"def",
"revert",
"(",
"self",
")",
":",
"for",
"attr",
",",
"value",
"in",
"self",
".",
"_changed",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"django_settings",
",",
"attr",
",",
"value",
")",
"for",
"attr",
"in",
"self",
".",
"_added",
":",
"delattr",
"(",
"django_settings",
",",
"attr",
")",
"self",
".",
"_changed",
"=",
"{",
"}",
"self",
".",
"_added",
"=",
"[",
"]",
"if",
"self",
".",
"isolated",
":",
"self",
".",
"_isolated_overrides",
"=",
"BaseSettings",
"(",
")"
] |
Revert any changes made to settings.
|
[
"Revert",
"any",
"changes",
"made",
"to",
"settings",
"."
] |
b08ab44883bf7b221a98dadb9b589cb95d35b0bf
|
https://github.com/SmileyChris/easy-thumbnails/blob/b08ab44883bf7b221a98dadb9b589cb95d35b0bf/easy_thumbnails/conf.py#L32-L43
|
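A toy stand-in for the revert bookkeeping with hypothetical names: overwritten values live in a changed mapping, brand-new attribute names in an added list, and both are undone.

class Holder:
    DEBUG = False

changed = {'DEBUG': Holder.DEBUG}
added = ['EXTRA']
Holder.DEBUG = True   # override an existing setting
Holder.EXTRA = 'x'    # add a new one
for attr, value in changed.items():
    setattr(Holder, attr, value)
for attr in added:
    delattr(Holder, attr)
print(Holder.DEBUG, hasattr(Holder, 'EXTRA'))  # False False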
8,696
|
SmileyChris/easy-thumbnails
|
easy_thumbnails/source_generators.py
|
pil_image
|
def pil_image(source, exif_orientation=True, **options):
"""
Try to open the source file directly using PIL, ignoring any errors.
exif_orientation
If EXIF orientation data is present, perform any required reorientation
before passing the data along the processing pipeline.
"""
# Use a BytesIO wrapper because if the source is an incomplete file like
# object, PIL may have problems with it. For example, some image types
# require tell and seek methods that are not present on all storage
# File objects.
if not source:
return
source = BytesIO(source.read())
image = Image.open(source)
# Fully load the image now to catch any problems with the image contents.
try:
# An "Image file truncated" exception can occur for some images that
# are still mostly valid -- we'll swallow the exception.
image.load()
except IOError:
pass
# Try a second time to catch any other potential exceptions.
image.load()
if exif_orientation:
image = utils.exif_orientation(image)
return image
|
python
|
def pil_image(source, exif_orientation=True, **options):
"""
Try to open the source file directly using PIL, ignoring any errors.
exif_orientation
If EXIF orientation data is present, perform any required reorientation
before passing the data along the processing pipeline.
"""
# Use a BytesIO wrapper because if the source is an incomplete file like
# object, PIL may have problems with it. For example, some image types
# require tell and seek methods that are not present on all storage
# File objects.
if not source:
return
source = BytesIO(source.read())
image = Image.open(source)
# Fully load the image now to catch any problems with the image contents.
try:
# An "Image file truncated" exception can occur for some images that
# are still mostly valid -- we'll swallow the exception.
image.load()
except IOError:
pass
# Try a second time to catch any other potential exceptions.
image.load()
if exif_orientation:
image = utils.exif_orientation(image)
return image
|
[
"def",
"pil_image",
"(",
"source",
",",
"exif_orientation",
"=",
"True",
",",
"*",
"*",
"options",
")",
":",
"# Use a BytesIO wrapper because if the source is an incomplete file like",
"# object, PIL may have problems with it. For example, some image types",
"# require tell and seek methods that are not present on all storage",
"# File objects.",
"if",
"not",
"source",
":",
"return",
"source",
"=",
"BytesIO",
"(",
"source",
".",
"read",
"(",
")",
")",
"image",
"=",
"Image",
".",
"open",
"(",
"source",
")",
"# Fully load the image now to catch any problems with the image contents.",
"try",
":",
"# An \"Image file truncated\" exception can occur for some images that",
"# are still mostly valid -- we'll swallow the exception.",
"image",
".",
"load",
"(",
")",
"except",
"IOError",
":",
"pass",
"# Try a second time to catch any other potential exceptions.",
"image",
".",
"load",
"(",
")",
"if",
"exif_orientation",
":",
"image",
"=",
"utils",
".",
"exif_orientation",
"(",
"image",
")",
"return",
"image"
] |
Try to open the source file directly using PIL, ignoring any errors.
exif_orientation
If EXIF orientation data is present, perform any required reorientation
before passing the data along the processing pipeline.
|
[
"Try",
"to",
"open",
"the",
"source",
"file",
"directly",
"using",
"PIL",
"ignoring",
"any",
"errors",
"."
] |
b08ab44883bf7b221a98dadb9b589cb95d35b0bf
|
https://github.com/SmileyChris/easy-thumbnails/blob/b08ab44883bf7b221a98dadb9b589cb95d35b0bf/easy_thumbnails/source_generators.py#L14-L45
|
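A sketch of the defensive open, assuming Pillow; a tiny in-memory PNG stands in for the storage file:

from io import BytesIO
from PIL import Image

raw = BytesIO()
Image.new('RGB', (8, 8)).save(raw, format='PNG')
raw.seek(0)
source = BytesIO(raw.read())  # re-wrap, as the generator above does
image = Image.open(source)
try:
    image.load()  # first load may raise on truncated data
except IOError:
    pass
image.load()      # second load surfaces anything else
print(image.size)  # (8, 8)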
8,697
|
SmileyChris/easy-thumbnails
|
easy_thumbnails/optimize/post_processor.py
|
optimize_thumbnail
|
def optimize_thumbnail(thumbnail):
'''Optimize thumbnail images by removing unnecessary data'''
try:
optimize_command = settings.THUMBNAIL_OPTIMIZE_COMMAND[
determinetype(thumbnail.path)]
if not optimize_command:
return
except (TypeError, KeyError, NotImplementedError):
return
storage = thumbnail.storage
try:
with NamedTemporaryFile() as temp_file:
thumbnail.seek(0)
temp_file.write(thumbnail.read())
temp_file.flush()
optimize_command = optimize_command.format(filename=temp_file.name)
output = check_output(
optimize_command, stderr=subprocess.STDOUT, shell=True)
if output:
logger.warning(
'{0} returned {1}'.format(optimize_command, output))
else:
logger.info('{0} returned nothing'.format(optimize_command))
with open(temp_file.name, 'rb') as f:
thumbnail.file = ContentFile(f.read())
storage.delete(thumbnail.path)
storage.save(thumbnail.path, thumbnail)
except Exception as e:
logger.error(e)
|
python
|
def optimize_thumbnail(thumbnail):
'''Optimize thumbnail images by removing unnecessary data'''
try:
optimize_command = settings.THUMBNAIL_OPTIMIZE_COMMAND[
determinetype(thumbnail.path)]
if not optimize_command:
return
except (TypeError, KeyError, NotImplementedError):
return
storage = thumbnail.storage
try:
with NamedTemporaryFile() as temp_file:
thumbnail.seek(0)
temp_file.write(thumbnail.read())
temp_file.flush()
optimize_command = optimize_command.format(filename=temp_file.name)
output = check_output(
optimize_command, stderr=subprocess.STDOUT, shell=True)
if output:
logger.warning(
'{0} returned {1}'.format(optimize_command, output))
else:
logger.info('{0} returned nothing'.format(optimize_command))
with open(temp_file.name, 'rb') as f:
thumbnail.file = ContentFile(f.read())
storage.delete(thumbnail.path)
storage.save(thumbnail.path, thumbnail)
except Exception as e:
logger.error(e)
|
[
"def",
"optimize_thumbnail",
"(",
"thumbnail",
")",
":",
"try",
":",
"optimize_command",
"=",
"settings",
".",
"THUMBNAIL_OPTIMIZE_COMMAND",
"[",
"determinetype",
"(",
"thumbnail",
".",
"path",
")",
"]",
"if",
"not",
"optimize_command",
":",
"return",
"except",
"(",
"TypeError",
",",
"KeyError",
",",
"NotImplementedError",
")",
":",
"return",
"storage",
"=",
"thumbnail",
".",
"storage",
"try",
":",
"with",
"NamedTemporaryFile",
"(",
")",
"as",
"temp_file",
":",
"thumbnail",
".",
"seek",
"(",
"0",
")",
"temp_file",
".",
"write",
"(",
"thumbnail",
".",
"read",
"(",
")",
")",
"temp_file",
".",
"flush",
"(",
")",
"optimize_command",
"=",
"optimize_command",
".",
"format",
"(",
"filename",
"=",
"temp_file",
".",
"name",
")",
"output",
"=",
"check_output",
"(",
"optimize_command",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
",",
"shell",
"=",
"True",
")",
"if",
"output",
":",
"logger",
".",
"warning",
"(",
"'{0} returned {1}'",
".",
"format",
"(",
"optimize_command",
",",
"output",
")",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'{0} returned nothing'",
".",
"format",
"(",
"optimize_command",
")",
")",
"with",
"open",
"(",
"temp_file",
".",
"name",
",",
"'rb'",
")",
"as",
"f",
":",
"thumbnail",
".",
"file",
"=",
"ContentFile",
"(",
"f",
".",
"read",
"(",
")",
")",
"storage",
".",
"delete",
"(",
"thumbnail",
".",
"path",
")",
"storage",
".",
"save",
"(",
"thumbnail",
".",
"path",
",",
"thumbnail",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"e",
")"
] |
Optimize thumbnail images by removing unnecessary data
|
[
"Optimize",
"thumbnail",
"images",
"by",
"removing",
"unnecessary",
"data"
] |
b08ab44883bf7b221a98dadb9b589cb95d35b0bf
|
https://github.com/SmileyChris/easy-thumbnails/blob/b08ab44883bf7b221a98dadb9b589cb95d35b0bf/easy_thumbnails/optimize/post_processor.py#L37-L65
|
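A POSIX-only sketch of the temp-file round trip with a harmless stand-in command; a real deployment would configure e.g. jpegoptim or optipng via THUMBNAIL_OPTIMIZE_COMMAND:

import subprocess
from tempfile import NamedTemporaryFile

data = b'fake image bytes'
with NamedTemporaryFile() as temp_file:
    temp_file.write(data)
    temp_file.flush()
    command = 'true {filename}'.format(filename=temp_file.name)
    output = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
    with open(temp_file.name, 'rb') as f:
        round_tripped = f.read()
print(output, round_tripped == data)  # b'' True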
8,698
|
SmileyChris/easy-thumbnails
|
easy_thumbnails/templatetags/thumbnail.py
|
thumbnail
|
def thumbnail(parser, token):
"""
Creates a thumbnail of an ImageField.
Basic tag Syntax::
{% thumbnail [source] [size] [options] %}
*source* must be a ``File`` object, usually an Image/FileField of a model
instance.
*size* can either be:
* the name of an alias
* the size in the format ``[width]x[height]`` (for example,
``{% thumbnail person.photo 100x50 %}``) or
* a variable containing a valid size (i.e. either a string in the
``[width]x[height]`` format or a tuple containing two integers):
``{% thumbnail person.photo size_var %}``.
*options* are a space separated list of options which are used when
processing the image to a thumbnail such as ``sharpen``, ``crop`` and
``quality=90``.
If *size* is specified as an alias name, *options* are used to override
and/or supplement the options defined in that alias.
The thumbnail tag can also place a
:class:`~easy_thumbnails.files.ThumbnailFile` object in the context,
providing access to the properties of the thumbnail such as the height and
width::
{% thumbnail [source] [size] [options] as [variable] %}
When ``as [variable]`` is used, the tag doesn't output anything. Instead,
use the variable like a standard ``ImageFieldFile`` object::
{% thumbnail obj.picture 200x200 upscale as thumb %}
<img src="{{ thumb.url }}"
width="{{ thumb.width }}"
height="{{ thumb.height }}" />
**Debugging**
By default, if there is an error creating the thumbnail or resolving the
image variable then the thumbnail tag will just return an empty string (and
if there was a context variable to be set then it will also be set to an
empty string).
For example, you will not see an error if the thumbnail could not
be written to directory because of permissions error. To display those
errors rather than failing silently, set ``THUMBNAIL_DEBUG = True`` in
your Django project's settings module.
"""
args = token.split_contents()
tag = args[0]
# Check to see if we're setting to a context variable.
if len(args) > 4 and args[-2] == 'as':
context_name = args[-1]
args = args[:-2]
else:
context_name = None
if len(args) < 3:
raise TemplateSyntaxError(
"Invalid syntax. Expected "
"'{%% %s source size [option1 option2 ...] %%}' or "
"'{%% %s source size [option1 option2 ...] as variable %%}'" %
(tag, tag))
opts = {}
# The first argument is the source file.
source_var = parser.compile_filter(args[1])
# The second argument is the requested size. If it's the static "10x10"
# format, wrap it in quotes so that it is compiled correctly.
size = args[2]
match = RE_SIZE.match(size)
if match:
size = '"%s"' % size
opts['size'] = parser.compile_filter(size)
# All further arguments are options.
args_list = split_args(args[3:]).items()
for arg, value in args_list:
if arg in VALID_OPTIONS:
if value and value is not True:
value = parser.compile_filter(value)
opts[arg] = value
else:
raise TemplateSyntaxError("'%s' tag received a bad argument: "
"'%s'" % (tag, arg))
return ThumbnailNode(source_var, opts=opts, context_name=context_name)
|
python
|
def thumbnail(parser, token):
"""
Creates a thumbnail of an ImageField.
Basic tag Syntax::
{% thumbnail [source] [size] [options] %}
*source* must be a ``File`` object, usually an Image/FileField of a model
instance.
*size* can either be:
* the name of an alias
* the size in the format ``[width]x[height]`` (for example,
``{% thumbnail person.photo 100x50 %}``) or
* a variable containing a valid size (i.e. either a string in the
``[width]x[height]`` format or a tuple containing two integers):
``{% thumbnail person.photo size_var %}``.
*options* are a space separated list of options which are used when
processing the image to a thumbnail such as ``sharpen``, ``crop`` and
``quality=90``.
If *size* is specified as an alias name, *options* are used to override
and/or supplement the options defined in that alias.
The thumbnail tag can also place a
:class:`~easy_thumbnails.files.ThumbnailFile` object in the context,
providing access to the properties of the thumbnail such as the height and
width::
{% thumbnail [source] [size] [options] as [variable] %}
When ``as [variable]`` is used, the tag doesn't output anything. Instead,
use the variable like a standard ``ImageFieldFile`` object::
{% thumbnail obj.picture 200x200 upscale as thumb %}
<img src="{{ thumb.url }}"
width="{{ thumb.width }}"
height="{{ thumb.height }}" />
**Debugging**
By default, if there is an error creating the thumbnail or resolving the
image variable then the thumbnail tag will just return an empty string (and
if there was a context variable to be set then it will also be set to an
empty string).
For example, you will not see an error if the thumbnail could not
be written to directory because of permissions error. To display those
errors rather than failing silently, set ``THUMBNAIL_DEBUG = True`` in
your Django project's settings module.
"""
args = token.split_contents()
tag = args[0]
# Check to see if we're setting to a context variable.
if len(args) > 4 and args[-2] == 'as':
context_name = args[-1]
args = args[:-2]
else:
context_name = None
if len(args) < 3:
raise TemplateSyntaxError(
"Invalid syntax. Expected "
"'{%% %s source size [option1 option2 ...] %%}' or "
"'{%% %s source size [option1 option2 ...] as variable %%}'" %
(tag, tag))
opts = {}
# The first argument is the source file.
source_var = parser.compile_filter(args[1])
# The second argument is the requested size. If it's the static "10x10"
# format, wrap it in quotes so that it is compiled correctly.
size = args[2]
match = RE_SIZE.match(size)
if match:
size = '"%s"' % size
opts['size'] = parser.compile_filter(size)
# All further arguments are options.
args_list = split_args(args[3:]).items()
for arg, value in args_list:
if arg in VALID_OPTIONS:
if value and value is not True:
value = parser.compile_filter(value)
opts[arg] = value
else:
raise TemplateSyntaxError("'%s' tag received a bad argument: "
"'%s'" % (tag, arg))
return ThumbnailNode(source_var, opts=opts, context_name=context_name)
|
[
"def",
"thumbnail",
"(",
"parser",
",",
"token",
")",
":",
"args",
"=",
"token",
".",
"split_contents",
"(",
")",
"tag",
"=",
"args",
"[",
"0",
"]",
"# Check to see if we're setting to a context variable.",
"if",
"len",
"(",
"args",
")",
">",
"4",
"and",
"args",
"[",
"-",
"2",
"]",
"==",
"'as'",
":",
"context_name",
"=",
"args",
"[",
"-",
"1",
"]",
"args",
"=",
"args",
"[",
":",
"-",
"2",
"]",
"else",
":",
"context_name",
"=",
"None",
"if",
"len",
"(",
"args",
")",
"<",
"3",
":",
"raise",
"TemplateSyntaxError",
"(",
"\"Invalid syntax. Expected \"",
"\"'{%% %s source size [option1 option2 ...] %%}' or \"",
"\"'{%% %s source size [option1 option2 ...] as variable %%}'\"",
"%",
"(",
"tag",
",",
"tag",
")",
")",
"opts",
"=",
"{",
"}",
"# The first argument is the source file.",
"source_var",
"=",
"parser",
".",
"compile_filter",
"(",
"args",
"[",
"1",
"]",
")",
"# The second argument is the requested size. If it's the static \"10x10\"",
"# format, wrap it in quotes so that it is compiled correctly.",
"size",
"=",
"args",
"[",
"2",
"]",
"match",
"=",
"RE_SIZE",
".",
"match",
"(",
"size",
")",
"if",
"match",
":",
"size",
"=",
"'\"%s\"'",
"%",
"size",
"opts",
"[",
"'size'",
"]",
"=",
"parser",
".",
"compile_filter",
"(",
"size",
")",
"# All further arguments are options.",
"args_list",
"=",
"split_args",
"(",
"args",
"[",
"3",
":",
"]",
")",
".",
"items",
"(",
")",
"for",
"arg",
",",
"value",
"in",
"args_list",
":",
"if",
"arg",
"in",
"VALID_OPTIONS",
":",
"if",
"value",
"and",
"value",
"is",
"not",
"True",
":",
"value",
"=",
"parser",
".",
"compile_filter",
"(",
"value",
")",
"opts",
"[",
"arg",
"]",
"=",
"value",
"else",
":",
"raise",
"TemplateSyntaxError",
"(",
"\"'%s' tag received a bad argument: \"",
"\"'%s'\"",
"%",
"(",
"tag",
",",
"arg",
")",
")",
"return",
"ThumbnailNode",
"(",
"source_var",
",",
"opts",
"=",
"opts",
",",
"context_name",
"=",
"context_name",
")"
] |
Creates a thumbnail of an ImageField.
Basic tag Syntax::
{% thumbnail [source] [size] [options] %}
*source* must be a ``File`` object, usually an Image/FileField of a model
instance.
*size* can either be:
* the name of an alias
* the size in the format ``[width]x[height]`` (for example,
``{% thumbnail person.photo 100x50 %}``) or
* a variable containing a valid size (i.e. either a string in the
``[width]x[height]`` format or a tuple containing two integers):
``{% thumbnail person.photo size_var %}``.
*options* are a space separated list of options which are used when
processing the image to a thumbnail such as ``sharpen``, ``crop`` and
``quality=90``.
If *size* is specified as an alias name, *options* are used to override
and/or supplement the options defined in that alias.
The thumbnail tag can also place a
:class:`~easy_thumbnails.files.ThumbnailFile` object in the context,
providing access to the properties of the thumbnail such as the height and
width::
{% thumbnail [source] [size] [options] as [variable] %}
When ``as [variable]`` is used, the tag doesn't output anything. Instead,
use the variable like a standard ``ImageFieldFile`` object::
{% thumbnail obj.picture 200x200 upscale as thumb %}
<img src="{{ thumb.url }}"
width="{{ thumb.width }}"
height="{{ thumb.height }}" />
**Debugging**
By default, if there is an error creating the thumbnail or resolving the
image variable then the thumbnail tag will just return an empty string (and
if there was a context variable to be set then it will also be set to an
empty string).
For example, you will not see an error if the thumbnail could not
be written to directory because of permissions error. To display those
errors rather than failing silently, set ``THUMBNAIL_DEBUG = True`` in
your Django project's settings module.
|
[
"Creates",
"a",
"thumbnail",
"of",
"an",
"ImageField",
"."
] |
b08ab44883bf7b221a98dadb9b589cb95d35b0bf
|
https://github.com/SmileyChris/easy-thumbnails/blob/b08ab44883bf7b221a98dadb9b589cb95d35b0bf/easy_thumbnails/templatetags/thumbnail.py#L135-L232
|
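A toy re-run of the tag's leading argument handling on a hypothetical token string; the real tag receives these pieces via token.split_contents():

args = "thumbnail obj.picture 200x200 upscale as thumb".split()
if len(args) > 4 and args[-2] == 'as':
    context_name = args[-1]
    args = args[:-2]
else:
    context_name = None
print(args, context_name)
# ['thumbnail', 'obj.picture', '200x200', 'upscale'] thumb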
8,699
|
SmileyChris/easy-thumbnails
|
easy_thumbnails/templatetags/thumbnail.py
|
thumbnail_url
|
def thumbnail_url(source, alias):
"""
Return the thumbnail url for a source file using an aliased set of
thumbnail options.
If no matching alias is found, returns an empty string.
Example usage::
<img src="{{ person.photo|thumbnail_url:'small' }}" alt="">
"""
try:
thumb = get_thumbnailer(source)[alias]
except Exception:
return ''
return thumb.url
|
python
|
def thumbnail_url(source, alias):
"""
Return the thumbnail url for a source file using an aliased set of
thumbnail options.
If no matching alias is found, returns an empty string.
Example usage::
<img src="{{ person.photo|thumbnail_url:'small' }}" alt="">
"""
try:
thumb = get_thumbnailer(source)[alias]
except Exception:
return ''
return thumb.url
|
[
"def",
"thumbnail_url",
"(",
"source",
",",
"alias",
")",
":",
"try",
":",
"thumb",
"=",
"get_thumbnailer",
"(",
"source",
")",
"[",
"alias",
"]",
"except",
"Exception",
":",
"return",
"''",
"return",
"thumb",
".",
"url"
] |
Return the thumbnail url for a source file using an aliased set of
thumbnail options.
If no matching alias is found, returns an empty string.
Example usage::
<img src="{{ person.photo|thumbnail_url:'small' }}" alt="">
|
[
"Return",
"the",
"thumbnail",
"url",
"for",
"a",
"source",
"file",
"using",
"an",
"aliased",
"set",
"of",
"thumbnail",
"options",
"."
] |
b08ab44883bf7b221a98dadb9b589cb95d35b0bf
|
https://github.com/SmileyChris/easy-thumbnails/blob/b08ab44883bf7b221a98dadb9b589cb95d35b0bf/easy_thumbnails/templatetags/thumbnail.py#L287-L302
|
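A standalone stand-in for the swallow-all-errors pattern above: any failure in the alias lookup collapses to an empty string so templates never break. The aliases dict is illustrative only.

def safe_url(lookup):
    try:
        return lookup()
    except Exception:
        return ''

aliases = {'small': '/media/thumbs/photo.small.jpg'}
print(safe_url(lambda: aliases['small']))    # /media/thumbs/photo.small.jpg
print(safe_url(lambda: aliases['missing']))  # ''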